into first-tier attributes of the record object, like `details` and
`stacktrace`, for easy consumption.
Note that the termination signal is not always an error; it can also be an
explicit pass signal or an abort/skip signal.
Attributes:
test_name: string, the name of the test.
begin_time: Epoch timestamp of when the test started.
end_time: Epoch timestamp of when the test ended.
uid: Unique identifier of a test.
termination_signal: ExceptionRecord, the main exception of the test.
extra_errors: OrderedDict, all exceptions that occurred during the
entire test lifecycle. The order of occurrence is preserved.
result: TestResultEnums.TEST_RESULT_*, PASS/FAIL/SKIP.
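Example (illustrative sketch of a record's lifecycle; assumes
`mobly.signals` is imported as `signals` and the test name is made up):

>>> record = TestResultRecord('test_hello_world')
>>> record.test_begin()
>>> record.test_pass(signals.TestPass('Done.'))
>>> record.update_record()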
"""
def __init__(self, t_name, t_class=None):
self.test_name = t_name
self.test_class = t_class
self.begin_time = None
self.end_time = None
self.uid = None
self.termination_signal = None
self.extra_errors = collections.OrderedDict()
self.result = None
@property
def details(self):
"""String description of the cause of the test's termination.
Note a passed test can have this as well due to the explicit pass
signal. If the test passed implicitly, this field would be None.
"""
if self.termination_signal:
return self.termination_signal.details
@property
def stacktrace(self):
"""The stacktrace string for the exception that terminated the test.
"""
if self.termination_signal:
return self.termination_signal.stacktrace
@property
def extras(self):
"""User defined extra information of the test result.
Must be serializable.
"""
if self.termination_signal:
return self.termination_signal.extras
def test_begin(self):
"""Call this when the test begins execution.
Sets the begin_time of this record.
"""
self.begin_time = utils.get_current_epoch_time()
def _test_end(self, result, e):
"""Marks the end of the test logic.
Args:
result: One of the TEST_RESULT enums in TestResultEnums.
e: A test termination signal (usually an exception object). It can
be any exception instance or of any subclass of
mobly.signals.TestSignal.
"""
if self.begin_time is not None:
self.end_time = utils.get_current_epoch_time()
self.result = result
if e:
self.termination_signal = ExceptionRecord(e)
def update_record(self):
"""Updates the content of a record.
Several display fields like "details" and "stacktrace" need to be
updated based on the content of the record object.
As the content of the record changes, call this method to update all
the appropriate fields.
"""
if self.extra_errors:
if self.result != TestResultEnums.TEST_RESULT_FAIL:
self.result = TestResultEnums.TEST_RESULT_ERROR
# If no termination signal is provided, use the first exception that
# occurred as the termination signal.
if not self.termination_signal and self.extra_errors:
_, self.termination_signal = self.extra_errors.popitem(last=False)
def test_pass(self, e=None):
"""To mark the test as passed in this record.
Args:
e: An instance of mobly.signals.TestPass.
"""
self._test_end(TestResultEnums.TEST_RESULT_PASS, e)
def test_fail(self, e=None):
"""To mark the test as failed in this record.
Only test_fail does an instance check because we want a plain 'assert xxx'
to also fail the test the same way assert_true does.
Args:
e: An exception object. It can be an instance of AssertionError or
mobly.base_test.TestFailure.
"""
self._test_end(TestResultEnums.TEST_RESULT_FAIL, e)
def test_skip(self, e=None):
"""To mark the test as skipped in this record.
Args:
e: An instance of mobly.signals.TestSkip.
"""
self._test_end(TestResultEnums.TEST_RESULT_SKIP, e)
def test_error(self, e=None):
"""To mark the test as error in this record.
Args:
e: An exception object.
"""
self._test_end(TestResultEnums.TEST_RESULT_ERROR, e)
def add_error(self, position, e):
"""Add extra error happened during a test.
If the test has passed or skipped, this will mark the test result as
ERROR.
If an error is added to the test record, the record's result is equivalent
to the case where an uncaught exception happened.
If the test record has not recorded any error, the newly added error
would be the main error of the test record. Otherwise the newly added
error is added to the record's extra errors.
Args:
position: string, where this error occurred, e.g. 'teardown_test'.
e: An exception or a `signals.ExceptionRecord` object.
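Example (illustrative sketch; the position and exception are made up):

>>> record.add_error('teardown_test', Exception('cleanup failed'))
>>> # record.result is now TestResultEnums.TEST_RESULT_ERROR unless the
>>> # test had already failed.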
"""
if self.result != TestResultEnums.TEST_RESULT_FAIL:
self.result = TestResultEnums.TEST_RESULT_ERROR
if position in self.extra_errors:
raise Error('An exception is already recorded with position "%s",'
' cannot reuse.' % position)
if isinstance(e, ExceptionRecord):
self.extra_errors[position] = e
else:
self.extra_errors[position] = ExceptionRecord(e, position=position)
def __str__(self):
d = self.to_dict()
l = ['%s = %s' % (k, v) for k, v in d.items()]
s = ', '.join(l)
return s
def __repr__(self):
"""This returns a short string representation of the test record."""
t = utils.epoch_to_human_time(self.begin_time)
return '%s %s %s' % (t, self.test_name, self.result)
def to_dict(self):
"""Gets a dictionary representating the content of this class.
Returns:
A dictionary representating the content of this class.
"""
d = {}
d[TestResultEnums.RECORD_NAME] = self.test_name
d[TestResultEnums.RECORD_CLASS] = self.test_class
d[TestResultEnums.RECORD_BEGIN_TIME] = self.begin_time
d[TestResultEnums.RECORD_END_TIME] = self.end_time
d[TestResultEnums.RECORD_RESULT] = self.result
d[TestResultEnums.RECORD_UID] = self.uid
d[TestResultEnums.RECORD_EXTRAS] = self.extras
d[TestResultEnums.RECORD_DETAILS] = self.details
d[TestResultEnums.RECORD_EXTRA_ERRORS] = {
key: value.to_dict()
for (key, value) in self.extra_errors.items()
}
d[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace
return d
class TestResult(object):
"""A class that contains metrics of a test run.
This class is essentially a container of TestResultRecord objects.
Attributes:
requested: A list of strings, each is the name of a test requested
by user.
failed: A list of records for tests failed.
executed: A list of records for tests that were actually executed.
passed: A list of records for tests passed.
skipped: A list of records for tests skipped.
error: A list of records for tests with error result token.
controller_info: list of ControllerInfoRecord.
"""
def __init__(self):
self.requested = []
self.failed = []
self.executed = []
self.passed = []
self.skipped = []
self.error = []
self.controller_info = []
def __add__(self, r):
"""Overrides '+' operator for TestResult class.
The add operator merges two TestResult objects by concatenating all of
their lists together.
Args:
r: another instance of TestResult to be added
Returns:
A TestResult instance that's the sum of two TestResult instances.
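Example (illustrative sketch; `result_a` and `result_b` are hypothetical
per-class results):

>>> total = TestResult()
>>> total = total + result_a + result_b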
"""
if not isinstance(r, TestResult):
raise TypeError('Operand %s of type %s is not a TestResult.' %
(r, type(r)))
sum_result = TestResult()
for name in sum_result.__dict__:
r_value = getattr(r, name)
l_value = getattr(self, name)
if isinstance(r_value, list):
setattr(sum_result, name, l_value + r_value)
return sum_result
def add_record(self, record):
"""Adds a test record to test result.
A record is considered executed once it's added to the test result.
Adding the record finalizes the content of a record, so no change
should be made to the record afterwards.
Args:
record: A test record object to add.
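Example (illustrative sketch; `record` is a finished TestResultRecord):

>>> results = TestResult()
>>> results.add_record(record)
>>> executed = results.is_test_executed(record.test_name)  # True unless skipped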
"""
record.update_record()
if record.result == TestResultEnums.TEST_RESULT_SKIP:
self.skipped.append(record)
return
self.executed.append(record)
if record.result == TestResultEnums.TEST_RESULT_FAIL:
self.failed.append(record)
elif record.result == TestResultEnums.TEST_RESULT_PASS:
self.passed.append(record)
else:
self.error.append(record)
def add_controller_info_record(self, controller_info_record):
"""Adds a controller info record to results.
This can be called multiple times for each test class.
Args:
controller_info_record: ControllerInfoRecord object to be added to
the result.
"""
self.controller_info.append(controller_info_record)
def add_class_error(self, test_record):
"""Add a record to indicate a test class has failed before any test
could execute.
This is only called before any test is actually executed. So it only
adds an error entry that describes why the class failed to the tally
and does not affect the total number of tests requrested or exedcuted.
Args:
test_record: A TestResultRecord object for the test class.
"""
test_record.update_record()
self.error.append(test_record)
def is_test_executed(self, test_name):
"""Checks if a specific test has been executed.
Args:
test_name: string, the name of the test to check.
Returns:
True if the test has been executed according to the test result,
False otherwise.
"""
for record in self.executed:
if record.test_name == test_name:
return True
return False
@property
def is_all_pass(self):
"""True if no tests failed or threw errors, False otherwise."""
num_of_failures = len(self.failed) + len(self.error)
if num_of_failures == 0:
return True
return False
def requested_test_names_dict(self):
"""Gets the requested test names of a test run in a dict format.
Note a test can be requested multiple times, so there can be duplicate
values.
Returns:
A dict with a single key mapping to the list of requested test names.
"""
return {'Requested Tests': copy.deepcopy(self.requested)}
def summary_str(self):
"""Gets a string that summarizes the stats of this test result.
The summary provides the counts of how many tests fall into each
category, like 'Passed', 'Failed' etc.
Format of the string is:
Requested <int>, Executed <int>, ...
Returns:
A summary string of this test result.
"""
l = ['%s %d' % (k, v) for k, v in self.summary_dict().items()]
# Sort the list so the order is the same every time.
msg = ', '.join(sorted(l))
return msg
def summary_dict(self):
"""Gets a dictionary that summarizes | |
ax = plt.gca()
p._attach(ax)
p.plot(ax, kwargs)
return ax
lineplot.__doc__ = dedent("""\
Draw a line plot with possibility of several semantic groupings.
{main_api_narrative}
{relational_semantic_narrative}
By default, the plot aggregates over multiple ``y`` values at each value of
``x`` and shows an estimate of the central tendency and a confidence
interval for that estimate.
Parameters
----------
{data_vars}
hue : {long_form_var}
Grouping variable that will produce lines with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : {long_form_var}
Grouping variable that will produce lines with different widths.
Can be either categorical or numeric, although size mapping will
behave differently in the latter case.
style : {long_form_var}
Grouping variable that will produce lines with different dashes
and/or markers. Can have a numeric dtype but will always be treated
as categorical.
{data}
{palette}
{hue_order}
{hue_norm}
{sizes}
{size_order}
{size_norm}
dashes : boolean, list, or dictionary, optional
Object determining how to draw the lines for different levels of the
``style`` variable. Setting to ``True`` will use default dash codes, or
you can pass a list of dash codes or a dictionary mapping levels of the
``style`` variable to dash codes. Setting to ``False`` will use solid
lines for all subsets. Dashes are specified as in matplotlib: a tuple
of ``(segment, gap)`` lengths, or an empty string to draw a solid line.
{markers}
{style_order}
{units}
{estimator}
{ci}
{n_boot}
{seed}
sort : boolean, optional
If True, the data will be sorted by the x and y variables, otherwise
lines will connect points in the order they appear in the dataset.
err_style : "band" or "bars", optional
Whether to draw the confidence intervals with translucent error bands
or discrete error bars.
err_kws : dict of keyword arguments
Additional parameters to control the aesthetics of the error bars. The
kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between`
or :meth:`matplotlib.axes.Axes.errorbar`, depending on ``err_style``.
{legend}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{ax_out}
See Also
--------
scatterplot : Show the relationship between two variables without
emphasizing continuity of the ``x`` variable.
pointplot : Show the relationship between two variables when one is
categorical.
Examples
--------
Draw a single line plot with error bands showing a confidence interval:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> import matplotlib.pyplot as plt
>>> fmri = sns.load_dataset("fmri")
>>> ax = sns.lineplot(x="timepoint", y="signal", data=fmri)
Group by another variable and show the groups with different colors:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal", hue="event",
... data=fmri)
Show the grouping variable with both color and line dashing:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal",
... hue="event", style="event", data=fmri)
Use color and line dashing to represent two different grouping variables:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal",
... hue="region", style="event", data=fmri)
Use markers instead of the dashes to identify groups:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal",
... hue="event", style="event",
... markers=True, dashes=False, data=fmri)
Show error bars instead of error bands and plot the standard error:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal", hue="event",
... err_style="bars", ci=68, data=fmri)
Show experimental replicates instead of aggregating:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="timepoint", y="signal", hue="event",
... units="subject", estimator=None, lw=1,
... data=fmri.query("region == 'frontal'"))
Use a quantitative color mapping:
.. plot::
:context: close-figs
>>> dots = sns.load_dataset("dots").query("align == 'dots'")
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... data=dots)
Use a different normalization for the colormap:
.. plot::
:context: close-figs
>>> from matplotlib.colors import LogNorm
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... hue_norm=LogNorm(),
... data=dots.query("coherence > 0"))
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... palette="ch:2.5,.25", data=dots)
Use specific color values, treating the hue variable as categorical:
.. plot::
:context: close-figs
>>> palette = sns.color_palette("mako_r", 6)
>>> ax = sns.lineplot(x="time", y="firing_rate",
... hue="coherence", style="choice",
... palette=palette, data=dots)
Change the width of the lines with a quantitative variable:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="time", y="firing_rate",
... size="coherence", hue="choice",
... legend="full", data=dots)
Change the range of line widths used to normalize the size variable:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(x="time", y="firing_rate",
... size="coherence", hue="choice",
... sizes=(.25, 2.5), data=dots)
Plot from a wide-form DataFrame:
.. plot::
:context: close-figs
>>> import numpy as np, pandas as pd; plt.close("all")
>>> index = pd.date_range("1 1 2000", periods=100,
... freq="m", name="date")
>>> data = np.random.randn(100, 4).cumsum(axis=0)
>>> wide_df = pd.DataFrame(data, index, ["a", "b", "c", "d"])
>>> ax = sns.lineplot(data=wide_df)
Plot from a list of Series:
.. plot::
:context: close-figs
>>> list_data = [wide_df.loc[:"2005", "a"], wide_df.loc["2003":, "b"]]
>>> ax = sns.lineplot(data=list_data)
Plot a single Series, pass kwargs to :meth:`matplotlib.axes.Axes.plot`:
.. plot::
:context: close-figs
>>> ax = sns.lineplot(data=wide_df["a"], color="coral", label="line")
Draw lines at points as they appear in the dataset:
.. plot::
:context: close-figs
>>> x, y = np.random.randn(2, 5000).cumsum(axis=1)
>>> ax = sns.lineplot(x=x, y=y, sort=False, lw=1)
Use :func:`relplot` to combine :func:`lineplot` and :class:`FacetGrid`:
This allows grouping within additional categorical variables. Using
:func:`relplot` is safer than using :class:`FacetGrid` directly, as it
ensures synchronization of the semantic mappings across facets.
.. plot::
:context: close-figs
>>> g = sns.relplot(x="timepoint", y="signal",
... col="region", hue="event", style="event",
... kind="line", data=fmri)
""").format(**_relational_docs)
@_deprecate_positional_args
def scatterplot(
*,
x=None, y=None,
hue=None, style=None, size=None, data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None,
x_bins=None, y_bins=None,
units=None, estimator=None, ci=95, n_boot=1000,
alpha=None, x_jitter=None, y_jitter=None,
legend="brief", ax=None, **kwargs
):
variables = _ScatterPlotter.get_semantics(locals())
p = _ScatterPlotter(
data=data, variables=variables,
x_bins=x_bins, y_bins=y_bins,
estimator=estimator, ci=ci, n_boot=n_boot,
alpha=alpha, x_jitter=x_jitter, y_jitter=y_jitter, legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, order=style_order)
if ax is None:
ax = plt.gca()
p._attach(ax)
p.plot(ax, kwargs)
return ax
scatterplot.__doc__ = dedent("""\
Draw a scatter plot with possibility of several semantic groupings.
{main_api_narrative}
{relational_semantic_narrative}
Parameters
----------
{data_vars}
hue : {long_form_var}
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : {long_form_var}
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in the latter case.
style : {long_form_var}
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{data}
{palette}
{hue_order}
{hue_norm}
{sizes}
{size_order}
{size_norm}
{markers}
{style_order}
{{x,y}}_bins : lists or arrays or functions
*Currently non-functional.*
{units}
*Currently non-functional.*
{estimator}
*Currently non-functional.*
{ci}
*Currently non-functional.*
{n_boot}
*Currently non-functional.*
alpha : float
Proportional opacity of the points.
{{x,y}}_jitter : booleans or floats
*Currently non-functional.*
{legend}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{ax_out}
See Also
--------
lineplot : Show the relationship between two variables connected with
lines to emphasize continuity.
swarmplot : Draw a scatter plot with one categorical variable, arranging
the points to show the distribution of values.
Examples
--------
Draw a simple scatter plot between two variables:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> import matplotlib.pyplot as plt
>>> tips = sns.load_dataset("tips")
>>> ax = sns.scatterplot(x="total_bill", y="tip", data=tips)
Group by another variable and show the groups with different colors:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip", hue="time",
... data=tips)
Show the grouping variable by varying both color and marker:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="time", style="time", data=tips)
Vary colors and markers to show two different grouping variables:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="day", style="time", data=tips)
Show a quantitative variable by varying the size of the points:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip", size="size",
... data=tips)
Show the quantitative variable by also using continuous colors:
.. plot::
:context: close-figs
>>> ax = sns.scatterplot(x="total_bill", y="tip",
... hue="size", size="size",
... data=tips)
Use a different continuous color map:
.. plot::
:context: close-figs
>>> cmap =
# -*- coding: utf-8 -*-
"""
Configuration for the simulations, for the single-player case.
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
# Tries to know number of CPU
try:
from multiprocessing import cpu_count
CPU_COUNT = cpu_count() #: Number of CPU on the local machine
except ImportError:
CPU_COUNT = 1
from os import getenv
if __name__ == '__main__':
print("Warning: this script 'configuration.py' is NOT executable. Use 'main.py' or 'make single' ...") # DEBUG
exit(0)
# Import arms and algorithms
try:
from Arms import *
from Policies import *
except ImportError:
from SMPyBandits.Arms import *
from SMPyBandits.Policies import *
#: HORIZON : number of time steps of the experiments.
#: Warning Should be >= 10000 to be interesting "asymptotically".
HORIZON = 10000
HORIZON = int(getenv('T', HORIZON))
#: REPETITIONS : number of repetitions of the experiments.
#: Warning: Should be >= 10 to be statistically trustworthy.
REPETITIONS = 1 # XXX To profile the code, turn down parallel computing
REPETITIONS = 4 # Nb of cores, to have exactly one repetition process by cores
# REPETITIONS = 1000
REPETITIONS = int(getenv('N', REPETITIONS))
#: To profile the code, turn down parallel computing
DO_PARALLEL = False # XXX do not let this = False
DO_PARALLEL = True
DO_PARALLEL = (REPETITIONS > 1 or REPETITIONS == -1) and DO_PARALLEL
#: Number of jobs to use for the parallel computations. -1 means all the CPU cores, 1 means no parallelization.
N_JOBS = -1 if DO_PARALLEL else 1
if CPU_COUNT > 4: # We are on a server, let's be nice and not use all cores
N_JOBS = min(CPU_COUNT, max(int(CPU_COUNT / 3), CPU_COUNT - 8))
N_JOBS = int(getenv('N_JOBS', N_JOBS))
if REPETITIONS == -1:
REPETITIONS = max(N_JOBS, CPU_COUNT)
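# Illustrative usage (assumed shell invocation; 'main.py' as mentioned above):
# running `T=20000 N=16 N_JOBS=4 python main.py` overrides HORIZON, REPETITIONS
# and N_JOBS through the getenv() calls above, i.e. 16 repetitions of a
# 20000-step horizon spread over 4 parallel jobs.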
# Random events
RANDOM_SHUFFLE = False #: The arms won't be shuffled (``shuffle(arms)``).
# RANDOM_SHUFFLE = True #: The arms will be shuffled (``shuffle(arms)``).
RANDOM_SHUFFLE = getenv('RANDOM_SHUFFLE', str(RANDOM_SHUFFLE)) == 'True'
RANDOM_INVERT = False #: The arms won't be inverted (``arms = arms[::-1]``).
# RANDOM_INVERT = True #: The arms will be inverted (``arms = arms[::-1]``).
RANDOM_INVERT = getenv('RANDOM_INVERT', str(RANDOM_INVERT)) == 'True'
NB_BREAK_POINTS = 0 #: Number of true breakpoints. They are uniformly spaced in time steps (and the first one at t=0 does not count).
# NB_BREAK_POINTS = 3 #: Number of true breakpoints. They are uniformly spaced in time steps (and the first one at t=0 does not count).
# NB_BREAK_POINTS = 5 #: Number of true breakpoints. They are uniformly spaced in time steps (and the first one at t=0 does not count).
NB_BREAK_POINTS = int(getenv('NB_BREAK_POINTS', NB_BREAK_POINTS))
#: Parameters for the epsilon-greedy and epsilon-... policies.
EPSILON = 0.1
#: Temperature for the Softmax policies.
TEMPERATURE = 0.05
#: Learning rate for my aggregated bandit (it can be autotuned)
LEARNING_RATE = 0.01
# To try more learning rates in one run
LEARNING_RATES = [LEARNING_RATE]
#: Constant time tau for the decreasing rate for my aggregated bandit.
# FIXED I tried to make self.learningRate decrease when self.t increase, it was not better
DECREASE_RATE = None
DECREASE_RATE = HORIZON / 2.0
DECREASE_RATE = 'auto' # FIXED using the formula from Theorem 4.2 from [Bubeck & Cesa-Bianchi, 2012](http://sbubeck.com/SurveyBCB12.pdf)
#: To know if my WrapRange policy is tested.
TEST_WrapRange = True
TEST_WrapRange = False # XXX do not let this = False if you want to test my WrapRange policy
#: Should we cache rewards? The random rewards will be the same for all the REPETITIONS simulations for each algorithms.
CACHE_REWARDS = False # XXX to manually disable this feature?
CACHE_REWARDS = True # XXX to manually enable this feature?
#: Should the Aggregator policy update the trusts in each child or just the one trusted for last decision?
UPDATE_ALL_CHILDREN = True
UPDATE_ALL_CHILDREN = False # XXX do not let this = False
#: Should the rewards for Aggregator policy use as biased estimator, ie just ``r_t``, or unbiased estimators, ``r_t / p_t``
UNBIASED = True
UNBIASED = False
#: Should we update the trusts proba like in Exp4 or like in my initial Aggregator proposal
UPDATE_LIKE_EXP4 = True # trusts^(t+1) = exp(rate_t * estimated rewards upto time t)
UPDATE_LIKE_EXP4 = False # trusts^(t+1) <-- trusts^t * exp(rate_t * estimate reward at time t)
# Parameters for the arms
UNBOUNDED_VARIANCE = 1 #: Variance of unbounded Gaussian arms
VARIANCE = 0.05 #: Variance of Gaussian arms
#: Number of arms for non-hard-coded problems (Bayesian problems)
NB_ARMS = 9
NB_ARMS = int(getenv('K', NB_ARMS))
NB_ARMS = int(getenv('NB_ARMS', NB_ARMS))
#: Default value for the lower value of means
LOWER = 0.
#: Default value for the amplitude value of means
AMPLITUDE = 1.
#: Type of arms for non-hard-coded problems (Bayesian problems)
ARM_TYPE = "Bernoulli"
ARM_TYPE = str(getenv('ARM_TYPE', ARM_TYPE))
# WARNING That's nonsense, rewards of unbounded distributions just don't have lower, amplitude values...
if ARM_TYPE in [
"UnboundedGaussian",
# "Gaussian",
]:
LOWER = -5
AMPLITUDE = 10
LOWER = float(getenv('LOWER', LOWER))
AMPLITUDE = float(getenv('AMPLITUDE', AMPLITUDE))
assert AMPLITUDE > 0, "Error: invalid amplitude = {:.3g} but has to be > 0.".format(AMPLITUDE) # DEBUG
VARIANCE = float(getenv('VARIANCE', VARIANCE))
ARM_TYPE_str = str(ARM_TYPE)
ARM_TYPE = mapping_ARM_TYPE[ARM_TYPE]
#: True to use bayesian problem
ENVIRONMENT_BAYESIAN = False
ENVIRONMENT_BAYESIAN = getenv('BAYES', str(ENVIRONMENT_BAYESIAN)) == 'True'
#: Means of arms for non-hard-coded problems (non Bayesian)
MEANS = uniformMeans(nbArms=NB_ARMS, delta=0.05, lower=LOWER, amplitude=AMPLITUDE, isSorted=True)
import numpy as np
# more parametric? Read from cli?
MEANS_STR = getenv('MEANS', '')
if MEANS_STR != '':
MEANS = [ float(m) for m in MEANS_STR.replace('[', '').replace(']', '').split(',') ]
print("Using cli env variable to use MEANS = {}.".format(MEANS)) # DEBUG
#: True to use full-restart Doubling Trick
USE_FULL_RESTART = True
USE_FULL_RESTART = getenv('FULL_RESTART', str(USE_FULL_RESTART)) == 'True'
#: This dictionary configures the experiments
configuration = {
# --- Duration of the experiment
"horizon": HORIZON,
# --- Number of repetition of the experiment (to have an average)
"repetitions": REPETITIONS,
# --- Parameters for the use of joblib.Parallel
"n_jobs": N_JOBS, # = nb of CPU cores
"verbosity": 6, # Max joblib verbosity
# --- Random events
"random_shuffle": RANDOM_SHUFFLE,
"random_invert": RANDOM_INVERT,
"nb_break_points": NB_BREAK_POINTS,
# --- Should we plot the lower-bounds or not?
"plot_lowerbound": True, # XXX Default
# "plot_lowerbound": False,
# --- Cache rewards: use the same random rewards for the Aggregator[..] and the algorithms
"cache_rewards": CACHE_REWARDS,
"environment_bayesian": ENVIRONMENT_BAYESIAN,
# --- Arms
"environment": [ # XXX Bernoulli arms
# { # The easier problem: 2 arms, one perfectly bad, one perfectly good
# "arm_type": Bernoulli,
# "params": [0, 1]
# },
# { # A very very easy problem: 2 arms, one better than the other
# "arm_type": Bernoulli,
# "params": [0.8, 0.9]
# },
# { # A very very easy problem: 2 arms, one better than the other
# "arm_type": Bernoulli,
# "params": [0.375, 0.571]
# },
# { # A very very easy problem: 3 arms, one bad, one average, one good
# "arm_type": Bernoulli,
# "params": [0.1, 0.5, 0.9]
# },
# { # Another very easy problem: 3 arms, two very bad, one bad
# "arm_type": Bernoulli,
# "params": [0.04, 0.05, 0.1]
# },
{ # Use vector from command line
"arm_type": ARM_TYPE,
"params": MEANS
},
# { # XXX A very easy problem, but it is used in a lot of articles
# "arm_type": Bernoulli,
# "params": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# },
# # XXX Default! FIXME always bring this back as default after experimenting
# { # A very easy problem (X arms), but it is used in a lot of articles
# "arm_type": ARM_TYPE,
# "params": uniformMeans(nbArms=NB_ARMS, delta=1./(1. + NB_ARMS), lower=LOWER, amplitude=AMPLITUDE, isSorted=False)
# },
# { # An other problem, best arm = last, with three groups: very bad arms (0.01, 0.02), middle arms (0.3 - 0.6) and very good arms (0.78, 0.8, 0.82)
# "arm_type": Bernoulli,
# "params": [0.01, 0.02, 0.3, 0.4, 0.5, 0.6, 0.78, 0.8, 0.82]
# },
# { # Another example problem, from [F<NAME> et al, 2018](https://arxiv.org/abs/1804.05929)
# "arm_type": Bernoulli,
# "params": [0.01, 0.01, 0.01, 0.02, 0.02, 0.02, 0.05, 0.05, 0.05, 0.1]
# },
# { # Lots of bad arms, significative difference between the best and the others
# "arm_type": Bernoulli,
# "params": [0.001, 0.001, 0.005, 0.005, 0.01, 0.01, 0.02, 0.02, 0.02, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.3]
# },
# { # VERY HARD One optimal arm, much better than the others, but *lots* of bad arms (34 arms!)
# "arm_type": Bernoulli,
# "params": [0.001, | |
# Repository: jarethholt/teospy
"""Dry air Helmholtz potential and air-water virial coefficients.
This module implements the Helmholtz free energy of dry air and its
derivatives with respect to temperature and density. This module also
includes the virial coefficients for dry air-water vapour mixtures.
:Examples:
>>> dry_f(0,0,300.,1e-3)
-696239.965190
>>> air_baw(0,300.)
-2.95672747e-05
>>> air_caaw(0,300.)
8.01977741e-10
>>> air_caww(0,300.)
-1.15552784e-07
:Functions:
* :func:`dry_f`: Dry air Helmholtz free energy with derivatives.
* :func:`air_baw`: Air-water virial coefficient with derivatives.
* :func:`air_caaw`: Air-air-water virial coefficient with derivatives.
* :func:`air_caww`: Air-water-water virial coefficient with derivatives.
"""
__all__ = ['dry_f','air_baw','air_caaw','air_caww']
import numpy
from teospy import constants0
# Single constants
_RDRY = constants0.RDRY_L2000
_MDRY = constants0.MDRY
_TRED_DRY = 132.6312 # Reducing temperature, K
_DRED_DRY = 10447.7 * _MDRY # Reducing density, kg/m3
_TRED_AW = 100.0 # Reducing temperature (K) for air-water coefficient
_TRED_AAW = 1.0 # Reducing temperature (K) for air-air-water coefficient
_TRED_AWW = 1.0 # Reducing temperature (K) for air-water-water coefficient
# Constants used in empirical equations
_C_DRYF0 = (
(9.7450251743948, 10.0986147428912),
(6.057194e-8, -2.10274769e-5, -1.58860716e-4),
(1.5, -1.9536342e-4),
2.490888032,
(
(1., 1., -1., -25.36365, 0.791309509),
(1., 1., -1., -16.90741, 0.212236768),
(2., 3., 1., 87.31279, -0.197938904)
)
)
_C_DRYFR = (
(
(1, 0. , 0.118160747229 ),
(1, 0.33, 0.713116392079 ),
(1, 1.01, -1.61824192067 ),
(2, 0. , 0.0714140178971 ),
(3, 0. , -0.0865421396646 ),
(3, 0.15, 0.134211176704 ),
(4, 0. , 0.0112626704218 ),
(4, 0.2 , -0.0420533228842 ),
(4, 0.35, 0.0349008431982 ),
(6, 1.35, 0.000164957183186)
),
(
( 1, 1, 1.6 , -0.101365037912 ),
( 3, 1, 0.8 , -0.17381369097 ),
( 5, 1, 0.95, -0.0472103183731 ),
( 6, 1, 1.25, -0.0122523554253 ),
( 1, 2, 3.6 , -0.146629609713 ),
( 3, 2, 6. , -0.0316055879821 ),
(11, 2, 3.25, 0.000233594806142),
( 1, 3, 3.5 , 0.0148287891978 ),
( 3, 3, 15. , -0.00938782884667 )
)
)
_C_AW = (
(-0.237, 66.5687),
(-1.048, -238.834 ),
(-3.183, -176.755 )
)
_C_AAW = (4.82737e2,1.05678e5,-6.56394e7,2.94442e10,-3.19317e12)
_C_AWW = (-1.0728876e1,3.47802e3,-3.83383e5,3.3406e7)
## Dry air auxiliary functions
def _alpha_ideal(tau):
"""Calculate dry air potential ideal term.
Calculate the temperature-dependent terms of the ideal gas component
of the Helmholtz potential (scaled Helmholtz free energy) for dry
air.
:arg float tau: Reduced temperature _TRED_DRY/temp(K).
:returns: Helmholtz potential, unitless.
"""
alpha = 0.0
n4, n5 = _C_DRYF0[0]
alpha += n4 + n5*tau
for (k,n) in enumerate(_C_DRYF0[1]):
alpha += n * tau**(k-3)
k, n = _C_DRYF0[2]
alpha += n * tau**k
alpha += _C_DRYF0[3] * numpy.log(tau)
for (a1,a2,b,c,n) in _C_DRYF0[4]:
alpha += n * numpy.log(a1/a2 + b*numpy.exp(c*tau))
return alpha
def _alpha_ideal_t(tau):
"""Calculate dry air potential ideal term T-derivative.
Calculate the derivative of the ideal gas component of the dry air
Helmholtz potential (scaled Helmholtz free energy) with respect to
reduced temperature.
:arg float tau: Reduced temperature _TRED_DRY/temp(K).
:returns: Helmholtz potential derivative, unitless.
"""
alpha = 0.0
n4, n5 = _C_DRYF0[0]
alpha += n5
for (k,n) in enumerate(_C_DRYF0[1]):
alpha += n * (k-3) * tau**(k-4)
k, n = _C_DRYF0[2]
alpha += n * k*tau**(k-1)
alpha += _C_DRYF0[3] / tau
for (a1,a2,b,c,n) in _C_DRYF0[4]:
eterm = numpy.exp(c*tau)
alpha += n * b*c*eterm / (a1/a2 + b*eterm)
return alpha
def _alpha_ideal_tt(tau):
"""Calculate dry air potential ideal term TT-derivative.
Calculate the second derivative of the ideal gas component of the
dry air Helmholtz potential (scaled Helmholtz free energy) with
respect to reduced temperature.
:arg float tau: Reduced temperature _TRED_DRY/temp(K).
:returns: Helmholtz potential second derivative, unitless.
"""
alpha = 0.0
for (k,n) in enumerate(_C_DRYF0[1]):
alpha += n * (k-3)*(k-4) * tau**(k-5)
k, n = _C_DRYF0[2]
alpha += n * k*(k-1)*tau**(k-2)
alpha += -_C_DRYF0[3] / tau**2
for (a1,a2,b,c,n) in _C_DRYF0[4]:
eterm = numpy.exp(c*tau)
denom = a1/a2 + b*eterm
alpha += n * a1/a2 * b * c**2 * eterm / denom**2
return alpha
def _alpha_res(drvt,drvd,tau,dta):
"""Calculate dry air potential residual term.
Calculate the residual (non-ideal gas) component of the Helmholtz
potential (scaled Helmholtz free energy) or its derivatives with
respect to reduced temperature and density. Derivatives up to
second-order are available.
:arg int drvt: Number of reduced temperature derivatives.
:arg int drvd: Number of reduced density derivatives.
:arg float tau: Reduced temperature _TRED_DRY/temp(K).
:arg float dta: Reduced density ddry(kg/m3)/_DRED_DRY.
:returns: Helmholtz potential or derivative, unitless.
:raises ValueError: If drvt<0, drvd<0, or drvt+drvd>2.
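:Examples: (illustrative sketch; reuses the module's reducing constants)
>>> tau = _TRED_DRY / 300.
>>> dta = 1e-3 / _DRED_DRY
>>> a00 = _alpha_res(0,0,tau,dta)  # residual potential itself
>>> a10 = _alpha_res(1,0,tau,dta)  # first derivative in tau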
"""
if (drvt < 0 or drvd < 0 or drvt+drvd > 2):
errmsg = 'Derivative {0} not recognized'.format((drvt,drvd))
raise ValueError(errmsg)
alpha = 0.0
# First part: dual power series
for (j,k,n) in _C_DRYFR[0]:
if drvt == 0:
a_tau = tau**k
elif drvt == 1:
a_tau = k * tau**(k-1)
elif drvt == 2:
a_tau = k*(k-1) * tau**(k-2)
if drvd == 0:
a_dta = dta**j
elif drvd == 1:
a_dta = j * dta**(j-1)
elif drvd == 2:
a_dta = j*(j-1) * dta**(j-2)
alpha += n * a_tau * a_dta
# Second part: power series with exponential correction
for (j,l,k,n) in _C_DRYFR[1]:
if drvt == 0:
a_tau = tau**k
elif drvt == 1:
a_tau = k * tau**(k-1)
elif drvt == 2:
a_tau = k*(k-1) * tau**(k-2)
dtal = dta**l
eterm = numpy.exp(-dtal)
if drvd == 0:
a_dta = dta**j * eterm
elif drvd == 1:
a_dta = dta**(j-1)*eterm * (j-l*dtal)
elif drvd == 2:
a_dta = dta**(j-2)*eterm * ((j-1-l*dtal)*(j-l*dtal) - l**2*dtal)
alpha += n * a_tau * a_dta
return alpha
### Public functions
def dry_f(drvt,drvd,temp,ddry,chkbnd=False):
"""Calculate dry air Helmholtz free energy.
Calculate the specific Helmholtz free energy of dry air or its
derivatives with respect to temperature and density. Derivatives up
to second order are available.
:arg int drvt: Number of temperature derivatives.
:arg int drvd: Number of density derivatives.
:arg float temp: Temperature in K.
:arg float ddry: Dry air density in kg/m3.
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:returns: Helmholtz free energy in units of
(J/kg) / K^drvt / (kg/m3)^drvd.
:raises ValueError: If either temp or ddry are nonpositive.
:raises RuntimeWarning: If temp or ddry are outside the recommended
bounds and chkbnd is True.
:raises ValueError: If drvt<0, drvd<0, or drvt+drvd>2.
:Examples:
>>> dry_f(0,0,300.,1e-3)
-696239.965190
>>> dry_f(1,0,300.,1e-3)
-2124.55145456
>>> dry_f(0,1,300.,1e-3)
8.61147149596e+07
>>> dry_f(2,0,300.,1e-3)
-2.39242390806
>>> dry_f(1,1,300.,1e-3)
287049.624545
>>> dry_f(0,2,300.,1e-3)
-8.61147380361e+10
"""
constants0.chkdrybnds(temp,ddry,chkbnd=chkbnd)
tau = _TRED_DRY / temp
dta = ddry / _DRED_DRY
# Run through each derivative case
if (drvt,drvd) == (0,0):
alpha = numpy.log(dta) + _alpha_ideal(tau)
alpha += _alpha_res(0,0,tau,dta)
f = _RDRY * temp * alpha
elif (drvt,drvd) == (0,1):
alpha_d = 1./dta + _alpha_res(0,1,tau,dta)
f = _RDRY * temp * alpha_d / _DRED_DRY
elif (drvt,drvd) == (0,2):
alpha_dd = -1./dta**2 + _alpha_res(0,2,tau,dta)
f = _RDRY * temp * alpha_dd / _DRED_DRY**2
elif (drvt,drvd) == (1,0):
alpha = numpy.log(dta) + _alpha_ideal(tau)
alpha += _alpha_res(0,0,tau,dta)
alpha_t = _alpha_ideal_t(tau)
alpha_t += _alpha_res(1,0,tau,dta)
f = _RDRY * (alpha - tau*alpha_t)
elif (drvt,drvd) == (1,1):
alpha_d = 1./dta + _alpha_res(0,1,tau,dta)
alpha_td = _alpha_res(1,1,tau,dta)
f = _RDRY * (alpha_d - tau*alpha_td) / _DRED_DRY
elif (drvt,drvd) == (2,0):
alpha_tt = _alpha_ideal_tt(tau)
alpha_tt += _alpha_res(2,0,tau,dta)
f = _RDRY * tau**2 * alpha_tt / temp
else:
errmsg = 'Derivatives {0} not recognized'.format((drvt,drvd))
raise ValueError(errmsg)
return f
def air_baw(drvt,temp):
"""Calculate air-water virial coefficient.
Calculate the first dry air-water vapour virial coefficient or its
derivative with respect to temperature. Derivatives up to second
order are available.
:arg int drvt: Number of temperature derivatives.
:arg float temp: Temperature in K.
:returns: Air-water virial coefficient, in units of
(m3/mol) / K^drvt.
:raises ValueError: If drvt<0 or drvt>2.
:Examples:
>>> air_baw(0,300.)
-2.95672747e-05
>>> air_baw(1,300.)
2.80097360e-07
>>> air_baw(2,300.)
-2.42599241e-09
"""
baw = 0.0
tau = temp / _TRED_AW
for (k,n) in _C_AW:
if drvt == 0: baw += n * tau**k
elif drvt == 1: baw += n * k * tau**(k-1) / _TRED_AW
elif drvt == 2: baw += n * k*(k-1) * tau**(k-2) / _TRED_AW**2
else:
errmsg = 'Derivative {0} not recognized'.format(drvt)
raise ValueError(errmsg)
# Convert from cm3/mol to m3/mol
baw *= 1e-6
return baw
def air_caaw(drvt,temp):
"""Calculate air-air-water virial coefficient.
Calculate the second dry air-dry air-water vapour virial coefficient
or its derivative with respect to temperature.
view: elements may be silently omitted or re-ordered
* plenary view: provides a complete set or is an error condition
Generally, the comparative view should be used for most applications
as it permits operation even if there is data that cannot be
accessed. For example, a browsing application may only need to
examine the ``Calendars`` it can access, without breaking execution.
However, an administrative application may require all ``Calendar``
elements to be available.
Calendars may have additional records indicated by their
respective record types. These records may not be accessed through a
cast of the ``Calendar``.
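Example (illustrative sketch; obtaining this session from a
``CalendaringManager`` is assumed):
  session.use_comparative_calendar_view()
  if session.can_lookup_calendars():
      calendars = session.get_calendars()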
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def can_lookup_calendars(self):
"""Tests if this user can perform ``Calendar`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def use_comparative_calendar_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as
authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def use_plenary_calendar_view(self):
"""A complete view of the ``Calendar`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def get_calendar(self, calendar_id):
"""Gets the ``Calendar`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Calendar`` may have a
different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to a ``Calendar`` and retained for
compatibility.
:param calendar_id: ``Id`` of the ``Calendar``
:type calendar_id: ``osid.id.Id``
:return: the calendar
:rtype: ``osid.calendaring.Calendar``
:raise: ``NotFound`` -- ``calendar_id`` not found
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.Calendar
@abc.abstractmethod
def get_calendars_by_ids(self, calendar_ids):
"""Gets a ``CalendarList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the calendars
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Calendar`` objects may be omitted from the list
and may present the elements in any order including returning a
unique set.
:param calendar_ids: the list of ``Ids`` to retrieve
:type calendar_ids: ``osid.id.IdList``
:return: the returned ``Calendar`` list
:rtype: ``osid.calendaring.CalendarList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``calendar_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
@abc.abstractmethod
def get_calendars_by_genus_type(self, calendar_genus_type):
"""Gets a ``CalendarList`` corresponding to the given calendar genus ``Type`` which does not include calendars
of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known calendars
or an error results. Otherwise, the returned list may contain
only those calendars that are accessible through this session.
:param calendar_genus_type: a calendar genus type
:type calendar_genus_type: ``osid.type.Type``
:return: the returned ``Calendar`` list
:rtype: ``osid.calendaring.CalendarList``
:raise: ``NullArgument`` -- ``calendar_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
@abc.abstractmethod
def get_calendars_by_parent_genus_type(self, calendar_genus_type):
"""Gets a ``CalendarList`` corresponding to the given calendar genus ``Type`` and include any additional
calendars with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known calendars
or an error results. Otherwise, the returned list may contain
only those calendars that are accessible through this session.
:param calendar_genus_type: a calendar genus type
:type calendar_genus_type: ``osid.type.Type``
:return: the returned ``Calendar`` list
:rtype: ``osid.calendaring.CalendarList``
:raise: ``NullArgument`` -- ``calendar_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
@abc.abstractmethod
def get_calendars_by_record_type(self, calendar_record_type):
"""Gets a ``CalendarList`` containing the given calendar record ``Type``.
In plenary mode, the returned list contains all known calendars
or an error results. Otherwise, the returned list may contain
only those calendars that are accessible through this session.
:param calendar_record_type: a calendar record type
:type calendar_record_type: ``osid.type.Type``
:return: the returned ``Calendar`` list
:rtype: ``osid.calendaring.CalendarList``
:raise: ``NullArgument`` -- ``calendar_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
@abc.abstractmethod
def get_calendars_by_provider(self, resource_id):
"""Gets a ``CalendarList`` for the given provider.
In plenary mode, the returned list contains all known calendars
or an error results. Otherwise, the returned list may contain
only those calendars that are accessible through this session.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:return: the returned ``Calendar`` list
:rtype: ``osid.calendaring.CalendarList``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
@abc.abstractmethod
def get_calendars(self):
"""Gets all ``Calendars``.
In plenary mode, the returned list contains all known calendars
or an error results. Otherwise, the returned list may contain
only those calendars that are accessible through this session.
:return: a ``CalendarList``
:rtype: ``osid.calendaring.CalendarList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
calendars = property(fget=get_calendars)
class CalendarQuerySession:
"""This session provides methods for searching among ``Calendar`` objects.
The search query is constructed using the ``CalendarQuery``.
Calendars may have a query record indicated by their respective
record types. The query record is accessed via the
``CalendarQuery``.
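Example (illustrative sketch; match terms depend on the query record):
  query = session.get_calendar_query()
  # ... set match terms on the query ...
  calendars = session.get_calendars_by_query(query)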
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def can_search_calendars(self):
"""Tests if this user can perform ``Calendar`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_calendar_query(self):
"""Gets a calendar query.
:return: a calendar query
:rtype: ``osid.calendaring.CalendarQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarQuery
calendar_query = property(fget=get_calendar_query)
@abc.abstractmethod
def get_calendars_by_query(self, calendar_query):
"""Gets a list of ``Calendar`` objects matching the given calendar query.
:param calendar_query: the calendar query
:type calendar_query: ``osid.calendaring.CalendarQuery``
:return: the returned ``CalendarList``
:rtype: ``osid.calendaring.CalendarList``
:raise: ``NullArgument`` -- ``calendar_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``calendar_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
class CalendarSearchSession:
"""This session provides methods for searching among ``Calendar`` objects.
The search query is constructed using the ``CalendarQuery``.
``get_calendars_by_query()`` is the basic search method and returns
a list of ``Calendar`` objects. A more advanced search may be
performed with ``getCalendarsBySearch()``. It accepts a
``CalendarSearch`` in addition to the query for the purpose of
specifying additional options affecting the entire search, such as
ordering. ``get_calendars_by_search()`` returns a
``CalendarSearchResults`` that can be used to access the resulting
``CalendarList`` or be used to perform a search within the result
set through ``CalendarSearch``.
Calendars may have a query record indicated by their respective
record types.
r"""
:param Data: Event details
:type Data: str
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
self.Data = params.get("Data")
self.RequestId = params.get("RequestId")
class DescribeLeakDetectionListRequest(AbstractModel):
"""DescribeLeakDetectionList请求参数结构体
"""
def __init__(self):
r"""
:param Filters: Filter conditions
:type Filters: list of Filter
:param Limit: Number of items per page
:type Limit: int
:param Page: Page number
:type Page: int
:param StartTime: Start time
:type StartTime: str
:param EndTime: End time
:type EndTime: str
"""
self.Filters = None
self.Limit = None
self.Page = None
self.StartTime = None
self.EndTime = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Limit = params.get("Limit")
self.Page = params.get("Page")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeLeakDetectionListResponse(AbstractModel):
"""DescribeLeakDetectionList返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: Total count
:type TotalCount: int
:param List: Data list
:type List: list of str
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.TotalCount = None
self.List = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
self.List = params.get("List")
self.RequestId = params.get("RequestId")
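# Illustrative usage sketch (assumptions: a configured SSA client exists and,
# per the SDK's usual convention, exposes a method named after the action):
#     req = DescribeLeakDetectionListRequest()
#     req.Limit = 10
#     req.Page = 1
#     resp = client.DescribeLeakDetectionList(req)
#     print(resp.TotalCount)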
class DescribeSafetyEventListRequest(AbstractModel):
"""DescribeSafetyEventList请求参数结构体
"""
def __init__(self):
r"""
:param Filter: Search and filter query parameters
:type Filter: str
:param Limit: Maximum number of results
:type Limit: int
:param Offset: Page offset
:type Offset: int
:param Order: Name of the column to sort by
:type Order: str
:param By: Sort order: desc - descending, asc - ascending
:type By: str
:param StartTime: Query start time
:type StartTime: str
:param EndTime: Query end time
:type EndTime: str
:param IsFilterResponseTime: Whether to filter by response time
:type IsFilterResponseTime: bool
"""
self.Filter = None
self.Limit = None
self.Offset = None
self.Order = None
self.By = None
self.StartTime = None
self.EndTime = None
self.IsFilterResponseTime = None
def _deserialize(self, params):
self.Filter = params.get("Filter")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
self.Order = params.get("Order")
self.By = params.get("By")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.IsFilterResponseTime = params.get("IsFilterResponseTime")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeSafetyEventListResponse(AbstractModel):
"""DescribeSafetyEventList返回参数结构体
"""
def __init__(self):
r"""
:param List: Event list
:type List: list of DataEvent
:param Total: Total number of events
:type Total: int
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.List = None
self.Total = None
self.RequestId = None
def _deserialize(self, params):
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = DataEvent()
obj._deserialize(item)
self.List.append(obj)
self.Total = params.get("Total")
self.RequestId = params.get("RequestId")
class DescribeSocAlertDetailsRequest(AbstractModel):
"""DescribeSocAlertDetails请求参数结构体
"""
def __init__(self):
r"""
:param AlertId: Alert ID
:type AlertId: str
:param AlertTimestamp: Alert time, taken from the Timestamp field
:type AlertTimestamp: str
"""
self.AlertId = None
self.AlertTimestamp = None
def _deserialize(self, params):
self.AlertId = params.get("AlertId")
self.AlertTimestamp = params.get("AlertTimestamp")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeSocAlertDetailsResponse(AbstractModel):
"""DescribeSocAlertDetails返回参数结构体
"""
def __init__(self):
r"""
:param Data: Detailed data returned
Note: This field may return null, indicating that no valid values can be obtained.
:type Data: :class:`tencentcloud.ssa.v20180608.models.AlertDetail`
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = AlertDetail()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class DescribeSocAlertListRequest(AbstractModel):
"""DescribeSocAlertList请求参数结构体
"""
def __init__(self):
r"""
:param PageSize: Page size
:type PageSize: int
:param PageIndex: Page number
:type PageIndex: int
:param Scenes: Business scenario; see ScenesType
:type Scenes: int
:param Filter: Query parameters
:type Filter: list of QueryFilter
:param Sorter: Sort parameters
:type Sorter: list of QuerySort
"""
self.PageSize = None
self.PageIndex = None
self.Scenes = None
self.Filter = None
self.Sorter = None
def _deserialize(self, params):
self.PageSize = params.get("PageSize")
self.PageIndex = params.get("PageIndex")
self.Scenes = params.get("Scenes")
if params.get("Filter") is not None:
self.Filter = []
for item in params.get("Filter"):
obj = QueryFilter()
obj._deserialize(item)
self.Filter.append(obj)
if params.get("Sorter") is not None:
self.Sorter = []
for item in params.get("Sorter"):
obj = QuerySort()
obj._deserialize(item)
self.Sorter.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeSocAlertListResponse(AbstractModel):
"""DescribeSocAlertList返回参数结构体
"""
def __init__(self):
r"""
:param Data: Business data
:type Data: :class:`tencentcloud.ssa.v20180608.models.AlertListData`
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = AlertListData()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class DescribeSocCspmComplianceRequest(AbstractModel):
"""DescribeSocCspmCompliance请求参数结构体
"""
class DescribeSocCspmComplianceResponse(AbstractModel):
"""DescribeSocCspmCompliance返回参数结构体
"""
def __init__(self):
r"""
:param Data: Data
Note: This field may return null, indicating that no valid values can be obtained.
:type Data: :class:`tencentcloud.ssa.v20180608.models.SocComplianceInfoResp`
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SocComplianceInfoResp()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class DescribeVulDetailRequest(AbstractModel):
"""DescribeVulDetail请求参数结构体
"""
def __init__(self):
r"""
:param UniqId: Unique identifier of the vulnerability
:type UniqId: str
:param Source: Source of the detail view request
:type Source: str
"""
self.UniqId = None
self.Source = None
def _deserialize(self, params):
self.UniqId = params.get("UniqId")
self.Source = params.get("Source")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fields are useless." % ",".join(memeber_set))
class DescribeVulDetailResponse(AbstractModel):
"""DescribeVulDetail返回参数结构体
"""
def __init__(self):
r"""
:param VulType: Vulnerability type
Note: This field may return null, indicating that no valid values can be obtained.
:type VulType: int
:param SubVulType: Vulnerability subtype
Note: This field may return null, indicating that no valid values can be obtained.
:type SubVulType: str
:param CvssScore: CVSS score
Note: This field may return null, indicating that no valid values can be obtained.
:type CvssScore: str
:param Cvss: CVSS value
Note: This field may return null, indicating that no valid values can be obtained.
:type Cvss: str
:param Cve: CVE number
Note: This field may return null, indicating that no valid values can be obtained.
:type Cve: str
:param Cnvd: CNVD number
Note: This field may return null, indicating that no valid values can be obtained.
:type Cnvd: str
:param Cnnvd: CNNVD number
Note: This field may return null, indicating that no valid values can be obtained.
:type Cnnvd: str
:param Desc: Description
Note: This field may return null, indicating that no valid values can be obtained.
:type Desc: str
:param Reference: Reference
Note: This field may return null, indicating that no valid values can be obtained.
:type Reference: str
:param Repair: Remediation advice
Note: This field may return null, indicating that no valid values can be obtained.
:type Repair: str
:param ReleaseTime: Disclosure time
Note: This field may return null, indicating that no valid values can be obtained.
:type ReleaseTime: str
:param UpdateTime: Update time
Note: This field may return null, indicating that no valid values can be obtained.
:type UpdateTime: str
:param Name: Vulnerability name
Note: This field may return null, indicating that no valid values can be obtained.
:type Name: str
:param Level: Severity level
Note: This field may return null, indicating that no valid values can be obtained.
:type Level: int
:param Status: Status
Note: This field may return null, indicating that no valid values can be obtained.
:type Status: int
:param ImpactAsset: Unique identifier of the affected asset
Note: This field may return null, indicating that no valid values can be obtained.
:type ImpactAsset: str
:param ImpactAssetName: Name of the affected asset
Note: This field may return null, indicating that no valid values can be obtained.
:type ImpactAssetName: str
:param IsAssetDeleted: Whether the affected asset has been deleted
Note: This field may return null, indicating that no valid values can be obtained.
:type IsAssetDeleted: bool
:param Source: Vulnerability source
Note: This field may return null, indicating that no valid values can be obtained.
:type Source: str
:param VulUrl: Vulnerability URL
Note: This field may return null, indicating that no valid values can be obtained.
:type VulUrl: str
:param SsaAssetCategory: Asset ownership category
Note: This field may return null, indicating that no valid values can be obtained.
:type SsaAssetCategory: int
:param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.VulType = None
self.SubVulType = None
self.CvssScore = None
self.Cvss = None
self.Cve = None
self.Cnvd = None
self.Cnnvd = None
self.Desc = None
self.Reference = None
self.Repair = None
self.ReleaseTime = None
self.UpdateTime = None
self.Name = None
self.Level = None
self.Status = None
self.ImpactAsset = None
self.ImpactAssetName = None
self.IsAssetDeleted = None
self.Source = None
self.VulUrl = None
self.SsaAssetCategory = None
self.RequestId = None
def _deserialize(self, params):
self.VulType = params.get("VulType")
self.SubVulType = params.get("SubVulType")
self.CvssScore = params.get("CvssScore")
self.Cvss = params.get("Cvss")
self.Cve = params.get("Cve")
self.Cnvd = params.get("Cnvd")
self.Cnnvd = params.get("Cnnvd")
self.Desc = params.get("Desc")
self.Reference = params.get("Reference")
self.Repair = params.get("Repair")
self.ReleaseTime = params.get("ReleaseTime")
self.UpdateTime = params.get("UpdateTime")
self.Name = params.get("Name")
self.Level = params.get("Level")
self.Status = params.get("Status")
self.ImpactAsset = params.get("ImpactAsset")
self.ImpactAssetName = params.get("ImpactAssetName")
self.IsAssetDeleted = params.get("IsAssetDeleted")
self.Source = params.get("Source")
self.VulUrl = params.get("VulUrl")
self.SsaAssetCategory = params.get("SsaAssetCategory")
self.RequestId = params.get("RequestId")
class DescribeVulListRequest(AbstractModel):
"""DescribeVulList请求参数结构体
"""
def __init__(self):
r"""
:param Params: Query filter parameters
:type Params: str
"""
self.Params = None
def _deserialize(self, params):
self.Params = params.get("Params")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeVulListResponse(AbstractModel):
"""DescribeVulList返回参数结构体
"""
def __init__(self):
r"""
:param Data: Vulnerability list
:type Data: :class:`tencentcloud.ssa.v20180608.models.VulList`
:param RequestId: Unique request ID, returned with every request. The RequestId is required for locating a problem.
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = VulList()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class Filter(AbstractModel):
"""描述键值对过滤器,用于条件过滤查询。例如过滤ID、名称、状态等
若存在多个Filter时,Filter间的关系为逻辑与(AND)关系。
若同一个Filter存在多个Values,同一Filter下Values间的关系为逻辑或(OR)关系。
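Example (illustrative values only): the two Filters below are combined with AND, and the
two Values of the first Filter are combined with OR:
    [{"Name": "Status", "Values": ["RUNNING", "STOPPED"]},
     {"Name": "Zone", "Values": ["ap-guangzhou-1"], "ExactMatch": True}]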
"""
def __init__(self):
r"""
:param Name: Name of the filter key.
:type Name: str
:param Values: One or more filter values.
:type Values: list of str
:param ExactMatch: Whether exact matching is required
:type ExactMatch: bool
"""
self.Name = None
self.Values = None
self.ExactMatch = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Values = params.get("Values")
self.ExactMatch = params.get("ExactMatch")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class QueryFilter(AbstractModel):
"""查询参数
"""
def __init__(self):
r"""
:param FilterKey: Field to query
:type FilterKey: str
:param FilterValue: Value to query
:type FilterValue: str
:param FilterOperatorType: Match type; see the pb definition for the enum values
:type FilterOperatorType: int
"""
self.FilterKey = None
self.FilterValue = None
self.FilterOperatorType = None
def _deserialize(self, params):
self.FilterKey = params.get("FilterKey")
self.FilterValue = params.get("FilterValue")
self.FilterOperatorType = params.get("FilterOperatorType")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ResolverEndpoint(object):
"""
An OCI DNS resolver endpoint.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
"""
#: A constant which can be used with the endpoint_type property of a ResolverEndpoint.
#: This constant has a value of "VNIC"
ENDPOINT_TYPE_VNIC = "VNIC"
#: A constant which can be used with the lifecycle_state property of a ResolverEndpoint.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a ResolverEndpoint.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a ResolverEndpoint.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a ResolverEndpoint.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a ResolverEndpoint.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a ResolverEndpoint.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
def __init__(self, **kwargs):
"""
Initializes a new ResolverEndpoint object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operation then you should favor using a subclass over the base class:
* :class:`~oci.dns.models.ResolverVnicEndpoint`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this ResolverEndpoint.
:type name: str
:param endpoint_type:
The value to assign to the endpoint_type property of this ResolverEndpoint.
Allowed values for this property are: "VNIC", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type endpoint_type: str
:param forwarding_address:
The value to assign to the forwarding_address property of this ResolverEndpoint.
:type forwarding_address: str
:param is_forwarding:
The value to assign to the is_forwarding property of this ResolverEndpoint.
:type is_forwarding: bool
:param is_listening:
The value to assign to the is_listening property of this ResolverEndpoint.
:type is_listening: bool
:param listening_address:
The value to assign to the listening_address property of this ResolverEndpoint.
:type listening_address: str
:param compartment_id:
The value to assign to the compartment_id property of this ResolverEndpoint.
:type compartment_id: str
:param time_created:
The value to assign to the time_created property of this ResolverEndpoint.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this ResolverEndpoint.
:type time_updated: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this ResolverEndpoint.
Allowed values for this property are: "ACTIVE", "CREATING", "DELETED", "DELETING", "FAILED", "UPDATING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param _self:
The value to assign to the _self property of this ResolverEndpoint.
:type _self: str
"""
self.swagger_types = {
'name': 'str',
'endpoint_type': 'str',
'forwarding_address': 'str',
'is_forwarding': 'bool',
'is_listening': 'bool',
'listening_address': 'str',
'compartment_id': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'lifecycle_state': 'str',
'_self': 'str'
}
self.attribute_map = {
'name': 'name',
'endpoint_type': 'endpointType',
'forwarding_address': 'forwardingAddress',
'is_forwarding': 'isForwarding',
'is_listening': 'isListening',
'listening_address': 'listeningAddress',
'compartment_id': 'compartmentId',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'lifecycle_state': 'lifecycleState',
'_self': 'self'
}
self._name = None
self._endpoint_type = None
self._forwarding_address = None
self._is_forwarding = None
self._is_listening = None
self._listening_address = None
self._compartment_id = None
self._time_created = None
self._time_updated = None
self._lifecycle_state = None
self.__self = None
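# Illustrative construction (not taken from the SDK docs; the compartment OCID below is a
# made-up placeholder). Keyword names map to the camelCase API fields via attribute_map:
#
#     endpoint = ResolverEndpoint(
#         name="forwarder-1",
#         endpoint_type="VNIC",
#         is_forwarding=True,
#         is_listening=False,
#         forwarding_address="10.0.0.5",
#         compartment_id="ocid1.compartment.oc1..exampleuniqueID",
#         lifecycle_state="ACTIVE",
#     )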
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['endpointType']
if type == 'VNIC':
return 'ResolverVnicEndpoint'
else:
return 'ResolverEndpoint'
@property
def name(self):
"""
**[Required]** Gets the name of this ResolverEndpoint.
The name of the resolver endpoint. Must be unique, case-insensitive, within the resolver.
:return: The name of this ResolverEndpoint.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ResolverEndpoint.
The name of the resolver endpoint. Must be unique, case-insensitive, within the resolver.
:param name: The name of this ResolverEndpoint.
:type: str
"""
self._name = name
@property
def endpoint_type(self):
"""
Gets the endpoint_type of this ResolverEndpoint.
The type of resolver endpoint. VNIC is currently the only supported type.
Allowed values for this property are: "VNIC", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The endpoint_type of this ResolverEndpoint.
:rtype: str
"""
return self._endpoint_type
@endpoint_type.setter
def endpoint_type(self, endpoint_type):
"""
Sets the endpoint_type of this ResolverEndpoint.
The type of resolver endpoint. VNIC is currently the only supported type.
:param endpoint_type: The endpoint_type of this ResolverEndpoint.
:type: str
"""
allowed_values = ["VNIC"]
if not value_allowed_none_or_none_sentinel(endpoint_type, allowed_values):
endpoint_type = 'UNKNOWN_ENUM_VALUE'
self._endpoint_type = endpoint_type
@property
def forwarding_address(self):
"""
Gets the forwarding_address of this ResolverEndpoint.
An IP address from which forwarded queries may be sent. For VNIC endpoints, this IP address must be part
of the subnet and will be assigned by the system if unspecified when isForwarding is true.
:return: The forwarding_address of this ResolverEndpoint.
:rtype: str
"""
return self._forwarding_address
@forwarding_address.setter
def forwarding_address(self, forwarding_address):
"""
Sets the forwarding_address of this ResolverEndpoint.
An IP address from which forwarded queries may be sent. For VNIC endpoints, this IP address must be part
of the subnet and will be assigned by the system if unspecified when isForwarding is true.
:param forwarding_address: The forwarding_address of this ResolverEndpoint.
:type: str
"""
self._forwarding_address = forwarding_address
@property
def is_forwarding(self):
"""
**[Required]** Gets the is_forwarding of this ResolverEndpoint.
A Boolean flag indicating whether or not the resolver endpoint is for forwarding.
:return: The is_forwarding of this ResolverEndpoint.
:rtype: bool
"""
return self._is_forwarding
@is_forwarding.setter
def is_forwarding(self, is_forwarding):
"""
Sets the is_forwarding of this ResolverEndpoint.
A Boolean flag indicating whether or not the resolver endpoint is for forwarding.
:param is_forwarding: The is_forwarding of this ResolverEndpoint.
:type: bool
"""
self._is_forwarding = is_forwarding
@property
def is_listening(self):
"""
**[Required]** Gets the is_listening of this ResolverEndpoint.
A Boolean flag indicating whether or not the resolver endpoint is for listening.
:return: The is_listening of this ResolverEndpoint.
:rtype: bool
"""
return self._is_listening
@is_listening.setter
def is_listening(self, is_listening):
"""
Sets the is_listening of this ResolverEndpoint.
A Boolean flag indicating whether or not the resolver endpoint is for listening.
:param is_listening: The is_listening of this ResolverEndpoint.
:type: bool
"""
self._is_listening = is_listening
@property
def listening_address(self):
"""
Gets the listening_address of this ResolverEndpoint.
An IP address to listen to queries on. For VNIC endpoints this IP address must be part of the
subnet and will be assigned by the system if unspecified when isListening is true.
:return: The listening_address of this ResolverEndpoint.
:rtype: str
"""
return self._listening_address
@listening_address.setter
def listening_address(self, listening_address):
"""
Sets the listening_address of this ResolverEndpoint.
An IP address to listen to queries on. For VNIC endpoints this IP address must be part of the
subnet and will be assigned by the system if unspecified when isListening is true.
:param listening_address: The listening_address of this ResolverEndpoint.
:type: str
"""
self._listening_address = listening_address
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this ResolverEndpoint.
The OCID of the owning compartment. This will match the resolver that the resolver endpoint is under
and will be updated if the resolver's compartment is changed.
:return: The compartment_id of this ResolverEndpoint.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ResolverEndpoint.
The OCID of the owning compartment. This will match the resolver that the resolver endpoint is under
| |
1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv.append(flops)
list_linear = []
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn = []
def bn_hook(self, input, output):
list_bn.append(input[0].nelement())
list_relu = []
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling = []
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d):
# net.register_forward_hook(save_hook(net.__class__.__name__))
# net.register_forward_hook(simple_hook)
# net.register_forward_hook(simple_hook2)
net.register_forward_hook(conv_hook)
if isinstance(net, torch.nn.Linear):
net.register_forward_hook(linear_hook)
if isinstance(net, torch.nn.BatchNorm2d):
net.register_forward_hook(bn_hook)
if isinstance(net, torch.nn.ReLU):
net.register_forward_hook(relu_hook)
if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
net.register_forward_hook(pooling_hook)
return
for c in childrens:
foo(c)
foo(net)
fn, c, h, w = input_size
if input_num == 1:
input = Variable(torch.rand(fn, c, h, w).unsqueeze(0).float(), requires_grad=True)
if cuda:
input = input.cuda()
out = net(input)[0]
else:
input = []
for i in range(input_num):
input.append(Variable(torch.rand(c, h, w).unsqueeze(0), requires_grad=True))
if cuda:
input = [x.cuda() for x in input]
if input_num == 2:
out = net(input[0], input[1])[0]
elif input_num == 3:
out = net(input[0], input[1], input[2])[0]
else:
raise Exception("add {} input support".format(input_num))
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))
print(' + Number of FLOPs: %.4fG' % (total_flops / 1024.0 / 1024 / 1024))
def get_network_description(network):
"""Get the string and total parameters of the network"""
# pdb.set_trace()
# network = network.module
return str(network), sum(map(lambda x: x.numel(), network.parameters()))
def flipx4_forward(model, inp):
"""Flip testing with X4 self ensemble, i.e., normal, flip H, flip W, flip H and W
Args:
model (PyTorch model)
inp (Tensor): inputs defined by the model
Returns:
output (Tensor): outputs of the model. float, in CPU
"""
# normal
output_f = single_forward(model, inp)
# flip W
output = single_forward(model, torch.flip(inp, (-1, )))
output_f = output_f + torch.flip(output, (-1, ))
# flip H
output = single_forward(model, torch.flip(inp, (-2, )))
output_f = output_f + torch.flip(output, (-2, ))
# flip both H and W
output = single_forward(model, torch.flip(inp, (-2, -1)))
output_f = output_f + torch.flip(output, (-2, -1))
return output_f / 4
####################
# metric
####################
def calculate_psnr(img1, img2):
# img1 and img2 have range [0, 255]
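# PSNR = 20 * log10(MAX_I / sqrt(MSE)) with MAX_I = 255 for 8-bit images;
# identical images give MSE == 0, which is returned as infinity below.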
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
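# Standard single-scale SSIM (Wang et al., 2004):
# SSIM(x, y) = ((2*mu_x*mu_y + C1) * (2*sigma_xy + C2)) / ((mu_x^2 + mu_y^2 + C1) * (sigma_x^2 + sigma_y^2 + C2)),
# estimated locally with an 11x11 Gaussian window (sigma = 1.5) and averaged over the valid region.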
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calculate_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
class ProgressBar(object):
'''A progress bar which can print the progress
modified from https://github.com/hellock/cvbase/blob/master/cvbase/progress.py
'''
def __init__(self, task_num=0, bar_width=50, start=True):
self.task_num = task_num
max_bar_width = self._get_max_bar_width()
self.bar_width = (bar_width if bar_width <= max_bar_width else max_bar_width)
self.completed = 0
if start:
self.start()
def _get_max_bar_width(self):
terminal_width, _ = get_terminal_size()
max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50)
if max_bar_width < 10:
print('terminal width is too small ({}), please consider widening the terminal for better '
'progressbar visualization'.format(terminal_width))
max_bar_width = 10
return max_bar_width
def start(self):
if self.task_num > 0:
sys.stdout.write('[{}] 0/{}, elapsed: 0s, ETA:\n{}\n'.format(
' ' * self.bar_width, self.task_num, 'Start...'))
else:
sys.stdout.write('completed: 0, elapsed: 0s')
sys.stdout.flush()
self.start_time = time.time()
def update(self, msg='In progress...'):
self.completed += 1
elapsed = time.time() - self.start_time
fps = self.completed / elapsed
if self.task_num > 0:
percentage = self.completed / float(self.task_num)
eta = int(elapsed * (1 - percentage) / percentage + 0.5)
mark_width = int(self.bar_width * percentage)
bar_chars = '>' * mark_width + '-' * (self.bar_width - mark_width)
sys.stdout.write('\033[2F') # cursor up 2 lines
sys.stdout.write('\033[J') # clean the output (remove extra chars since last display)
sys.stdout.write('[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s\n{}\n'.format(
bar_chars, self.completed, self.task_num, fps, int(elapsed + 0.5), eta, msg))
else:
sys.stdout.write('completed: {}, elapsed: {}s, {:.1f} tasks/s'.format(
self.completed, int(elapsed + 0.5), fps))
sys.stdout.flush()
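# Illustrative usage of ProgressBar (the job list and run() below are hypothetical):
#     pbar = ProgressBar(task_num=len(jobs))
#     for job in jobs:
#         run(job)
#         pbar.update('finished {}'.format(job))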
def takeFirst(elem):
return elem[0]
def cal_lr_fea(fea, DW_model):
B, N, C, H, W = fea.size()
fea = fea.view(-1, C, H, W)
LR_fea = DW_model(fea)
LR_fea = LR_fea.view(B, N, LR_fea.shape[1], LR_fea.shape[2], LR_fea.shape[3])
return LR_fea
def search_patch_NCC_2d_pymaid(image_patch, nmpa_patch, imgs_in_pad, img_in_nmap_pad, \
start_x, start_y, small_scale, search_region):
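# Rough intent, inferred from the code below (not from upstream documentation):
# for every non-center frame, scan candidate patch positions around (start_x, start_y)
# on a downsampled copy of the inputs, score each candidate against the center patch
# with normalized cross-correlation (NCC), refine around the best coarse hit with step 1,
# and finally crop the matching full-resolution patch (and its noise map) if its NCC
# score beats the threshold.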
B, N, C, PsizeH, PsizeW = image_patch.shape
_, _, _, H, W = imgs_in_pad.shape
center_idx = N//2
ncc_func = NormalizedCrossCorrelation(return_map=False,reduction='mean')
## recreate output
image_patch_new = image_patch.clone()
nmpa_patch_new = nmpa_patch.clone()
## downsampling the image patches
# scale = 8
scale = small_scale
image_patch_small = torch.reshape(image_patch, (B*N,C,PsizeH,PsizeW))
image_patch_small = F.interpolate(image_patch_small, scale_factor=1/scale, mode='bilinear', align_corners=False)
imgs_in_pad_small = torch.reshape(imgs_in_pad, (B*N,C,H,W))
imgs_in_pad_small = F.interpolate(imgs_in_pad_small, scale_factor=1/scale, mode='bilinear', align_corners=False)
_,_,newPsizeH,newPsizeW = image_patch_small.shape
_,_,newH,newW = imgs_in_pad_small.shape
image_patch_small = torch.reshape(image_patch_small,(B, N, C, newPsizeH, newPsizeW))
imgs_in_pad_small = torch.reshape(imgs_in_pad_small,(B, N, C, newH, newW))
#search_region = int(min(newH, newW)/10)
start_x = int(start_x/scale)
start_y = int(start_y/scale)
center_frame = image_patch_small[:,center_idx,:,:,:].clone()
thr = -5
# cadicate_idx_all = []
for batch in range(B):
start_x_current = start_x
start_y_current = start_y
# backward to the first frame
for fr in range(center_idx-1,-1,-1):
# print(fr)
if fr != center_idx:
step = 2
cadicate_idx = cal_candidate_idx(search_region, step, start_x_current, start_y_current, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
new_start_x = int(cadicate_idx[0][1])
new_start_y = int(cadicate_idx[0][2])
search_region_small = step
# if cadicate_idx[0][0] > 0.6:
cadicate_idx = cal_candidate_idx(search_region_small, 1, new_start_x, new_start_y, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
# pdb.set_trace()
# cadicate_idx_all.append(cadicate_idx)
if len(cadicate_idx)>0:
if cadicate_idx[0][0] > thr:
nearest_x = int(cadicate_idx[0][1]*scale)
nearest_y = int(cadicate_idx[0][2]*scale)
start_x_current = int(cadicate_idx[0][1])
start_y_current = int(cadicate_idx[0][2])
else:
nearest_x = int(start_x*scale)
nearest_y = int(start_y*scale)
image_patch_new[batch,fr,...] = \
imgs_in_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
nmpa_patch_new[batch,fr,...] = \
img_in_nmap_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
# forward to the last frame
start_x_current = start_x
start_y_current = start_y
for fr in range(center_idx+1,N):
# print(fr)
if fr != center_idx:
step = 2
cadicate_idx = cal_candidate_idx(search_region, step, start_x_current, start_y_current, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
new_start_x = int(cadicate_idx[0][1])
new_start_y = int(cadicate_idx[0][2])
search_region_small = step
# if cadicate_idx[0][0] > 0.6:
cadicate_idx = cal_candidate_idx(search_region_small, 1, new_start_x, new_start_y, batch, \
fr, newH, newW, imgs_in_pad_small, center_frame, newPsizeH, ncc_func)
# pdb.set_trace()
# cadicate_idx_all.append(cadicate_idx)
if len(cadicate_idx)>0:
if cadicate_idx[0][0] > thr:
nearest_x = int(cadicate_idx[0][1]*scale)
nearest_y = int(cadicate_idx[0][2]*scale)
start_x_current = int(cadicate_idx[0][1])
start_y_current = int(cadicate_idx[0][2])
else:
nearest_x = int(start_x*scale)
nearest_y = int(start_y*scale)
image_patch_new[batch,fr,...] = \
imgs_in_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
nmpa_patch_new[batch,fr,...] = \
img_in_nmap_pad[batch,fr,:,nearest_x:nearest_x+PsizeH,nearest_y:nearest_y+PsizeW].clone()
# pdb.set_trace()
return image_patch_new, nmpa_patch_new
def search_patch_NCC_2d_pymaid_wDSNet(image_patch, nmpa_patch, imgs_in_pad, img_in_nmap_pad, \
lr_features,\
start_x, start_y, small_scale, search_region):
B, N, C, PsizeH, PsizeW = image_patch.shape
_, _, _, H, W = imgs_in_pad.shape
center_idx = N//2
ncc_func = NormalizedCrossCorrelation(return_map=False,reduction='mean')
#----- recreate output -----
image_patch_new = image_patch.clone()
nmpa_patch_new = nmpa_patch.clone()
#----- select feature patch -----
scale = small_scale
start_x = int(start_x/scale)
start_y = int(start_y/scale)
center_feature = lr_features[:, center_idx, :, \
start_x:start_x+PsizeH//scale, \
start_y:start_y+PsizeW//scale].clone()
## downsampling the image patches
_,_,newPsizeH,newPsizeW = center_feature.shape
_,_,_,newH,newW = lr_features.shape
thr = -5
cadicate_idx_all = []
for batch in range(B):
start_x_current = start_x
start_y_current = start_y
# backward to the first frame
for fr in range(center_idx-1,-1,-1):
if fr != center_idx:
step = 2
cadicate_idx = cal_candidate_idx_wDSNet(search_region, step, start_x_current, start_y_current, batch, \
fr, newH, newW, lr_features, center_feature, newPsizeH, ncc_func)
new_start_x = int(cadicate_idx[0][1])
new_start_y | |
# Repository: FrankMillman/AccInABox
import db.objects
from db.connection import db_constants as dbc
import rep.finrpt as rep_finrpt
import rep.tranrpt
from common import AibError
async def check_subledg(caller, params):
# called from gl_per_close process - check that all sub-ledgers for this period have been closed
context = caller.manager.process.root.context
# print(params)
# return_params = {'all_closed': False}
# return return_params
current_period = params['current_period']
module_ids = ['cb', 'ar', 'ap', 'in']
sql = []
params = []
sql.append('SELECT module_id, ledger_id FROM (')
for module_id in module_ids:
sql.append(f'SELECT {dbc.param_style} AS module_id, b.ledger_id')
params.append(module_id)
sql.append(f'FROM {caller.company}.{module_id}_ledger_periods a')
sql.append(f'JOIN {caller.company}.{module_id}_ledger_params b ON b.row_id = a.ledger_row_id')
sql.append(f'WHERE a.period_row_id = {dbc.param_style}')
params.append(current_period)
sql.append(f'AND a.deleted_id = {dbc.param_style}')
params.append(0)
sql.append(f'AND a.state != {dbc.param_style}')
params.append('closed')
if module_id != module_ids[-1]:
sql.append('UNION ALL')
sql.append(') dum')
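# The assembled statement has this shape (illustrative; one inner SELECT per module):
#   SELECT module_id, ledger_id FROM (
#       SELECT ? AS module_id, b.ledger_id
#       FROM <company>.cb_ledger_periods a
#       JOIN <company>.cb_ledger_params b ON b.row_id = a.ledger_row_id
#       WHERE a.period_row_id = ? AND a.deleted_id = ? AND a.state != ?
#       UNION ALL
#       ...
#   ) dum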
async with context.db_session.get_connection() as db_mem_conn:
conn = db_mem_conn.db
cur = await conn.exec_sql(' '.join(sql), params)
async for module_id, ledger_id in cur:
exists = True
break
else:
exists = False
module_id = ledger_id = None
return_params = {'all_closed': not exists, 'module_id': module_id, 'ledger_id': ledger_id}
print('check all closed:', return_params)
return return_params
async def set_per_closing_flag(caller, params):
print('set_closing_flag')
context = caller.manager.process.root.context
if 'ledg_per' not in context.data_objects:
context.data_objects['ledg_per'] = await db.objects.get_db_object(
context, 'gl_ledger_periods')
ledg_per = context.data_objects['ledg_per']
await ledg_per.setval('period_row_id', params['current_period'])
if await ledg_per.getval('state') not in ('current', 'open'):
raise AibError(head='Closing flag', body='Period is not open')
await ledg_per.setval('state', 'closing')
await ledg_per.save()
async def posted_check(caller, params):
context = caller.manager.process.root.context
async with context.db_session.get_connection() as db_mem_conn:
conn = db_mem_conn.db
check_date = params['check_date']
where = []
where.append(['WHERE', '', 'tran_date', '<=', check_date, ''])
where.append(['AND', '', 'deleted_id', '=', 0, ''])
where.append(['AND', '', 'posted', '=', False, ''])
params = []
sql = 'SELECT CASE WHEN EXISTS ('
table_names = [
'gl_tran_jnl',
]
for table_name in table_names:
db_table = await db.objects.get_db_table(context, caller.company, table_name)
s, p = await conn.build_select(context, db_table, ['row_id'], where=where, order=[])
sql += s
params += p
if table_name != table_names[-1]:
sql += ' UNION ALL '
sql += ') THEN $True ELSE $False END'
cur = await conn.exec_sql(sql, params)
exists, = await cur.__anext__()
return_params = {'all_posted': not bool(exists)}
print('check all posted:', return_params)
return return_params
async def set_per_closed_flag(caller, params):
print('set_per_closed_flag')
context = caller.manager.process.root.context
if 'ledg_per' not in context.data_objects:
context.data_objects['ledg_per'] = await db.objects.get_db_object(
context, 'gl_ledger_periods')
ledg_per = context.data_objects['ledg_per']
await ledg_per.setval('period_row_id', params['period_to_close'])
if await ledg_per.getval('state') != 'closing':
raise AibError(head='Closing flag', body='Closing flag not set')
await ledg_per.setval('state', 'closed')
await ledg_per.save()
if params['period_to_close'] == params['current_period']:
# set next month state to 'current'
await ledg_per.init()
await ledg_per.setval('period_row_id', params['current_period'] + 1)
await ledg_per.setval('state', 'current')
await ledg_per.save()
# set following month state to 'open'
await ledg_per.init()
await ledg_per.setval('period_row_id', params['current_period'] + 2)
await ledg_per.setval('state', 'open')
await ledg_per.save()
async def notify_manager(caller, params):
print('notify', params)
async def setup_ctrl(db_obj, xml):
# called from after_insert in various ledger_params
gl_codes = await db.objects.get_db_object(db_obj.context, 'gl_codes')
gl_code_id = await db_obj.getval('gl_code_id')
await gl_codes.setval('row_id', gl_code_id)
if await gl_codes.getval('ctrl_mod_row_id') is not None:
raise AibError(head='Control Account',
body=f"'{await gl_codes.getval('gl_code')}' is already a control a/c")
await gl_codes.setval('ctrl_mod_row_id', db_obj.db_table.module_row_id)
await gl_codes.setval('ctrl_ledg_row_id', await db_obj.getval('row_id'))
await gl_codes.setval('ctrl_acc_type', 'bal')
await gl_codes.save()
if db_obj.table_name == 'nsls_ledger_params':
if await db_obj.getval('allow_eff_date') is True:
uea_gl_code_id = await db_obj.getval('uea_gl_code_id')
await gl_codes.init()
await gl_codes.setval('row_id', uea_gl_code_id)
if await gl_codes.getval('ctrl_mod_row_id') is not None:
raise AibError(head='Control Account',
body=f"'{await gl_codes.getval('gl_code')}' is already a control a/c")
await gl_codes.setval('ctrl_mod_row_id', db_obj.db_table.module_row_id)
await gl_codes.setval('ctrl_ledg_row_id', await db_obj.getval('row_id'))
await gl_codes.setval('ctrl_acc_type', 'uea')
await gl_codes.save()
elif db_obj.table_name == 'npch_ledger_params':
if await db_obj.getval('allow_eff_date') is True:
uex_gl_code_id = await db_obj.getval('uex_gl_code_id')
await gl_codes.init()
await gl_codes.setval('row_id', uex_gl_code_id)
if await gl_codes.getval('ctrl_mod_row_id') is not None:
raise AibError(head='Control Account',
body=f"'{await gl_codes.getval('gl_code')}' is already a control a/c")
await gl_codes.setval('ctrl_mod_row_id', db_obj.db_table.module_row_id)
await gl_codes.setval('ctrl_ledg_row_id', await db_obj.getval('row_id'))
await gl_codes.setval('ctrl_acc_type', 'uex')
await gl_codes.save()
async def check_gl_group_link(db_obj, fld, value):
# called as validation from col_checks in nsls/npch_ledger_params.link_to_gl_grp
# number of 'drilldown' levels must match gl_groups, for drilldown reporting
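# e.g. (illustrative) if the sub-ledger group tree is root -> ledg -> grp -> sub,
# there are two drilldown levels below 'ledg', so the gl_group chosen as the link
# must also have exactly two group levels below it.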
if value is None: # link is optional - if None, nothing to check
return True
# get 'number of levels' in nsls_npch_groups, skip 'root' and 'ledg'
mod_row_id = db_obj.db_table.module_row_id
module_id = await db.cache.get_mod_id(db_obj.company, mod_row_id)
grp_table = await db.objects.get_db_table(db_obj.context, db_obj.company, f'{module_id}_groups')
tree_params = grp_table.tree_params
group, col_names, levels = tree_params
type_colname, level_types, sublevel_type = levels
level_types = level_types[None] + level_types[await db_obj.getval('row_id')]
no_grp_levels = len(level_types) - 2 # skip 'root' and 'ledg'
# get 'number of levels' in gl_groups, skip 'root' and levels up to and including this link
gl_grp = fld.foreign_key['tgt_field'].db_obj
gl_tree_params = gl_grp.db_table.tree_params
group, col_names, levels = gl_tree_params
type_colname, level_types, sublevel_type = levels
gl_type = await gl_grp.getval('group_type')
gl_levels = [x[0] for x in level_types]
gl_level_pos = gl_levels.index(gl_type)
no_gl_levels = len(levels) - 1 - gl_level_pos # levels below link_point - skip 'root' and level_pos
if no_grp_levels != no_gl_levels:
raise AibError(head='Link', body='Number of levels does not match gl')
return True
async def setup_gl_group_link(db_obj, xml):
# called from after_update in nsls/npch_ledger_params
"""
This only applies if gl_integration has been set up.
It assumes that gl_groups has been set up, with fixed levels.
It also assumes that {db_obj}_groups has been set up, with fixed levels. [It doesn't, but it should]
gl_groups top level is always 'root'.
{db_obj}_groups top level is 'root', but there are separate sub_trees for each ledger,
so each sub_tree's top level is 'ledg'.
The group link creates a link from the 'ledg' group in {db_obj}_group to a gl_group.
# There is no requirement that they have to be at the same level.
# But there should be a validation that there are the same number of levels *below*
# the link level, so that drill-downs always have a corresponding level to drill down to.
# 1. This validation has not been implemented yet.
2. Changes to gl_groups levels or {db_obj}_groups levels will have implications - not thought through.
3. Theoretically there is no requirement that they have to be at the same level.
But rep_finrpt is written on that assumption, so will have to be changed to handle alternatives.
Specifically, the JOINS from 'totals' to 'root' match those from 'gl'.
"""
fld = await db_obj.getfld('link_to_gl_grp')
if fld._value == fld._orig:
return # no change
gl_groups = await db.objects.get_db_object(db_obj.context, 'gl_groups')
if fld._orig is not None: # remove old link
await gl_groups.init()
await gl_groups.setval('row_id', fld._orig)
await gl_groups.setval('link_to_subledg', None)
await gl_groups.save()
if fld._value is not None: # add new link
await gl_groups.init()
await gl_groups.setval('row_id', fld._value)
if await gl_groups.getval('link_to_subledg') is not None:
raise AibError(head='Link to sub-ledger',
body=f"'{await gl_groups.getval('gl_group')}' already has a sub-ledger link")
link = [db_obj.db_table.module_row_id, await db_obj.getval('row_id')]
await gl_groups.setval('link_to_subledg', link)
await gl_groups.save()
async def setup_finrpt_vars(caller, xml):
context = caller.context
var = context.data_objects['var']
finrpt_defn = context.data_objects['finrpt_defn']
group_params = await finrpt_defn.getval('group_params')
for grp in group_params:
if grp[0] == 'date':
date_type = grp[1][0]
break
else: # 'date' not in group_params - must be 'single date'
date_type = 'single'
report_type = await finrpt_defn.getval('report_type')
if date_type == 'single':
date_param = 'balance_date' if report_type == 'as_at2' else 'date_range'
elif date_type == 'fin_yr':
date_param = 'fin_yr'
elif date_type == 'date_range':
date_param = 'balance_date' if report_type == 'as_at2' else 'date_range'
elif date_type == 'last_n_per':
date_param = 'start_per'
elif date_type == 'last_n_days':
date_param = 'start_date'
await var.setval('date_param', date_param)
if await finrpt_defn.getval('allow_select_loc_fun'):
if 'loc' not in [x[0] for x in group_params]: # n/a if already grouped by location
if await finrpt_defn.getval('_param.location_row_id') is None: # n/a if only 1 location
await var.setval('select_location', True)
if 'fun' not in [x[0] for x in group_params]: # n/a if already grouped by function
if await finrpt_defn.getval('_param.function_row_id') is None: # n/a if only 1 function
await var.setval('select_function', True)
async def run_finrpt(caller, xml):
context = caller.context
var = context.data_objects['var']
date_param = await var.getval('date_param')
if date_param == 'balance_date':
date_var = context.data_objects['balance_date_vars']
date_params = (await date_var.getval('balance_date'),) * 2 # end_date = start_date
elif date_param == 'date_range':
date_var = context.data_objects['date_range_vars']
date_params = (await date_var.getval('start_date'), await date_var.getval('end_date'))
elif date_param == 'fin_yr':
date_params = await var.getval('year_no')
elif date_param == 'start_per':
date_params = await var.getval('period_no')
elif date_param == 'start_date':
date_var = context.data_objects['balance_date_vars']
date_params = await date_var.getval('balance_date')
finrpt_defn = caller.context.data_objects['finrpt_defn']
finrpt_data = await finrpt_defn.get_data()
finrpt_data['ledger_row_id'] = context.ledger_row_id
finrpt_data['date_params'] = date_params
finrpt_data['single_location'] = await var.getval('location_id') # None for all locations
finrpt_data['single_function'] = await var.getval('function_id') # None for all functions
finrpt = rep_finrpt.FinReport()
await finrpt._ainit_(caller.form, finrpt_data, caller.session)
async def finrpt_drilldown(caller, xml):
# retrieve the finrpt_data that was used to create the report
# it was | |
# File: pyramid_crud/forms.py
import wtforms_alchemy
import six
from wtforms.ext.csrf.form import SecureForm
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from .util import get_pks, meta_property
from sqlalchemy.orm.session import object_session
from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.inspection import inspect
from webob.multidict import MultiDict
import logging
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
log = logging.getLogger(__name__)
class _CoreModelMeta(wtforms_alchemy.ModelFormMeta):
"""
Metaclass for :class:`_CoreModelForm`. Assigns some class properties. Not
to be used directly.
"""
def __new__(meta, name, bases, attrs):
# Copy over docstrings from parents
def get_mro_classes(bases):
return (mro_cls for base in bases for mro_cls in base.mro()
if mro_cls != object)
if not('__doc__' in attrs and attrs['__doc__']):
for mro_cls in get_mro_classes(bases):
doc = mro_cls.__doc__
if doc:
attrs['__doc__'] = doc
break
for attr, attribute in attrs.items():
if not attribute.__doc__:
for mro_cls in (mro_cls for mro_cls in get_mro_classes(bases)
if hasattr(mro_cls, attr)):
doc = getattr(getattr(mro_cls, attr), '__doc__')
if doc:
try:
attribute.__doc__ = doc
except AttributeError:
continue
break
cls = super(_CoreModelMeta, meta).__new__(meta, name, bases, attrs)
cls._add_relationship_fields()
return cls
@meta_property
def title(cls):
"""See inline documentation for ModelForm"""
return inspect(cls.Meta.model).class_.__name__
@meta_property
def title_plural(cls):
"""See inline documentation for ModelForm"""
return cls.title + "s"
@meta_property
def name(cls):
"""See inline documentation for ModelForm"""
return inspect(cls.Meta.model).class_.__name__.lower()
@meta_property
def field_names(cls):
"""
A property on the class that returns a list of field names for the
associated form.
:return: A list of all names defined in the field in the same order as
they are defined on the form.
:rtype: list of str
"""
return [field.name for field in cls()]
@six.add_metaclass(_CoreModelMeta)
class _CoreModelForm(wtforms_alchemy.ModelForm):
"""
Base class for all complex form actions. This is used instead of the usual
form class. Not to be used directly.
"""
def __init__(self, formdata=None, obj=None, *args, **kw):
self.formdata = formdata
super(_CoreModelForm, self).__init__(formdata, obj, *args, **kw)
self._obj = obj
@property
def primary_keys(self):
"""
Get a list of pairs ``name, value`` of primary key names and their
values on the current object.
"""
if self._obj is None:
raise AttributeError("No object attached")
return [(pk, getattr(self._obj, pk, None))
for pk in get_pks(self.Meta.model)]
@property
def fieldsets(self):
"""See inline documentation for ModelForm"""
default_fields = [field.name for field in self
if field.name != 'csrf_token']
return [{'title': '', 'fields': default_fields}]
def get_fieldsets(self):
"""
Get a list of all configured fieldsets, setting defaults where they are
missing.
"""
result = []
for original in self.fieldsets:
fieldset = {
'title': original.get('title', ''),
'template': original.get('template', 'horizontal'),
'fields': [getattr(self, f) for f in original['fields']],
}
result.append(fieldset)
return result
@classmethod
def _add_relationship_fields(cls):
for rel in cls._find_relationships_for_query():
if rel.direction != MANYTOONE:
continue
if not hasattr(cls, 'get_dbsession'):
raise ValueError('You need to define a get_dbsession classmethod')
def query(rel=rel):
# bind the current relationship; a plain closure would late-bind to the last rel in the loop
session = cls.get_dbsession()
return session.query(rel.mapper.class_)
field = QuerySelectField(query_factory=query)
setattr(cls, rel.key, field)
@classmethod
def _find_relationships_for_query(cls):
if not cls.Meta.model:
return []
rels = inspect(cls.Meta.model).relationships
rels = [rel for rel in rels if rel.direction == MANYTOONE]
return rels
class CSRFForm(SecureForm):
"""
Base class from which new CSRF-protected forms are derived. Only use this
if you want to create a form without the extra model-functionality, i.e.
is normal form.
If you want to create a CSRF-protected model form use
:class:`CSRFModelForm`.
"""
def generate_csrf_token(self, csrf_context):
"""
Create a CSRF token from the given context (which is actually just a
:class:`pyramid.request.Request` instance). This is automatically
called during ``__init__``.
"""
self.request = csrf_context
return self.request.session.get_csrf_token()
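# Illustrative use inside a Pyramid view (the form class and view names are hypothetical):
#     form = MyForm(request.POST, csrf_context=request)
#     if request.method == 'POST' and form.validate():
#         ...  # the CSRF token was checked as part of validate()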
def validate(self):
"""
Validate the form and with it the CSRF token. Logs a warning with the
error message and the remote IP address in case of an invalid token.
"""
result = super(CSRFForm, self).validate()
if not result and self.csrf_token.errors:
log.warn("Invalid CSRF token with error(s) '%s' from IP address "
"'%s'."
% (", ".join(self.csrf_token.errors),
self.request.client_addr))
return result
class ModelMeta(_CoreModelMeta):
def __new__(meta, name, bases, attrs):
attrs.setdefault("inlines", [])
cls = super(ModelMeta, meta).__new__(meta, name, bases, attrs)
for inline in cls.inlines:
inline._parent = cls
return cls
@six.add_metaclass(ModelMeta)
class ModelForm(_CoreModelForm):
"""
Base-class for all regular forms.
The following configuration options are available on this form in addition
to the full behavior described for `WTForms-Alchemy`_
.. _WTForms-Alchemy: https://wtforms-alchemy.readthedocs.org
.. note::
While this class can easily be the base for each form you want to
configure, it is strongly recommended to use the :class:`CSRFModelForm`
instead. It is almost no different than this form except for a new
``csrf_token`` field. Thus it should never hurt to subclass it instead
of this form.
Meta
This is the only mandatory argument. It is directly taken over from
`WTForms-Alchemy`_ so you should check out their documentation on this
class as it will provide you with a complete overview of what's
possible here.
.. _inlines:
inlines
A list of forms to use as inline forms. See :ref:`inline_forms`.
.. _fieldsets:
fieldsets
Optionally define fieldsets to group your form into categories. It
requires a list of dictionaries and in each dictionary, the following
attributes can/must be set:
* ``title``: A title to use for the fieldset. This is required but may
be the empty string (then no title is displayed).
* ``fields``: A list of field names that should be displayed together
in a fieldset. This is required.
* ``template``: The name of the fieldset template to load. This must be
the name of a file in the ``fieldsets`` directory of the current
theme **without** a file extension. It defaults to ``horizontal``
which uses bootstraps horizontal forms for each fieldset. See
:ref:`fieldset_templates` for details on available templates.
title
Set the title of your form. By default this returns the class name of
the model. It is used in different places such as the title of the
page.
title_plural:
The plural title. By default it is the title with an "s" appended,
however, you sometimes might want to override it because "Childs" just
looks stupid ;-)
name:
The name of this form. By default it uses the lowercase model class
name. This is used internally und you normally do not need to change
it.
get_dbsession:
Unfortunately, you have to define this ``classmethod`` on the form to
get support for the unique validator. It is documented in
`Unique Validator`_. This is a limitation we soon hope to overcome.
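Example
    A minimal sketch (``UserForm``, ``User`` and the field names are made-up,
    not part of this library)::
        class UserForm(CSRFModelForm):
            class Meta:
                model = User
            fieldsets = [
                {'title': 'Account', 'fields': ['name', 'email']},
                {'title': 'Profile', 'fields': ['age'], 'template': 'horizontal'},
            ]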
"""
@classmethod
def _relationship_key(cls, other_form):
"""
Get the name of the attribute that is the relationship between this
forms model and the model defined on another form.
By default the ``relationship_name`` attribute of ``other_form`` is
looked up and used, if it is present. Otherwise, the relationship is
determined dynamically.
:param other_form: The other form to which the relationship should be
found.
"""
# If explicitly defined, return it
if other_form.relationship_name:
return other_form.relationship_name
other_model = other_form.Meta.model
candidates = []
for relationship in inspect(cls.Meta.model).relationships:
if relationship.mapper.class_ == other_model:
candidates.append(relationship.key)
if len(candidates) == 0:
raise TypeError("Could not find relationship between the models "
"%s and %s" % (cls.Meta.model, other_model))
elif len(candidates) > 1:
raise TypeError("relationship between the models %s and %s is "
"ambigous. Please specify the "
"'relationship_name' attribute on %s"
% (cls.Meta.model, other_model, other_form))
return candidates[0]
def process(self, formdata=None, obj=None, **kwargs):
super(ModelForm, self).process(formdata, obj, **kwargs)
self.process_inline(formdata, obj, **kwargs)
def process_inline(self, formdata=None, obj=None, **kwargs):
"""
Process all inline fields. This sets the instance attribute
``inline_fieldsets``, a dict-like object that contains as keys
the name of all defined inline fields and as values a pair of
``inline, inline_forms`` where ``inline`` is the inline which the
name refers to and ``inline_forms`` is the list of form instances
associated with this inline.
"""
self.inline_fieldsets = OrderedDict()
for inline in self.inlines:
inline_forms = []
inline_formdata = {}
if formdata:
# create a dictionary of data by index for all existing form
# fields. It basically parses back its pattern of assigned
# names (i.e. inline.name_index_field.name).
# The created values can then be sent to the individual forms
# below based on their index.
count = int(formdata.get('%s_count' % inline.name, 0))
for index in range(count):
inline_formdata[index] = MultiDict()
for field in inline.field_names:
data = formdata.get('%s_%d_%s' % (inline.name,
index, field))
if data:
inline_formdata[index][field] = data
else:
count = None
# Find the matching relationship
# We determine this *outside* of the obj block because we want | |
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
pd.core.arrays.PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(
pd.IntervalIndex.from_breaks([0, 1, 2]),
pd.core.arrays.IntervalArray,
"interval",
),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction for code reuse.
# At the moment, we've judged that allowing this test to fail is more
# practical than overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
(pd.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"]),
np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
),
(
pd.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
),
(pd.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
(
pd.PeriodIndex(["2017", "2018"], freq="D"),
np.array([17167, 17532], dtype=np.int64),
),
],
)
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, PandasArray)
@pytest.mark.parametrize(
"array, attr",
[
(pd.Categorical(["a", "b"]), "_codes"),
(pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
(pd.core.arrays.integer_array([0, np.nan]), "_data"),
(pd.core.arrays.IntervalArray.from_breaks([0, 1]), "_left"),
(pd.SparseArray([0, 1]), "_sparse_values"),
(DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
# tz-aware Datetime
(
DatetimeArray(
np.array(
["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
"_data",
),
],
)
@pytest.mark.parametrize("box", [pd.Series, pd.Index])
def test_array(array, attr, box):
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip("No index type for {}".format(array.dtype))
result = box(array, copy=False).array
if attr:
array = getattr(array, attr)
result = getattr(result, attr)
assert result is array
def test_array_multiindex_raises():
idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
with pytest.raises(ValueError, match="MultiIndex"):
idx.array
@pytest.mark.parametrize(
"array, expected",
[
(np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
(pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
(
pd.core.arrays.period_array(["2000", "2001"], freq="D"),
np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
),
(
pd.core.arrays.integer_array([0, np.nan]),
np.array([0, np.nan], dtype=object),
),
(
pd.core.arrays.IntervalArray.from_breaks([0, 1, 2]),
np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
),
(pd.SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),
# tz-naive datetime
(
DatetimeArray(np.array(["2000", "2001"], dtype="M8[ns]")),
np.array(["2000", "2001"], dtype="M8[ns]"),
),
# tz-aware stays tz-aware
(
DatetimeArray(
np.array(
["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
np.array(
[
pd.Timestamp("2000-01-01", tz="US/Central"),
pd.Timestamp("2000-01-02", tz="US/Central"),
]
),
),
# Timedelta
(
TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"),
np.array([0, 3600000000000], dtype="m8[ns]"),
),
],
)
@pytest.mark.parametrize("box", [pd.Series, pd.Index])
def | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def all_messages():
"""
Keep all messages in Turkish (tr)
Returns:
all messages in JSON
"""
return \
{
"scan_started": "Nettacker motoru başladı ...",
"options": "python nettacker.py [seçenekler]",
"help_menu": "Nettacker Yardım Menüsünü Göster",
"license": "Lütfen lisans ve sözleşmeleri okuyun https://github.com/viraintel/OWASP-Nettacker",
"engine": "Motor",
"engine_input": "Motor girişi seçenekleri",
"select_language": "bir dil seçin {0}",
"range": "tüm IP'leri aralıkta tara",
"subdomains": "alt alanları bul ve tara",
"thread_number_connections": "Bir ana bilgisayara bağlantı için iş parçacığı numaraları",
"thread_number_hosts": "tarama konakları için iş parçacığı numaraları",
"save_logs": "tüm kayıtları dosyaya kaydet (results.txt, results.html, results.json)",
"target": "Hedef",
"target_input": "Hedef giriş seçenekleri",
"target_list": "hedef (ler) listesi, \",\" ile ayrı",
"read_target": "dosyadan hedef (ler) oku",
"scan_method_options": "Tarama yöntemi seçenekleri",
"choose_scan_method": "tarama yöntemini seçin {0}",
"exclude_scan_method": "{0} öğesini hariç tutmak için tarama yöntemini seçin",
"username_list": "kullanıcı adı (lar) listesi, \",\" ile ayrı",
"username_from_file": "dosyadan kullanıcı adlarını oku",
"password_seperator": "şifre listesi \",\" ile ayrı",
"read_passwords": "<PASSWORD> şifre (ler) oku",
"port_seperator": "port (lar) listesi, \",\" ile ayrı",
"time_to_sleep": "her istek arasında uyumak için zaman",
"error_target": "Hedef (ler) belirtilemiyor",
"error_target_file": "Dosya açılamayan hedef (ler) belirtilemiyor: {0}",
"thread_number_warning": "100'den daha düşük iplik numarası kullanmak daha iyi, BTW devam ediyor ...",
"set_timeout": "zaman aşımını {0} saniye olarak ayarlayın, çok büyük değil mi? devam ettikçe ...",
"scan_module_not_found": "bu tarama modülü [{0}] bulunamadı!",
"error_exclude_all": "tüm tarama yöntemlerini hariç tutamazsınız",
"exclude_module_error": "hariç tutmak için seçtiğiniz {0} modülü bulunamadı!",
"method_inputs": "yöntem girişlerini girin, örneğin: ftp_brute_users = test, admin & ftp_brute_passwds "
"= read_from_file: /tmp/pass.txt&ftp_brute_port=21",
"error_reading_file": "{0} dosyası okunamıyor",
"error_username": "Dosya açılamayan kullanıcı adı (lar) belirtilemez: {0}",
"found": "{0} bulundu! ({1}: {2})",
"error_password_file": "Dosya açılamayan şifre (ler) belirtilemez: {0}",
"file_write_error": "\"{0}\" dosyası yazılabilir değil!",
"scan_method_select": "lütfen tarama yönteminizi seçin!",
"remove_temp": "geçici dosyaları kaldırarak!",
"sorting_results": "sıralama sonuçları!",
"done": "bitmiş!",
"start_attack": "{0}, {1} arasında {1} saldırmaya başlama",
"module_not_available": "\"{0}\" bu modül mevcut değil",
"error_platform": "ne yazık ki yazılımın bu sürümü sadece linux / osx / windows üzerinde çalıştırılabilir.",
"python_version_error": "Python sürümünüz desteklenmiyor!",
"skip_duplicate_target": "yinelenen hedefi atla (bazı alt alanlar / alan adları aynı IP'ye ve "
"Aralıklara sahip olabilir)",
"unknown_target": "bilinmeyen hedef türü [{0}]",
"checking_range": "{0} aralığında kontrol ediliyor ...",
"checking": "{0} kontrol ediliyor ...",
"HOST": "HOST",
"USERNAME": "K<NAME>",
"PASSWORD": "<PASSWORD>",
"PORT": "LİMAN",
"TYPE": "TİP",
"DESCRIPTION": "AÇIKLAMA",
"verbose_level": "ayrıntılı mod düzeyi (0-5) (varsayılan 0)",
"software_version": "yazılım sürümünü göster",
"check_updates": "güncellemeleri kontrol ediniz",
"outgoing_proxy": "giden bağlantılar proxy'si (socks). örnek socks5: 127.0.0.1: 9050, çorap: //127.0.0.1:"
" 9050 socks5: //127.0.0.1: 9050 veya socks4: çorap4: //127.0.0.1: 9050, kimlik"
" doğrulama: çorap: // kullanıcı adı: şifre @ 127.0.0.1, socks4: // kullanıcı adı:"
" [email protected], socks5: // kullanıcı adı: [email protected]",
"valid_socks_address": "lütfen geçerli bir çorap adresi ve port giriniz. örnek socks5: 127.0.0.1: 9050,"
" çorap: //127.0.0.1: 9050, çorap5: //127.0.0.1: 9050 veya çorap4: çorap4: "
"//127.0.0.1: 9050, kimlik doğrulama: çorap: // kullanıcı adı: şifre @ 127.0.0.1, "
"socks4: // kullanıcı adı: [email protected], socks5: // kullanıcı adı: "
"[email protected]",
"connection_retries": "Bağlantı zaman aşımı olduğunda tekrar dener (varsayılan 3)",
"ftp_connection_timeout": "{0} ile ftp bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"login_successful": "BAŞARIYLA GİRİŞ YAPTI!",
"login_list_error": "BAŞARILI OLMAK ÜZERE, LİSANS KOMİSYONU İÇİN İZİN VERİLDİ!",
"ftp_connection_failed": "{0} için ftp bağlantısı: {1} başarısız oldu, tüm adımı atladım {süreç {2}} "
"{2}]! bir sonraki adıma geçmek",
"input_target_error": "{0} modülü için giriş hedefi {1} atlama, DOMAIN, HTTP veya SINGLE_IPv4 olmalıdır",
"user_pass_found": "user: {0} pass: {1} host: {2} bağlantı noktası: {3} bulundu!",
"file_listing_error": "(LİSTE DOSYALARI İÇİN İZİN YOK)",
"trying_message": "{3} {4}: {5} ({6}) 'daki {2} sürecindeki {1} hesabının {0} değerini denemek",
"smtp_connection_timeout": "{0} için smtp bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"smtp_connection_failed": "{0} için smtp bağlantısı: {1} başarısız oldu, tüm adımı atla {süreç {2}} {2}]!"
" bir sonraki adıma geçmek",
"ssh_connection_timeout": "{0} ile ssh bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"ssh_connection_failed": "{0} için ssh bağlantısı: {1} başarısız oldu, tüm adımı atladı {süreç {2} {2}]!"
" bir sonraki adıma geçmek",
"port/type": "{0} / {1}",
"port_found": "host: {0} port: {1} ({2}) bulundu!",
"target_submitted": "{0} hedefi gönderildi!",
"current_version": "{0} {1} {2} {6} OWASP Nettacker sürümünü {3} {4} {5} kod adıyla çalıştırıyorsunuz",
"feature_unavailable": "Bu özellik henüz mevcut değil! son sürümü almak için lütfen git klon "
"https://github.com/viraintel/OWASP-Nettacker.git veya pip install -U"
" OWASP-Nettacker çalıştırın.",
"available_graph": "Tüm aktiviteler ve bilgiler için bir grafik oluşturmak, HTML çıkışı kullanmalısınız."
" mevcut grafikler: {0}",
"graph_output": "Grafik özelliğini kullanmak için çıktı dosya adınız \".html\" veya \".htm\" "
"ile bitmelidir!",
"build_graph": "bina grafiği ...",
"finish_build_graph": "bina grafiğini bitir!",
"pentest_graphs": "Sızma Test Grafikleri",
"graph_message": "Bu grafik OWASP Nettacker tarafından oluşturuldu. Grafik tüm modül aktivitelerini, "
"ağ haritasını ve hassas bilgileri içerir. Lütfen güvenilir değilse, bu dosyayı "
"kimseyle paylaşmayın.",
"nettacker_report": "OWASP Nettacker Raporu",
"nettacker_version_details": "Yazılım Ayrıntıları: {2} içindeki OWASP Nettacker sürümü {0} [{1}]",
"no_open_ports": "açık bağlantı noktası bulunamadı!",
"no_user_passwords": "kull<PASSWORD>cı / ş<PASSWORD> bulunamadı!",
"loaded_modules": "{0} modül yüklendi ...",
"graph_module_404": "Bu grafik modülü bulunamadı: {0}",
"graph_module_unavailable": "bu \"{0}\" grafik modülü mevcut değil",
"ping_before_scan": "ana bilgisayarı taramadan önce ping",
"skipping_target": "Taramadan önce --ping -ping gerçek olduğundan ve yanıt vermediğinden {0} hedefleme "
"yöntemini ve {1} tarama yöntemini atlıyor!",
"not_last_version": "OWASP Nettacker'ın son sürümünü kullanmıyorsunuz, lütfen güncelleyin.",
"cannot_update": "güncellemeyi kontrol edemezsiniz, lütfen internet bağlantınızı kontrol edin.",
"last_version": "OWASP Nettacker'ın son sürümünü kullanıyorsunuz ...",
"directoy_listing": "dizin girişi {0} bulundu",
"insert_port_message": "lütfen URL yerine -g veya --methods-args anahtarından bağlantı noktası ekleyin",
"http_connection_timeout": "http bağlantısı {0} zaman aşımı!",
"wizard_mode": "sihirbaz modunu başlat",
"directory_file_404": "{1} numaralı bağlantı noktasında {0} için dizin veya dosya bulunamadı",
"open_error": "{0} açılamıyor",
"dir_scan_get": "dir_scan_http_method değeri GET veya HEAD olmalı, varsayılanı "
"GET olarak ayarlanmış olmalıdır.",
"list_methods": "tüm yöntemleri listeler",
"module_args_error": "{0} modül hatalarını alamıyor",
"trying_process": "{3} tarihinde {1} {1} tarihinde {1} {0} tarihinde {4} {5} tarihinde {0} denemeyi",
"domain_found": "alan bulundu: {0}",
"TIME": "ZAMAN",
"CATEGORY": "KATEGORİ",
"module_pattern_404": "{0} desenli bir modül bulamıyor!",
"enter_default": "lütfen {0} girin Varsayılan [{1}]>",
"enter_choices_default": "lütfen {0} girin seçimler [{1}] | Varsayılan [{2}]>",
"all_targets": "hedefler",
"all_thread_numbers": "iş parçacığı numarası",
"out_file": "çıktı dosya adı",
"all_scan_methods": "tarama yöntemleri",
"all_scan_methods_exclude": "dışlamak için tarama yöntemleri",
"all_usernames": "kullanıcı adları",
"all_passwords": "<PASSWORD>",
"timeout_seconds": "zaman aşımı saniye",
"all_ports": "port numaraları",
"all_verbose_level": "ayrıntılı seviye",
"all_socks_proxy": "çorap vekil",
"retries_number": "yeniden deneme sayısı",
"graph": "grafik",
"subdomain_found": "alt alan bulundu: {0}",
"select_profile": "profil seç {0}",
"profile_404": "\"{0}\" profili bulunamadı!",
"waiting": "{0} için bekliyor",
"vulnerable": "{0} için savunmasız",
"target_vulnerable": "{0} hedefi: {1}, {2} için savunmasız!",
"no_vulnerability_found": "hiçbir güvenlik açığı bulunamadı! ({0})",
"Method": "Yöntem",
"API": "API",
"API_options": "API seçenekleri",
"start_API": "API hizmetini başlat",
"API_host": "API ana bilgisayar adresi",
"API_port": "API bağlantı noktası numarası",
"API_debug": "API hata ayıklama modu",
"API_access_key": "API erişim anahtarı",
"white_list_API": "API'ye bağlanmak için beyaz liste ana bilgisayarlarına izin ver",
"define_whie_list": "ile beyaz liste konaklarını tanımlar, (örnek: 127.0.0.1, 192.168.0.1/24, "
"10.0.0.1-10.0.0.255)",
"gen_API_access_log": "API erişim günlüğü oluştur",
"API_access_log_file": "API erişim günlüğü dosya adı",
"API_port_int": "API portu bir tamsayı olmalı!",
"unknown_ip_input": "bilinmeyen giriş türü, kabul edilen türler SINGLE_IPv4, RANGE_IPv4, "
"CIDR_IPv4 şeklindedir.",
"API_key": "* API Anahtarı: {0}",
"ports_int": "portlar tamsayı olmalıdır! (ör. 80, 80, 1080, 80, 1080-1300, 9000, 12000-15000)",
"through_API": "OWASP Nettacker API'sı aracılığıyla",
"API_invalid": "geçersiz API anahtarı",
"unauthorized_IP": "IP'niz yetkili değil",
"not_found": "Bulunamadı!",
"no_subdomain_found": "subdomain_scan: alt alan adı bulunamadı!",
"viewdns_domain_404": "viewdns_reverse_ip_lookup_scan: alan adı bulunamadı!",
"browser_session_valid": "tarayıcınızın oturumu geçerli",
"browser_session_killed": "tarayıcı oturumunuz öldürüldü",
"updating_database": "veritabanını güncellemek ...",
"database_connect_fail": "Veritabanına bağlanılamadı!",
"inserting_report_db": "raporu veritabanına eklemek",
"inserting_logs_db": "günlükleri veritabanına eklemek",
"removing_logs_db": "eski günlükleri db'den kaldırma",
"len_subdomain_found": "{0} alt alan bulundu!",
"len_domain_found": "{0} alan (lar) bulundu!",
"phpmyadmin_dir_404": "phpmyadmin dir bulunamadı!",
"DOS_send": "DoS paketlerini {0} adresine göndermek",
"host_up": "{0} doldu! Geri ping atma zamanı {1}",
"host_down": "{0} ping edilemiyor!",
"root_required": "bunun kök olarak çalıştırılması gerekiyor",
"admin_scan_get": "admin_scan_http_method değeri GET veya HEAD olmalı, varsayılanı GET olarak"
" ayarlanmış olmalıdır.",
"telnet_connection_timeout": "{0} ile telnet bağlantısı: {1} zaman aşımı, {2} atlama: {3}",
"telnet_connection_failed": "{0} ile telnet bağlantısı: {1} başarısız oldu, tüm adımı atladı {süreç "
"{2}} {2}]! bir sonraki adıma geçmek",
"http_auth_success": "http temel kimlik doğrulama başarısı - ana bilgisayar: {2}: {3}, kullanıcı: "
"{0}, pass: {1} bulundu!",
"http_auth_failed": "http temel kimlik doğrulaması {0} tarihinde başarısız oldu: {3} {1} kullanarak: {2}",
"http_form_auth_success": "http formu kimlik doğrulama başarısı - ana bilgisayar: {2}: {3}, kullanıcı: | |
#jfp was w1 = numpy.full( a1.shape, w1 )
#jfp was w2 = numpy.full( a2.shape, w2 )
w1 = numpy.full( mv1.shape, -1 )
w1[i] = sw1
w1 = numpy.ma.masked_less(w1,0)
w2 = numpy.full( mv1.shape, -1 )
w2[i] = sw2
w2 = numpy.ma.masked_less(w2,0)
if not force_scalar_avg and\
hasattr(w1,'shape') and len(w1.shape)>0 and hasattr(w2,'shape') and len(w2.shape)>0:
if w1[i].mask.all(): # if w1[i] is all missing values:
w1[i] = sw1
if w2[i].mask.all(): # if w2[i] is all missing values:
w2[i] = sw2
# Here's what I think the numpy.ma averager does about weights and missing values:
# The output weight w(i)=sw1+sw2 if there is no missing value for mv1(i) and mv2(i) (maybe
# also if both are missing, because the weight doesn't matter then).
# But if i is missing for mv1, drop sw1, thus w(i)=sw2. If i is missing for mv2, drop sw2.
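# Illustrative sketch of those rules (not executed; the 31/28 weights are hypothetical
# month lengths):
#   mv1(i) present, mv2(i) present  ->  w(i) = 31 + 28 = 59
#   mv1(i) missing, mv2(i) present  ->  w(i) = 28
#   mv1(i) present, mv2(i) missing  ->  w(i) = 31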
a,w = numpy.ma.average( numpy.ma.array((a1,a2)), axis=0,
weights=numpy.ma.array((w1[i],w2[i])), returned=True )
# Avoid the occasional surprise about float32/float64 data types:
a = a.astype( a1.dtype )
w = w.astype( a.dtype )
f1 = mv1.parent # the (open) file corresponding to the FileVariable mv1
w1id = mv1.id+'_vwgts'
if not hasattr(mv1,'vwgts'):
w1axes = mv1.getAxisList()
w1attributes = {}
addVariable( f1, w1id, 'd', w1axes, w1attributes )
f1w = f1[w1id]
f1w[:] = w1
f1w[i] = w
# TypeError: 'CdmsFile' object does not support item assignment f1[w1id] = w
mv1.vwgts = w1id
else:
# This is what happens most of the time. It's a simple average (of two compatible numpy
# arrays), weighted by scalars. These scalars are the lengths of time represented
# by mv1, mv2.
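# For example (hypothetical values): averaging a 31-day January mean a1 with a
# 28-day February mean a2 gives sw12=59 and a = a1*(31/59) + a2*(28/59).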
# Note that a1,a2 are type TransientVariable which inherits from numpy.ma.MaskedArray
sw12 = sw1+sw2
a = ( a1*(sw1/sw12) + a2*(sw2/sw12) )
try:
if a.dtype != a1.dtype:
a = a.astype( a1.dtype )
except Exception as e:
# happens if a1 isn't a numpy array, e.g. a float. Then it's ok to just go on.
#print "In arithmetic average of",mv1.id,"in two_pt_avg, encountered exception:",e
#print "a1=",a1,type(a1),a1.dtype if hasattr(a1,'dtype') else None
pass
return a
def update_time_avg( redvars, redtime_bnds, redtime_wts, newvars, next_tbounds, dt=None,
new_time_weights=None, force_scalar_avg=False ):
"""Updates the time-reduced data for a list of variables. The reduced-time and averaged
variables are listed in redvars. Their weights (for time averaging) are another variable,
redtime_wts.
(Each member of redvars should have the same time axis. Each member of newvars should have
the same time axis. Normally it has an attribute wgts which is the same as redtime_wts.id.)
The new data is listed in newvars, and this list should correspond to redvars, e.g.
newvars[i].id==redvars[i].id. The length of both should be equal and at least one.
Each variable is an MV. Normally redvar and redtime_wts will be a FileVariable (required if
they might change) and newvar a TransientVariable.
If newvar needs any spatial reductions to match redvar, they should have been performed before
calling this function.
next_tbounds is the next time interval, used if newvar is defined on a time beyond redvar's
present time axis. If next_tbounds==[], newvar will be ignored on such times. Normally
next_tbounds will be set to [] when updating a climatology file which has been initialized.
The penultimate argument dt is used only to signal, via dt=0, that we are computing climatologies - hence
the new data's time axis must be adjusted before averaging the data into redvars.
The last argument is the time_weights global attribute of the data file, if any; it corresponds
to newvars. This is expected to occur iff the data file is a climatology file written by
an earlier use of this module.
The optional argument force_scalar_avg is for testing and is passed on to two_pt_avg.
"""
# >>>> TO DO <<<< Ensure that each redvar, redtime_wts, newvar have consistent units
# >>>> for the variable and for time. Assert that they have the same shape, axes, etc.
if redvars is None or len(redvars)==0: # formerly redvar was initialized here
raise Exception("update_time_avg requires a reduced variable list")
nvars = len(redvars)
# The following two asserts express my assumption that the first index of the variable is time.
# This is almost universal, but in the future I should generalize this code. That would make
# slicing more verbose, e.g. if time were changed from the first index to second then
# v[j] would become v[:,j,:] (for a 2-D variable v).
for var in redvars:
redtime = var.getTime() # partially-reduced time axis
if redtime is not None: # some variables have no time axis
try:
assert( var.getDomain()[0][0].isTime() )
except Exception as e:
logging.exception("redvars=%s, var=%s, var.getDomain()=%s", redvars, var, var.getDomain())
raise e
break
assert( redtime is not None )
redtime_len = redtime.shape[0]
for var in newvars:
newtime = var.getTime() # original time axis, from the new variable
if newtime is not None:
try:
assert( var.getDomain()[0][0].isTime() )
except Exception as e:
logging.exception("redvars=%s, var=%s, var.getDomain()=%s", redvars, var, var.getDomain())
raise e
break
assert( newtime is not None ) # The input data should have a time axis!
if dt==0:
newtime = adjust_time_for_climatology( newtime, redtime )
newtime_bnds = getClimoBounds(newtime)
# newtime_wts[j,i] is the weight applied to the data at time newtime[j] in computing
# an averaged, reduced value for the reduced time indexed by newtime_rti[j,i], 0<=i<maxolaps.
# If newtime_rti[j,i]<0, that means the weight is 0.
maxolaps = 3 # Maximum number of data time intervals which could overlap with a single
# reduced-time interval. We're unlikely to see more than 2.
newtime_wts = numpy.zeros(( newtime.shape[0], maxolaps ))
newtime_rti = numpy.zeros(( newtime.shape[0], maxolaps ), numpy.int32) - 1
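# Illustrative example (hypothetical values): if newtime_bnds[3] straddles the boundary
# between reduced intervals 0 and 1, the loop below could produce
#   newtime_wts[3] = [ 15., 16., 0. ]   # overlap lengths with each reduced interval
#   newtime_rti[3] = [ 0, 1, -1 ]       # indices of those reduced intervals (-1 = unused slot)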
for j in range( newtime.shape[0] ):
# First, extend redtime and redtime_bnds if necessary:
# This should be moved to a separate function.
if newtime_bnds[j][1] > redtime_bnds[-1][1]:
bndmin = max( newtime_bnds[j][0], next_tbounds[0] )
bndmax = min( newtime_bnds[j][1], next_tbounds[1] )
weight = bndmax-bndmin
if weight>0:
# Extend the time axis to add a new time, time bounds, and weight. With one more
# silly step (see below), this will also have the effect of extending redvars along
# the time axis.
redtime_bnds[redtime_len] = next_tbounds
redtime[redtime_len] = 0.5*(
redtime_bnds[redtime_len][1] + redtime_bnds[redtime_len][0] )
redtime_wts[redtime_len] = 0.0
redtime_len +=1
for iv in range(nvars):
# Without this silly step, the data in redvars[iv] won't be expanded to match the
# newly expanded time axis...
dummy = redvars[iv].shape
# This also will do the job, but it looks like a lot of i/o:
# redvars[iv].parent.write(redvars[iv])
# It doesn't help to write redtime_wts or redtime_bnds. You need to write a variable
# with the same axes as redvars.
# This doesn't do the job: redvars[iv].parent.sync()
# The weight of time newtime[j] is the part of its bounds which lie within some reduced-
# time bounds. We'll also need to remember the indices of the reduced-times for
# which this is nonzero (there will be few of those, many reduced times total)
k = -1
for i,red_bnds in enumerate( redtime_bnds ):
bndmin = max( newtime_bnds[j][0], red_bnds[0] )
bndmax = min( newtime_bnds[j][1], red_bnds[1] )
weight = bndmax-bndmin
if weight<0:
continue
else:
k += 1
newtime_wts[j,k] = weight
newtime_rti[j,k] = i
# This much simpler expression works if there is no overlap:
#newtime_wts[j] = newtime_bnds[j][1] - newtime_bnds[j][0]
kmax = k
assert( kmax<maxolaps ) # If k were unlimited, the coding would be more complicated.
# This is the first point at which we decide whether the input file covers any times of interest,
# e.g. for climatology of DJF, here is where we decide whether the file is a D,J,or F file.
# Here kmax<0 if the file has no interesting time.
if new_time_weights is not None:
# The weights from a climatology file make sense only when time is simply structured.
# Otherwise, we don't know what to do.
for j,nt in enumerate(newtime):
for k in range(kmax+1)[1:]:
assert( newtime_wts[j,k] ==0 )
newtime_wts = numpy.array([new_time_weights.data])
for j,nt in enumerate(newtime):
for k in range(kmax+1):
i = int( newtime_rti[j][k] )
# This | |
value(s):
+----------+-----------------+
| Name     | Value           |
+==========+=================+
| commands | `list` of `Any` |
+----------+-----------------+
"""
self.parent = parent
self._getter = getter
self._from_class_constructor = from_class_constructor
def __call__(self, func=..., *args, **kwargs):
"""
Adds the given `func` to all of the represented client's respective event handler managers.
Parameters
----------
func : `callable`, Optional
The event to be added to the respective event handler.
*args : Positional parameter
Additionally passed positional parameters to be passed with the given `func` to the event handler.
**kwargs : Keyword parameters
Additionally passed keyword parameters to be passed with the given `func` to the event handler.
Returns
-------
func : ``Router``
The added functions.
"""
if func is ...:
return partial_func(self, *args, **kwargs)
handlers = self._getter(self)
if not handlers:
return
count = len(handlers)
routed_args = route_args(args, count)
routed_kwargs = route_kwargs(kwargs, count)
routed_func = maybe_route_func(func, count)
routed = []
for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
func = handler.create_event(func_, *args, **kwargs)
routed.append(func)
return Router(routed)
def from_class(self, klass):
"""
Allows the event handler manager router to capture a class, creating events from its attributes and
adding them to the represented event handlers.
Parameters
----------
klass : `type`
The class to capture.
Returns
-------
routed : ``Router``
The routed created instances.
Raises
------
TypeError
If the parent of the event handler manager has no support for `.from_class`.
BaseException
Any exception raised by any of the event handlers.
"""
from_class_constructor = self._from_class_constructor
if from_class_constructor is None:
raise TypeError(f'`.from_class` is not supported by `{self.parent!r}`.')
handlers = self._getter(self)
count = len(handlers)
if not count:
return
routed_maybe = from_class_constructor(klass)
if isinstance(routed_maybe, Router):
if len(routed_maybe) != count:
raise ValueError(f'The given class is routed `{len(routed_maybe)}` times, meanwhile expected to be routed '
f'`{count}` times, got {klass!r}.')
routed = routed_maybe
else:
copy_method = getattr(type(routed_maybe), 'copy', None)
if copy_method is None:
routed = [routed_maybe for _ in range(count)]
else:
routed = [copy_method(routed_maybe) for _ in range(count)]
for handler, event in zip(handlers, routed):
handler.create_event(event)
return routed
def remove(self, func, *args, **kwargs):
"""
Removes the given `func` from the represented event handler managers.
Parameters
----------
func : ``Router``, `callable`
The event to be removed from the respective event handlers.
*args : `str` or `None`
Additional positional parameters.
**kwargs : Keyword parameters
Additional keyword parameters.
"""
handlers = self._getter(self)
count = len(handlers)
if not count:
return
if isinstance(func, Router):
if len(func) != count:
raise ValueError(f'The given `func` is routed `{len(func)}` times, meanwhile expected to be routed '
f'`{count}` times, got {func!r}.')
for func, handler in zip(func, handlers):
handler.delete_event(func, *args, **kwargs)
else:
for handler in handlers:
handler.delete_event(func, *args, **kwargs)
def extend(self, iterable):
"""
Extends the event handler manager router's respective managers with the given iterable of events.
Parameters
----------
iterable : `iterable`
Raises
------
TypeError
- If `iterable` was passed as ``eventlist`` and its `.type` attribute is not accepted by the parent
event handler.
- If `iterable` was not passed as type ``eventlist`` and any of its elements' format is incorrect.
"""
handlers = self._getter(self)
count = len(handlers)
if not count:
return
if type(iterable) is eventlist:
type_ = iterable.type
if (type_ is not None):
parent = self.parent
supported_types = getattr(handlers[0], 'SUPPORTED_TYPES', None)
if (supported_types is None) or (type_ not in supported_types):
raise TypeError(f'`{parent!r}` does not support elements of type `{type_!r}`.')
for element in iterable:
if isinstance(element, Router):
if len(element) != count:
raise ValueError(f'The given `func` is routed `{len(element)}` times, meanwhile expected to be routed '
f'`{count}` times, got {element!r}.')
for func, handler in zip(element, handlers):
handler.create_event(func, None)
else:
for handler in handlers:
handler.create_event(element, None)
return
else:
iterable = _convert_unsafe_event_iterable(iterable)
for element in iterable:
func = element.func
args = element.args
kwargs = element.kwargs
routed_args = route_args(args, count)
routed_func = maybe_route_func(func, count)
routed_kwargs = route_kwargs(kwargs, count)
for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
handler.create_event(func_, *args, **kwargs)
def unextend(self, iterable):
"""
Unextends the event handler router's represented event handlers with the given `iterable`.
Parameters
----------
iterable : `iterable`
Raises
------
ValueError
- If `iterable` was passed as ``eventlist`` and its `.type` attribute is not accepted by the parent
event handler.
- If `iterable` was not passed as type ``eventlist`` and any of its elements' format is incorrect.
- If any of the passed elements is not stored by the parent event handler. In this case the error is
raised only at the end.
"""
handlers = self._getter(self)
count = len(handlers)
if not count:
return
if type(iterable) is eventlist:
type_ = iterable.type
if (type_ is not None):
parent = self.parent
supported_types = getattr(handlers[0], 'SUPPORTED_TYPES', None)
if (supported_types is None) or (type_ not in supported_types):
raise TypeError(f'`{parent!r}` does not support elements of type `{type_!r}`.')
collected = []
for element in iterable:
if isinstance(element, Router):
if len(element) != count:
collected.append(f'The given `func` is routed `{len(element)}` times, meanwhile expected '
f'to be routed `{count}` times, got {element!r}.')
continue
for func, handler in zip(element, handlers):
try:
handler.delete_event(func, None)
except ValueError as err:
collected.append(err.args[0])
else:
for handler in handlers:
try:
handler.delete_event(element, None)
except ValueError as err:
collected.append(err.args[0])
if collected:
raise ValueError('\n'.join(collected)) from None
return
else:
iterable = _convert_unsafe_event_iterable(iterable)
collected = []
for element in iterable:
func = element.func
args = element.args
kwargs = element.kwargs
routed_func = maybe_route_func(func, count)
if kwargs is None:
for handler, func_ in zip(handlers, routed_func):
try:
handler.delete_event(func_)
except ValueError as err:
collected.append(err.args[0])
else:
routed_kwargs = route_kwargs(kwargs, count)
routed_args = route_args(args, count)
for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
try:
handler.delete_event(func_, *args, **kwargs)
except ValueError as err:
collected.append(err.args[0])
if collected:
raise ValueError('\n'.join(collected)) from None
def __repr__(self):
return f'<{self.__class__.__name__} parent={self.parent!r}, getter={self._getter!r}, from_class_constructor=' \
f'{self._from_class_constructor!r}>'
class EventListElement:
"""
Represents an element of an ``eventlist``.
Attributes
----------
func : `callable`
The event of the event-list element.
args : `None` or `tuple` of `Any`
Additional positional parameters for `func`.
kwargs : `None` or `dict` of (`str`, `Any`) items
Additional key word parameters for `func`.
"""
__slots__ = ('func', 'args', 'kwargs', )
def __init__(self, func, args, kwargs):
"""
Creates an ``EventListElement`` from the given parameters.
Parameters
----------
func : `callable`
The event of the eventlist element.
args : `None` or `tuple` of `Any`
Additional positional parameters for `func`.
kwargs : `None` or `dict` of (`str`, `Any`) items
Additional key word parameters for `func`.
"""
self.func = func
self.args = args
self.kwargs = kwargs
def __repr__(self):
"""Returns the representation of the eventlist element."""
return f'{self.__class__.__name__}({self.func!r}, args={self.args!r}, kwargs={self.kwargs!r})'
def __len__(self):
"""Additional information for unpacking if needed."""
return 3
def __iter__(self):
"""
Unpacks the eventlist element.
This method is a generator.
"""
yield self.func
yield self.args
yield self.kwargs
class Router(tuple):
"""
Object used to describe multiple captured created command-like objects.
"""
def __repr__(self):
"""Returns the router's representation."""
result = [self.__class__.__name__, '(']
limit = len(self)
if limit:
index = 0
while True:
element = self[index]
result.append(repr(element))
index += 1
if index == limit:
break
result.append(', ')
result.append(')')
return ''.join(result)
def route_value(to_route_value, count, default=None):
"""
Routes only a single `name` - `value` pair.
Parameters
----------
to_route_value : `Any`
The respective value to route
count : `int`
The expected amount of copies to generate.
default : `Any`, Optional
Optional default variable to use. Defaults to `None`.
Returns
-------
result : `list` of `Any`
A list of the routed values.
"""
result = []
if isinstance(to_route_value, tuple):
if len(to_route_value) != count:
raise ValueError(f'The represented router has `{count}` applicable clients, meanwhile received only '
f'`{len(to_route_value)}` routed values, got: {to_route_value!r}.')
last = ...
for value in to_route_value:
if value is None:
value = default
last = default
elif | |
# avaml/aggregatedata/download.py
import json
import math
import os
import pickle
import re
import sys
import datetime as dt
import requests
from concurrent import futures
import numpy as np
from avaml import _NONE, CSV_VERSION, REGIONS, merge, Error, setenvironment as se, varsomdata, REGION_NEIGH
from varsomdata import getforecastapi as gf
from varsomdata import getvarsompickles as gvp
from varsomdata import getmisc as gm
from varsomdata import getobservations as go
_pwl = re.compile("(DH|SH|FC)")
DIRECTIONS = ["N", "NE", "E", "SE", "S", "SW", "W", "NW"]
WIND_SPEEDS = {
'Stille/svak vind': 0.,
'Bris': 5.5,
'Frisk bris': 9.,
'Liten kuling': 12.,
'Stiv kuling': 15.5,
'Sterk kuling': 18.5,
'Liten storm': 23.,
'Storm': 30.
}
PROBLEMS = {
3: 'new-loose',
5: 'wet-loose',
7: 'new-slab',
10: 'drift-slab',
30: 'pwl-slab',
# This was safe to do when it was written (rewriting dpwl as pwl).
# If reverse lookups are done in the future, or if an .items() iteration
# is done, this may break something.
37: 'pwl-slab',
45: 'wet-slab',
50: 'glide'
}
CAUSES = {
10: 'new-snow',
11: 'hoar',
13: 'facet',
14: 'crust',
15: 'snowdrift',
16: 'ground-facet',
18: 'crust-above-facet',
19: 'crust-below-facet',
20: 'ground-water',
22: 'water-layers',
24: 'loose',
}
AVALANCHE_EXT = {
10: "dry_loose",
15: "wet_loose",
20: "dry_slab",
25: "wet_slab",
27: "glide",
30: "slush",
40: "cornice",
}
EXPOSED_HEIGHTS = {
1: "bottom-white",
2: "bottom-black",
3: "middle-white",
4: "middle-black",
}
# Transformations from Varsom main level
AVALANCHE_WARNING = {
"danger_level": ("danger_level", lambda x: x),
"emergency_warning": ("emergency_warning", lambda x: float(x == "Ikke gitt")),
"problem_amount": ("avalanche_problems", lambda x: len(x)),
}
# Same as AVALANCHE_WARNING, but destined for the label table
AVALANCHE_WARNING_LABEL = {
("CLASS", "danger_level"): ("danger_level", lambda x: x),
("CLASS", "emergency_warning"): ("emergency_warning", lambda x: x),
("CLASS", "problem_amount"): ("avalanche_problems", lambda x: len(x)),
}
# Transformations from Varsom problem level
AVALANCHE_PROBLEM = {
"dsize": ("destructive_size_ext_id", lambda x: x),
"prob": ("aval_probability_id", lambda x: x),
"trig": ("aval_trigger_simple_id", lambda x: {10: 0, 21: 1, 22: 2}.get(x, 0)),
"dist": ("aval_distribution_id", lambda x: x),
"lev_max": ("exposed_height_1", lambda x: x),
"lev_min": ("exposed_height_2", lambda x: x),
"cause_new-snow": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'new-snow')),
"cause_hoar": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'hoar')),
"cause_facet": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'facet')),
"cause_crust": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'crust')),
"cause_snowdrift": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'snowdrift')),
"cause_ground-facet": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'ground-facet')),
"cause_crust-above-facet": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'crust-above-facet')),
"cause_crust-below-facet": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'crust-below-facet')),
"cause_ground-water": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'ground-water')),
"cause_water-layers": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'water-layers')),
"cause_loose": ("aval_cause_id", lambda x: float(CAUSES.get(x, _NONE) == 'loose')),
"lev_fill_1": ("exposed_height_fill", lambda x: float(x == 1)),
"lev_fill_2": ("exposed_height_fill", lambda x: float(x == 2)),
"lev_fill_3": ("exposed_height_fill", lambda x: float(x == 3)),
"lev_fill_4": ("exposed_height_fill", lambda x: float(x == 4)),
"aspect_N": ("valid_expositions", lambda x: float(x[0])),
"aspect_NE": ("valid_expositions", lambda x: float(x[1])),
"aspect_E": ("valid_expositions", lambda x: float(x[2])),
"aspect_SE": ("valid_expositions", lambda x: float(x[3])),
"aspect_S": ("valid_expositions", lambda x: float(x[4])),
"aspect_SW": ("valid_expositions", lambda x: float(x[5])),
"aspect_W": ("valid_expositions", lambda x: float(x[6])),
"aspect_NW": ("valid_expositions", lambda x: float(x[7])),
}
# Same as AVALANCHE_PROBLEM, but destined for the label table
AVALANCHE_PROBLEM_LABEL = {
("CLASS", "cause"): ("aval_cause_id", lambda x: CAUSES.get(x, _NONE)),
("CLASS", "dsize"): ("destructive_size_ext_id", lambda x: x),
("CLASS", "prob"): ("aval_probability_id", lambda x: x),
("CLASS", "trig"): ("aval_trigger_simple_id", lambda x: x),
("CLASS", "dist"): ("aval_distribution_id", lambda x: x),
("CLASS", "lev_fill"): ("exposed_height_fill", lambda x: x),
("MULTI", "aspect"): ("valid_expositions", lambda x: x.zfill(8)),
("REAL", "lev_max"): ("exposed_height_1", lambda x: x),
("REAL", "lev_min"): ("exposed_height_2", lambda x: x),
}
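# Illustrative sketch (added for clarity; not part of the original pipeline): each table
# above maps an output key to a (source_field, transform) pair. A hypothetical helper
# applying such a spec to a single raw forecast/problem dict might look like the function
# below; the assumption that the raw dict is keyed directly by those source field names is
# not confirmed by this module alone.
def _apply_spec_example(spec, raw):
    """Hypothetical example: build one flat row from a raw dict using a spec table."""
    return {out_key: transform(raw[src_field]) for out_key, (src_field, transform) in spec.items()}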
# Transformations from Mountain Weather API
WEATHER_VARSOM = {
"precip_most_exposed": ("precip_most_exposed", lambda x: x),
"precip": ("precip_region", lambda x: x),
"wind_speed": ("wind_speed", lambda x: WIND_SPEEDS.get(x, 0)),
"temp_min": ("temperature_min", lambda x: x),
"temp_max": ("temperature_max", lambda x: x),
"temp_lev": ("temperature_elevation", lambda x: x),
"temp_freeze_lev": ("freezing_level", lambda x: x),
"wind_dir_N": ("wind_direction", lambda x: x == "N"),
"wind_dir_NE": ("wind_direction", lambda x: x == "NE"),
"wind_dir_E": ("wind_direction", lambda x: x == "E"),
"wind_dir_SE": ("wind_direction", lambda x: x == "SE"),
"wind_dir_S": ("wind_direction", lambda x: x == "S"),
"wind_dir_SW": ("wind_direction", lambda x: x == "SW"),
"wind_dir_W": ("wind_direction", lambda x: x == "W"),
"wind_dir_NW": ("wind_direction", lambda x: x == "NW"),
"temp_fl_start_0": ("fl_hour_of_day_start", lambda x: x == 0),
"temp_fl_start_6": ("fl_hour_of_day_start", lambda x: x == 6),
"temp_fl_start_12": ("fl_hour_of_day_start", lambda x: x == 12),
"temp_fl_start_18": ("fl_hour_of_day_start", lambda x: x == 18),
}
WEATHER_API = {
"precip_most_exposed": ("Precipitation_MostExposed_Median", lambda x: x),
"precip": ("Precipitation_overall_ThirdQuartile", lambda x: x),
"wind_speed": ("WindClassification", lambda x: WIND_SPEEDS.get(x)),
"temp_min": ("MinTemperature", lambda x: x),
"temp_max": ("MaxTemperature", lambda x: x),
"temp_lev": ("TemperatureElevation", lambda x: x),
"temp_freeze_lev": ("FreezingLevelAltitude", lambda x: x),
"wind_dir_N": ("WindDirection", lambda x: x == "N"),
"wind_dir_NE": ("WindDirection", lambda x: x == "NE"),
"wind_dir_E": ("WindDirection", lambda x: x == "E"),
"wind_dir_SE": ("WindDirection", lambda x: x == "SE"),
"wind_dir_S": ("WindDirection", lambda x: x == "S"),
"wind_dir_SW": ("WindDirection", lambda x: x == "SW"),
"wind_dir_W": ("WindDirection", lambda x: x == "W"),
"wind_dir_NW": ("WindDirection", lambda x: x == "NW"),
"temp_fl_start_0": ("FreezingLevelTime", lambda x: _round_hours(x) == 0),
"temp_fl_start_6": ("FreezingLevelTime", lambda x: _round_hours(x) == 6),
"temp_fl_start_12": ("FreezingLevelTime", lambda x: _round_hours(x) == 12),
"temp_fl_start_18": ("FreezingLevelTime", lambda x: _round_hours(x) == 18),
}
REG_ENG = {
"Faretegn": "dangersign",
"Tester": "tests",
"Skredaktivitet": "activity",
"Skredhendelse": "event",
"Snødekke": "snowpack",
"Skredproblem": "problem",
"Skredfarevurdering": "danger",
"Snøprofil": "snowprofile",
"AvalancheIndex": "avalancheidx",
}
REG_ENG_V4 = {
"Faretegn": "DangerObs",
"Tester": "CompressionTest",
"Skredaktivitet": "AvalancheActivityObs2",
"Skredhendelse": "AvalancheObs",
"Snødekke": "SnowSurfaceObservation",
"Skredproblem": "AvalancheEvalProblem2",
"Skredfarevurdering": "AvalancheEvaluation3",
"Snøprofil": "SnowProfile2",
}
# Transformations for RegObs Classes
REGOBS_CLASSES = {
"Faretegn": {
"DangerSignTID": {
2: 'avalanches',
3: 'whumpf',
4: 'cracks',
5: 'snowfall',
6: 'hoar',
7: 'temp',
8: 'water',
9: 'snowdrift',
}
},
"Tester": {
"PropagationName": {
"ECTPV": "ectpv",
"ECTP": "ectp",
"ECTN": "ectn",
"ECTX": "ectx",
"LBT": "lbt",
"CTV": "ctv",
"CTE": "cte",
"CTM": "ctm",
"CTH": "cth",
"CTN": "ctn",
},
},
"Skredaktivitet": {
"AvalancheExtTID": AVALANCHE_EXT,
"ExposedHeightComboTID": EXPOSED_HEIGHTS,
},
"Skredhendelse": {
"AvalancheTID": {
11: "wet_loose",
12: "dry_loose",
21: "wet_slab",
22: "dry_slab",
27: "glide",
30: "slush",
40: "cornice",
},
"AvalancheTriggerTID": {
10: "natural",
20: "artificial",
21: "artificial-skier",
22: "remote",
23: "artificial-test",
25: "explosive",
26: "human",
27: "snowmobile",
},
"TerrainStartZoneTID": {
10: "steep",
20: "lee",
30: "ridge",
40: "gully",
50: "slab",
60: "bowl",
70: "forest",
75: "logging",
95: "everywhere",
},
"AvalCauseTID": CAUSES,
},
"Snødekke": {
"SnowSurfaceTID": {
50: "facet",
61: "hard_hoar",
62: "soft_hoar",
101: "max_loose",
102: "med_loose",
103: "min_loose",
104: "wet_loose",
105: "hard_wind",
106: "soft_wind",
107: "crust",
}
},
"Skredproblem": {
"AvalCauseTID": CAUSES,
"AvalancheExtTID": AVALANCHE_EXT,
"ExposedHeightComboTID": EXPOSED_HEIGHTS,
},
"Skredfarevurdering": {},
"Snøprofil": {}
}
# Transformations for RegObs scalars
REGOBS_SCALARS = {
"Faretegn": {},
"Tester": {
"FractureDepth": ("FractureDepth", lambda x: x),
"TapsFracture": ("TapsFracture", lambda x: x),
"StabilityEval": ("StabilityEvalTID", lambda x: x),
"ComprTestFracture": ("ComprTestFractureTID", lambda x: x),
},
"Skredaktivitet": {
"EstimatedNum": ("EstimatedNumTID", lambda x: x),
"AvalTrigger": ("AvalTriggerSimpleTID", lambda x: {22: 5, 60: 4, 50: 3, 40: 2, 30: 1}.get(x, 0)),
"DestructiveSize": ("DestructiveSizeTID", lambda x: x if 0 < x <= 5 else 0),
"AvalPropagation": ("AvalPropagationTID", lambda x: x),
"ExposedHeight1": ("ExposedHeight1", lambda x: x),
"ExposedHeight2": ("ExposedHeight2", lambda x: x),
"ValidExpositionN": ("ValidExposition", lambda x: float(x[0])),
"ValidExpositionNE": ("ValidExposition", lambda x: float(x[1])),
"ValidExpositionE": ("ValidExposition", lambda x: float(x[2])),
"ValidExpositionSE": ("ValidExposition", lambda x: float(x[3])),
"ValidExpositionS": ("ValidExposition", lambda x: float(x[4])),
"ValidExpositionSW": ("ValidExposition", lambda x: float(x[5])),
"ValidExpositionW": ("ValidExposition", lambda x: float(x[6])),
"ValidExpositionNW": ("ValidExposition", lambda x: float(x[7])),
},
"Skredhendelse": {
"DestructiveSize": ("DestructiveSizeTID", lambda x: x if 0 < x <= 5 else 0),
"FractureHeight": ("FractureHeight", lambda x: x),
"FractureWidth": ("FractureWidth", lambda x: x),
"HeightStartZone": ("HeightStartZone", lambda x: x),
"HeightStopZone": ("HeightStopZone", lambda x: x),
"ValidExpositionN": ("ValidExposition", lambda x: float(x[0])),
"ValidExpositionNE": ("ValidExposition", lambda x: float(x[1])),
"ValidExpositionE": ("ValidExposition", lambda x: float(x[2])),
"ValidExpositionSE": ("ValidExposition", lambda x: float(x[3])),
"ValidExpositionS": ("ValidExposition", lambda x: float(x[4])),
"ValidExpositionSW": ("ValidExposition", lambda x: float(x[5])),
"ValidExpositionW": ("ValidExposition", lambda x: float(x[6])),
"ValidExpositionNW": ("ValidExposition", lambda x: float(x[7])),
},
"Snødekke": {
"SnowDepth": ("SnowDepth", lambda x: x),
"NewSnowDepth24": ("NewSnowDepth24", lambda x: x),
"Snowline": ("SnowLine", lambda x: x),
"NewSnowline": ("NewSnowLine", lambda x: x),
"HeightLimitLayeredSnow": ("HeightLimitLayeredSnow", lambda x: x),
"SnowDrift": ("SnowDriftTID", lambda x: x),
"SurfaceWaterContent": ("SurfaceWaterContentTID", lambda x: x),
},
"Skredproblem": {
"AvalCauseDepth": ("AvalCauseDepthTID", lambda x: x),
"AvalCauseLight": ("AvalCauseAttributeLightTID", lambda x: float(bool(x))),
"AvalCauseThin": ("AvalCauseAttributeThinTID", lambda x: float(bool(x))),
"AvalCauseSoft": ("AvalCauseAttributeSoftTID", lambda x: float(bool(x))),
"AvalCauseCrystal": ("AvalCauseAttributeCrystalTID", lambda x: float(bool(x))),
"AvalTrigger": ("AvalTriggerSimpleTID", lambda x: {22: 5, 60: 4, 50: 3, 40: 2, 30: 1}.get(x, 0)),
"DestructiveSize": ("DestructiveSizeTID", lambda x: x if 0 < x <= 5 else 0),
"AvalPropagation": ("AvalPropagationTID", lambda x: x),
"ExposedHeight1": ("ExposedHeight1", lambda x: x),
"ExposedHeight2": ("ExposedHeight2", lambda x: x),
"ValidExpositionN": ("ValidExposition", lambda x: float(x[0])),
"ValidExpositionNE": ("ValidExposition", lambda x: float(x[1])),
"ValidExpositionE": ("ValidExposition", lambda x: float(x[2])),
"ValidExpositionSE": ("ValidExposition", lambda x: float(x[3])),
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from common_utils import *
from crypto_utils import *
from random import *
import os
MODE_TYPE = {
'MODE_UNSPECIFIED' : 0x0000,
'MODE_KEYINIT' : 0x0001,
'MODE_AESINIT_ENC' : 0x0002,
'MODE_AESINIT_DEC' : 0x0004,
'MODE_ENC' : 0x0008,
'MODE_DEC' : 0x0010,
'MODE_RANDOM_KEY_EXT' : 0x0020,
'MODE_RANDOM_AES_EXT' : 0x0040
}
STATE_TYPE = {
'STATE_INITIALIZED' : 0x00,
'STATE_KEYINIT' : 0x01,
'STATE_AESINIT_ENC' : 0x02,
'STATE_AESINIT_DEC' : 0x04
}
ERR_MODE = {
'ERR_NO_OPERATION' : 0x1,
'ERR_ENC_AND_DEC' : 0x2,
'ERR_ENC_AND_DEC_AESINIT' : 0x4,
'ERR_AESINIT_AND_OP' : 0x8
}
ERR_OPERATION = {
'ERR_OP_GEN_RANDOM_KEY' : 0x10,
'ERR_OP_GEN_RANDOM_AES' : 0x20,
'ERR_OP_KEYINIT' : 0x40,
'ERR_OP_AESINIT' : 0x80,
'ERR_OP_ENC' : 0x100,
'ERR_OP_DEC' : 0x200
}
ERR_INPUT = {
'ERR_SIZE_KEY' : 0x400,
'ERR_SIZE_INPUT' : 0x800,
'ERR_SIZE_OUTPUT' : 0x1000,
'ERR_KEY_UNUSED' : 0x2000,
'ERR_INPUT_UNUSED' : 0x4000,
'ERR_KEY_MISSING' : 0x8000,
'ERR_INPUT_MISSING' : 0x10000
};
ERR_MODE_STATE = {
'ERR_AESINIT_MISSING' : 0x20000,
'ERR_KEYINIT_MISSING' : 0x40000,
'ERR_AESINIT_BAD' : 0x80000
}
TO_CHECK = {
'CHECK_OUTPUT' : (0x1 << 0),
'CHECK_RET' : (0x1 << 1),
'CHECK_STATE' : (0x1 << 2),
'CHECK_KEY_CTR' : (0x1 << 3),
'CHECK_AES_CTR' : (0x1 << 4),
'CHECK_AES_CTX' : (0x1 << 5)
}
# Targets to send tests
TARGETS = {
'QEMU' : 0,
'UART' : 1,
'STATIC' : 2,
}
# Returns a serialized test for AES
def gen_aes_test(name=None, mode=None, to_check=None, msg=None, key=None, random_key=None, random_aes=None, exp_out=None, exp_ret=None, exp_state=None, exp_ctr_key=None, exp_ctr_aes=None, comm_line_output=None):
aes_test_string = "t"
# Name
if name != None:
if len(name) > 32:
print("Error: unsupported message length %d > 32" % len(name))
aes_test_string += name + ((32-len(name))*'\x00')
else:
aes_test_string += (32*'\x00')
# Mode
if mode != None:
aes_test_string += chr(mode)
else:
aes_test_string += chr(0)
# To check
if to_check != None:
aes_test_string += chr(to_check)
else:
aes_test_string += chr(0)
# Message
if msg != None:
if len(msg) != 16:
print("Error: unsupported message length %d != 16" % len(msg))
sys.exit(-1)
aes_test_string += chr(16) + msg
else:
aes_test_string += chr(0) + ('\x00'*16)
# Key
if key != None:
if (len(key) != 16) and (len(key) != 24) and (len(key) != 32):
print("Error: unsupported key length %d != 16, 24 or 32" % len(key))
sys.exit(-1)
aes_test_string += chr(len(key)) + key + ((32-len(key))*'\x00')
else:
aes_test_string += chr(0) + ('\x00'*32)
# Random Key
if random_key != None:
if len(random_key) != 19:
print("Error: unsupported random_key length %d != 19" % len(random_key))
sys.exit(-1)
aes_test_string += chr(19) + random_key
else:
aes_test_string += chr(0) + ('\x00'*19)
# Random AES
if random_aes != None:
if len(random_aes) != 19:
print("Error: unsupported random_aes length %d != 19" % len(random_aes))
sys.exit(-1)
aes_test_string += chr(19) + random_aes
else:
aes_test_string += chr(0) + ('\x00'*19)
# Expected output
if exp_out != None:
if len(exp_out) != 16:
print("Error: unsupported expected output length %d != 16" % len(exp_out))
sys.exit(-1)
aes_test_string += chr(16) + exp_out
else:
aes_test_string += chr(0) + ('\x00'*16)
# Expected ret
if exp_ret != None:
if exp_ret >= (0x1 << 32):
print("Error: unsupported expected ret (too big) %d" % exp_ret)
sys.exit(-1)
aes_test_string += chr(exp_ret & 0xff) + chr((exp_ret & 0xff00) >> 8) + chr((exp_ret & 0xff0000) >> 16) + chr((exp_ret & 0xff000000) >> 24)
else:
aes_test_string += ('\x00'*4)
# Expected state
if exp_state != None:
aes_test_string += chr(exp_state)
else:
aes_test_string += chr(0)
# Expected key counter
if exp_ctr_key != None:
if exp_ctr_key >= (0x1 << 32):
print("Error: unsupported expected counter Key (too big) %d" % exp_ctr_key)
sys.exit(-1)
aes_test_string += chr(exp_ctr_key & 0xff) + chr((exp_ctr_key & 0xff00) >> 8) + chr((exp_ctr_key & 0xff0000) >> 16) + chr((exp_ctr_key & 0xff000000) >> 24)
else:
aes_test_string += ('\x00'*4)
# Expected AES counter
if exp_ctr_aes != None:
if exp_ctr_aes >= (0x1 << 32):
print("Error: unsupported expected counter AES (too big) %d" % exp_ctr_aes)
sys.exit(-1)
aes_test_string += chr(exp_ctr_aes & 0xff) + chr((exp_ctr_aes & 0xff00) >> 8) + chr((exp_ctr_aes & 0xff0000) >> 16) + chr((exp_ctr_aes & 0xff000000) >> 24)
else:
aes_test_string += ('\x00'*4)
# Comm line output
if comm_line_output != None:
aes_test_string += chr(comm_line_output)
else:
# No output by default
aes_test_string += chr(0)
return aes_test_string
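# For reference, the record built above is a fixed 156-byte string: a leading 't' marker,
# the name padded to 32 bytes, mode (1 byte), to_check (1 byte), msg length byte + 16 bytes,
# key length byte + 32 bytes, random_key length byte + 19 bytes, random_aes length byte + 19
# bytes, exp_out length byte + 16 bytes, exp_ret as 4 little-endian bytes, exp_state (1 byte),
# exp_ctr_key (4 bytes), exp_ctr_aes (4 bytes), and comm_line_output (1 byte).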
def generate_aes_test_case(i, direction, comm_line_output=None):
test = ""
test_idx = []
old_len = 0
msg = expand(inttostring(randint(0, 2**128)), 128, "LEFT")
key = expand(inttostring(randint(0, 2**128)), 128, "LEFT")
aes_ecb = local_AES(key, AES.MODE_ECB)
if direction == 'ENC':
exp_out = aes_ecb.encrypt(msg)
elif direction == 'DEC':
exp_out = aes_ecb.decrypt(msg)
else:
print("Error: unsupported direction %s for AES" % direction)
sys.exit(-1)
# Full AES en/decryption test
mode = MODE_TYPE['MODE_KEYINIT'] | MODE_TYPE['MODE_AESINIT_'+direction] | MODE_TYPE['MODE_'+direction]
to_check = TO_CHECK['CHECK_RET'] | TO_CHECK['CHECK_OUTPUT']
test += "b"
test_idx.append((old_len,len(test)))
old_len = len(test)
test += gen_aes_test(name="["+direction+str(i)+"]full_"+str(i), mode=mode, to_check=to_check, msg=msg, key=key, random_key=None, random_aes=None, exp_out=exp_out,exp_ret=None, exp_state=None, exp_ctr_key=None, exp_ctr_aes=None, comm_line_output=comm_line_output)
test_idx.append((old_len,len(test)))
old_len = len(test)
test += "e"
test_idx.append((old_len,len(test)))
old_len = len(test)
# Split AES en/decryption test, init alone
mode = MODE_TYPE['MODE_KEYINIT'] | MODE_TYPE['MODE_AESINIT_'+direction]
to_check = TO_CHECK['CHECK_RET']
test += "b"
test_idx.append((old_len,len(test)))
old_len = len(test)
test += gen_aes_test(name="["+direction+str(i)+"]split_step0_"+str(i), mode=mode, to_check=to_check, msg=msg, key=key, random_key=None, random_aes=None, exp_out=None,exp_ret=0, exp_state=None, exp_ctr_key=None, exp_ctr_aes=None, comm_line_output=comm_line_output)
test_idx.append((old_len,len(test)))
old_len = len(test)
mode = MODE_TYPE['MODE_'+direction]
to_check = TO_CHECK['CHECK_RET'] | TO_CHECK['CHECK_OUTPUT']
test += gen_aes_test(name="["+direction+str(i)+"]split_step1_"+str(i), mode=mode, to_check=to_check, msg=msg, key=key, random_key=None, random_aes=None, exp_out=exp_out,exp_ret=0, exp_state=None, exp_ctr_key=None, exp_ctr_aes=None, comm_line_output=comm_line_output)
test_idx.append((old_len,len(test)))
old_len = len(test)
test += "e"
test_idx.append((old_len,len(test)))
old_len = len(test)
# Split AES en/decryption test, init split
mode = MODE_TYPE['MODE_KEYINIT']
to_check = TO_CHECK['CHECK_RET']
test += "b"
test_idx.append((old_len,len(test)))
old_len = len(test)
test += gen_aes_test(name="["+direction+str(i)+"]splitinit_step0_"+str(i), mode=mode, to_check=to_check, msg=msg, key=key, random_key=None, random_aes=None, exp_out=None,exp_ret=0, exp_state=None, exp_ctr_key=None, exp_ctr_aes=None, comm_line_output=comm_line_output)
test_idx.append((old_len,len(test)))
old_len = len(test)
mode = MODE_TYPE['MODE_AESINIT_'+direction]
to_check = TO_CHECK['CHECK_RET']
test += gen_aes_test(name="["+direction+str(i)+"]splitinit_step1_"+str(i), mode=mode, to_check=to_check, msg=msg, key=key, random_key=None, random_aes=None, exp_out=None,exp_ret=0, exp_state=None, exp_ctr_key=None, exp_ctr_aes=None, comm_line_output=comm_line_output)
test_idx.append((old_len,len(test)))
old_len = len(test)
mode = MODE_TYPE['MODE_'+direction]
to_check = TO_CHECK['CHECK_RET'] | TO_CHECK['CHECK_OUTPUT']
test += gen_aes_test(name="["+direction+str(i)+"]splitinit_step2_"+str(i), mode=mode, to_check=to_check, msg=msg, key=key, random_key=None, random_aes=None, exp_out=exp_out,exp_ret=0, exp_state=None, exp_ctr_key=None, exp_ctr_aes=None, comm_line_output=comm_line_output)
test_idx.append((old_len,len(test)))
old_len = len(test)
test += "e"
test_idx.append((old_len,len(test)))
old_len = len(test)
return test, test_idx
def generate_multiple_aes_tests(num, comm_line_output=None):
test = ""
test_idx = []
# Generate all the asked tests for encryption and decryption
for i in range(0, num):
# Encryption
test_i_enc, test_i_enc_idx = generate_aes_test_case(i, 'ENC', comm_line_output=comm_line_output)
# Offsets
for idx in range(len(test_i_enc_idx)):
b, e = test_i_enc_idx[idx]
test_i_enc_idx[idx] = (b + len(test), e + len(test))
test += test_i_enc
test_idx += test_i_enc_idx
# Decryption
test_i_dec, test_i_dec_idx = generate_aes_test_case(i, 'DEC', comm_line_output=comm_line_output)
# Offsets
for idx in range(len(test_i_dec_idx)):
b, e = test_i_dec_idx[idx]
test_i_dec_idx[idx] = (b + len(test), e + len(test))
test += test_i_dec
test_idx += test_i_dec_idx
return test, test_idx
def send_comm_aes_tests(t, channel, uart=None):
tests, tests_idx = t
script_path = os.path.abspath(os.path.dirname(sys.argv[0])) + "/"
# The STATIC channel generates an internal test as header to be compiled
# with the sources. The static test is launched at startup of the firmware/program.
if channel == 'STATIC':
# The communication channel is stdin, we create a flat file
if not os.path.exists(script_path+"../generated_tests/"):
os.mkdir(script_path+"../generated_tests/")
out_file_name = script_path+"../generated_tests/aes_tests.h"
out_string = "/* Autogerated static tests for the masked AES. Please do not edit! */"
out_string += "\nconst char aes_static_tests[] = \""
for i in range(len(tests)):
out_string += "\\x%02x" % ord(tests[i])
out_string += "\";"
save_in_file(out_string, out_file_name)
# The QEMU channel is barely a flat file that is fed through stdin
elif channel == 'QEMU':
# The communication channel is stdin, we create a flat file
if not os.path.exists(script_path+"../generated_tests/"):
os.mkdir(script_path+"../generated_tests/")
out_file_name = script_path+"../generated_tests/aes_tests.bin"
save_in_file(tests, out_file_name)
# The UART channel communicates with a board plugged to an UART
elif channel == 'UART':
import serial
# An uart path must be provided
if uart == None:
print("Error: you must provide an uart path!")
sys.exit(-1)
# Open the serial handler
ser = serial.Serial()
ser.port = uart
ser.baudrate = 115200
ser.open()
# Now that the serial is opened, send our test file
# We split all the tests
for (b, e) in tests_idx:
ser.write(encode_string(tests[b:e]))
if len(tests[b:e]) > 1:
print("Testing %s" % tests[b+1:b+33])
# Wait for the response ('r' + return status + real output when needed)
ret = ser.read(18)
ret_err = ret[1]
ret_out = ret[2:]
# Check the return error and the output
exp_out = tests[b+126:b+126+16]
print(" -> Test returns %s" % (local_hexlify(ret_err)))
if exp_out != ret_out:
print("Error: output mismatch %s != %s" % (local_hexlify(exp_out), local_hexlify(ret_out)))
else:
print("Error: unsupported channel %s for AES tests" % channel)
sys.exit(-1)
def PrintUsage():
executable = os.path.basename(__file__)
print("Error when executing %s\n\tUsage:\t%s (STATIC|QEMU|UART) <number> <uart_path>" % (executable, executable))
sys.exit(-1)
# Get the arguments
if __name__ == '__main__':
# Get the | |
# sgraton/python-emploi-store
# encoding: utf-8
"""Unit tests for emploi_store module."""
import codecs
import datetime
import itertools
import tempfile
import shutil
import unittest
import mock
import requests_mock
import emploi_store
# TODO: Add more tests.
@requests_mock.Mocker()
class ClientTestCase(unittest.TestCase):
"""Unit tests for the Client class."""
def setUp(self):
super(ClientTestCase, self).setUp()
self.client = emploi_store.Client('my-ID', 'my-Secret')
def test_access_token(self, mock_requests):
"""Test the access_token method."""
def _match_request_data(request):
data = (request.text or '').split('&')
return 'client_id=my-ID' in data and \
'client_secret=my-Secret' in data and \
'scope=application_my-ID+my-scope' in data
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
additional_matcher=_match_request_data,
json={'access_token': 'foobar'})
token = self.client.access_token(scope='my-scope')
self.assertEqual('foobar', token)
def test_access_fails(self, mock_requests):
"""Test the access_token method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
status_code=401)
with self.assertRaises(ValueError):
self.client.access_token(scope='my-scope')
@mock.patch(emploi_store.__name__ + '.datetime')
def test_access_token_reuse(self, mock_requests, mock_datetime):
"""Test the access_token just after a token was fetched."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
            json={'access_token': 'first-token', 'expires_in': 500})
now = datetime.datetime(2016, 3, 11)
mock_datetime.datetime.now.return_value = now
mock_datetime.timedelta = datetime.timedelta
self.client.access_token('my-scope')
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
            json={'access_token': 'second-token', 'expires_in': 500})
now += datetime.timedelta(seconds=40)
mock_datetime.datetime.now.return_value = now
token = self.client.access_token('my-scope')
self.assertEqual('first-token', token)
@mock.patch(emploi_store.__name__ + '.datetime')
def test_access_token_expired(self, mock_requests, mock_datetime):
"""Test the access_token just after a token was fetched."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
            json={'access_token': 'first-token', 'expires_in': 20})
now = datetime.datetime(2016, 3, 11)
mock_datetime.datetime.now.return_value = now
mock_datetime.timedelta = datetime.timedelta
self.client.access_token('my-scope')
now += datetime.timedelta(seconds=40)
mock_datetime.datetime.now.return_value = now
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
            json={'access_token': 'second token', 'expires_in': 20})
token = self.client.access_token('my-scope')
self.assertEqual('second token', token)
def test_get_lbb_companies(self, mock_requests):
"""Test the get_lbb_companies method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
            json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/labonneboite/v1/company/?'
'distance=10&latitude=45&longitude=2.1&rome_codes=A1204%2CB1201',
headers={'Authorization': 'Bearer foobar'},
json={'companies': [{'one': 1}, {'two': 2}]})
companies = self.client.get_lbb_companies(45, 2.1, rome_codes=['A1204', 'B1201'])
companies = list(companies)
self.assertEqual([{'one': 1}, {'two': 2}], companies)
def test_get_lbb_companies_by_city_id(self, mock_requests):
"""Test the get_lbb_companies method using a city ID as input."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
            json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/labonneboite/v1/company/?'
'distance=10&commune_id=31555&rome_codes=A1204',
headers={'Authorization': 'Bearer foobar'},
json={'companies': [{'one': 1}, {'two': 2}]})
companies = list(self.client.get_lbb_companies(city_id='31555', rome_codes=['A1204']))
self.assertEqual([{'one': 1}, {'two': 2}], companies)
def test_get_lbb_companies_missing_location(self, unused_mock_requests):
"""Test the get_lbb_companies method when no location is given."""
generator = self.client.get_lbb_companies(rome_codes=['A1204'])
self.assertRaises(ValueError, next, generator)
def test_get_lbb_companies_fail(self, mock_requests):
"""Test the get_lbb_companies method if the server fails."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/labonneboite/v1/company/?'
'distance=10&latitude=45&longitude=2.1&rome_codes=A1204%2CB1201',
headers={'Authorization': 'Bearer foobar'},
status_code=502, reason='Internal Failure')
companies = self.client.get_lbb_companies(45, 2.1, rome_codes=['A1204', 'B1201'])
with self.assertRaises(emploi_store.requests.exceptions.HTTPError):
list(companies)
def test_get_lbb_companies_one_rome_code_only(self, mock_requests):
"""Test the get_lbb_companies method using a single rome code."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/labonneboite/v1/company/?'
'distance=10&latitude=45&longitude=2.1&rome_codes=A1204&naf_codes=4711C',
headers={'Authorization': 'Bearer foobar'},
json={'companies': [{'one': 1}, {'two': 2}]})
companies = self.client.get_lbb_companies(45, 2.1, rome_codes='A1204', naf_codes='4711C')
companies = list(companies)
self.assertEqual([{'one': 1}, {'two': 2}], companies)
def test_get_employment_rate_rank_for_training(self, mock_requests):
"""Test the get_employment_rate_rank_for_training method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/retouralemploisuiteformation/v1/rank?'
'codeinseeville=69123&formacode=22435',
headers={'Authorization': 'Bearer foobar'},
json=[{
'formacode': '22435',
'codeinsee-bassin': '52114',
'taux-bassin': '',
'taux-departemental': '',
'taux-regional': '0.4',
'taux-national': '0.6',
}])
ranking = self.client.get_employment_rate_rank_for_training(
city_id='69123', formacode='22435')
self.assertEqual(
{
'formacode': '22435',
'codeinsee-bassin': '52114',
'taux-bassin': '',
'taux-departemental': '',
'taux-regional': '0.4',
'taux-national': '0.6',
},
ranking)
def test_get_employment_rate_rank_for_training_fail(self, mock_requests):
"""Test the get_employment_rate_rank_for_training method when the server fails."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/retouralemploisuiteformation/v1/rank?'
'codeinseeville=69123&formacode=22435',
headers={'Authorization': 'Bearer foobar'},
status_code=502, reason='Internal Failure')
with self.assertRaises(emploi_store.requests.exceptions.HTTPError):
self.client.get_employment_rate_rank_for_training(
city_id='69123', formacode='22435')
def test_get_match_via_soft_skills(self, mock_requests):
"""Test the match_via_soft_skills method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.post(
'https://api.emploi-store.fr/partenaire/matchviasoftskills/v1/professions/job_skills?'
'code=A1204',
headers={'Authorization': 'Bearer foobar'},
status_code=201,
json={
'uuid': 'something',
'code': 'A1204',
'create_at': 'a date and time',
'skills': {
'soft_skill_1': {'score': 1},
'soft_skill_2': {'score': 2},
},
})
skills = list(self.client.get_match_via_soft_skills('A1204'))
self.assertEqual([{'score': 1}, {'score': 2}], sorted(skills, key=lambda s: s['score']))
def test_get_match_via_soft_skills_fail(self, mock_requests):
"""Test the match_via_soft_skills method when the server fails."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.post(
'https://api.emploi-store.fr/partenaire/matchviasoftskills/v1/professions/job_skills?'
'code=A1204',
headers={'Authorization': 'Bearer foobar'},
status_code=502, reason='Internal Failure')
with self.assertRaises(emploi_store.requests.exceptions.HTTPError):
list(self.client.get_match_via_soft_skills('A1204'))
def test_la_bonne_alternance(self, mock_requests):
"""Test the get_lbb_companies method to access data from La Bonne Alternance."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/labonneboite/v1/company/?'
'distance=10&latitude=45&longitude=2.1&rome_codes=A1204%2CB1201&contract=alternance',
headers={'Authorization': 'Bearer foobar'},
json={
'companies': [
{'one': 1},
{'two': 2},
],
})
companies = list(self.client.get_lbb_companies(
45, 2.1, rome_codes=['A1204', 'B1201'], contract='alternance'))
self.assertEqual([{'one': 1}, {'two': 2}], companies)
def test_list_emploistore_services(self, mock_requests):
"""Test the list_emploistore_services method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/cataloguedesservicesemploistore/'
'v1/api-emploistore/fichesservices',
headers={'Authorization': 'Bearer foobar'},
json=[
{
'identifiantService': 'bobEmploi',
'nomService': 'Bob',
'typeService': 'coaching',
},
{
'identifiantService': 'laBonneBoite',
'nomService': 'La Bonne Boite',
'typeService': 'Moteur de recherche',
},
])
services = self.client.list_emploistore_services()
self.assertEqual(
            ['Bob', 'La Bonne Boite'],
[service.get('nomService') for service in services])
def test_describe_emploistore_service(self, mock_requests):
"""Test the describe_emploistore_service method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/cataloguedesservicesemploistore/'
'v1/api-emploistore/fichesservices/bobEmploi/false',
headers={'Authorization': 'Bearer foobar'},
json={
'ficheService': {
'identifiantService': 'bobEmploi',
'nomService': 'Bob',
'typeService': 'coaching',
},
})
service = self.client.describe_emploistore_service('bobEmploi')
self.assertEqual('Bob', service.get('ficheService', {}).get('nomService'))
def test_list_online_events(self, mock_requests):
"""Test the list_online_events method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/evenements/v1/salonsenligne',
headers={
'Accept': 'application/json',
'Authorization': 'Bearer foobar',
},
json=[
{
'titre': 'Recrutement ADMR',
'nombreOffres': 4,
},
{
'titre': u'la transition écologique: rejoignez HITECH !',
'nombreOffres': 2,
},
])
events = self.client.list_online_events()
self.assertEqual(
['Recrutement ADMR', u'la transition écologique: rejoignez HITECH !'],
[event.get('titre') for event in events])
def test_list_physical_events(self, mock_requests):
"""Test the list_physical_events method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token',
            json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/evenements/v1/evenementsphysiques',
headers={
'Accept': 'application/json',
'Authorization': 'Bearer foobar',
},
json=[
{
'titre': u'"Tremplin de l\'emploi" à Wittelsheim',
'categorie': 'Salon',
'dateDebut': '12/03/2019',
'dateFin': '12/03/2019',
'periode': 'de 9h à 17h',
'rue': '111, rue de Reiningue',
'codePostal': '68310',
'ville': 'Wittelsheim',
'region': 'Grand Est',
'latitudeGps': '47.792960',
'longitudeGps': '7.228931',
},
{
'titre': '10 clics pour un emploi',
'categorie': "Semaine d'événements",
'dateDebut': '25/02/2019',
'dateFin': '25/02/2019',
'periode': '14h - 15h30',
'rue': '3 bis Avenue des Noëlles',
'codePostal': '44500',
'ville': 'LA BAULE',
'region': 'Pays de la Loire',
'latitudeGps': '47.290804',
'longitudeGps': '-2.393948',
},
])
events = self.client.list_physical_events()
self.assertEqual(
[u'"Tremplin de l\'emploi" à Wittelsheim', '10 clics pour un emploi'],
[event.get('titre') for event in events])
self.assertEqual(
['Wittelsheim', 'LA BAULE'],
[event.get('ville') for event in events])
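# Usage sketch of the client under test (illustrative only; it mirrors the
# calls exercised above, and the credentials, scope and ROME code are
# placeholder values, not real ones):
#
#   client = emploi_store.Client('my-ID', 'my-Secret')
#   token = client.access_token(scope='my-scope')
#   for company in client.get_lbb_companies(45, 2.1, rome_codes=['A1204']):
#       print(company)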
@requests_mock.Mocker()
class PackageTest(unittest.TestCase):
"""Unit tests for the Package class."""
def setUp(self):
super(PackageTest, self).setUp()
self._client = emploi_store.Client('my-ID', 'my-Secret')
def test_get_resource_newer_version(self, mock_requests):
"""Test the get_resource method with a specific pe_version."""
package = emploi_store.Package(
self._client, name='BMO', resources=[
{'name': 'BMO 2013', 'id': 'bmo-2013-1', 'pe_version': '1'},
{'name': 'BMO 2013', 'id': 'bmo-2013-2', 'pe_version': '2'},
])
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/resource_show?id=bmo-2013-2',
headers={'Authorization': 'Bearer foobar'},
json={
'success': True,
'result': {
'id': 'downloaded-id',
'name': 'Downloaded BMO',
},
},
)
res = package.get_resource(name='BMO 2013', pe_version='2')
self.assertEqual('Downloaded BMO', res.name)
@requests_mock.Mocker()
class ResourceTest(unittest.TestCase):
"""Unit tests for the Resource class."""
def setUp(self):
super(ResourceTest, self).setUp()
_client = emploi_store.Client('my-ID', 'my-Secret')
self.res = emploi_store.Resource(
_client, name='BMO 2016', id='1234-abc')
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
super(ResourceTest, self).tearDown()
shutil.rmtree(self.tmpdir)
def test_to_csv(self, mock_requests):
"""Test the to_csv method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/datastore_search?'
'id=1234-abc',
headers={'Authorization': 'Bearer foobar'},
json={
'success': True,
'result': {
'records': [
{'CODE': '123', 'NAME': 'First'},
{'CODE': '456', 'NAME': 'Second'},
],
},
})
filename = self.tmpdir + '/bmo_2016.csv'
self.res.to_csv(filename)
with open(filename) as csv_file:
csv_content = csv_file.read().replace('\r\n', '\n')
self.assertEqual('''CODE,NAME
123,First
456,Second
''', csv_content)
def test_to_csv_number(self, mock_requests):
"""Test the to_csv method when resource returns numbers directly."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/datastore_search?'
'id=1234-abc',
headers={'Authorization': 'Bearer foobar'},
json={
'success': True,
'result': {
'records': [
{'CODE': 123, 'NAME': 'First'},
{'CODE': 456, 'NAME': 'Second'},
],
},
})
filename = self.tmpdir + '/bmo_2016.csv'
self.res.to_csv(filename)
with open(filename) as csv_file:
csv_content = csv_file.read().replace('\r\n', '\n')
self.assertEqual('''CODE,NAME
123,First
456,Second
''', csv_content)
def test_to_csv_utf8(self, mock_requests):
"""Test the to_csv method when resource has Unicode chars."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
            json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/datastore_search?'
'id=1234-abc',
headers={'Authorization': 'Bearer foobar'},
json={
'success': True,
'result': {
'records': [
{u'CÖDE': '123', 'NAME': u'Fïrst'},
{u'CÖDE': '456', 'NAME': u'Ségond'},
],
},
})
filename = self.tmpdir + '/bmo_2016.csv'
self.res.to_csv(filename)
with codecs.open(filename, 'r', 'utf-8') as csv_file:
csv_content = csv_file.read().replace('\r\n', '\n')
self.assertEqual(u'''CÖDE,NAME
123,Fïrst
456,Ségond
''', csv_content)
def test_to_csv_utf8_with_bom(self, mock_requests):
"""Test the to_csv method when resource has the BOM bug."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/datastore_search?'
'id=1234-abc',
headers={'Authorization': 'Bearer foobar'},
json={
'success': True,
'result': {
'records': [
{u'\ufeffCÖDE': '123', 'NAME': u'Fïrst'},
{u'\ufeffCÖDE': '456', 'NAME': u'Ségond'},
],
},
})
filename = self.tmpdir + '/bmo_2016.csv'
self.res.to_csv(filename)
with codecs.open(filename, 'r', 'utf-8') as csv_file:
csv_content = csv_file.read().replace('\r\n', '\n')
self.assertEqual(u'''CÖDE,NAME
123,Fïrst
456,Ségond
''', csv_content)
def test_to_csv_iterator(self, mock_requests):
"""Test the iterator arg of the to_csv method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/datastore_search?'
'id=1234-abc',
headers={'Authorization': 'Bearer foobar'},
json={
'success': True,
'result': {
'records': [
{'CODE': '1', 'NAME': 'First'},
{'CODE': '2', 'NAME': 'Second'},
{'CODE': '3', 'NAME': 'Third'},
{'CODE': '4', 'NAME': 'Fourth'},
],
},
})
filename = self.tmpdir + '/bmo_2016.csv'
self.res.to_csv(filename, iterator=lambda r: itertools.islice(r, 0, None, 2))
with open(filename) as csv_file:
csv_content = csv_file.read().replace('\r\n', '\n')
self.assertEqual('''CODE,NAME
1,First
3,Third
''', csv_content)
def test_num_records(self, mock_requests):
"""Test the length of the records method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/datastore_search?'
'id=1234-abc',
headers={'Authorization': 'Bearer foobar'},
json={
'success': True,
'result': {
'total': 42429,
'records': [{'id': 'hello'}],
},
})
records = self.res.records()
self.assertEqual(42429, len(records))
self.assertEqual([{'id': 'hello'}], list(records))
def test_to_csv_iterator_using_num_records(self, mock_requests):
"""Test the iterator arg of the to_csv method."""
mock_requests.post(
'https://entreprise.pole-emploi.fr/connexion/oauth2/access_token?realm=%2Fpartenaire',
json={'access_token': 'foobar'})
mock_requests.get(
'https://api.emploi-store.fr/partenaire/infotravail/v1/datastore_search?'
response:
self.assertEqual(response.status, 200)
self.assertEqual(
await response.text(),
textwrap.dedent('''\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
.highlight {
background-color: #ff0;
}
</style>
<title>Taguette Codebook</title>
</head>
<body>
<h1>Taguette Codebook</h1>
<h2>interesting</h2>
<p class="number">1 highlight</p>
<p>Further review required</p>
<h2>people</h2>
<p class="number">2 highlights</p>
<p>People of interest</p>
<h2>interesting.places</h2>
<p class="number">1 highlight</p>
<p></p>
</body>
</html>'''),
)
# Export codebook of project 2 to REFI-QDA
async with self.aget('/project/2/export/codebook.qdc') as response:
self.assertEqual(response.status, 200)
self.assertEqual(
response.headers['Content-Type'],
'text/xml; charset=utf-8',
)
compare_xml(
await response.text(),
('<?xml version="1.0" encoding="utf-8"?>\n'
'<CodeBook xmlns="urn:QDA-XML:codebook:1.0" origin="'
'Taguette {ver}"><Codes><Code guid="0D62985D-B147-5D01-A9B5-'
'CAE5DCD98342" name="interesting" isCodable="true"/><Code '
'guid="DFE5C38E-9449-5959-A1F7-E3D895CFA87F" name="people" '
'isCodable="true"/><Code guid="725F0645-9CD3-598A-8D2B-'
'EC3D39AB3C3F" name="interesting.places" isCodable="true"/>'
'</Codes><Sets/></CodeBook>'
).format(ver=exact_version()),
)
# Merge tag 3 into 2
async with self.apost(
'/api/project/2/tag/merge',
json=dict(src=3, dest=2),
) as response:
self.assertEqual(response.status, 200)
self.assertEqual(await response.json(), {'id': 2})
self.assertEqual(
await poll_proj2,
{'type': 'tag_merge', 'id': 11, 'src_tag_id': 3, 'dest_tag_id': 2},
)
poll_proj2 = await self.poll_event(2, 11)
# List all highlights in project 2
async with self.aget('/api/project/2/highlights/') as response:
self.assertEqual(response.status, 200)
self.assertEqual(await response.json(), {
'highlights': [
{'id': 2, 'document_id': 2, 'tags': [4],
'text_direction': 'LEFT_TO_RIGHT', 'content': "diff"},
{'id': 3, 'document_id': 2, 'tags': [2],
'text_direction': 'LEFT_TO_RIGHT', 'content': "tent"},
{'id': 4, 'document_id': 3, 'tags': [2],
'text_direction': 'RIGHT_TO_LEFT',
'content': "<strong>Opinion</strong>"},
],
'pages': 1,
})
await asyncio.sleep(2)
self.assertNotDone(poll_proj1)
self.assertNotDone(poll_proj2)
@gen_test(timeout=60)
async def test_reset_password(self):
# Accept cookies
async with self.apost('/cookies', data=dict()) as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
# Fetch registration page
async with self.aget('/register') as response:
self.assertEqual(response.status, 200)
# Register
async with self.apost(
'/register',
data=dict(
login='User',
password1='<PASSWORD>', password2='<PASSWORD>',
email='<EMAIL>',
),
) as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
# User exists in database
db = self.application.DBSession()
self.assertEqual(
[
(
user.login,
bool(user.hashed_password), bool(user.password_set_date),
user.check_password('<PASSWORD>'),
user.check_password('<PASSWORD>'),
)
for user in db.query(database.User).all()
],
[
('admin', True, True, False, False),
('user', True, True, True, False),
],
)
# Log out
async with self.aget('/logout') as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
# Wait so that reset link is more recent than password
time.sleep(1)
# Send reset link
        async with self.aget('/reset_password') as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
with mock.patch.object(self.application, 'send_mail') as mo:
async with self.apost(
'/reset_password',
data=dict(email='<EMAIL>'),
) as response:
self.assertEqual(response.status, 200)
msg = mo.call_args[0][0]
content = msg.get_payload()[0].get_content()
self.assertTrue(content.startswith("Someone has requested "))
link = re.search(r'https:\S*', content).group(0)
token = urlparse(link).query[12:]
# Check wrong tokens don't work
async with self.aget(
'/new_password?reset_token=wrongtoken',
) as response:
self.assertEqual(response.status, 403)
async with self.apost(
'/new_password',
data=dict(
reset_token='wrongtoken',
password1='<PASSWORD>', password2='<PASSWORD>',
),
) as response:
self.assertEqual(response.status, 403)
# Check right token works
async with self.aget('/new_password?reset_token=' + token) as response:
self.assertEqual(response.status, 200)
async with self.apost(
'/new_password',
data=dict(
reset_token=token,
password1='<PASSWORD>', password2='<PASSWORD>',
),
) as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
# User exists in database
db = self.application.DBSession()
self.assertEqual(
[
(
user.login,
bool(user.hashed_password), bool(user.password_set_date),
user.check_password('<PASSWORD>'),
user.check_password('<PASSWORD>'),
)
for user in db.query(database.User).all()
],
[
('admin', True, True, False, False),
('user', True, True, False, True),
],
)
# Check token doesn't work anymore
async with self.apost(
'/new_password',
data=dict(
reset_token=token,
password1='<PASSWORD>', password2='<PASSWORD>',
),
) as response:
self.assertEqual(response.status, 403)
async with self.aget('/new_password?reset_token=' + token) as response:
self.assertEqual(response.status, 403)
@classmethod
def make_basic_db(cls, db, db_num):
# Populate database
user = database.User(login='db%duser' % db_num)
user.set_password('<PASSWORD>')
db.add(user)
cls.make_basic_project(db, db_num, 1)
cls.make_basic_project(db, db_num, 2)
@staticmethod
def make_basic_project(db, db_num, project_num):
# Creates 1 project, 2 (+1 deleted) documents, 2 (+1 deleted) tags,
# 2 (+1 deleted) highlights, 13 commands total
def doc(project, number, dir=database.TextDirection.LEFT_TO_RIGHT):
text = 'db%ddoc%d%d' % (db_num, project_num, number)
return database.Document(
name=text + '.txt',
description='',
filename=text + '.txt',
project=project,
contents=text,
text_direction=dir,
)
user = 'db%duser' % db_num
project1 = database.Project(
name='db%dproject%d' % (db_num, project_num),
description='',
)
db.add(project1)
db.flush()
document1 = doc(project1, 1)
db.add(document1)
document2 = doc(project1, 2, database.TextDirection.RIGHT_TO_LEFT)
db.add(document2)
tag1 = database.Tag(
project=project1,
path='db%dtag%d1' % (db_num, project_num),
description='',
)
db.add(tag1)
tag2 = database.Tag(
project=project1,
path='db%dtag%d2' % (db_num, project_num),
description='',
)
db.add(tag2)
db.flush()
hl1 = database.Highlight(
document_id=document1.id,
start_offset=3, end_offset=6, snippet='doc',
)
hl2 = database.Highlight(
document_id=document2.id,
start_offset=3, end_offset=6, snippet='doc',
)
db.add(hl1)
db.add(hl2)
db.flush()
db.add(database.HighlightTag(highlight_id=hl1.id, tag_id=tag2.id))
db.add(database.HighlightTag(highlight_id=hl2.id, tag_id=tag1.id))
db.add(database.ProjectMember(user_login='admin', project=project1,
privileges=database.Privileges.ADMIN))
db.add(database.ProjectMember(user_login=user,
project=project1,
privileges=database.Privileges.ADMIN))
db.add(database.Command.document_add(user, document1))
document_fake = doc(project1, 100)
db.add(document_fake)
db.flush()
db.add(database.Command.document_add(user, document_fake))
db.add(database.Command.document_add(user, document2))
db.add(database.Command.document_delete(user, document_fake))
db.delete(document_fake)
tag_fake = database.Tag(project=project1, path='db%dtagF' % db_num,
description='')
db.add(tag_fake)
db.flush()
db.add(database.Command.tag_add(user, tag1))
db.add(database.Command.tag_add(user, tag_fake))
db.add(database.Command.tag_add(user, tag2))
db.add(database.Command.tag_delete(user, project1.id, tag_fake.id))
db.delete(tag_fake)
hl_fake = database.Highlight(
document_id=document1.id,
start_offset=3, end_offset=6, snippet='doc',
)
db.add(hl_fake)
db.flush()
db.add(database.Command.highlight_add(user, document1, hl1, []))
db.add(database.Command.highlight_add(user, document1, hl1, [tag2.id]))
db.add(database.Command.highlight_add(user, document1, hl_fake,
[tag1.id]))
db.add(database.Command.highlight_add(user, document1, hl2, [tag1.id]))
db.add(database.Command.highlight_delete(user, document1, hl_fake.id))
db.delete(hl_fake)
def assertRowsEqualsExceptDates(self, first, second):
self.assertEqual(*[
[
[item for item in row if not isinstance(item, datetime)]
for row in rows]
for rows in (first, second)
])
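    # Illustrative example (not part of the original tests): the helper above
    # strips datetime columns before comparing, so the following would pass
    # even though the timestamps differ:
    #   self.assertRowsEqualsExceptDates(
    #       [(1, 'proj', '', datetime(2020, 1, 1))],
    #       [(1, 'proj', '', datetime(2021, 6, 2))],
    #   )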
@with_tempdir
@gen_test
async def test_import(self, tmp):
# Populate database
db1 = self.application.DBSession()
self.make_basic_db(db1, 1)
db1.commit()
# Log in
async with self.apost('/cookies', data=dict()) as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
async with self.aget('/login') as response:
self.assertEqual(response.status, 200)
async with self.apost(
'/login',
data=dict(next='/', login='db1user', password='<PASSWORD>'),
) as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
# Create second database
db2_path = os.path.join(tmp, 'db.sqlite3')
db2 = database.connect('sqlite:///' + db2_path)()
db2.add(database.User(login='admin'))
self.make_basic_db(db2, 2)
db2.commit()
# List projects in database
with open(db2_path, 'rb') as fp:
async with self.apost(
'/api/import',
data={},
files=dict(file=('db2.sqlite3', 'application/octet-stream',
fp.read())),
) as response:
self.assertEqual(response.status, 200)
self.assertEqual(await response.json(), {
'projects': [{'id': 1, 'name': 'db2project1'},
{'id': 2, 'name': 'db2project2'}],
})
# Import project
with open(db2_path, 'rb') as fp:
async with self.apost(
'/api/import',
data={'project_id': '1'},
files=dict(file=('db2.sqlite3', 'application/octet-stream',
fp.read())),
) as response:
self.assertEqual(response.status, 200)
self.assertEqual(await response.json(), {
'project_id': 3,
})
# Check imported project
self.assertEqual(
{row[0] for row in db1.execute(
sqlalchemy.select([database.User.__table__.c.login])
)},
{'admin', 'db1user'},
)
self.assertRowsEqualsExceptDates(
db1.execute(
database.Project.__table__.select()
.order_by(database.Project.__table__.c.id)
),
[
(1, 'db1project1', '', datetime.utcnow()),
(2, 'db1project2', '', datetime.utcnow()),
(3, 'db2project1', '', datetime.utcnow()),
],
)
self.assertRowsEqualsExceptDates(
db1.execute(
database.ProjectMember.__table__.select()
.order_by(database.ProjectMember.__table__.c.project_id)
.order_by(database.ProjectMember.__table__.c.user_login)
),
[
(1, 'admin', database.Privileges.ADMIN),
(1, 'db1user', database.Privileges.ADMIN),
(2, 'admin', database.Privileges.ADMIN),
(2, 'db1user', database.Privileges.ADMIN),
(3, 'db1user', database.Privileges.ADMIN),
],
)
self.assertEqual(
[
(row['id'], row['name'])
for row in db1.execute(
database.Document.__table__.select()
.order_by(database.Document.__table__.c.id)
)
],
[
(1, 'db1doc11.txt'),
(2, 'db1doc12.txt'),
(4, 'db1doc21.txt'),
(5, 'db1doc22.txt'),
(7, 'db2doc11.txt'),
(8, 'db2doc12.txt'),
],
)
self.assertRowsEqualsExceptDates(
db1.execute(
database.Command.__table__.select()
.where(database.Command.__table__.c.project_id == 3)
.order_by(database.Command.__table__.c.id)
),
[
# id, user_login, project_id, document_id, {payload}
# project 1 imported as 3
# documents 1, 2, 3 imported as 7, 8, 9
# tags 1, 2, 3 imported as 7, 8, -3
# highlights 1, 2, 3 imported as 7, 8, -3
# commands 1-13 exported as 27-39
(27, 'db1user', 3, 7,
{'type': 'document_add', 'description': '',
'text_direction': 'LEFT_TO_RIGHT',
'document_name': 'db2doc11.txt'}),
(28, 'db1user', 3, -3,
{'type': 'document_add', 'description': '',
'text_direction': 'LEFT_TO_RIGHT',
'document_name': 'db2doc1100.txt'}),
(29, 'db1user', 3, 8,
{'type': 'document_add', 'description': '',
'text_direction': 'RIGHT_TO_LEFT',
'document_name': 'db2doc12.txt'}),
(30, 'db1user', 3, -3, {'type': 'document_delete'}),
(31, 'db1user', 3, None,
{'type': 'tag_add', 'description': '', 'tag_id': 7,
'tag_path': 'db2tag11'}),
(32, 'db1user', 3, None,
{'type': 'tag_add', 'description': '', 'tag_id': -3,
'tag_path': 'db2tagF'}),
(33, 'db1user', 3, None,
{'type': 'tag_add', 'description': '', 'tag_id': 8,
'tag_path': 'db2tag12'}),
(34, 'db1user', 3, None, {'type': 'tag_delete', 'tag_id': -3}),
(35, 'db1user', 3, 7,
{'type': 'highlight_add', 'highlight_id': 7,
'start_offset': 3, 'end_offset': 6, 'tags': []}),
(36, 'db1user', 3, 7,
{'type': 'highlight_add', 'highlight_id': 7,
'start_offset': 3, 'end_offset': 6, 'tags': [8]}),
(37, 'db1user', 3, 7,
{'type': 'highlight_add', 'highlight_id': -3,
'start_offset': 3, 'end_offset': 6, 'tags': [7]}),
(38, 'db1user', 3, 7,
{'type': 'highlight_add', 'highlight_id': 8,
'start_offset': 3, 'end_offset': 6, 'tags': [7]}),
(39, 'db1user', 3, 7,
{'type': 'highlight_delete', 'highlight_id': -3}),
(40, 'db1user', 3, None, {'type': 'project_import'}),
],
)
self.assertEqual(
[
(row['id'], row['document_id'])
for row in db1.execute(
database.Highlight.__table__.select()
.order_by(database.Highlight.__table__.c.id)
)
],
[(1, 1), (2, 2), (4, 4), (5, 5), (7, 7), (8, 8)],
)
self.assertRowsEqualsExceptDates(
db1.execute(database.Tag.__table__.select()),
[
(1, 1, 'db1tag11', ''),
(2, 1, 'db1tag12', ''),
(4, 2, 'db1tag21', ''),
(5, 2, 'db1tag22', ''),
(7, 3, 'db2tag11', ''),
(8, 3, 'db2tag12', ''),
],
)
self.assertRowsEqualsExceptDates(
db1.execute(
database.HighlightTag.__table__.select()
.order_by(database.HighlightTag.__table__.c.highlight_id)
),
[(1, 2), (2, 1), (4, 5), (5, 4), (7, 8), (8, 7)],
)
@with_tempdir
@gen_test
async def test_export(self, tmp):
# Populate database
db1 = self.application.DBSession()
self.make_basic_db(db1, 1)
db1.commit()
# Log in
async with self.apost('/cookies', data=dict()) as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
async with self.aget('/login') as response:
self.assertEqual(response.status, 200)
async with self.apost(
'/login',
data=dict(next='/', login='db1user', password='<PASSWORD>'),
) as response:
self.assertEqual(response.status, 302)
self.assertEqual(response.headers['Location'], '/')
# Export project
db2_path = os.path.join(tmp, 'db.sqlite3')
async with self.aget('/project/2/export/project.sqlite3') as response:
self.assertEqual(response.status, 200)
self.assertEqual(response.headers['Content-Type'],
'application/vnd.sqlite3')
self.assertTrue(re.match(
'^attachment; filename='
+ '"[0-9]{4}-[0-9]{2}-[0-9]{2}_db1project2.sqlite3"$',
response.headers['Content-Disposition'],
))
with open(db2_path, 'wb') as fp:
the method "iterkeys()".\n'
'\n'
' Iterator objects also need to implement this method; '
'they are\n'
' required to return themselves. For more information on '
'iterator\n'
' objects, see Iterator Types.\n'
'\n'
'object.__reversed__(self)\n'
'\n'
' Called (if present) by the "reversed()" built-in to '
'implement\n'
' reverse iteration. It should return a new iterator '
'object that\n'
' iterates over all the objects in the container in '
'reverse order.\n'
'\n'
' If the "__reversed__()" method is not provided, the '
'"reversed()"\n'
' built-in will fall back to using the sequence protocol '
'("__len__()"\n'
' and "__getitem__()"). Objects that support the '
'sequence protocol\n'
' should only provide "__reversed__()" if they can '
'provide an\n'
' implementation that is more efficient than the one '
'provided by\n'
' "reversed()".\n'
'\n'
' New in version 2.6.\n'
'\n'
'The membership test operators ("in" and "not in") are '
'normally\n'
'implemented as an iteration through a sequence. However, '
'container\n'
'objects can supply the following special method with a '
'more efficient\n'
'implementation, which also does not require the object be '
'a sequence.\n'
'\n'
'object.__contains__(self, item)\n'
'\n'
' Called to implement membership test operators. Should '
'return true\n'
' if *item* is in *self*, false otherwise. For mapping '
'objects, this\n'
' should consider the keys of the mapping rather than the '
'values or\n'
' the key-item pairs.\n'
'\n'
' For objects that don\'t define "__contains__()", the '
'membership test\n'
' first tries iteration via "__iter__()", then the old '
'sequence\n'
' iteration protocol via "__getitem__()", see this '
'section in the\n'
' language reference.\n',
'shifting': '\n'
'Shifting operations\n'
'*******************\n'
'\n'
'The shifting operations have lower priority than the arithmetic\n'
'operations:\n'
'\n'
' shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n'
'\n'
'These operators accept plain or long integers as arguments. '
'The\n'
'arguments are converted to a common type. They shift the first\n'
'argument to the left or right by the number of bits given by '
'the\n'
'second argument.\n'
'\n'
'A right shift by *n* bits is defined as division by "pow(2, '
'n)". A\n'
'left shift by *n* bits is defined as multiplication with "pow(2, '
'n)".\n'
'Negative shift counts raise a "ValueError" exception.\n'
'\n'
'Note: In the current implementation, the right-hand operand is\n'
' required to be at most "sys.maxsize". If the right-hand '
'operand is\n'
' larger than "sys.maxsize" an "OverflowError" exception is '
'raised.\n',
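# Worked example (added for illustration, not part of the upstream help text):
# 5 << 2 == 5 * pow(2, 2) == 20, and -7 >> 1 == -7 // pow(2, 1) == -4,
# matching the shift definitions quoted above.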
'slicings': '\n'
'Slicings\n'
'********\n'
'\n'
'A slicing selects a range of items in a sequence object (e.g., '
'a\n'
'string, tuple or list). Slicings may be used as expressions or '
'as\n'
'targets in assignment or "del" statements. The syntax for a '
'slicing:\n'
'\n'
' slicing ::= simple_slicing | extended_slicing\n'
' simple_slicing ::= primary "[" short_slice "]"\n'
' extended_slicing ::= primary "[" slice_list "]"\n'
' slice_list ::= slice_item ("," slice_item)* [","]\n'
' slice_item ::= expression | proper_slice | ellipsis\n'
' proper_slice ::= short_slice | long_slice\n'
' short_slice ::= [lower_bound] ":" [upper_bound]\n'
' long_slice ::= short_slice ":" [stride]\n'
' lower_bound ::= expression\n'
' upper_bound ::= expression\n'
' stride ::= expression\n'
' ellipsis ::= "..."\n'
'\n'
'There is ambiguity in the formal syntax here: anything that '
'looks like\n'
'an expression list also looks like a slice list, so any '
'subscription\n'
'can be interpreted as a slicing. Rather than further '
'complicating the\n'
'syntax, this is disambiguated by defining that in this case the\n'
'interpretation as a subscription takes priority over the\n'
'interpretation as a slicing (this is the case if the slice list\n'
'contains no proper slice nor ellipses). Similarly, when the '
'slice\n'
'list has exactly one short slice and no trailing comma, the\n'
'interpretation as a simple slicing takes priority over that as '
'an\n'
'extended slicing.\n'
'\n'
'The semantics for a simple slicing are as follows. The primary '
'must\n'
'evaluate to a sequence object. The lower and upper bound '
'expressions,\n'
'if present, must evaluate to plain integers; defaults are zero '
'and the\n'
'"sys.maxint", respectively. If either bound is negative, the\n'
"sequence's length is added to it. The slicing now selects all "
'items\n'
'with index *k* such that "i <= k < j" where *i* and *j* are the\n'
'specified lower and upper bounds. This may be an empty '
'sequence. It\n'
'is not an error if *i* or *j* lie outside the range of valid '
'indexes\n'
"(such items don't exist so they aren't selected).\n"
'\n'
'The semantics for an extended slicing are as follows. The '
'primary\n'
'must evaluate to a mapping object, and it is indexed with a key '
'that\n'
'is constructed from the slice list, as follows. If the slice '
'list\n'
'contains at least one comma, the key is a tuple containing the\n'
'conversion of the slice items; otherwise, the conversion of the '
'lone\n'
'slice item is the key. The conversion of a slice item that is '
'an\n'
'expression is that expression. The conversion of an ellipsis '
'slice\n'
'item is the built-in "Ellipsis" object. The conversion of a '
'proper\n'
'slice is a slice object (see section The standard type '
'hierarchy)\n'
'whose "start", "stop" and "step" attributes are the values of '
'the\n'
'expressions given as lower bound, upper bound and stride,\n'
'respectively, substituting "None" for missing expressions.\n',
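# Worked example (added for illustration, not part of the upstream help text):
# for an extended slicing such as a[1:2, 3], the key passed to __getitem__ is
# the tuple (slice(1, 2, None), 3), built exactly as described above.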
'specialattrs': '\n'
'Special Attributes\n'
'******************\n'
'\n'
'The implementation adds a few special read-only attributes '
'to several\n'
'object types, where they are relevant. Some of these are '
'not reported\n'
'by the "dir()" built-in function.\n'
'\n'
'object.__dict__\n'
'\n'
' A dictionary or other mapping object used to store an '
"object's\n"
' (writable) attributes.\n'
'\n'
'object.__methods__\n'
'\n'
' Deprecated since version 2.2: Use the built-in function '
'"dir()" to\n'
" get a list of an object's attributes. This attribute is "
'no longer\n'
' available.\n'
'\n'
'object.__members__\n'
'\n'
' Deprecated since version 2.2: Use the built-in function '
'"dir()" to\n'
" get a list of an object's attributes. This attribute is "
'no longer\n'
' available.\n'
'\n'
'instance.__class__\n'
'\n'
' The class to which a class instance belongs.\n'
'\n'
'class.__bases__\n'
'\n'
' The tuple of base classes of a class object.\n'
'\n'
'definition.__name__\n'
'\n'
' The name of the class, type, function, method, '
'descriptor, or\n'
' generator instance.\n'
'\n'
'The following attributes are only supported by *new-style '
'class*es.\n'
'\n'
'class.__mro__\n'
'\n'
' This attribute is a tuple of classes that are considered '
'when\n'
' looking for base classes during method resolution.\n'
'\n'
'class.mro()\n'
'\n'
' This method can be overridden by a metaclass to customize '
'the\n'
' method resolution order for its instances. It is called '
'at class\n'
' instantiation, and its result is stored in "__mro__".\n'
'\n'
'class.__subclasses__()\n'
'\n'
' Each new-style class keeps a list of weak references to '
'its\n'
' immediate subclasses. This method returns a list of all '
'those\n'
' references still alive. Example:\n'
'\n'
' >>> int.__subclasses__()\n'
" [<type 'bool'>]\n"
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] Additional information on these special methods may be '
'found\n'
' in the Python Reference Manual (Basic customization).\n'
'\n'
'[2] As a consequence, the list "[1, 2]" is considered equal '
'to\n'
' "[1.0, 2.0]", and similarly for tuples.\n'
'\n'
"[3] They must have since the parser can't tell the type of "
'the\n'
' operands.\n'
'\n'
'[4] Cased characters are those with general category '
'property\n'
' being one of "Lu" (Letter, uppercase), "Ll" (Letter, '
'lowercase),\n'
' or "Lt" (Letter, titlecase).\n'
'\n'
'[5] To format only a tuple you should therefore provide a\n'
' singleton tuple whose only element is the tuple to be '
'formatted.\n'
'\n'
'[6] The advantage of leaving the newline on is that '
'returning an\n'
' empty
# File: plugins/cisco_firepower_management_center/icon_cisco_firepower_management_center/actions/create_address_object/schema.py
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Creates a new address object"
class Input:
ADDRESS = "address"
ADDRESS_OBJECT = "address_object"
SKIP_PRIVATE_ADDRESS = "skip_private_address"
WHITELIST = "whitelist"
class Output:
ADDRESS_OBJECT = "address_object"
class CreateAddressObjectInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"address": {
"type": "string",
"title": "Address",
"description": "IP address, CIDR IP address, or domain name to assign to the Address Object",
"order": 2
},
"address_object": {
"type": "string",
"title": "Address Object",
"description": "Name of the address object, defaults to the value address in the address field if no name is given",
"order": 1
},
"skip_private_address": {
"type": "boolean",
"title": "Skip Private Addresses",
"description": "If set to true, any addresses that are defined in the RFC1918 space will not be blocked. e.g. 10/8, 172.16/12, 192.168/16",
"order": 3
},
"whitelist": {
"type": "array",
"title": "Whitelist",
"description": "This list contains a set of hosts that should not be blocked. This can include IP addresses, CIDR IP addresses, and domains",
"items": {
"type": "string"
},
"order": 4
}
},
"required": [
"address",
"skip_private_address"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
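# Example input (illustrative placeholder values only) that satisfies the
# schema above; "address" and "skip_private_address" are the required keys:
#
#   {
#       "address": "198.51.100.7",
#       "address_object": "Blocked-Host",
#       "skip_private_address": true,
#       "whitelist": ["192.0.2.10", "example.com"]
#   }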
class CreateAddressObjectOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"address_object": {
"$ref": "#/definitions/address_object",
"title": "Address Object",
"description": "Returns information about the newly created address object",
"order": 1
}
},
"definitions": {
"address_object": {
"type": "object",
"title": "address_object",
"properties": {
"description": {
"type": "string",
"title": "Description",
"description": "User provided resource description",
"order": 4
},
"dnsResolution": {
"type": "string",
"title": "DNS Resolution",
"description": "DNS resolution",
"order": 12
},
"id": {
"type": "string",
"title": "ID",
"description": "Unique identifier representing response object",
"order": 7
},
"links": {
"$ref": "#/definitions/links",
"title": "Links",
"description": "This defines the self referencing links for the given resource",
"order": 5
},
"metadata": {
"$ref": "#/definitions/metadata",
"title": "Metadata",
"description": "Defines read only details about the object - whether it is system defined, last user who modified the object etc",
"order": 1
},
"name": {
"type": "string",
"title": "Name",
"description": "User assigned resource name",
"order": 3
},
"overridable": {
"type": "boolean",
"title": "Overridable",
"description": "Boolean indicating whether object values can be overridden",
"order": 2
},
"overrideTargetId": {
"type": "string",
"title": "Override Target ID",
"description": "Unique identifier of domain or device when override assigned to child domain. Used as path parameter to GET override details for a specific object on a specific target (device or domain)",
"order": 11
},
"overrides": {
"$ref": "#/definitions/override",
"title": "Overrides",
"description": "Defines the override details for this object",
"order": 6
},
"type": {
"type": "string",
"title": "Type",
"description": "The unique type of this object",
"order": 8
},
"value": {
"type": "string",
"title": "Value",
"description": "Actual value of the network",
"order": 9
},
"version": {
"type": "string",
"title": "Version",
"description": "Version number of the response object",
"order": 10
}
},
"definitions": {
"domain": {
"type": "object",
"title": "domain",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Unique UUID of this domain",
"order": 3
},
"links": {
"$ref": "#/definitions/links",
"title": "Links",
"description": "This defines the self referencing links for the given resource",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the domain",
"order": 1
},
"type": {
"type": "string",
"title": "Type",
"description": "Domain type definition",
"order": 4
}
},
"definitions": {
"links": {
"type": "object",
"title": "links",
"properties": {
"parent": {
"type": "string",
"title": "Parent",
"description": "Full resource URL path to reference the parent (if any) for this resource",
"order": 1
},
"self": {
"type": "string",
"title": "Self",
"description": "Full resource URL path to reference this particular resource",
"order": 2
}
}
}
}
},
"links": {
"type": "object",
"title": "links",
"properties": {
"parent": {
"type": "string",
"title": "Parent",
"description": "Full resource URL path to reference the parent (if any) for this resource",
"order": 1
},
"self": {
"type": "string",
"title": "Self",
"description": "Full resource URL path to reference this particular resource",
"order": 2
}
}
},
"metadata": {
"type": "object",
"title": "metadata",
"properties": {
"domain": {
"$ref": "#/definitions/domain",
"title": "Domain",
"description": "The details about the domain",
"order": 2
},
"ipType": {
"type": "string",
"title": "IP Type",
"description": "IP type",
"order": 5
},
"lastUser": {
"$ref": "#/definitions/metadata_user",
"title": "Last User",
"description": "This object defines details about the user",
"order": 1
},
"parentType": {
"type": "string",
"title": "Parent Type",
"description": "Parent type",
"order": 6
},
"readOnly": {
"$ref": "#/definitions/read_only",
"title": "Read Only",
"description": "Defines the read only conditions if the referenced resource is read only",
"order": 3
},
"timestamp": {
"type": "integer",
"title": "Timestamp",
"description": "The last updated timestamp",
"order": 4
}
},
"definitions": {
"domain": {
"type": "object",
"title": "domain",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Unique UUID of this domain",
"order": 3
},
"links": {
"$ref": "#/definitions/links",
"title": "Links",
"description": "This defines the self referencing links for the given resource",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the domain",
"order": 1
},
"type": {
"type": "string",
"title": "Type",
"description": "Domain type definition",
"order": 4
}
},
"definitions": {
"links": {
"type": "object",
"title": "links",
"properties": {
"parent": {
"type": "string",
"title": "Parent",
"description": "Full resource URL path to reference the parent (if any) for this resource",
"order": 1
},
"self": {
"type": "string",
"title": "Self",
"description": "Full resource URL path to reference this particular resource",
"order": 2
}
}
}
}
},
"links": {
"type": "object",
"title": "links",
"properties": {
"parent": {
"type": "string",
"title": "Parent",
"description": "Full resource URL path to reference the parent (if any) for this resource",
"order": 1
},
"self": {
"type": "string",
"title": "Self",
"description": "Full resource URL path to reference this particular resource",
"order": 2
}
}
},
"metadata_user": {
"type": "object",
"title": "metadata_user",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "The unique UUID of the user",
"order": 3
},
"links": {
"$ref": "#/definitions/links",
"title": "Links",
"description": "This defines the self referencing links for the given resource",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the user",
"order": 1
},
"type": {
"type": "string",
"title": "Type",
"description": "The user type",
"order": 4
}
},
"definitions": {
"links": {
"type": "object",
"title": "links",
"properties": {
"parent": {
"type": "string",
"title": "Parent",
"description": "Full resource URL path to reference the parent (if any) for this resource",
"order": 1
},
"self": {
"type": "string",
"title": "Self",
"description": "Full resource URL path to reference this particular resource",
"order": 2
}
}
}
}
},
"read_only": {
"type": "object",
"title": "read_only",
"properties": {
"reason": {
"type": "string",
"title": "Reason",
"description": "Reason the resource is read only - SYSTEM (if it is system defined), RBAC (if user RBAC permissions make it read only) or DOMAIN (if resource is read only in current domain)",
"order": 1
},
"state": {
"type": "boolean",
"title": "State",
"description": "True if this resource is read only and false otherwise",
"order": 2
}
}
}
}
},
"metadata_user": {
"type": "object",
"title": "metadata_user",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "The unique UUID of the user",
"order": 3
},
"links": {
"$ref": "#/definitions/links",
"title": "Links",
"description": "This defines the self referencing links for the given resource",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the user",
"order": 1
},
"type": {
"type": "string",
"title": "Type",
"description": "The user type",
"order": 4
}
},
"definitions": {
"links": {
"type": "object",
"title": "links",
"properties": {
"parent": {
"type": "string",
"title": "Parent",
"description": "Full resource URL path to reference the parent (if any) for this resource",
"order": 1
},
"self": {
"type": "string",
"title": "Self",
"description": "Full resource URL path to reference this particular resource",
"order": 2
}
}
}
}
},
"override": {
"type": "object",
"title": "override",
"properties": {
"parent": {
"$ref": "#/definitions/reference",
"title": "Parent",
"description": "Contains parent reference information",
"order": 1
},
"target": {
"$ref": "#/definitions/reference",
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
#
# scapy.contrib.description = IEC-60870-5-104 APCI / APDU layer definitions
# scapy.contrib.status = loads
"""
IEC 60870-5-104
~~~~~~~~~~~~~~~
:description:
This module provides the IEC 60870-5-104 (common short name: iec104)
layer, the information objects and related information element
definitions.
normative references:
- IEC 60870-5-4:1994 (atomic base types / data format)
- IEC 60870-5-101:2003 (information elements (sec. 7.2.6) and
ASDU definition (sec. 7.3))
- IEC 60870-5-104:2006 (information element TSC (sec. 8.8, p. 44))
:TODO:
- add allowed direction to IO attributes
(but this could be derived from the name easily <--> )
- information elements / objects need more testing
(e.g. on live traffic w comparison against tshark)
:NOTES:
- bit and octet numbering is used as in the related standards
(they usually start with index one instead of zero)
- some of the information objects are only valid for IEC 60870-5-101 -
          so usually they should never appear on the network, as iec101 uses
          serial connections. I added them in case decoding of those messages
          is needed, e.g. because one implements an iec101<-->iec104 gateway
          or hits such a gateway that does not behave standard-conformant
          (e.g. by forwarding 101 messages to a 104 network)
"""
from scapy.contrib.scada.iec104.iec104_fields import * # noqa F403,F401
from scapy.contrib.scada.iec104.iec104_information_elements import * # noqa F403,F401
from scapy.contrib.scada.iec104.iec104_information_objects import * # noqa F403,F401
from scapy.compat import orb
from scapy.config import conf
from scapy.error import warning, Scapy_Exception
from scapy.fields import ByteField, BitField, ByteEnumField, PacketListField, \
BitEnumField, XByteField, FieldLenField, LEShortField, BitFieldLenField
from scapy.layers.inet import TCP
from scapy.packet import Raw, Packet, bind_layers
IEC_104_IANA_PORT = 2404
# direction - from the central station to the substation
IEC104_CONTROL_DIRECTION = 0
IEC104_CENTRAL_2_SUB_DIR = IEC104_CONTROL_DIRECTION
# direction - from the substation to the central station
IEC104_MONITOR_DIRECTION = 1
IEC104_SUB_2_CENTRAL_DIR = IEC104_MONITOR_DIRECTION
IEC104_DIRECTIONS = {
IEC104_MONITOR_DIRECTION: 'monitor direction (sub -> central)',
IEC104_CONTROL_DIRECTION: 'control direction (central -> sub)',
}
# COT - cause of transmission
IEC104_COT_UNDEFINED = 0
IEC104_COT_CYC = 1
IEC104_COT_BACK = 2
IEC104_COT_SPONT = 3
IEC104_COT_INIT = 4
IEC104_COT_REQ = 5
IEC104_COT_ACT = 6
IEC104_COT_ACTCON = 7
IEC104_COT_DEACT = 8
IEC104_COT_DEACTCON = 9
IEC104_COT_ACTTERM = 10
IEC104_COT_RETREM = 11
IEC104_COT_RETLOC = 12
IEC104_COT_FILE = 13
IEC104_COT_RESERVED_14 = 14
IEC104_COT_RESERVED_15 = 15
IEC104_COT_RESERVED_16 = 16
IEC104_COT_RESERVED_17 = 17
IEC104_COT_RESERVED_18 = 18
IEC104_COT_RESERVED_19 = 19
IEC104_COT_INROGEN = 20
IEC104_COT_INRO1 = 21
IEC104_COT_INRO2 = 22
IEC104_COT_INRO3 = 23
IEC104_COT_INRO4 = 24
IEC104_COT_INRO5 = 25
IEC104_COT_INRO6 = 26
IEC104_COT_INRO7 = 27
IEC104_COT_INRO8 = 28
IEC104_COT_INRO9 = 29
IEC104_COT_INRO10 = 30
IEC104_COT_INRO11 = 31
IEC104_COT_INRO12 = 32
IEC104_COT_INRO13 = 33
IEC104_COT_INRO14 = 34
IEC104_COT_INRO15 = 35
IEC104_COT_INRO16 = 36
IEC104_COT_REQCOGEN = 37
IEC104_COT_REQCO1 = 38
IEC104_COT_REQCO2 = 39
IEC104_COT_REQCO3 = 40
IEC104_COT_REQCO4 = 41
IEC104_COT_RESERVED_42 = 42
IEC104_COT_RESERVED_43 = 43
IEC104_COT_UNKNOWN_TYPE_CODE = 44
IEC104_COT_UNKNOWN_TRANSMIT_REASON = 45
IEC104_COT_UNKNOWN_COMMON_ADDRESS_OF_ASDU = 46
IEC104_COT_UNKNOWN_ADDRESS_OF_INFORMATION_OBJECT = 47
IEC104_COT_PRIVATE_48 = 48
IEC104_COT_PRIVATE_49 = 49
IEC104_COT_PRIVATE_50 = 50
IEC104_COT_PRIVATE_51 = 51
IEC104_COT_PRIVATE_52 = 52
IEC104_COT_PRIVATE_53 = 53
IEC104_COT_PRIVATE_54 = 54
IEC104_COT_PRIVATE_55 = 55
IEC104_COT_PRIVATE_56 = 56
IEC104_COT_PRIVATE_57 = 57
IEC104_COT_PRIVATE_58 = 58
IEC104_COT_PRIVATE_59 = 59
IEC104_COT_PRIVATE_60 = 60
IEC104_COT_PRIVATE_61 = 61
IEC104_COT_PRIVATE_62 = 62
IEC104_COT_PRIVATE_63 = 63
CAUSE_OF_TRANSMISSIONS = {
IEC104_COT_UNDEFINED: 'undefined',
IEC104_COT_CYC: 'cyclic (per/cyc)',
IEC104_COT_BACK: 'background (back)',
IEC104_COT_SPONT: 'spontaneous (spont)',
IEC104_COT_INIT: 'initialized (init)',
IEC104_COT_REQ: 'request (req)',
IEC104_COT_ACT: 'activation (act)',
IEC104_COT_ACTCON: 'activation confirmed (actcon)',
IEC104_COT_DEACT: 'activation canceled (deact)',
IEC104_COT_DEACTCON: 'activation cancellation confirmed (deactcon)',
IEC104_COT_ACTTERM: 'activation finished (actterm)',
IEC104_COT_RETREM: 'feedback caused by remote command (retrem)',
IEC104_COT_RETLOC: 'feedback caused by local command (retloc)',
IEC104_COT_FILE: 'file transfer (file)',
IEC104_COT_RESERVED_14: 'reserved_14',
IEC104_COT_RESERVED_15: 'reserved_15',
IEC104_COT_RESERVED_16: 'reserved_16',
IEC104_COT_RESERVED_17: 'reserved_17',
IEC104_COT_RESERVED_18: 'reserved_18',
IEC104_COT_RESERVED_19: 'reserved_19',
IEC104_COT_INROGEN: 'queried by station (inrogen)',
IEC104_COT_INRO1: 'queried by query to group 1 (inro1)',
IEC104_COT_INRO2: 'queried by query to group 2 (inro2)',
IEC104_COT_INRO3: 'queried by query to group 3 (inro3)',
IEC104_COT_INRO4: 'queried by query to group 4 (inro4)',
IEC104_COT_INRO5: 'queried by query to group 5 (inro5)',
IEC104_COT_INRO6: 'queried by query to group 6 (inro6)',
IEC104_COT_INRO7: 'queried by query to group 7 (inro7)',
IEC104_COT_INRO8: 'queried by query to group 8 (inro8)',
IEC104_COT_INRO9: 'queried by query to group 9 (inro9)',
IEC104_COT_INRO10: 'queried by query to group 10 (inro10)',
IEC104_COT_INRO11: 'queried by query to group 11 (inro11)',
IEC104_COT_INRO12: 'queried by query to group 12 (inro12)',
IEC104_COT_INRO13: 'queried by query to group 13 (inro13)',
IEC104_COT_INRO14: 'queried by query to group 14 (inro14)',
IEC104_COT_INRO15: 'queried by query to group 15 (inro15)',
IEC104_COT_INRO16: 'queried by query to group 16 (inro16)',
IEC104_COT_REQCOGEN: 'queried by counter general interrogation (reqcogen)',
IEC104_COT_REQCO1: 'queried by query to counter group 1 (reqco1)',
IEC104_COT_REQCO2: 'queried by query to counter group 2 (reqco2)',
IEC104_COT_REQCO3: 'queried by query to counter group 3 (reqco3)',
IEC104_COT_REQCO4: 'queried by query to counter group 4 (reqco4)',
IEC104_COT_RESERVED_42: 'reserved_42',
IEC104_COT_RESERVED_43: 'reserved_43',
IEC104_COT_UNKNOWN_TYPE_CODE: 'unknown type code',
IEC104_COT_UNKNOWN_TRANSMIT_REASON: 'unknown transmit reason',
IEC104_COT_UNKNOWN_COMMON_ADDRESS_OF_ASDU:
'unknown common address of ASDU',
IEC104_COT_UNKNOWN_ADDRESS_OF_INFORMATION_OBJECT:
'unknown address of information object',
IEC104_COT_PRIVATE_48: 'private_48',
IEC104_COT_PRIVATE_49: 'private_49',
IEC104_COT_PRIVATE_50: 'private_50',
IEC104_COT_PRIVATE_51: 'private_51',
IEC104_COT_PRIVATE_52: 'private_52',
IEC104_COT_PRIVATE_53: 'private_53',
IEC104_COT_PRIVATE_54: 'private_54',
IEC104_COT_PRIVATE_55: 'private_55',
IEC104_COT_PRIVATE_56: 'private_56',
IEC104_COT_PRIVATE_57: 'private_57',
IEC104_COT_PRIVATE_58: 'private_58',
IEC104_COT_PRIVATE_59: 'private_59',
IEC104_COT_PRIVATE_60: 'private_60',
IEC104_COT_PRIVATE_61: 'private_61',
IEC104_COT_PRIVATE_62: 'private_62',
IEC104_COT_PRIVATE_63: 'private_63'
}
IEC104_APDU_TYPE_UNKNOWN = 0x00
IEC104_APDU_TYPE_I_SEQ_IOA = 0x01
IEC104_APDU_TYPE_I_SINGLE_IOA = 0x02
IEC104_APDU_TYPE_U = 0x03
IEC104_APDU_TYPE_S = 0x04
def _iec104_apci_type_from_packet(data):
"""
    the type of the message is encoded in octet 1..4

                     oct 1, bit 1    oct 1, bit 2    oct 3, bit 1
        I Message         0              1|0              0
        S Message         1               0               0
        U Message         1               1               0

    see EN 60870-5-104:2006, sec. 5 (p. 13, fig. 6,7,8)
"""
oct_1 = orb(data[2])
oct_3 = orb(data[4])
oct_1_bit_1 = bool(oct_1 & 1)
oct_1_bit_2 = bool(oct_1 & 2)
oct_3_bit_1 = bool(oct_3 & 1)
if oct_1_bit_1 is False and oct_3_bit_1 is False:
if len(data) < 8:
return IEC104_APDU_TYPE_UNKNOWN
is_seq_ioa = ((orb(data[7]) & 0x80) == 0x80)
if is_seq_ioa:
return IEC104_APDU_TYPE_I_SEQ_IOA
else:
return IEC104_APDU_TYPE_I_SINGLE_IOA
if oct_1_bit_1 and oct_1_bit_2 is False and oct_3_bit_1 is False:
return IEC104_APDU_TYPE_S
if oct_1_bit_1 and oct_1_bit_2 and oct_3_bit_1 is False:
return IEC104_APDU_TYPE_U
return IEC104_APDU_TYPE_UNKNOWN
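# A minimal usage sketch (added for illustration, not part of the original
# module): hand-crafted APDU byte strings exercising the octet 1/3 bit rules
# documented above. The concrete byte values are assumptions chosen to satisfy
# those rules, not captured traffic.
def _example_apci_type_detection():
    # octet 1 = 0x01 -> bit 1 set, bit 2 clear; octet 3 bit 1 clear -> S message
    assert _iec104_apci_type_from_packet(
        b'\x68\x04\x01\x00\x00\x00') == IEC104_APDU_TYPE_S
    # octet 1 = 0x07 (e.g. STARTDT act) -> bits 1 and 2 set -> U message
    assert _iec104_apci_type_from_packet(
        b'\x68\x04\x07\x00\x00\x00') == IEC104_APDU_TYPE_U
    # octet 1 bit 1 clear -> I message; the SQ bit (0x80) of the variable
    # structure qualifier selects the 'sequence of IOAs' variant
    assert _iec104_apci_type_from_packet(
        b'\x68\x0e\x02\x00\x00\x00\x01\x81') == IEC104_APDU_TYPE_I_SEQ_IOA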
class IEC104_APDU(Packet):
"""
basic Application Protocol Data Unit definition used by S/U/I messages
"""
def guess_payload_class(self, payload):
payload_len = len(payload)
if payload_len < 6:
return self.default_payload_class(payload)
        if orb(payload[0]) != 0x68:
            return self.default_payload_class(payload)
# the length field contains the number of bytes starting from the
# first control octet
apdu_length = 2 + orb(payload[1])
if payload_len < apdu_length:
warning(
'invalid len of APDU. given len: {} available len: {}'.format(
apdu_length, payload_len))
return self.default_payload_class(payload)
apdu_type = _iec104_apci_type_from_packet(payload)
return IEC104_APDU_CLASSES.get(apdu_type,
self.default_payload_class(payload))
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
"""
detect type of the message by checking packet data
:param _pkt: raw bytes of the packet layer data to be checked
:param args: unused
:param kargs: unused
:return: class of the detected message type
"""
if _iec104_is_i_apdu_seq_ioa(_pkt):
return IEC104_I_Message_SeqIOA
if _iec104_is_i_apdu_single_ioa(_pkt):
return IEC104_I_Message_SingleIOA
if _iec104_is_u_apdu(_pkt):
return IEC104_U_Message
if _iec104_is_s_apdu(_pkt):
return IEC104_S_Message
return Raw
class IEC104_S_Message(IEC104_APDU):
"""
message used for ack of received I-messages
"""
name = 'IEC-104 S APDU'
fields_desc = [
XByteField('start', 0x68),
ByteField("apdu_length", 4),
ByteField('octet_1', 0x01),
ByteField('octet_2', 0),
IEC104SequenceNumber('rx_seq_num', 0),
]
class IEC104_U_Message(IEC104_APDU):
"""
message used for connection tx control (start/stop) and monitoring (test)
"""
name = 'IEC-104 U APDU'
fields_desc = [
XByteField('start', 0x68),
ByteField("apdu_length", 4),
BitField('testfr_con', 0, 1),
BitField('testfr_act', 0, 1),
BitField('stopdt_con', 0, 1),
BitField('stopdt_act', 0, 1),
BitField('startdt_con', 0, 1),
BitField('startdt_act', 0, 1),
BitField('octet_1_1_2', 3, 2),
ByteField('octet_2', 0),
ByteField('octet_3', 0),
ByteField('octet_4', 0)
]
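# A short usage sketch (added for illustration; assumes the remainder of this
# module, e.g. IEC104SequenceNumber and the _iec104_is_* helpers, is loaded):
# U-frames are built by setting individual control flags, and raw bytes passed
# to IEC104_APDU() are dispatched to the matching subclass via dispatch_hook().
def _example_build_and_dissect():
    from scapy.compat import raw
    startdt = IEC104_U_Message(startdt_act=1)
    assert raw(startdt)[0:1] == b'\x68'          # every APDU starts with 0x68
    parsed = IEC104_APDU(b'\x68\x04\x01\x00\x00\x00')
    assert isinstance(parsed, IEC104_S_Message)  # S-frame with rx_seq_num 0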
def _i_msg_io_dispatcher_sequence(pkt, next_layer_data):
"""
get the type id and return the matching ASDU instance
"""
next_layer_class_type = IEC104_IO_CLASSES.get(pkt.type_id, conf.raw_layer)
return next_layer_class_type(next_layer_data)
def _i_msg_io_dispatcher_single(pkt, next_layer_data):
"""
get the type id and return the matching ASDU instance
(information object address + regular ASDU information object fields)
"""
next_layer_class_type = IEC104_IO_WITH_IOA_CLASSES.get(pkt.type_id,
conf.raw_layer)
return next_layer_class_type(next_layer_data)
class IEC104ASDUPacketListField(PacketListField):
"""
used to add a list of information objects to an I-message
"""
def m2i(self, pkt, m):
"""
add calling layer instance to the cls()-signature
:param pkt: calling layer instance
:param m: raw data forming the next layer
:return: instance of the class representing the next layer
"""
return self.cls(pkt, m)
class IEC104_I_Message_StructureException(Scapy_Exception):
"""
Exception raised if payload is not of type Information Object
"""
pass
class IEC104_I_Message(IEC104_APDU):
"""
message used for transmitting data (APDU - Application Protocol Data Unit)
APDU: MAGIC + APCI + ASDU
MAGIC: 0x68
APCI : Control Information (rx/tx seq/ack numbers)
ASDU : Application Service Data Unit - information object related data
see EN 60870-5-104:2006, sec. 5 (p. 12)
"""
name = 'IEC-104 I APDU'
IEC_104_MAGIC = 0x68 # dec -> 104
SQ_FLAG_SINGLE = 0
SQ_FLAG_SEQUENCE = 1
SQ_FLAGS = {
SQ_FLAG_SINGLE: 'single',
SQ_FLAG_SEQUENCE: 'sequence'
}
TEST_DISABLED = 0
TEST_ENABLED = 1
TEST_FLAGS = {
None: return None
return self.name2imdbID(name)
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID.
"""
name = getCharacterName(characterID,
'%scharacters.index' % self.__db,
'%scharacters.data' % self.__db)
if not name:
return None
return self.character2imdbID(name)
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID.
"""
name = getCompanyName(companyID,
'%scompanies.index' % self.__db,
'%scompanies.data' % self.__db)
if not name:
return None
return self.company2imdbID(name)
def do_adult_search(self, doAdult):
"""If set to 0 or False, movies in the Adult category are not
shown in the results of a search."""
self.doAdult = doAdult
def _search_movie(self, title, results, _episodes=False):
title = title.strip()
if not title: return []
# Search for these title variations.
if not _episodes:
title1, title2, title3 = titleVariations(title, fromPtdf=1)
else:
title1 = normalizeTitle(title)
title2 = ''
title3 = ''
        # XXX: only a guess: results are shrunk, to exclude Adult
        # titles and to remove duplicate entries.
resultsST = results * 3
res = _scan_titles('%stitles.key' % self.__db,
title1, title2, title3, resultsST, _episodes)
res[:] = [x[1] for x in res]
# Check for adult movies.
if not self.doAdult:
newlist = []
for entry in res:
genres = getMovieMisc(movieID=entry[0],
dataF='%s%s.data' % (self.__db, 'genres'),
indexF='%s%s.index' % (self.__db, 'genres'),
attrIF='%sattributes.index' % self.__db,
attrKF='%sattributes.key' % self.__db)
if 'Adult' not in genres: newlist.append(entry)
res[:] = newlist
# Get the real name, if this is an AKA.
# XXX: duplicated code!
new_res = []
seen_MID = []
for idx, (movieID, r) in enumerate(res):
# Remove duplicates.
# XXX: find a way to prefer titles with an AKA? Or prefer
# the original title?
if movieID in seen_MID:
continue
else:
seen_MID.append(movieID)
realMID = self._get_real_movieID(movieID)
if movieID == realMID:
new_res.append((movieID, r))
continue
if realMID in seen_MID:
continue
else:
seen_MID.append(realMID)
aka_title = build_title(r, canonical=0)
real_title = getLabel(realMID, '%stitles.index' % self.__db,
'%stitles.key' % self.__db)
if aka_title == real_title:
new_res.append((realMID, r))
continue
new_r = analyze_title(real_title, canonical=1)
new_r['akas'] = [aka_title]
new_res.append((realMID, new_r))
if results > 0: new_res[:] = new_res[:results]
return new_res
def _search_episode(self, title, results):
title = title.strip()
if not title: return
_episodes = True
if analyze_title(title)['kind'] == 'episode':
_episodes = False
return self._search_movie(title, results, _episodes=_episodes)
def get_movie_main(self, movieID):
# Information sets provided by this method.
infosets = ('main', 'vote details')
tl = getLabel(movieID, '%stitles.index' % self.__db,
'%stitles.key' % self.__db)
# No title, no party.
if tl is None:
raise IMDbDataAccessError, 'unable to get movieID "%s"' % movieID
res = analyze_title(tl)
# Build the cast list.
actl = []
for castG in ('actors', 'actresses'):
midx = getFullIndex('%s%s.titles' % (self.__db, castG),
movieID, multi=1)
if midx is not None:
params = {'movieID': movieID,
'dataF': '%s%s.data' % (self.__db, castG),
'indexF': '%snames.index' % self.__db,
'keyF': '%snames.key' % self.__db,
'attrIF': '%sattributes.index' % self.__db,
'attrKF': '%sattributes.key' % self.__db,
'charNF': '%scharacter2id.index' % self.__db,
'offsList': midx, 'doCast': 1}
actl += getMovieCast(**params)
if actl:
actl.sort()
res['cast'] = actl
# List of other workers.
works = ('writer', 'cinematographer', 'composer',
'costume-designer', 'director', 'editor', 'miscellaneou',
'producer', 'production-designer', 'cinematographer')
for i in works:
index = getFullIndex('%s%ss.titles' % (self.__db, i),
movieID, multi=1)
if index is not None:
params = {'movieID': movieID,
'dataF': '%s%s.data' % (self.__db, i),
'indexF': '%snames.index' % self.__db,
'keyF': '%snames.key' % self.__db,
'attrIF': '%sattributes.index' % self.__db,
'attrKF': '%sattributes.key' % self.__db,
'offsList': index}
name = key = i
if '-' in name:
name = name.replace('-', ' ')
elif name == 'miscellaneou':
name = 'miscellaneous crew'
key = 'miscellaneou'
elif name == 'writer':
params['doWriters'] = 1
params['dataF'] = '%s%ss.data' % (self.__db, key)
data = getMovieCast(**params)
if name == 'writer': data.sort()
res[name] = data
# Rating.
rt = self.get_movie_vote_details(movieID)['data']
if rt: res.update(rt)
# Various information.
miscInfo = (('runtimes', 'running-times'), ('color info', 'color-info'),
('genres', 'genres'), ('distributors', 'distributors'),
('languages', 'language'), ('certificates', 'certificates'),
('special effects companies', 'special-effects-companies'),
('sound mix', 'sound-mix'), ('tech info', 'technical'),
('production companies', 'production-companies'),
('countries', 'countries'))
for name, fname in miscInfo:
params = {'movieID': movieID,
'dataF': '%s%s.data' % (self.__db, fname),
'indexF': '%s%s.index' % (self.__db, fname),
'attrIF': '%sattributes.index' % self.__db,
'attrKF': '%sattributes.key' % self.__db}
data = getMovieMisc(**params)
if name in ('distributors', 'special effects companies',
'production companies'):
for nitem in xrange(len(data)):
n, notes = split_company_name_notes(data[nitem])
company = Company(name=n, companyID=getCompanyID(n,
'%scompany2id.index' % self.__db),
notes=notes,
accessSystem=self.accessSystem)
data[nitem] = company
if data: res[name] = data
if res.has_key('runtimes') and len(res['runtimes']) > 0:
rt = res['runtimes'][0]
episodes = re_episodes.findall(rt)
if episodes:
res['runtimes'][0] = re_episodes.sub('', rt)
res['number of episodes'] = episodes[0]
# AKA titles.
akas = getAkaTitles(movieID,
'%saka-titles.data' % self.__db,
'%stitles.index' % self.__db,
'%stitles.key' % self.__db,
'%sattributes.index' % self.__db,
'%sattributes.key' % self.__db)
if akas:
# normalize encoding.
for i in xrange(len(akas)):
ts = akas[i].split('::')
if len(ts) != 2: continue
t = ts[0]
n = ts[1]
nt = self._changeAKAencoding(n, t)
if nt is not None: akas[i] = '%s::%s' % (nt, n)
res['akas'] = akas
if res.get('kind') == 'episode':
# Things to do if this is a tv series episode.
episodeOf = res.get('episode of')
if episodeOf is not None:
parentSeries = Movie(data=res['episode of'],
accessSystem='local')
seriesID = self._getTitleID(parentSeries.get(
'long imdb canonical title'))
parentSeries.movieID = seriesID
res['episode of'] = parentSeries
if not res.get('year'):
year = getFullIndex('%smovies.data' % self.__db,
movieID, kind='moviedata', rindex=1)
if year: res['year'] = year
# MPAA info.
mpaa = getMPAA(movieID, '%smpaa-ratings-reasons.index' % self.__db,
'%smpaa-ratings-reasons.data' % self.__db)
if mpaa: res.update(mpaa)
return {'data': res, 'info sets': infosets}
def get_movie_plot(self, movieID):
pl = getPlot(movieID, '%splot.index' % self.__db,
'%splot.data' % self.__db)
trefs, nrefs = self._extractRefs(pl)
if pl: return {'data': {'plot': pl},
'titlesRefs': trefs, 'namesRefs': nrefs}
return {'data': {}}
def get_movie_taglines(self, movieID):
tg = getTaglines(movieID, '%staglines.index' % self.__db,
'%staglines.data' % self.__db)
if tg: return {'data': {'taglines': tg}}
return {'data': {}}
def get_movie_keywords(self, movieID):
params = {'movieID': movieID,
'dataF': '%skeywords.data' % self.__db,
'indexF': '%skeywords.index' % self.__db,
'attrIF': '%sattributes.index' % self.__db,
'attrKF': '%sattributes.key' % self.__db}
kwds = getMovieMisc(**params)
if kwds: return {'data': {'keywords': kwds}}
return {'data': {}}
def get_movie_alternate_versions(self, movieID):
av = parseMinusList(movieID, '%salternate-versions.data' % self.__db,
'%salternate-versions.index' % self.__db)
trefs, nrefs = self._extractRefs(av)
if av: return {'data': {'alternate versions': av},
'titlesRefs': trefs, 'namesRefs': nrefs}
return {'data': {}}
def get_movie_crazy_credits(self, movieID):
cc = parseMinusList(movieID, '%scrazy-credits.data' % self.__db,
'%scrazy-credits.index' % self.__db)
trefs, nrefs = self._extractRefs(cc)
if cc: return {'data': {'crazy credits': cc},
'titlesRefs': trefs, 'namesRefs': nrefs}
return {'data': {}}
def get_movie_goofs(self, movieID):
goo = parseMinusList(movieID, '%sgoofs.data' % self.__db,
'%sgoofs.index' % self.__db)
trefs, nrefs = self._extractRefs(goo)
if goo: return {'data': {'goofs': goo},
'titlesRefs': trefs, 'namesRefs': nrefs}
return {'data': {}}
def get_movie_soundtrack(self, movieID):
goo = parseMinusList(movieID, '%ssoundtracks.data' % self.__db,
'%ssoundtracks.index' % self.__db)
trefs, nrefs = self._extractRefs(goo)
if goo: return {'data': {'soundtrack': goo},
'titlesRefs': trefs, 'namesRefs': nrefs}
return {'data': {}}
def get_movie_quotes(self, movieID):
mq = getQuotes(movieID, '%squotes.data' % self.__db,
'%squotes.index' % self.__db)
trefs, nrefs = self._extractRefs(mq)
for idx, quote in enumerate(mq):
mq[idx] = quote.split('::')
if mq: return {'data': {'quotes': mq},
'titlesRefs': trefs, 'namesRefs': nrefs}
return {'data': {}}
def get_movie_release_dates(self, movieID):
params = {'movieID': movieID,
'dataF': '%srelease-dates.data' % self.__db,
'indexF': '%srelease-dates.index' % self.__db,
'attrIF': '%sattributes.index' % self.__db,
'attrKF': '%sattributes.key' % self.__db}
data = getMovieMisc(**params)
if data: return {'data': {'release dates': data}}
return {'data': {}}
def get_movie_miscellaneous_companies(self, movieID):
params = {'movieID': movieID,
'dataF': '%smiscellaneous-companies.data' % self.__db,
'indexF': '%smiscellaneous-companies.index' % self.__db,
'attrIF': '%sattributes.index' % self.__db,
'attrKF': '%sattributes.key' % self.__db}
try:
data = getMovieMisc(**params)
except IMDbDataAccessError:
import warnings
warnings.warn('miscellaneous-companies files not found; '
'run the misc-companies4local.py script.')
return {'data': {}}
for nitem in xrange(len(data)):
n, notes = split_company_name_notes(data[nitem])
company = Company(name=n, companyID=getCompanyID(n,
'%scompany2id.index' % self.__db),
notes=notes,
accessSystem=self.accessSystem)
data[nitem] = company
if data: return {'data': {'miscellaneous companies': data}}
return {'data': {}}
def get_movie_vote_details(self, movieID):
data = getRatingData(movieID, '%sratings.data' % self.__db)
return {'data': data}
def get_movie_trivia(self, movieID):
triv = parseMinusList(movieID, '%strivia.data' % self.__db,
'%strivia.index' % self.__db)
trefs, nrefs = self._extractRefs(triv)
if triv: return {'data': {'trivia': triv},
'titlesRefs': trefs, 'namesRefs': nrefs}
return {'data': {}}
def get_movie_locations(self, movieID):
params = {'movieID': movieID,
'dataF': '%slocations.data' % self.__db,
'indexF': '%slocations.index' % self.__db,
'attrIF': '%sattributes.index' % self.__db,
'attrKF': '%sattributes.key' % self.__db}
data = getMovieMisc(**params)
        if data: return {'data': {'locations': data}}
        return {'data': {}}
#!/usr/bin/env python
"""
pyOpt_optimizer
Holds the Python Design Optimization Classes (base and inherited).
Copyright (c) 2008-2013 by pyOpt Developers
All rights reserved.
Revision: 1.1 $Date: 08/05/2008 21:00$
Developers:
-----------
- Dr. <NAME> (GKK)
"""
from __future__ import print_function
# =============================================================================
# Imports
# =============================================================================
import os
import time
import copy
import numpy
from .pyOpt_gradient import Gradient
from .pyOpt_error import Error, pyOptSparseWarning
from .pyOpt_history import History
from .pyOpt_solution import Solution
from .pyOpt_optimization import INFINITY
from .pyOpt_utils import convertToDense, convertToCOO, extractRows, \
mapToCSC, scaleRows, IDATA
eps = numpy.finfo(1.0).eps
# =============================================================================
# Optimizer Class
# =============================================================================
class Optimizer(object):
"""
Base optimizer class
Parameters
----------
name : str
Optimizer name
category : str
Typically local or global
defOptions : dictionary
A dictionary containing the default options
informs : dict
Dictionary of the inform codes
"""
def __init__(self, name=None, category=None, defOptions=None,
informs=None, **kwargs):
self.name = name
self.category = category
self.options = {}
self.options['defaults'] = defOptions
self.informs = informs
self.callCounter = 0
self.sens = None
# Initialize Options
for key in defOptions:
self.options[key] = defOptions[key]
koptions = kwargs.pop('options', {})
for key in koptions:
self.setOption(key, koptions[key])
self.optProb = None
# Default options:
self.appendLinearConstraints = False
self.jacType = 'dense'
self.unconstrained = False
self.userObjTime = 0.0
self.userSensTime = 0.0
self.interfaceTime = 0.0
self.userObjCalls = 0
self.userSensCalls = 0
self.storeSens = True
# Cache storage
self.cache = {'x': None, 'fobj': None, 'fcon': None,
'gobj': None, 'gcon': None}
# A second-level cache for optimizers that require callbacks
# for each constraint. (eg. PSQP, FSQP, etc)
self.storedData = {}
self.storedData['x'] = None
# Create object to pass information about major iterations.
# Only relevant for SNOPT.
self.iu0 = 0
# Store the jacobian conversion maps
self._jac_map_csr_to_csc = None
def _clearTimings(self):
"""Clear timings and call counters"""
self.userObjTime = 0.0
self.userSensTime = 0.0
self.interfaceTime = 0.0
self.userObjCalls = 0
self.userSensCalls = 0
def _setSens(self, sens, sensStep, sensMode):
"""
Common function to setup sens function
"""
# If we have SNOPT set derivative level to 3...it will be
# reset if necessary
if self.name in ['SNOPT']:
# SNOPT is the only one where None is ok.
self.setOption('Derivative level', 3)
        # Next we determine what to do about derivatives.
        # We must have a function or we use FD or CS:
if sens is None:
if self.name in ['SNOPT']:
# SNOPT is the only one where None is ok.
self.setOption('Derivative level', 0)
self.sens = None
else:
raise Error("'None' value given for sens. Must be one "
" of 'FD', 'FDR', 'CD', 'CDR', 'CS' or a user supplied function.")
elif hasattr(sens, '__call__'):
# We have function handle for gradients! Excellent!
self.sens = sens
elif sens.lower() in ['fd', 'fdr', 'cd', 'cdr', 'cs']:
# Create the gradient class that will operate just like if
# the user supplied function
self.sens = Gradient(self.optProb, sens.lower(), sensStep,
sensMode, self.optProb.comm)
else:
raise Error("Unknown value given for sens. Must be None, 'FD', "
"'FDR', 'CD', 'CDR', 'CS' or a python function handle")
def _setHistory(self, storeHistory, hotStart):
"""
Generic routine for setting up the hot start information
Parameters
----------
storeHistory : str
File for possible history file. Or None if not writing file.
hotStart : str
Filename for history file for hot start
"""
# By default no hot start
self.hotStart = None
# Determine if we want to do a hot start:
if hotStart is not None:
            # Now, if the hot start file and the history file are
            # the SAME, we don't allow that. We will create a copy
            # of the hotStart file and use *that* instead.
import tempfile
import shutil
if storeHistory == hotStart:
if os.path.exists(hotStart):
fname = tempfile.mktemp()
shutil.copyfile(storeHistory, fname)
self.hotStart = History(fname, temp=True, flag='r')
else:
if os.path.exists(hotStart):
self.hotStart = History(hotStart, temp=False, flag='r')
else:
pyOptSparseWarning('Hot start file does not exist. \
Performing a regular start')
self.storeHistory = False
if storeHistory:
self.hist = History(storeHistory)
self.storeHistory = True
if hotStart is not None:
varInfo = self.hotStart.readData('varInfo')
conInfo = self.hotStart.readData('conInfo')
if varInfo is not None:
self.hist.writeData('varInfo', varInfo)
if conInfo is not None:
self.hist.writeData('conInfo', conInfo)
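    # Added illustration (not part of the original class; names are
    # hypothetical): under the usual pyOptSparse conventions implied by
    # _setSens() and _setHistory() above, a user-supplied sens callable takes
    # the design variable and function dictionaries and returns a nested
    # derivative dictionary plus a fail flag, and hot starting simply points
    # hotStart at an existing history file (the same file as storeHistory is
    # fine, since a temporary copy is made above).
    #
    #   def user_sens(xdict, funcs):
    #       funcsSens = {'obj': {'xvars': 2.0 * xdict['xvars']}}
    #       return funcsSens, False
    #
    #   opt = SomeConcreteOptimizer()        # hypothetical subclass
    #   sol = opt(optProb, sens=user_sens,
    #             storeHistory='opt_hist.hst', hotStart='opt_hist.hst')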
def _masterFunc(self, x, evaluate):
"""
This is the master function that **ALL** optimizers call from
the specific signature functions. The reason for this is that
we can generically do the hot-start replay, history storage,
timing and possibly caching once for all optimizers. It also
takes care of the MPI communication that allows the optimizer
to run on one process only, but within a larger MPI context.
It does add one additional level of call, but we think it is
        well worth it to reduce code duplication.
Parameters
----------
x : array
This is the raw x-array data from the optimizer
evaluate : list of strings
This list contains at least one of 'fobj', 'fcon', 'gobj'
or 'gcon'. This list tells this function which of the
values is required on return
"""
# We are hot starting, we should be able to read the required
# information out of the hot start file, process it and then
# fire it back to the specific optimizer
timeA = time.time()
if self.hotStart:
# This is a very inexpensive check to see if point exists
if self.hotStart.pointExists(self.callCounter):
# Read the actual data for this point:
data = self.hotStart.read(self.callCounter)
# Get the x-value and (de)process
xuser = self.optProb.deProcessX(data['xuser'])
# Validated x-point point to use:
xScaled = x*self.optProb.invXScale + self.optProb.xOffset
if numpy.linalg.norm(xScaled - xuser) < eps:
# However, we may need a sens that *isn't* in the
# the dictionary:
funcs = None
funcsSens = None
validPoint = True
if 'fobj' in evaluate or 'fcon' in evaluate:
funcs = data['funcs']
if 'gobj' in evaluate or 'gcon' in evaluate:
if 'funcsSens' in data:
funcsSens = data['funcsSens']
else:
validPoint = False
# Only continue if valid:
if validPoint:
if self.storeHistory:
# Just dump the (exact) dictionary back out:
self.hist.write(self.callCounter, data)
fail = data['fail']
returns = []
# Process constraints/objectives
if funcs is not None:
self.optProb.evaluateLinearConstraints(xScaled, funcs)
fcon = self.optProb.processConstraints(funcs)
fobj = self.optProb.processObjective(funcs)
if 'fobj' in evaluate:
returns.append(fobj)
if 'fcon' in evaluate:
returns.append(fcon)
# Process gradients if we have them
if funcsSens is not None:
gobj = self.optProb.processObjectiveGradient(funcsSens)
gcon = self.optProb.processConstraintJacobian(funcsSens)
gcon = self._convertJacobian(gcon)
if 'gobj' in evaluate:
returns.append(gobj)
if 'gcon' in evaluate:
returns.append(gcon)
# We can now safely increment the call counter
self.callCounter += 1
returns.append(fail)
self.interfaceTime += time.time()-timeA
return returns
# end if (valid point -> all data present)
# end if (x's match)
# end if (point exists)
# We have used up all the information in hot start so we
# can close the hot start file
self.hotStart.close()
self.hotStart = None
# end if (hot starting)
# Now we have to actually run our function...this is where the
# MPI gets a little tricky. Up until now, only the root proc
# has called up to here...the rest of them are waiting at a
# broadcast to know what to do.
args = [x, evaluate]
# Broadcast the type of call (0 means regular call)
self.optProb.comm.bcast(0, root=0)
# Now broadcast out the required arguments:
self.optProb.comm.bcast(args)
result = self._masterFunc2(*args)
self.interfaceTime += time.time()-timeA
return result
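    # A hedged sketch (added for illustration; the actual worker-side loop in
    # pyOptSparse may differ): the counterpart to the two broadcasts above.
    # Non-root processes block on comm.bcast until the root announces a call,
    # then run _masterFunc2 so user code executes on every process. The -1
    # termination code is an assumption of this sketch only.
    def _exampleWorkerLoop(self):
        while True:
            mode = self.optProb.comm.bcast(None, root=0)
            if mode == -1:
                break
            args = self.optProb.comm.bcast(None, root=0)
            self._masterFunc2(*args)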
def _masterFunc2(self, x, evaluate, writeHist=True):
"""
Another shell function. This function is now actually called
on all the processors.
"""
# Our goal in this function is to return the values requested
# in 'evaluate' for the corresponding x. We have to be a
# little cheeky here since some optimizers will make multiple
        # callbacks with the same x, one for the objective and one
        # for the constraint. We therefore cache, at the end of each
        # function or sensitivity call, the x value and the fobj, fcon,
        # gobj, and gcon values so that on the next pass we can just
# read them and return.
xScaled = self.optProb.invXScale * x + self.optProb.xOffset
xuser = self.optProb.processX(xScaled)
masterFail = False
# Set basic parameters in history
hist = {'xuser': xuser}
returns = []
# Start with fobj:
tmpObjCalls = self.userObjCalls
tmpSensCalls = self.userSensCalls
if 'fobj' in evaluate:
if numpy.linalg.norm(x-self.cache['x']) > eps:
timeA = | |
<filename>maiconverter/simai/simai.py
from __future__ import annotations
import math
from typing import Optional, Tuple, List, Union
from lark import Lark
from .tools import (
get_measure_divisor,
convert_to_fragment,
get_rest,
parallel_parse_fragments,
)
from ..event import NoteType
from .simainote import TapNote, HoldNote, SlideNote, TouchTapNote, TouchHoldNote, BPM
from .simai_parser import SimaiTransformer
# I hate the simai format can we use bmson or stepmania chart format for
# community-made charts instead
from ..tool import measure_to_second, second_to_measure, offset_arg_to_measure
class SimaiChart:
"""A class that represents a simai chart. Contains notes and bpm
information. Does not include information such as
song name, chart difficulty, composer, chart maker, etc.
It only contains enough information to build a working simai
chart.
Attributes:
bpms: Contains bpm events of the chart.
notes: Contains notes of the chart.
"""
def __init__(self):
self.notes: List[
Union[TapNote, HoldNote, SlideNote, TouchTapNote, TouchHoldNote]
] = []
self.bpms: List[BPM] = []
self._divisor: Optional[float] = None
self._measure = 1.0
@classmethod
def from_str(cls, chart_text: str, message: Optional[str] = None) -> SimaiChart:
# TODO: Rewrite this
if message is None:
print("Parsing simai chart...", end="", flush=True)
else:
print(message, end="", flush=True)
simai_chart = cls()
chart_text = "".join(chart_text.split())
try:
events_list = parallel_parse_fragments(chart_text.split(","))
except:
print("ERROR")
raise
else:
print("Done")
for events in events_list:
star_positions = []
offset = 0
for event in events:
event_type = event["type"]
if event_type == "bpm":
simai_chart.set_bpm(simai_chart._measure, event["value"])
elif event_type == "divisor":
simai_chart._divisor = event["value"]
elif event_type == "tap":
is_break, is_ex, is_star = False, False, False
modifier = event["modifier"]
if "b" in modifier:
is_break = True
if "x" in modifier:
is_ex = True
if "$" in modifier:
is_star = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_tap(
measure=simai_chart._measure + offset,
position=event["button"],
is_break=is_break,
is_star=is_star,
is_ex=is_ex,
)
elif event_type == "hold":
is_ex = False
modifier = event["modifier"]
if "x" in modifier:
is_ex = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_hold(
measure=simai_chart._measure + offset,
position=event["button"],
duration=event["duration"],
is_ex=is_ex,
)
elif event_type == "slide":
is_break, is_ex, is_tapless = False, False, False
modifier = event["modifier"]
if "b" in modifier:
is_break = True
if "x" in modifier:
is_ex = True
if any([a in modifier for a in "?!$"]):
# Tapless slides
# ? means the slide has no tap
# ! produces a tapless slide with no path, just a moving star
# $ is a remnant of 2simai, it is equivalent to ?
is_tapless = True
if "*" in modifier:
# Chained slides should have the same offset
pass
elif "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
if not (is_tapless or event["start_button"] in star_positions):
simai_chart.add_tap(
measure=simai_chart._measure + offset,
position=event["start_button"],
is_break=is_break,
is_star=True,
is_ex=is_ex,
)
star_positions.append(event["start_button"])
equivalent_bpm = event["equivalent_bpm"]
duration = event["duration"]
delay = 0.25
if equivalent_bpm is not None:
multiplier = (
simai_chart.get_bpm(simai_chart._measure) / equivalent_bpm
)
duration = multiplier * duration
delay = multiplier * delay
simai_chart.add_slide(
measure=simai_chart._measure + offset,
start_position=event["start_button"],
end_position=event["end_button"],
duration=duration,
pattern=event["pattern"],
delay=delay,
reflect_position=event["reflect_position"],
)
elif event_type == "touch_tap":
is_firework = False
modifier = event["modifier"]
if "f" in modifier:
is_firework = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_touch_tap(
measure=simai_chart._measure + offset,
position=event["location"],
region=event["region"],
is_firework=is_firework,
)
elif event_type == "touch_hold":
is_firework = False
modifier = event["modifier"]
if "f" in modifier:
is_firework = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_touch_hold(
measure=simai_chart._measure + offset,
position=event["location"],
region=event["region"],
duration=event["duration"],
is_firework=is_firework,
)
else:
raise Exception(f"Unknown event type: {event_type}")
simai_chart._measure += 1 / simai_chart._divisor
return simai_chart
@classmethod
def open(cls, file: str) -> SimaiChart:
"""Opens a text file containing only a Simai chart. Does NOT accept a regular Simai file which contains
metadata and multiple charts. Use `parse_file` to parse a normal Simai file.
Args:
file: The path of the Simai chart file.
Examples:
Open a Simai chart file named "example.txt" at current directory.
>>> simai = SimaiChart.open("./example.txt")
"""
with open(file, "r") as f:
chart = f.read()
return cls.from_str(chart)
def add_tap(
self,
measure: float,
position: int,
is_break: bool = False,
is_star: bool = False,
is_ex: bool = False,
) -> SimaiChart:
"""Adds a tap note to the list of notes.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the tap note happens.
is_break: Whether a tap note is a break note.
is_star: Whether a tap note is a star note.
is_ex: Whether a tap note is an ex note.
Examples:
Add a regular tap note at measure 1, break tap note at
measure 2, ex tap note at measure 2.5, star note at
measure 3, and a break star note at measure 5. All at
position 7.
>>> simai = SimaiChart()
>>> simai.add_tap(1, 7)
>>> simai.add_tap(2, 7, is_break=True)
>>> simai.add_tap(2.5, 7, is_ex=True)
>>> simai.add_tap(3, 7, is_star=True)
>>> simai.add_tap(5, 7, is_break=True, is_star=True)
"""
tap_note = TapNote(
measure=measure,
position=position,
is_break=is_break,
is_star=is_star,
is_ex=is_ex,
)
self.notes.append(tap_note)
return self
def del_tap(self, measure: float, position: int) -> SimaiChart:
"""Deletes a tap note from the list of notes.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the tap note happens.
Examples:
Remove tap note at measure 26.75 at button 4.
>>> simai = SimaiChart()
>>> simai.add_tap(26.5, 4)
>>> simai.del_tap(26.75, 4)
"""
tap_notes = [
x
for x in self.notes
if isinstance(x, TapNote)
and math.isclose(x.measure, measure, abs_tol=0.0001)
and x.position == position
]
for note in tap_notes:
self.notes.remove(note)
return self
def add_hold(
self,
measure: float,
position: int,
duration: float,
is_ex: bool = False,
) -> SimaiChart:
"""Adds a hold note to the list of notes.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the hold note happens.
duration: Total time duration of the hold note.
is_ex: Whether a hold note is an ex note.
Examples:
Add a regular hold note at button 2 at measure 1, with
duration of 5 measures. And an ex hold note at button
6 at measure 3, with duration of 0.5 measures.
>>> simai = SimaiChart()
>>> simai.add_hold(1, 2, 5)
>>> simai.add_hold(3, 6, 0.5, is_ex=True)
"""
hold_note = HoldNote(measure, position, duration, is_ex)
self.notes.append(hold_note)
return self
def del_hold(self, measure: float, position: int) -> SimaiChart:
"""Deletes the matching hold note in the list of notes. If there are multiple
matches, all matching notes are deleted. If there are no match, nothing happens.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the hold note happens.
Examples:
Add a regular hold note at button 0 at measure 3.25 with duration of 2 measures
and delete it.
>>> simai = SimaiChart()
>>> simai.add_hold(3.25, 0, 2)
>>> simai.del_hold(3.25, 0)
"""
hold_notes = [
x
for x in self.notes
if isinstance(x, HoldNote)
and math.isclose(x.measure, measure, abs_tol=0.0001)
and x.position == position
]
for note in hold_notes:
self.notes.remove(note)
return self
def add_slide(
self,
measure: float,
start_position: int,
end_position: int,
duration: float,
pattern: str,
delay: float = 0.25,
reflect_position: Optional[int] = None,
) -> SimaiChart:
"""Adds both a slide note to the list of notes.
Args:
measure: Time when the slide starts, in
terms of measures.
start_position: Button where the slide starts.
end_position: Button where the slide ends.
duration: Total duration of the slide, in terms of
measures. Includes slide delay.
pattern: The one or two character slide pattern used.
delay: Duration from when the slide appears and when it
starts to move, in terms of measures. Defaults to 0.25.
reflect_position: The button where the 'V' slide will first go to.
Optional, defaults to None.
Examples:
Add a '-' slide at measure 2.25 from button 1 to button 5 with
duration of 1.5 measures
>>> simai = SimaiChart()
>>> simai.add_slide(2.25, 1, 5, 1.5, "-")
>>> simai.add_slide(3, 2, 7, 0.5, "V", reflect_position=4)
| |
import sys, py
from rpython.translator.translator import TranslationContext
from rpython.annotator import unaryop, binaryop
from rpython.rtyper.test import snippet
from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin
from rpython.rlib.rarithmetic import (
r_int, r_uint, r_longlong, r_ulonglong, r_singlefloat)
from rpython.rlib.objectmodel import compute_hash
class TestSnippet(object):
def _test(self, func, types):
t = TranslationContext()
t.buildannotator().build_types(func, types)
t.buildrtyper().specialize()
t.checkgraphs()
def test_not1(self):
self._test(snippet.not1, [float])
def test_not2(self):
self._test(snippet.not2, [float])
def test_float1(self):
self._test(snippet.float1, [float])
def test_float_cast1(self):
self._test(snippet.float_cast1, [float])
def DONTtest_unary_operations(self):
# XXX TODO test if all unary operations are implemented
for opname in unaryop.UNARY_OPERATIONS:
print 'UNARY_OPERATIONS:', opname
def DONTtest_binary_operations(self):
# XXX TODO test if all binary operations are implemented
for opname in binaryop.BINARY_OPERATIONS:
print 'BINARY_OPERATIONS:', opname
class BaseTestRfloat(BaseRtypingTest):
inf = 'inf'
minus_inf = '-inf'
nan = 'nan'
def test_float2str(self):
def fn(f):
return str(f)
res = self.interpret(fn, [1.5])
assert float(self.ll_to_string(res)) == 1.5
res = self.interpret(fn, [-1.5])
assert float(self.ll_to_string(res)) == -1.5
inf = 1e200 * 1e200
nan = inf/inf
res = self.interpret(fn, [inf])
assert self.ll_to_string(res) == self.inf
res = self.interpret(fn, [-inf])
assert self.ll_to_string(res) == self.minus_inf
res = self.interpret(fn, [nan])
assert self.ll_to_string(res) == self.nan
def test_string_mod_float(self):
def fn(f):
return '%f' % f
res = self.interpret(fn, [1.5])
assert float(self.ll_to_string(res)) == 1.5
def test_int_conversion(self):
def fn(f):
return int(f)
res = self.interpret(fn, [1.0])
assert res == 1
assert type(res) is int
res = self.interpret(fn, [2.34])
assert res == fn(2.34)
def test_longlong_conversion(self):
def fn(f):
return r_longlong(f)
res = self.interpret(fn, [1.0])
assert res == 1
# r_longlong is int on a 64 bit system
if sys.maxint == 2**63 - 1:
assert self.is_of_type(res, int)
else:
assert self.is_of_type(res, r_longlong)
res = self.interpret(fn, [2.34])
assert res == fn(2.34)
big = float(0x7fffffffffffffff)
x = big - 1.e10
assert x != big
y = fn(x)
        assert y == 9223372026854775808
def test_to_r_uint(self):
def fn(x):
return r_uint(x)
res = self.interpret(fn, [12.34])
assert res == 12
bigval = sys.maxint * 1.234
res = self.interpret(fn, [bigval])
assert long(res) == long(bigval)
def test_from_r_uint(self):
def fn(n):
return float(r_uint(n)) / 2
res = self.interpret(fn, [41])
assert self.float_eq(res, 20.5)
res = self.interpret(fn, [-9])
assert self.float_eq(res, 0.5 * ((sys.maxint+1)*2 - 9))
def test_to_r_ulonglong(self):
def fn(x):
return r_ulonglong(x)
res = self.interpret(fn, [12.34])
assert res == 12
bigval = sys.maxint * 1.234
res = self.interpret(fn, [bigval])
assert long(res) == long(bigval)
def test_from_r_ulonglong(self):
def fn(n):
return float(r_ulonglong(n)) / 2
res = self.interpret(fn, [41])
assert self.float_eq(res, 20.5)
def test_r_singlefloat(self):
def fn(x):
y = r_singlefloat(x)
return float(y)
res = self.interpret(fn, [2.1])
assert res != 2.1 # precision lost
assert abs(res - 2.1) < 1E-6
def test_float_constant_conversions(self):
DIV = r_longlong(10 ** 10)
def fn():
return 420000000000.0 / DIV
res = self.interpret(fn, [])
assert self.float_eq(res, 42.0)
def test_exceptions(self):
def fn(x, y, z):
try:
# '/', when float-based, cannot raise in RPython!
# the try:finally: only tests an annotation bug.
x /= (y / z)
finally:
return x
self.interpret(fn, [1.0, 2.0, 3.0])
def test_copysign(self):
from rpython.rlib import rfloat
def fn(x, y):
return rfloat.copysign(x, y)
assert self.interpret(fn, [42, -1]) == -42
assert self.interpret(fn, [42, -0.0]) == -42
assert self.interpret(fn, [42, 0.0]) == 42
def test_rstring_to_float(self):
from rpython.rlib.rfloat import rstring_to_float
def fn(i):
s = ['42.3', '123.4'][i]
return rstring_to_float(s)
assert self.interpret(fn, [0]) == 42.3
def test_isnan(self):
from rpython.rlib import rfloat
def fn(x, y):
n1 = x * x
n2 = y * y * y
return rfloat.isnan(n1 / n2)
if self.__class__.__name__ != 'TestCliFloat':
# the next line currently fails on mono 2.6.7 (ubuntu 11.04), see:
# https://bugzilla.novell.com/show_bug.cgi?id=692493
assert self.interpret(fn, [1e200, 1e200]) # nan
#
assert not self.interpret(fn, [1e200, 1.0]) # +inf
assert not self.interpret(fn, [1e200, -1.0]) # -inf
assert not self.interpret(fn, [42.5, 2.3]) # +finite
assert not self.interpret(fn, [42.5, -2.3]) # -finite
def test_isinf(self):
from rpython.rlib import rfloat
def fn(x, y):
n1 = x * x
n2 = y * y * y
return rfloat.isinf(n1 / n2)
assert self.interpret(fn, [1e200, 1.0]) # +inf
assert self.interpret(fn, [1e200, -1.0]) # -inf
assert not self.interpret(fn, [1e200, 1e200]) # nan
assert not self.interpret(fn, [42.5, 2.3]) # +finite
assert not self.interpret(fn, [42.5, -2.3]) # -finite
def test_isfinite(self):
from rpython.rlib import rfloat
def fn(x, y):
n1 = x * x
n2 = y * y * y
return rfloat.isfinite(n1 / n2)
assert self.interpret(fn, [42.5, 2.3]) # +finite
assert self.interpret(fn, [42.5, -2.3]) # -finite
assert not self.interpret(fn, [1e200, 1.0]) # +inf
assert not self.interpret(fn, [1e200, -1.0]) # -inf
#
if self.__class__.__name__ != 'TestCliFloat':
# the next line currently fails on mono 2.6.7 (ubuntu 11.04), see:
# https://bugzilla.novell.com/show_bug.cgi?id=692493
assert not self.interpret(fn, [1e200, 1e200]) # nan
def test_break_up_float(self):
from rpython.rlib.rfloat import break_up_float
assert break_up_float('1') == ('', '1', '', '')
assert break_up_float('+1') == ('+', '1', '', '')
assert break_up_float('-1') == ('-', '1', '', '')
assert break_up_float('.5') == ('', '', '5', '')
assert break_up_float('1.2e3') == ('', '1', '2', '3')
assert break_up_float('1.2e+3') == ('', '1', '2', '+3')
assert break_up_float('1.2e-3') == ('', '1', '2', '-3')
# some that will get thrown out on return:
assert break_up_float('.') == ('', '', '', '')
assert break_up_float('+') == ('+', '', '', '')
assert break_up_float('-') == ('-', '', '', '')
assert break_up_float('e1') == ('', '', '', '1')
py.test.raises(ValueError, break_up_float, 'e')
def test_formatd(self):
from rpython.rlib.rfloat import formatd
def f(x):
return formatd(x, 'f', 2, 0)
res = self.ll_to_string(self.interpret(f, [10/3.0]))
assert res == '3.33'
def test_formatd_repr(self):
from rpython.rlib.rfloat import formatd
def f(x):
return formatd(x, 'r', 0, 0)
res = self.ll_to_string(self.interpret(f, [1.1]))
assert res == '1.1'
def test_formatd_huge(self):
from rpython.rlib.rfloat import formatd
def f(x):
return formatd(x, 'f', 1234, 0)
res = self.ll_to_string(self.interpret(f, [1.0]))
assert res == '1.' + 1234 * '0'
def test_formatd_F(self):
from rpython.translator.c.test.test_genc import compile
from rpython.rlib.rfloat import formatd
def func(x):
# Test the %F format, which is not supported by
# the Microsoft's msvcrt library.
return formatd(x, 'F', 4)
f = compile(func, [float])
assert f(10/3.0) == '3.3333'
def test_parts_to_float(self):
from rpython.rlib.rfloat import parts_to_float, break_up_float
def f(x):
if x == 0:
s = '1.0'
else:
s = '1e-100'
sign, beforept, afterpt, expt = break_up_float(s)
return parts_to_float(sign, beforept, afterpt, expt)
res = self.interpret(f, [0])
assert res == 1.0
res = self.interpret(f, [1])
assert res == 1e-100
def test_string_to_float(self):
from rpython.rlib.rfloat import rstring_to_float
def func(x):
if x == 0:
s = '1e23'
else:
s = '-1e23'
return rstring_to_float(s)
assert self.interpret(func, [0]) == 1e23
assert self.interpret(func, [1]) == -1e23
    def test_copysign_direct(self):
from rpython.rlib.rfloat import copysign
assert copysign(1, 1) == 1
assert copysign(-1, 1) == 1
assert copysign(-1, -1) == -1
assert copysign(1, -1) == -1
assert copysign(1, -0.) == -1
def test_round_away(self):
from rpython.rlib.rfloat import round_away
assert round_away(.1) == 0.
assert round_away(.5) == 1.
assert round_away(.7) == 1.
assert round_away(1.) == 1.
assert round_away(-.5) == -1.
assert round_away(-.1) == 0.
assert round_away(-.7) == -1.
assert round_away(0.) == 0.
def test_round_double(self):
from rpython.rlib.rfloat import round_double
def almost_equal(x, y):
assert round(abs(x-y), 7) == 0
almost_equal(round_double(0.125, 2), 0.13)
almost_equal(round_double(0.375, 2), 0.38)
almost_equal(round_double(0.625, 2), 0.63)
almost_equal(round_double(0.875, 2), 0.88)
almost_equal(round_double(-0.125, 2), -0.13)
almost_equal(round_double(-0.375, 2), -0.38)
almost_equal(round_double(-0.625, 2), -0.63)
almost_equal(round_double(-0.875, 2), -0.88)
almost_equal(round_double(0.25, 1), 0.3)
almost_equal(round_double(0.75, 1), 0.8)
almost_equal(round_double(-0.25, 1), -0.3)
almost_equal(round_double(-0.75, 1), -0.8)
        assert round_double(-6.5, 0) == -7.0
        assert round_double(-5.5, 0) == -6.0
        assert round_double(-1.5, 0) == -2.0
        assert round_double(-0.5, 0) == -1.0
        assert round_double(0.5, 0) == 1.0
        assert round_double(1.5, 0) == 2.0
        assert round_double(2.5, 0) == 3.0
        assert round_double(3.5, 0) == 4.0
        assert round_double(4.5, 0) == 5.0
        assert round_double(5.5, 0) == 6.0
        assert round_double(6.5, 0) == 7.0
        assert round_double(-25.0, -1) == -30.0
        assert round_double(-15.0, -1) == -20.0
        assert round_double(-5.0, -1) == -10.0
        assert round_double(5.0, -1) == 10.0
        assert round_double(15.0, -1) == 20.0
        assert round_double(25.0, -1) == 30.0
        assert round_double(35.0, -1) == 40.0
        assert round_double(45.0, -1) == 50.0
        assert round_double(55.0, -1) == 60.0
        assert round_double(65.0, -1) == 70.0
        assert round_double(75.0, -1) == 80.0
        assert round_double(85.0, -1) == 90.0
        assert round_double(95.0, -1) == 100.0
        assert round_double(12325.0, -1) == 12330.0
        assert round_double(350.0, -2) == 400.0
        assert round_double(450.0, -2) == 500.0
almost_equal(round_double(0.5e21, -21), 1e21)
almost_equal(round_double(1.5e21, -21), 2e21)
almost_equal(round_double(2.5e21, -21), 3e21)
almost_equal(round_double(5.5e21, -21), 6e21)
almost_equal(round_double(8.5e21, -21), 9e21)
almost_equal(round_double(-1.5e22, -22), -2e22)
almost_equal(round_double(-0.5e22, -22), -1e22)
almost_equal(round_double(0.5e22, -22), 1e22)
almost_equal(round_double(1.5e22, -22), 2e22)
def test_round_half_even(self):
from rpython.rlib import rfloat
for func in (rfloat.round_double_short_repr,
rfloat.round_double_fallback_repr):
# 2.x behavior
assert func(2.5, 0, False) == 3.0
# 3.x behavior
assert func(2.5, 0, True) == 2.0
class TestLLtype(BaseTestRfloat, LLRtypeMixin):
def test_hash(self):
def fn(f):
return compute_hash(f)
res = self.interpret(fn, | |
"""
Args:
keys (list or str): the column name(s) to apply the `func` to
func (callable): applied to each element in the specified columns
"""
return [[func(v) for v in self[key]] for key in keys]
def merge_rows(self, key, merge_scalars=True):
"""
        Uses key as a unique index and merges all duplicate rows. Use
cast_column to modify types of columns before merging to affect
behavior of duplicate rectification.
Args:
key: row to merge on
merge_scalars: if True, scalar values become lists
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> key_to_list = {
>>> 'uuid': [1, 1, 2, 3, 4, 2, 1],
>>> 'a': [1, 2, 3, 4, 5, 6, 7],
>>> 'b': [[1], [2], [3], [4], [5], [6], [7]],
>>> 'c': [[1], [1], [2], [3], [4], [2], [1]],
>>> }
>>> self = ColumnLists(key_to_list)
>>> key = 'uuid'
>>> newself = self.merge_rows('uuid')
>>> print(newself.to_csv())
# a, c, b, uuid
4, [3], [4], 3
5, [4], [5], 4
"[1, 2, 7]", "[1, 1, 1]", "[1, 2, 7]", "[1, 1, 1]"
"[3, 6]", "[2, 2]", "[3, 6]", "[2, 2]"
"""
import utool as ut
unique_labels, groupxs = self.group_indicies(key)
single_xs = [xs for xs in groupxs if len(xs) == 1]
multi_xs = [xs for xs in groupxs if len(xs) > 1]
singles = self.take(ut.flatten(single_xs))
multis = [self.take(idxs) for idxs in multi_xs]
merged_groups = []
for group in multis:
newgroup = {}
for key_ in group.keys():
val = group[key_]
if key_ == key:
                    # key_ was guaranteed unique
val_ = val[0]
elif hasattr(val[0].__class__, 'union'):
# HACK
# Sets are unioned
val_ = ut.oset.union(*val)
elif isinstance(val[0], (ut.oset,)):
# Sets are unioned
val_ = ut.oset.union(*val)
elif isinstance(val[0], (set)):
# Sets are unioned
val_ = set.union(*val)
elif isinstance(val[0], (tuple, list)):
# Lists are merged together
val_ = ut.flatten(val)
# val_ = ut.unique(ut.flatten(val))
else:
if ut.allsame(val):
# Merge items that are the same
val_ = val[0]
else:
if merge_scalars:
                            # If merging scalars is ok, then
# Values become lists if they are different
val_ = val
else:
if True:
# If there is only one non-none value then use that.
other_vals = ut.filter_Nones(val)
if len(other_vals) == 1:
                                    val_ = other_vals[0]
else:
raise ValueError(
'tried to merge a scalar in %r, val=%r'
% (key_, val)
)
else:
# If merging scalars is not ok, then
# we must raise an error
raise ValueError(
'tried to merge a scalar in %r, val=%r' % (key_, val)
)
newgroup[key_] = [val_]
merged_groups.append(ut.ColumnLists(newgroup))
merged_multi = self.__class__.flatten(merged_groups)
merged = singles + merged_multi
return merged
# def fix_autoreload_classes():
# """
# http://stackoverflow.com/questions/15605925/last-exception-python-prompt
# http://stackoverflow.com/questions/31363311/any-way-to-manually-fix-operation-of-super-after-ipython-reload-avoiding-ty
# """
# tb = sys.last_traceback
# val = sys.last_value
# next_tb = tb
# while next_tb is not None:
# this_tb = next_tb
# next_tb = this_tb.tb_next
# frame = this_tb.tb_frame
# code = frame.f_code
# fpath = code.co_filename
# modname = frame.f_globals['__name__']
# module = sys.modules[modname]
# # ... not sure what to do now
# # need to change local variables. seems not possible
class NamedPartial(functools.partial, NiceRepr):
def __init__(self, func, *args, **kwargs):
import utool as ut
super(functools.partial, self).__init__(func, *args, **kwargs)
self.__name__ = ut.get_funcname(func)
def __nice__(self):
return self.__name__
def super2(this_class, self):
"""
Fixes an error where reload causes super(X, self) to raise an exception
The problem is that reloading causes X to point to the wrong version of the
class. This function fixes the problem by searching and returning the
correct version of the class. See example for proper usage.
Args:
this_class (class): class passed into super
self (instance): instance passed into super
Ignore:
>>> # ENABLE_DOCTEST
>>> # If the parent module is reloaded, the super call may fail
>>> # super(Foo, self).__init__()
>>> # This will work around the problem most of the time
>>> # ut.super2(Foo, self).__init__()
>>> import utool as ut
>>> class Parent(object):
>>> def __init__(self):
>>> self.parent_attr = 'bar'
>>> class ChildSafe(Parent):
>>> def __init__(self):
>>> ut.super2(ChildSafe, self).__init__()
>>> class ChildDanger(Parent):
>>> def __init__(self):
>>> super(ChildDanger, self).__init__()
>>> # initial loading is fine
>>> safe1 = ChildSafe()
>>> danger1 = ChildDanger()
>>> assert safe1.parent_attr == 'bar'
>>> assert danger1.parent_attr == 'bar'
>>> # But if we reload (via simulation), then there will be issues
>>> Parent_orig = Parent
>>> ChildSafe_orig = ChildSafe
>>> ChildDanger_orig = ChildDanger
>>> # reloading the changes the outer classname
>>> # but the inner class is still bound via the closure
>>> # (we simulate this by using the old functions)
>>> # (note in reloaded code the names would not change)
>>> class Parent_new(object):
>>> __init__ = Parent_orig.__init__
>>> Parent_new.__name__ = 'Parent'
>>> class ChildSafe_new(Parent_new):
>>> __init__ = ChildSafe_orig.__init__
>>> ChildSafe_new.__name__ = 'ChildSafe'
>>> class ChildDanger_new(Parent_new):
>>> __init__ = ChildDanger_orig.__init__
>>> ChildDanger_new.__name__ = 'ChildDanger'
>>> #
>>> safe2 = ChildSafe_new()
>>> assert safe2.parent_attr == 'bar'
>>> import pytest
>>> with pytest.raises(TypeError):
>>> danger2 = ChildDanger_new()
"""
return super(fix_super_reload(this_class, self), self)
def fix_super_reload(this_class, self):
"""
Fixes an error where reload causes super(X, self) to raise an exception
The problem is that reloading causes X to point to the wrong version of the
class. This function fixes the problem by searching and returning the
correct version of the class. See example for proper usage.
USE `ut.super2` INSTEAD
Args:
this_class (class): class passed into super
self (instance): instance passed into super
DisableExample:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> class Parent(object):
>>> def __init__(self):
>>> self.parent_attr = 'bar'
>>> #
>>> class Foo(Parent):
>>> def __init__(self):
>>> # Dont do this, it will error if you reload
>>> # super(Foo, self).__init__()
>>> # Do this instead
>>> _Foo = ut.super2(Foo, self)
>>> super(_Foo, self).__init__()
>>> self = Foo()
>>> assert self.parent_attr == 'bar'
"""
if isinstance(self, this_class):
# Case where everything is ok
this_class_now = this_class
else:
# Case where we need to search for the right class
def find_parent_class(leaf_class, target_name):
target_class = None
from collections import deque
queue = deque()
queue.append(leaf_class)
seen_ = set([])
while len(queue) > 0:
related_class = queue.pop()
if related_class.__name__ != target_name:
for base in related_class.__bases__:
if base not in seen_:
queue.append(base)
seen_.add(base)
else:
target_class = related_class
break
return target_class
# Find new version of class
leaf_class = self.__class__
target_name = this_class.__name__
target_class = find_parent_class(leaf_class, target_name)
this_class_now = target_class
# print('id(this_class) = %r' % (id(this_class),))
# print('id(this_class_now) = %r' % (id(this_class_now),))
assert isinstance(self, this_class_now), 'Failed to fix %r, %r, %r' % (
self,
this_class,
this_class_now,
)
return this_class_now
class Shortlist(NiceRepr):
"""
Keeps an ordered collection of items.
Removes smallest items if size grows to large.
Example:
>>> # DISABLE_DOCTEST
>>> shortsize = 3
>>> shortlist = Shortlist(shortsize)
>>> print('shortlist = %r' % (shortlist,))
>>> item = (10, 1)
>>> shortlist.insert(item)
>>> print('shortlist = %r' % (shortlist,))
>>> item = (9, 1)
>>> shortlist.insert(item)
>>> print('shortlist = %r' % (shortlist,))
>>> item = (4, 1)
>>> shortlist.insert(item)
>>> print('shortlist = %r' % (shortlist,))
>>> item = (14, 1)
>>> shortlist.insert(item)
>>> print('shortlist = %r' % (shortlist,))
>>> item = (1, 1)
>>> shortlist.insert(item)
>>> print('shortlist = %r' % (shortlist,))
"""
def __init__(self, maxsize=None):
self._items = []
self._keys = []
self.maxsize = maxsize
def __iter__(self):
return iter(self._items)
def __len__(self):
return len(self._items)
def __nice__(self):
return str(self._items)
def insert(self, item):
import bisect
k = item[0]
idx = bisect.bisect_left(self._keys, k)
self._keys.insert(idx, k)
self._items.insert(idx, item)
if self.maxsize is not None and len(self._keys) > self.maxsize:
self._keys = self._keys[-self.maxsize :]
self._items = self._items[-self.maxsize :]
def _heappush_max(heap, item):
"""why is this not in heapq"""
heap.append(item)
heapq._siftdown_max(heap, 0, len(heap) - 1)
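# A minimal usage sketch (added for illustration): _heappush_max maintains a
# max-heap, so the largest key is always at index 0, mirroring heapq.heappush
# for min-heaps.
def _example_heappush_max():
    heap = []
    for value in [3, 1, 4, 1, 5]:
        _heappush_max(heap, value)
    assert heap[0] == 5  # the maximum element sits at the root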
class PriorityQueue(NiceRepr):
"""
abstracted priority queue for our needs
Combines properties of dicts and heaps
Uses a heap for fast minimum/maximum value search
Uses a dict for fast read only operations
CommandLine:
python -m utool.util_dev PriorityQueue
References:
http://code.activestate.com/recipes/522995-priority-dict-a-priority-queue-with-updatable-prio/
https://stackoverflow.com/questions/33024215/built-in-max-heap-api-in-python
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> items = dict(a=42, b=29, c=40, d=95, e=10)
>>> self = ut.PriorityQueue(items)
>>> print(self)
>>> assert len(self) == 5
>>> print(self.pop())
>>> assert len(self) == 4
>>> print(self.pop())
>>> assert len(self) | |
<gh_stars>0
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015-2016, <NAME> <EMAIL>
# pylint: disable=too-many-public-methods
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
"""
Test of the Verilog preprocessor
"""
from os.path import join, dirname, exists
import os
from unittest import TestCase
import shutil
from vunit.ostools import renew_path, write_file
from vunit.parsing.verilog.preprocess import VerilogPreprocessor, Macro
from vunit.parsing.verilog.tokenizer import VerilogTokenizer
from vunit.parsing.tokenizer import Token
from vunit.test.mock_2or3 import mock
class TestVerilogPreprocessor(TestCase):
"""
Test of the Verilog preprocessor
"""
def setUp(self):
self.output_path = join(dirname(__file__), "test_verilog_preprocessor_out")
renew_path(self.output_path)
self.cwd = os.getcwd()
os.chdir(self.output_path)
def tearDown(self):
os.chdir(self.cwd)
shutil.rmtree(self.output_path)
def test_non_preprocess_tokens_are_kept(self):
result = self.preprocess('"hello"ident/*comment*///comment')
result.assert_has_tokens('"hello"ident/*comment*///comment')
result.assert_no_defines()
def test_preprocess_define_without_value(self):
result = self.preprocess("`define foo")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo")})
result = self.preprocess("`define foo\nkeep")
result.assert_has_tokens("keep")
result.assert_has_defines({"foo": Macro("foo")})
def test_preprocess_define_with_value(self):
result = self.preprocess("`define foo bar \"abc\"")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("bar \"abc\""))})
def test_preprocess_define_with_lpar_value(self):
result = self.preprocess("`define foo (bar)")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("(bar)"))})
def test_preprocess_define_with_one_arg(self):
result = self.preprocess("`define foo(arg)arg 123")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("arg 123"), args=("arg",))})
def test_preprocess_define_with_one_arg_ignores_initial_space(self):
result = self.preprocess("`define foo(arg) arg 123")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("arg 123"), args=("arg",))})
def test_preprocess_define_with_multiple_args(self):
result = self.preprocess("`define foo( arg1, arg2)arg1 arg2")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("arg1 arg2"), args=("arg1", "arg2"))})
def test_preprocess_define_with_default_values(self):
result = self.preprocess("`define foo(arg1, arg2=default)arg1 arg2")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo",
tokenize("arg1 arg2"),
args=("arg1", "arg2"),
defaults={"arg2": tokenize("default")})})
def test_preprocess_substitute_define_without_args(self):
result = self.preprocess("""\
`define foo bar \"abc\"
`foo""")
result.assert_has_tokens("bar \"abc\"")
def test_preprocess_substitute_define_with_one_arg(self):
result = self.preprocess("""\
`define foo(arg)arg 123
`foo(hello hey)""")
result.assert_has_tokens("hello hey 123")
def test_preprocess_substitute_define_with_multiple_args(self):
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1 2, hello)""")
result.assert_has_tokens("1 2, hello")
def test_preprocess_substitute_define_with_default_values(self):
result = self.preprocess("""\
`define foo(arg1, arg2=default)arg1 arg2
`foo(1)""")
result.assert_has_tokens("1 default")
def test_preprocess_include_directive(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('`include "include.svh"',
include_paths=[self.output_path])
result.assert_has_tokens("hello hey")
result.assert_included_files([join(self.output_path, "include.svh")])
def test_detects_circular_includes(self):
self.write_file("include1.svh", '`include "include2.svh"')
self.write_file("include2.svh", '`include "include1.svh"')
result = self.preprocess('`include "include1.svh"',
include_paths=[self.output_path])
result.logger.error.assert_called_once_with(
'Circular `include of include2.svh detected\n%s',
'from fn.v line 1:\n'
'`include "include1.svh"\n'
'~~~~~~~~\n'
'from include1.svh line 1:\n'
'`include "include2.svh"\n'
'~~~~~~~~\n'
'from include2.svh line 1:\n'
'`include "include1.svh"\n'
'~~~~~~~~\n'
'at include1.svh line 1:\n'
'`include "include2.svh"\n'
' ~~~~~~~~~~~~~~')
def test_detects_circular_include_of_self(self):
self.write_file("include.svh", '`include "include.svh"')
result = self.preprocess('`include "include.svh"',
include_paths=[self.output_path])
result.logger.error.assert_called_once_with(
'Circular `include of include.svh detected\n%s',
'from fn.v line 1:\n'
'`include "include.svh"\n'
'~~~~~~~~\n'
'from include.svh line 1:\n'
'`include "include.svh"\n'
'~~~~~~~~\n'
'at include.svh line 1:\n'
'`include "include.svh"\n'
' ~~~~~~~~~~~~~')
def test_does_not_detect_non_circular_includes(self):
self.write_file("include3.svh", 'keep')
self.write_file("include1.svh", '`include "include3.svh"\n`include "include2.svh"')
self.write_file("include2.svh", '`include "include3.svh"')
result = self.preprocess('`include "include1.svh"\n`include "include2.svh"',
include_paths=[self.output_path])
result.assert_no_log()
def test_detects_circular_macro_expansion_of_self(self):
result = self.preprocess('''
`define foo `foo
`foo
''')
result.logger.error.assert_called_once_with(
'Circular macro expansion of foo detected\n%s',
'from fn.v line 3:\n'
'`foo\n'
'~~~~\n'
'from fn.v line 2:\n'
'`define foo `foo\n'
' ~~~~\n'
'at fn.v line 2:\n'
'`define foo `foo\n'
' ~~~~')
def test_detects_circular_macro_expansion(self):
result = self.preprocess('''
`define foo `bar
`define bar `foo
`foo
''')
result.logger.error.assert_called_once_with(
'Circular macro expansion of bar detected\n%s',
'from fn.v line 4:\n'
'`foo\n'
'~~~~\n'
'from fn.v line 2:\n'
'`define foo `bar\n'
' ~~~~\n'
'from fn.v line 3:\n'
'`define bar `foo\n'
' ~~~~\n'
'at fn.v line 2:\n'
'`define foo `bar\n'
' ~~~~')
def test_does_not_detect_non_circular_macro_expansion(self):
result = self.preprocess('''
`define foo bar
`foo
`foo
''')
result.assert_no_log()
def test_preprocess_include_directive_from_define(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('''\
`define inc "include.svh"
`include `inc''',
include_paths=[self.output_path])
result.assert_has_tokens('hello hey')
result.assert_included_files([join(self.output_path, "include.svh")])
def test_preprocess_include_directive_from_define_with_args(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('''\
`define inc(a) a
`include `inc("include.svh")''', include_paths=[self.output_path])
result.assert_has_tokens('hello hey')
result.assert_included_files([join(self.output_path, "include.svh")])
def test_preprocess_macros_are_recursively_expanded(self):
result = self.preprocess('''\
`define foo `bar
`define bar xyz
`foo
`define bar abc
`foo
''',
include_paths=[self.output_path])
result.assert_has_tokens('xyz\nabc\n')
def test_ifndef_taken(self):
result = self.preprocess('''\
`ifndef foo
taken
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_taken(self):
result = self.preprocess('''\
`define foo
`ifdef foo
taken
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_else_taken(self):
result = self.preprocess('''\
`define foo
`ifdef foo
taken
`else
else
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_not_taken(self):
result = self.preprocess('''\
`ifdef foo
taken
`endif
keep''')
result.assert_has_tokens("keep")
def test_ifdef_else_not_taken(self):
result = self.preprocess('''\
`ifdef foo
taken
`else
else
`endif
keep''')
result.assert_has_tokens("else\nkeep")
def test_ifdef_elsif_taken(self):
result = self.preprocess('''\
`define foo
`ifdef foo
taken
`elsif bar
elsif_taken
`else
else_taken
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_elsif_elseif_taken(self):
result = self.preprocess('''\
`define bar
`ifdef foo
taken
`elsif bar
elsif_taken
`else
else_taken
`endif
keep''')
result.assert_has_tokens("elsif_taken\nkeep")
def test_ifdef_elsif_else_taken(self):
result = self.preprocess('''\
`ifdef foo
taken
`elsif bar
elsif_taken
`else
else_taken
`endif
keep''')
result.assert_has_tokens("else_taken\nkeep")
def test_nested_ifdef(self):
result = self.preprocess('''\
`define foo
`ifdef foo
outer_before
`ifdef bar
inner_ifndef
`else
inner_else
`endif
`ifdef bar
inner_ifndef
`elsif foo
inner_elsif
`endif
outer_after
`endif
keep''')
result.assert_has_tokens("outer_before\n"
"inner_else\n"
"inner_elsif\n"
"outer_after\n"
"keep")
def test_preprocess_broken_define(self):
result = self.preprocess("`define")
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"Verilog `define without argument\n%s",
"at fn.v line 1:\n"
"`define\n"
"~~~~~~~")
def test_preprocess_broken_define_first_argument(self):
result = self.preprocess('`define "foo"')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"Verilog `define invalid name\n%s",
"at fn.v line 1:\n"
'`define "foo"\n'
" ~~~~~")
def test_preprocess_broken_define_argument_list(self):
result = self.preprocess('`define foo(')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(\n'
" ~")
result = self.preprocess('`define foo(a')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a\n'
" ~")
result = self.preprocess('`define foo(a=')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a=\n'
" ~")
result = self.preprocess('`define foo(a=b')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a=b\n'
" ~")
result = self.preprocess('`define foo(a=)')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a=)\n'
" ~")
result = self.preprocess('`define foo("a"')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo("a"\n'
" ~")
result = self.preprocess('`define foo("a"=')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo("a"=\n'
" ~")
def test_preprocess_substitute_define_broken_args(self):
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1 2)""")
result.assert_has_tokens("")
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo""")
result.assert_has_tokens("")
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(""")
result.assert_has_tokens("")
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1""")
result.assert_has_tokens("")
def test_preprocess_substitute_define_missing_argument(self):
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1)""")
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"Missing value for argument arg2\n%s",
"at fn.v line 2:\n"
'`foo(1)\n'
"~~~~")
def test_preprocess_substitute_define_too_many_argument(self):
result = self.preprocess("""\
`define foo(arg1)arg1
`foo(1, 2)""")
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"Too many arguments got 2 expected 1\n%s",
"at fn.v line 2:\n"
'`foo(1, 2)\n'
"~~~~")
def test_preprocess_substitute_define_eof(self):
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1 2""")
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define actuals\n%s",
"at fn.v line 2:\n"
'`foo(1 2\n'
"~~~~")
def test_substitute_undefined(self):
result = self.preprocess('`foo')
result.assert_has_tokens("")
# Debug since there are many custom `names in tools
result.logger.debug.assert_called_once_with(
"Verilog undefined name\n%s",
"at fn.v line 1:\n"
'`foo\n'
"~~~~")
def test_preprocess_include_directive_missing_file(self):
result = self.preprocess('`include "missing.svh"',
include_paths=[self.output_path])
result.assert_has_tokens("")
result.assert_included_files([])
# Is debug message since there are so many builtin includes in tools
result.logger.debug.assert_called_once_with(
"Could not find `include file missing.svh\n%s",
"at fn.v line 1:\n"
'`include "missing.svh"\n'
" ~~~~~~~~~~~~~")
def test_preprocess_include_directive_missing_argument(self):
result = self.preprocess('`include',
include_paths=[self.output_path])
result.assert_has_tokens("")
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `include argument\n%s",
"at fn.v line 1:\n"
'`include\n'
"~~~~~~~~")
def test_preprocess_include_directive_bad_argument(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('`include foo "include.svh"',
include_paths=[self.output_path])
result.assert_has_tokens(' "include.svh"')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include bad argument\n%s",
"at fn.v line 1:\n"
'`include foo "include.svh"\n'
" ~~~")
def test_preprocess_include_directive_from_define_bad_argument(self):
result = self.preprocess('''\
`define inc foo
`include `inc
keep''',
include_paths=[self.output_path])
result.assert_has_tokens('\nkeep')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include has bad argument\n%s",
"from fn.v line 2:\n"
'`include `inc\n'
' ~~~~\n'
"at fn.v line 1:\n"
'`define inc foo\n'
" ~~~")
def test_preprocess_include_directive_from_empty_define(self):
result = self.preprocess('''\
`define inc
`include `inc
keep''', include_paths=[self.output_path])
result.assert_has_tokens('\nkeep')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include has bad argument, empty define `inc\n%s",
"at fn.v line 2:\n"
'`include `inc\n'
" ~~~~")
def test_preprocess_include_directive_from_define_not_defined(self):
result = self.preprocess('`include `inc', include_paths=[self.output_path])
result.assert_has_tokens('')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include argument not defined\n%s",
"at fn.v line 1:\n"
'`include `inc\n'
" ~~~~")
def test_preprocess_error_in_include_file(self):
self.write_file("include.svh", '`include foo')
result = self.preprocess('\n\n`include "include.svh"',
include_paths=[self.output_path])
result.assert_has_tokens('\n\n')
result.assert_included_files([join(self.output_path, "include.svh")])
result.logger.warning.assert_called_once_with(
"Verilog `include bad argument\n%s",
"from fn.v line 3:\n"
'`include "include.svh"\n'
"~~~~~~~~\n"
"at include.svh line 1:\n"
'`include foo\n'
' ~~~')
def test_preprocess_error_in_expanded_define(self):
result = self.preprocess('''\
`define foo `include wrong
`foo
''', include_paths=[self.output_path])
result.assert_has_tokens('\n')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include bad argument\n%s",
"from fn.v line 2:\n"
'`foo\n'
'~~~~\n'
"at fn.v line 1:\n"
'`define foo `include wrong\n'
" ~~~~~")
def test_ifdef_eof(self):
result = self.preprocess('''\
`ifdef foo
taken''')
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `ifdef\n%s",
"at fn.v line 1:\n"
'`ifdef foo\n'
'~~~~~~')
def test_ifdef_bad_argument(self):
result = self.preprocess('''\
`ifdef "hello"
keep''')
result.assert_has_tokens("\nkeep")
result.logger.warning.assert_called_once_with(
"Bad argument to `ifdef\n%s",
"at fn.v line 1:\n"
'`ifdef "hello"\n'
' ~~~~~~~')
def test_elsif_bad_argument(self):
result = self.preprocess('''\
`ifdef bar
`elsif "hello"
keep''')
result.assert_has_tokens("\nkeep")
result.logger.warning.assert_called_once_with(
"Bad argument to `elsif\n%s",
"at fn.v line 2:\n"
'`elsif "hello"\n'
' ~~~~~~~')
def test_undefineall(self):
result = self.preprocess('''\
`define foo keep
`define bar keep2
`foo
`undefineall''')
result.assert_has_tokens("keep\n")
result.assert_no_defines()
def test_resetall(self):
result = self.preprocess('''\
`define foo keep
`define bar keep2
`foo
`resetall''')
result.assert_has_tokens("keep\n")
result.assert_no_defines()
def test_undef(self):
result = self.preprocess('''\
`define foo keep
`define bar keep2
`foo
`undef foo''')
result.assert_has_tokens("keep\n")
result.assert_has_defines({"bar": Macro("bar", tokenize("keep2"))})
def test_undef_eof(self):
result = self.preprocess('`undef')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `undef\n%s",
"at fn.v line 1:\n"
'`undef\n'
'~~~~~~')
def test_undef_bad_argument(self):
result = self.preprocess('`undef "foo"')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"Bad argument to `undef\n%s",
"at fn.v line 1:\n"
'`undef "foo"\n'
' ~~~~~')
def test_undef_not_defined(self):
result = self.preprocess('`undef foo')
result.assert_has_tokens("")
result.assert_no_defines()
| |
self.update()
def EventFullscreenSwitch( self, event ):
self.parentWidget().FullscreenSwitch()
def KeepCursorAlive( self ):
self._InitiateCursorHideWait()
def ProcessContentUpdates( self, service_keys_to_content_updates ):
if self._current_media is None:
# probably a file view stats update as we close down--ignore it
return
if self.HasMedia( self._current_media ):
next_media = self._GetNext( self._current_media )
if next_media == self._current_media:
next_media = None
else:
next_media = None
ClientMedia.ListeningMediaList.ProcessContentUpdates( self, service_keys_to_content_updates )
if self.HasNoMedia():
self._TryToCloseWindow()
elif self.HasMedia( self._current_media ):
HG.client_controller.pub( 'canvas_new_index_string', self._canvas_key, self._GetIndexString() )
self.update()
elif self.HasMedia( next_media ):
self.SetMedia( next_media )
else:
self.SetMedia( self._GetFirst() )
class CanvasMediaListFilterArchiveDelete( CanvasMediaList ):
def __init__( self, parent, page_key, media_results ):
CanvasMediaList.__init__( self, parent, page_key, media_results )
self._my_shortcuts_handler.AddShortcuts( 'archive_delete_filter' )
self._reserved_shortcut_names.append( 'archive_delete_filter' )
self._kept = set()
self._deleted = set()
self._widget_event_filter.EVT_MOUSE_EVENTS( self.EventMouse )
HG.client_controller.sub( self, 'Delete', 'canvas_delete' )
HG.client_controller.sub( self, 'Undelete', 'canvas_undelete' )
QP.CallAfter( self.SetMedia, self._GetFirst() ) # don't set this until we have a size > (20, 20)!
def _Back( self ):
if self._IShouldCatchShortcutEvent():
if self._current_media == self._GetFirst():
return
else:
self._ShowPrevious()
self._kept.discard( self._current_media )
self._deleted.discard( self._current_media )
def TryToDoPreClose( self ):
if len( self._kept ) > 0 or len( self._deleted ) > 0:
label = 'keep ' + HydrusData.ToHumanInt( len( self._kept ) ) + ' and delete ' + HydrusData.ToHumanInt( len( self._deleted ) ) + ' files?'
( result, cancelled ) = ClientGUIDialogsQuick.GetFinishFilteringAnswer( self, label )
if cancelled:
if self._current_media in self._kept:
self._kept.remove( self._current_media )
if self._current_media in self._deleted:
self._deleted.remove( self._current_media )
return False
elif result == QW.QDialog.Accepted:
def process_in_thread( service_keys_and_content_updates ):
for ( service_key, content_update ) in service_keys_and_content_updates:
HG.client_controller.WriteSynchronous( 'content_updates', { service_key : [ content_update ] } )
self._deleted_hashes = [ media.GetHash() for media in self._deleted ]
self._kept_hashes = [ media.GetHash() for media in self._kept ]
service_keys_and_content_updates = []
reason = 'Deleted in Archive/Delete filter.'
for chunk_of_hashes in HydrusData.SplitListIntoChunks( self._deleted_hashes, 64 ):
service_keys_and_content_updates.append( ( CC.LOCAL_FILE_SERVICE_KEY, HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, chunk_of_hashes, reason = reason ) ) )
service_keys_and_content_updates.append( ( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ARCHIVE, self._kept_hashes ) ) )
HG.client_controller.CallToThread( process_in_thread, service_keys_and_content_updates )
self._kept = set()
self._deleted = set()
self._current_media = self._GetFirst() # so the pubsub on close is better
if HC.options[ 'remove_filtered_files' ]:
all_hashes = set()
all_hashes.update( self._deleted_hashes )
all_hashes.update( self._kept_hashes )
HG.client_controller.pub( 'remove_media', self._page_key, all_hashes )
return CanvasMediaList.TryToDoPreClose( self )
def _Delete( self, media = None, reason = None, file_service_key = None ):
if self._current_media is None:
return False
self._deleted.add( self._current_media )
if self._current_media == self._GetLast():
self._TryToCloseWindow()
else:
self._ShowNext()
return True
def _GenerateHoverTopFrame( self ):
ClientGUIHoverFrames.FullscreenHoverFrameTopArchiveDeleteFilter( self, self, self._canvas_key )
def _Keep( self ):
self._kept.add( self._current_media )
if self._current_media == self._GetLast():
self._TryToCloseWindow()
else:
self._ShowNext()
def _Skip( self ):
if self._current_media == self._GetLast():
self._TryToCloseWindow()
else:
self._ShowNext()
def Keep( self, canvas_key ):
if canvas_key == self._canvas_key:
self._Keep()
def Back( self, canvas_key ):
if canvas_key == self._canvas_key:
self._Back()
def Delete( self, canvas_key ):
if canvas_key == self._canvas_key:
self._Delete()
def EventDelete( self, event ):
if self._IShouldCatchShortcutEvent( event = event ):
self._Delete()
else:
return True # was: event.ignore()
def EventMouse( self, event ):
if self._IShouldCatchShortcutEvent( event = event ):
if event.modifiers() & QC.Qt.ShiftModifier:
caught = True
if event.type() == QC.QEvent.MouseButtonPress and event.button() == QC.Qt.LeftButton:
self.BeginDrag()
elif event.type() == QC.QEvent.MouseButtonRelease and event.button() == QC.Qt.LeftButton:
self.EndDrag()
elif event.type() == QC.QEvent.MouseMove and event.buttons() != QC.Qt.NoButton:
self.EventMouseMove( event )
else:
caught = False
if caught:
return False
shortcut = ClientGUIShortcuts.ConvertMouseEventToShortcut( event )
if shortcut is not None:
shortcut_processed = self._my_shortcuts_handler.ProcessShortcut( shortcut )
if shortcut_processed:
return False
return True # was: event.ignore()
def EventUndelete( self, event ):
if self._IShouldCatchShortcutEvent( event = event ):
self._Undelete()
else:
return True # was: event.ignore()
def ProcessApplicationCommand( self, command, canvas_key = None ):
if canvas_key is not None and canvas_key != self._canvas_key:
return False
command_processed = True
command_type = command.GetCommandType()
data = command.GetData()
if command_type == CC.APPLICATION_COMMAND_TYPE_SIMPLE:
action = data
if action in ( 'archive_delete_filter_keep', 'archive_file' ):
self._Keep()
elif action in ( 'archive_delete_filter_delete', 'delete_file' ):
self._Delete()
elif action == 'archive_delete_filter_skip':
self._Skip()
elif action == 'archive_delete_filter_back':
self._Back()
elif action == 'launch_the_archive_delete_filter':
self._TryToCloseWindow()
else:
command_processed = False
else:
command_processed = False
if not command_processed:
command_processed = CanvasMediaList.ProcessApplicationCommand( self, command )
return command_processed
def Skip( self, canvas_key ):
if canvas_key == self._canvas_key:
self._Skip()
def Undelete( self, canvas_key ):
if canvas_key == self._canvas_key:
self._Undelete()
class CanvasMediaListNavigable( CanvasMediaList ):
def __init__( self, parent, page_key, media_results ):
CanvasMediaList.__init__( self, parent, page_key, media_results )
self._my_shortcuts_handler.AddShortcuts( 'media_viewer_browser' )
self._reserved_shortcut_names.append( 'media_viewer_browser' )
HG.client_controller.sub( self, 'Delete', 'canvas_delete' )
HG.client_controller.sub( self, 'ShowNext', 'canvas_show_next' )
HG.client_controller.sub( self, 'ShowPrevious', 'canvas_show_previous' )
HG.client_controller.sub( self, 'Undelete', 'canvas_undelete' )
def _GenerateHoverTopFrame( self ):
ClientGUIHoverFrames.FullscreenHoverFrameTopNavigableList( self, self, self._canvas_key )
def Archive( self, canvas_key ):
if self._canvas_key == canvas_key:
self._Archive()
def Delete( self, canvas_key ):
if self._canvas_key == canvas_key:
self._Delete()
def Inbox( self, canvas_key ):
if self._canvas_key == canvas_key:
self._Inbox()
def ProcessApplicationCommand( self, command, canvas_key = None ):
if canvas_key is not None and canvas_key != self._canvas_key:
return False
command_processed = True
command_type = command.GetCommandType()
data = command.GetData()
if command_type == CC.APPLICATION_COMMAND_TYPE_SIMPLE:
action = data
if action == 'remove_file_from_view':
self._Remove()
elif action == 'view_first':
self._ShowFirst()
elif action == 'view_last':
self._ShowLast()
elif action == 'view_previous':
self._ShowPrevious()
elif action == 'view_next':
self._ShowNext()
else:
command_processed = False
else:
command_processed = False
if not command_processed:
command_processed = CanvasMediaList.ProcessApplicationCommand( self, command )
return command_processed
def ShowFirst( self, canvas_key ):
if canvas_key == self._canvas_key:
self._ShowFirst()
def ShowLast( self, canvas_key ):
if canvas_key == self._canvas_key:
self._ShowLast()
def ShowNext( self, canvas_key ):
if canvas_key == self._canvas_key:
self._ShowNext()
def ShowPrevious( self, canvas_key ):
if canvas_key == self._canvas_key:
self._ShowPrevious()
def Undelete( self, canvas_key ):
if canvas_key == self._canvas_key:
self._Undelete()
class CanvasMediaListBrowser( CanvasMediaListNavigable ):
def __init__( self, parent, page_key, media_results, first_hash ):
CanvasMediaListNavigable.__init__( self, parent, page_key, media_results )
self._timer_slideshow_job = None
self._timer_slideshow_interval = 0
self._widget_event_filter.EVT_MOUSE_EVENTS( self.EventMouse )
if first_hash is None:
first_media = self._GetFirst()
else:
try:
first_media = self._GetMedia( { first_hash } )[0]
except:
first_media = self._GetFirst()
QP.CallAfter( self.SetMedia, first_media ) # don't set this until we have a size > (20, 20)!
HG.client_controller.sub( self, 'AddMediaResults', 'add_media_results' )
def _PausePlaySlideshow( self ):
if self._RunningSlideshow():
self._StopSlideshow()
elif self._timer_slideshow_interval > 0:
self._StartSlideshow( self._timer_slideshow_interval )
def _RunningSlideshow( self ):
| |
page = requests.get(date_url)
soup = bs4.BeautifulSoup(page.content, 'lxml')
raw_date = soup.findAll('li', attrs = {'class': 'b-list__box-list-item'})[0].text
raw_date = raw_date.replace('\n', '')
raw_date = raw_date.replace('Date:', '')
raw_date = raw_date.replace(' ', '')
raw_date = raw_date.replace(',', '')
date_list = raw_date.split(' ')
month = month_labels.index(date_list[0]) + 1
day = date_list[1]
year = date_list[2]
date = str(day) + '/' + str(month) + '/' + str(year)
# Adds the data to the appropriate lists
info_data.append(date)
info_data.append(name1)
info_data.append(name2)
info_data.append(r)
info_data.append(split_dec)
fighter1_stats.append(name1)
fighter1_stats.append(date)
fighter1_stats.append(time)
fighter1_stats.append(knockdowns1)
fighter1_stats.append(knockdowns2)
fighter1_stats.append(sig_strikes_landed1)
fighter1_stats.append(sig_strikes_attempted1)
fighter1_stats.append(sig_strikes_landed2)
fighter1_stats.append(strikes_landed1)
fighter1_stats.append(strikes_attempted1)
fighter1_stats.append(strikes_landed2)
fighter1_stats.append(takedowns1)
fighter1_stats.append(takedown_attempts1)
fighter1_stats.append(takedowns2)
fighter1_stats.append(submission_attempts1)
fighter1_stats.append(clinch_strikes1)
fighter1_stats.append(clinch_strikes2)
fighter1_stats.append(ground_strikes1)
fighter1_stats.append(ground_strikes2)
fighter2_stats.append(name2)
fighter2_stats.append(date)
fighter2_stats.append(time)
fighter2_stats.append(knockdowns2)
fighter2_stats.append(knockdowns1)
fighter2_stats.append(sig_strikes_landed2)
fighter2_stats.append(sig_strikes_attempted2)
fighter2_stats.append(sig_strikes_landed1)
fighter2_stats.append(strikes_landed2)
fighter2_stats.append(strikes_attempted2)
fighter2_stats.append(strikes_landed1)
fighter2_stats.append(takedowns2)
fighter2_stats.append(takedown_attempts2)
fighter2_stats.append(takedowns1)
fighter2_stats.append(submission_attempts2)
fighter2_stats.append(clinch_strikes2)
fighter2_stats.append(clinch_strikes1)
fighter2_stats.append(ground_strikes2)
fighter2_stats.append(ground_strikes1)
# Adds the data for this fight to the accumulated dataset
all_info.append(info_data)
all_stats.append(fighter1_stats)
all_stats.append(fighter2_stats)
except:
print('Passing')
pass
# Adds the data to the relevant pandas dataframes
for data in all_info:
df_len = len(results_dataframe)
results_dataframe.loc[df_len] = data
for stat in all_stats:
df_len = len(stats_dataframe)
stats_dataframe.loc[df_len] = stat
# Saves the dataframes to CSV files
results_dataframe.to_csv('FightResults.csv')
stats_dataframe.to_csv('FightStats.csv')
self.fight_results = pandas.read_csv('FightResults.csv')
self.fight_stats = pandas.read_csv('FightStats.csv')
return self.fight_results, self.fight_stats
# Scrapes the data for each individual fighter from the internet and stores this in a CSV file
def getFighterData(self, window):
def calculateAge(month, day, year):
today = datetime.date.today()
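# The trailing boolean subtracts one year when the birthday has not yet occurred
# this year, e.g. a birth date of 1990-12-01 evaluated on 2020-06-01 gives 29.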
return today.year - year - ((today.month, today.day) < (month, day))
not_allowed = ['%', 'lbs.', ',', '"', 'Record:', 'Reach:', 'SLpM:', 'Str. Acc.:', 'SApM:', 'Str. Def:',
'TD Avg.:',
'TD Acc.:', 'TD Def.:', 'Sub. Avg.:', ' \n', '\n', ' ']
month_labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
allChars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z']
url_list = list()
print('Stage 3/4')
window.updateProgress(round(0))
# Finds the urls for the webpages of all individual fighters
for c in allChars:
window.updateProgress(round(((allChars.index(c) + 1) * 100) / len(allChars), 2))
url = ("http://www.ufcstats.com/statistics/fighters?char=%s&page=all" % c)
page = requests.get(url)
soup = bs4.BeautifulSoup(page.content, 'lxml')
for a in soup.findAll('a', href = True, attrs = {'class': 'b-link b-link_style_black'}):
new_url = a['href']
if new_url not in url_list:
url_list.append(new_url)
# Initializes the pandas dataframe for storing individual fighter information
fighters_dataframe = pandas.DataFrame(
columns = ['Name', 'Wins', 'Losses', 'Draws', 'Height', 'Weight', 'Reach', 'Stance', 'Age', 'SLpM', 'StrAcc',
'SApM', 'StrDef', 'TDAvg', 'TDAcc', 'TDDef', 'SubAvg'])
print('Stage 4/4')
window.updateProgress(0)
# Loops through the webpages for each fighter
for url in url_list:
window.updateProgress(round(((url_list.index(url) + 1) * 100) / len(url_list), 2))
try:
value_index = url_list.index(url)
except:
value_index = -1
print(round(((value_index / len(url_list)) * 100), 1), '% complete')
# Gets the page content from the url
page = requests.get(url)
soup = bs4.BeautifulSoup(page.content, 'lxml')
# Finds all the useful data for the webpage and stores it in the dataframe
data = list()
# Loops through all the h2 elements found on the webpage
for h2 in soup.findAll('h2'):
raw_fighter_name = h2.find('span', attrs = {'class': 'b-content__title-highlight'}).text
for i in range(0, len(not_allowed)):
if not_allowed[i] in raw_fighter_name:
raw_fighter_name = raw_fighter_name.replace(not_allowed[i], '')
name_array = raw_fighter_name.split(' ')
name = name_array[0]
for y in range(1, len(name_array)):
name = name + ' ' + name_array[y]
record = h2.find('span', attrs = {'class': 'b-content__title-record'}).text
for i in [' ', '\n', 'Record:']:
if i in record:
record = record.replace(i, '')
record = record.split('-')
wins = record[0]
losses = record[1]
draws = record[2]
# Ensures that 'draws' is in the correct format
if '(' in draws:
draws = draws[0]
data.append(name)
data.append(wins)
data.append(losses)
data.append(draws)
# Loops through all the unordered list elements in the webpage
for ul in soup.findAll('ul'):
# Loops through all the list item elements in the given webpage list
for li in ul.findAll('li', attrs = {'class': 'b-list__box-list-item b-list__box-list-item_type_block'}):
collected_data = li.text # The text from the given list item
for i in range(0, len(not_allowed)):
if not_allowed[i] in collected_data:
collected_data = collected_data.replace(not_allowed[i], '')
# Processes the data accordingly if it represents the height of the fighter
if ('Height:' in str(collected_data)) and ('--' not in str(collected_data)):
collected_data = collected_data.replace('Height:', '')
measurement = collected_data.split("'")
# Converts height to centimetres
cm1 = int(measurement[0]) * 30.48
cm2 = int(measurement[1]) * 2.54
collected_data = round((cm1 + cm2), 1)
# Processes the data accordingly if it represents the date of birth of the fighter
if ('DOB:' in str(collected_data)) and ('--' not in str(collected_data)):
collected_data = collected_data.replace('DOB:', '')
dateList = collected_data.split(' ')
monthStr = str(dateList[0])
day = int(dateList[1])
year = int(dateList[2])
month = 1
for x in range(0, len(month_labels)):
if month_labels[x] == monthStr:
month = x + 1
collected_data = int(calculateAge(month, day, year))
# Processes the data accordingly if it represents the weight of the fighter
if 'Weight:' in str(collected_data):
collected_data = collected_data.replace('Weight:', '')
collected_data = collected_data.replace('lbs', '')
collected_data = collected_data.replace(' ', '')
# Processes the data accordingly if it represents the stance of the fighter
elif 'STAN' in str(collected_data):
if 'Orthodox' in collected_data:
collected_data = 1
elif 'Southpaw' in collected_data:
collected_data = 2
else:
collected_data = 3
collected_data = str(collected_data)
# Adds the current piece of data to the 'data' list in string format
if (collected_data != '') and ('--' not in collected_data):
data.append(collected_data)
# Adds the fighter data to the dataframe if the data found reflect the full set of data required
if len(data) == 17:
df_len = len(fighters_dataframe)
fighters_dataframe.loc[df_len] = data
# Saves the dataframe to a CSV file
fighters_dataframe.to_csv('FighterData.csv')
self.fighter_data = fighters_dataframe
print('Finished.')
# Creates a set of training data based upon the statistics of each fighter prior to a given fight,
# using the result of the fight as the training label
def createTrainingData(self, window):
window.updateProgress(0)
if len(self.fight_results) != 0 and len(self.fight_stats) != 0 and len(self.fighter_data) != 0:
training_data = pandas.DataFrame(
columns = ['Height1', 'Reach1', 'Age 1', 'Knockdowns PM 1', 'Gets Knocked Down PM 1',
'Sig Strikes Landed PM 1', 'Sig Strikes Attempted PM 1', 'Sig Strikes Absorbed PM 1',
'Strikes Landed PM 1', 'Strikes Attempted PM 1', 'Strikes Absorbed PM 1', 'Strike Accuracy 1',
'Takedowns PM 1', 'Takedown Attempts PM 1', 'Gets Taken Down PM 1', 'Submission Attempts PM 1',
'Clinch Strikes PM 1', 'Clinch Strikes Taken PM 1', 'Grounds Strikes PM 1',
'Ground Strikes Taken PM 1', 'Height 2', 'Reach 2', 'Age 2', 'Knockdowns PM 2',
'Gets Knocked Down PM 2', 'Sig Strikes Landed PM 2', 'Sig Strikes Attempted PM 2',
'Sig Strikes Absorbed PM 2', 'Strikes Landed PM 2', 'Strikes Attempted PM 2',
'Strikes Absorbed PM 2', 'Strike Accuracy 2', 'Takedowns PM 2', 'Takedown Attempts PM 2',
'Gets Taken Down PM 2', 'Submission Attempts PM 2', 'Clinch Strikes PM 2',
'Clinch Strikes Taken PM 2', 'Grounds Strikes PM 2', 'Ground Strikes Taken PM 2', 'Win', 'Loss'])
all_data = list()
# Loops through all the fight results and attempts to find the stats for each of the fighters from their
# four prior fights. This data can then be labelled with the fight result and used to train the neural network
# model in the 'Predictor' class, when it is called from main.
for index, row in self.fight_results.iterrows():
window.updateProgress(round(((index + 1) * 100) / len(self.fight_results), 2))
try:
date = row[1]
days_since_fight = calculateDaysSince(date.split('/')[0], date.split('/')[1], date.split('/')[2])
name1 = row[2].rstrip()
name2 = row[3].rstrip()
result = row[4]
split_dec = row[5]
if days_since_fight > 75: # 0
print(date)
# Doesn't include any fights that happened before 2010
if int(date.split('/')[2]) < 2010:
raise Exception()
# Finds the stats of the two fighters prior to the date of the given fight occurring
fighter1_useful_data = self.findFighterStats(name1, date)
fighter2_useful_data = self.findFighterStats(name2, date)
# Produces a one-hot style label array for the fight outcome (softened to 0.6/0.4 for split decisions)
if result == 2 and split_dec == 1:
result_array = [0.4, 0.6]
opposite_array = [0.6, 0.4]
elif result == 1 and split_dec == 1:
result_array = [0.6, 0.4]
opposite_array = [0.4, 0.6]
elif result == 2:
result_array = [0, 1]
opposite_array = [1, 0]
elif result == 1:
result_array = [1, 0]
opposite_array = [0, 1]
else: # Draw
result_array = | |
True
else:
t[0] = False
def p_arg_list_opt(t):
''' arg_list_opt : arg_list
|'''
if len(t)== 2:
t[0] = t[1]
else:
t[0] = []
def p_arg_list(t):
''' arg_list : arg_list COMA ID
| ID'''
if len(t) == 4:
t[1].append(t[3])
t[0] = t[1]
else:
t[0] = [t[1]]
def p_ins_create_pl(t):
'''ins_create_pl : CREATE op_replace FUNCTION ID PARABRE parameteropt PARCIERRE returns AS block LANGUAGE ID PUNTO_COMA
| CREATE op_replace PROCEDURE ID PARABRE parameteropt PARCIERRE LANGUAGE ID AS block
'''
global banderaFunction
global listaParametros
t[0] = GenerarC3D()
if len(t) == 14:
meta = {'id':t[4], 'parametros':t[6],'estado': 'ALMACENADO', 'tipo': t[3]}
func = funcion(meta,t[10])
ListaFunciones.append({'id':t[4], 'cod':func})
genTable(t[4])
t[0].code = ""
t[0].statement = 'CREATE_FUNCTION'
else:
meta = {'id':t[4], 'parametros':t[6], 'estado': 'ALMACENADO', 'tipo':t[3]}
func = funcion(meta,t[11])
ListaFunciones.append({'id':t[4], 'cod':func})
genTable(t[4])
t[0].code = ""
t[0].statement = 'CREATE_FUNCTION'
banderaFunction = False
listaParametros.clear()
def p_op_replace(t):
'''op_replace : OR REPLACE
| '''
global banderaFunction
banderaFunction = True
def p_parameteropt(t):
'''parameteropt : parameters
|
'''
global listaParametros
if len(t)== 2:
t[0] = t[1]
else:
t[0] = []
listaParametros = t[0]
def p_parameters(t):
'''parameters : parameters COMA parameter
| parameter
'''
if len(t) == 4:
t[1].append(t[3])
t[0] = t[1]
elif len(t) == 2:
t[0] = [t[1]]
def p_parameter(t):
'''parameter : idopt t_dato
| ID ANYELEMENT
| ID ANYCOMPATIBLE
| OUT ID t_dato
| ID
'''
if len(t) == 4:
AddTs(t[2], 'None', 'DECLARACION PARÁMETRO')
t[0] = t[2]
elif len(t) == 2:
AddTs(t[1], 'None', 'DECLARACION PARÁMETRO')
t[0] = t[1]
else:
AddTs(t[1], t[2], 'DECLARACION PARÁMETRO')
t[0] = t[1]
def p_idopt(t):
'''idopt : ID
|
'''
if len(t) == 2:
t[0] = t[1]
else:
t[0] = ""
def p_t_dato(t):
'''t_dato : SMALLINT
| BIGINT
| NUMERIC
| DECIMAL PARABRE NUMERO COMA NUMERO PARCIERRE
| INTEGER
| INT
| REAL
| DOUBLE PRECISION
| CHAR PARABRE NUMERO PARCIERRE
| VARCHAR PARABRE NUMERO PARCIERRE
| VARCHAR
| CHARACTER PARABRE NUMERO PARCIERRE
| TEXT
| TIMESTAMP arg_precision
| TIME arg_precision
| DATE
| INTERVAL arg_tipo arg_precision
| BOOLEAN
| MONEY
| ID '''
if t[1] == 'SMALLINT':
t[0]= DBType.smallint
elif t[1] == 'BIGINT':
t[0]= DBType.bigint
elif t[1] == 'DOUBLE':
t[0] = DBType.double_precision
elif t[1] == 'NUMERIC':
t[0] = DBType.numeric
elif t[1] == 'DECIMAL':
t[0] = DBType.decimal
elif t[1] == 'INTEGER':
t[0] = DBType.integer
elif t[1] == 'CHAR':
t[0] = DBType.char
elif t[1] == 'VARCHAR':
t[0] = DBType.varchar
elif t[1] == 'CHARACTER':
t[0] = DBType.character
elif t[1] == 'REAL':
t[0] = DBType.real
elif t[1] == 'INT':
t[0] = DBType.integer
elif t[1] == 'TEXT':
t[0] = DBType.text
elif t[1] == 'TIMESTAMP':
t[0] = DBType.timestamp_wtz
elif t[1] == 'TIME':
t[0] = DBType.time_wtz
elif t[1] == 'DATE':
t[0] = DBType.date
elif t[1] == 'INTERVAL':
t[0] = DBType.interval
elif t[1] == 'BOOLEAN':
t[0] = DBType.boolean
elif t[1] == 'MONEY':
t[0] = DBType.money
else:
t[0] = 'None'
def p_retruns(t):
'''returns : RETURNS exp_plsql
| RETURNS ANYELEMENT
| RETURNS ANYCOMPATIBLE
| RETURNS tipo_dato
| RETURNS VOID
|
'''
def p_block(t):
'''block : DOLAR_LABEL body PUNTO_COMA DOLAR_LABEL
'''
t[0] = t[2]
def p_body(t):
'''body : declare_statement BEGIN internal_blockopt END '''
t1 = ""
t3 = ""
if t[1] != None:
t1 = t[1]
if t[3] != None:
t3 = t[3]
if len(t1) == 0 and t[3] != None:
t[0] = t3
elif len(t3) == 0 and t[1] != None:
t[0] = t1
else:
t[0] = t1 + t3
def p_declare(t):
'''declare_statement : declare_statement DECLARE declares
| DECLARE declares
| '''
if len(t) == 3:
t[0] = t[2]
elif len(t) == 4:
t[0] = t[1] + t[3]
else:
t[0] = []
def p_declares(t):
'''declares : declares declaracion
| declaracion
'''
if len(t) == 3:
t[1] += t[2]
t[0] = t[1]
else:
t[0] = t[1]
def p_declaracion(t):
'''declaracion : ID constante t_dato not_null declaracion_default PUNTO_COMA'''
temp = None
v2 = ""
if isinstance(t[5],dict):
temp = t[5]['temp']
v2 = t[5]['c3d']
v1 = declare(t[1],t[3],temp)
AddTs(t[1], t[3], 'DECLARACIÓN')
t[0] = v2 + v1
def p_internal_blockopt(t):
'''internal_blockopt : internal_block
|
'''
if len(t) == 2:
t[0] = t[1]
else:
t[0] = ""
def p_internal_block(t):
'''internal_block : internal_body'''
t[0] = t[1]
def p_internal_body(t):
'''internal_body : body PUNTO_COMA
| instruccion_if END IF PUNTO_COMA
| instruccion_case
| return
| statements
'''
if isinstance(t[1],Ins_If):
t[0] = t[1].Traduct()
else:
t[0] = t[1]
def p_constante(t):
'''constante : CONSTANT'''
def p_constante_null(t):
'''constante : '''
def p_not_null(t):
'''not_null : NOT NULL'''
def p_not_null_null(t):
'''not_null : '''
def p_declaracion_default(t):
'''declaracion_default : DEFAULT exp_plsql'''
t[0] = traduct(t[2])
def p_declaracion_default_dos(t):
'''declaracion_default : SIGNO_IGUAL exp_plsql '''
t[0] = traduct(t[2])
def p_declaracion_default_signo(t):
'''declaracion_default : DOSPUNTOS SIGNO_IGUAL exp_plsql '''
t[0] = traduct(t[3])
def p_declaracion_default_null(t):
'''declaracion_default : '''
t[0] = None
def p_declaracionf_funcion(t):
'''declaracion_funcion : ID ALIAS FOR DOLAR NUMERO PUNTO_COMA'''
t[0] = ''
def p_declaracionf_funcion_rename(t):
'''declaracion_funcion : ID ALIAS FOR ID PUNTO_COMA'''
t[0] = ''
def p_declaracionc_copy(t):
'''declaracion_copy : ID ID PUNTO ID SIGNO_MODULO TYPE PUNTO_COMA'''
t[0] = ''
def p_declaracionr_row(t):
'''declaracion_row : ID ID SIGNO_MODULO ROWTYPE PUNTO_COMA'''
print('COPY ROW')
t[0] = ''
def p_declaracionre_record(t):
'''declaracion_record : ID RECORD PUNTO_COMA'''
print('RECORD')
t[0] = ''
def p_asignacion(t):
'''asignacion : ID referencia_id SIGNO_IGUAL exp_plsql PUNTO_COMA'''
valor = traduct(t[4])
temporal = valor['temp']
codigo = assign(t[1], temporal)
if codigo == None: codigo = ""
modifyTs(t[1],temporal, 'ASIGNACION')
t[0] = '\n' + valor['c3d'] + codigo
def p_asignacion_igual(t):
'''asignacion : ID referencia_id SIGNO_IGUAL ins_select_parentesis PUNTO_COMA
'''
v = assignQ(t[1],t[4].code)
modifyTs(t[1],t[4].code, 'ASIGNACION')
t[0] = v
def p_asignacion_igual_parentesis(t):
'''asignacion : ID referencia_id SIGNO_IGUAL PARABRE ins_select_parentesis PARCIERRE PUNTO_COMA
'''
modifyTs(t[1],t[5].code, 'ASIGNACION')
t[0] = assignQ(t[1],t[5].code)
def p_asignacion_dos(t):
'''asignacion : ID referencia_id DOSPUNTOS SIGNO_IGUAL exp_plsql PUNTO_COMA'''
valor = traduct(t[5])
codigo = assign(t[1], valor['temp'])
if codigo == None: codigo = ""
modifyTs(t[1],valor['temp'], 'ASIGNACION')
t[0] ='\n' + valor['c3d'] + codigo
def p_asignacion_dos_signo_(t):
'''asignacion : ID referencia_id DOSPUNTOS SIGNO_IGUAL ins_select_parentesis PUNTO_COMA'''
modifyTs(t[1],t[5].code, 'ASIGNACION')
t[0] = assignQ(t[1], t[5].code)
def p_asignacion_dos_signo(t):
'''asignacion : ID referencia_id DOSPUNTOS SIGNO_IGUAL PARABRE ins_select_parentesis PARCIERRE PUNTO_COMA'''
modifyTs(t[1],t[6].code, 'ASIGNACION')
t[0] = assignQ(t[1], t[6].code)
def p_referencia_id(t):
'''referencia_id : PUNTO ID
| '''
def p_return(t):
'''return : RETURN exp_plsql PUNTO_COMA'''
t[0] = returnF(t[2])
def p_return_next(t):
'''return : RETURN NEXT exp_plsql PUNTO_COMA'''
t[0] = returnF(t[2])
def p_return_query(t):
'''return : RETURN QUERY query'''
def p_query(t):
'''query : ins_insert
| ins_select
| ins_update
| ins_delete '''
def p_instruccion_if(t):
'''instruccion_if : IF exp_plsql then ELSE statements
| IF exp_plsql then instruccion_elif
| IF exp_plsql then'''
if len(t) == 6:
print('INSTRUCCION IF else')
insif = Ins_If(t[2],t[3],t[5],t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
elif len(t) == 5:
print('INSTRUCCION IF elif')
insif = Ins_If(t[2],t[3],t[4],t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
else:
print('INSTRUCCION IFsolo')
insif = Ins_If(t[2],t[3],None,t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
def p_elsif(t):
'''instruccion_elif : ELSIF exp_plsql then ELSE statements
| ELSIF exp_plsql then instruccion_elif
| ELSIF exp_plsql then '''
if len(t) == 6:
print('INSTRUCCION elsIF - else')
insif = Ins_If(t[2],t[3],t[5],t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
elif len(t) == 5:
print('INSTRUCCION elsIF - elsif')
insif = Ins_If(t[2],t[3],t[4],t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
else:
print('INSTRUCCION elsIF')
insif = Ins_If(t[2],t[3],None,t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
def p_then(t):
'''then : THEN statements
| THEN '''
if len(t) == 3:
t[0] = t[2]
else:
t[0] = ''
def p_sentencia(t):
'''sentencia : statements
| '''
if len(t) == 2:
t[0] = t[1]
else:
t[0] = ''
def p_instruccion_case(t):
'''instruccion_case : CASE exp_plsql cases END CASE PUNTO_COMA'''
codi = ''
condi = traduct(t[2])
if isinstance(t[3],Ins_Case):
t[3].case = condi['temp']
codi = t[3].Traduct()
t[0] = condi['c3d']+'\n'+t[1]+' '+condi['temp']+'\n'+codi+' '+t[4]+' '+t[5]+' '+t[6]+'\n'
def p_cases(t):
'''cases : WHEN multiple then cases
| WHEN multiple then ELSE sentencia
| WHEN multiple then '''
if len(t) == 6:
print('when else')
insif = Ins_Case(t[2],t[3],t[5],t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
elif len(t) == 5:
print('when when')
insif = Ins_Case(t[2],t[3],t[4],t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
else:
print('when solo ')
insif = Ins_Case(t[2],t[3],None,t.slice[1].lexpos, t.slice[1].lineno)
t[0] = insif
def p_multiple(t):
'''multiple : multiple COMA exp_plsql
| exp_plsql'''
if len(t) == 4:
t[1].append({'valor':t[3],'tipo':copy(bandexp[0])})
t[0] = t[1]
else:
t[0] = [{'valor':t[1],'tipo':copy(bandexp[0])}]
def p_statements(t):
''' statements : statements statement
| statement'''
if len(t) == 3:
t[0] = t[1] + t[2]
else:
t[0] = t[1]
def p_statement(t):
'''statement : asignacion
| f_query
| null
| declaracion
| declaracion_funcion
| declaracion_copy
| declaracion_row
| declaracion_record
| instruccion_if END IF PUNTO_COMA
| instruccion_case
| return'''
if isinstance(t[1],Ins_If):
t[0] | |
# -*- coding: utf-8 -*-
"""
Utility functions (e.g., loading datasets, calculating scale factors) for single
image-per-shot datasets (non-ALC).
"""
import os
import copy
import skimage.io as skio
import pandas as pd
from . import czd_utils
__all__ = ['find_Align',
'indiv_img_scale_factor',
'check_load_sample_info',
'check_unused_samples',
'unique_scan_name',
'load_gen_data_dict',
'gen_calc_scans_n']
def find_Align(name, all_files_list, img_suffix = '.png'):
"""Find a .Align file matching (same except for file type) an image file.
Parameters
----------
name : str
Image file name (relative; without full path).
all_files_list : list[str]
A list of all files in a directory where .Align file may be present.
img_suffix : str, optional
File type for image file. The default is '.png'.
Returns
-------
str
Either the name of a matching .Align file (if found) or '' (if not).
"""
# str.strip removes matching *characters*, not a suffix, so slice the suffix off instead
stem = name[:-len(img_suffix)] if name.endswith(img_suffix) else name
Align_src = stem + '.Align'
if Align_src in all_files_list:
return Align_src
else:
return ''
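# Usage sketch (hypothetical file names): with files ['shot1.png', 'shot1.Align',
# 'shot2.png'], find_Align('shot1.png', files) returns 'shot1.Align' and
# find_Align('shot2.png', files) returns ''.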
def indiv_img_scale_factor(img_path, Align_path):
"""Calculate the scale factor for an individual shot image.
Parameters
----------
img_path : str
Full path to an image file (tested with .png, .bmp images, should work
with others, including .tif).
Align_path : str
Full path to a .Align xml file with real-world dimensions for the image.
Returns
-------
float
A calculated scale factor for the image in μm/pixel.
"""
img = skio.imread(img_path)
img_xy = img.shape[:2][::-1]
align_xy = czd_utils.get_Align_center_size(Align_path)[2:]
return czd_utils.calc_scale_factor(align_xy, img_xy)
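# Usage sketch (hypothetical paths): a per-shot scale in μm/pixel could be read as
#   sf = indiv_img_scale_factor('scans/shot1.png', 'scans/shot1.Align')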
def check_load_sample_info(project_dir):
"""Checks if a sample_info.csv file is present in a project directory and
whether it has correct formatting. Returns loaded scale dict if true.
Parameters
----------
project_dir : str
Path to a project directory that may or may not contain sample_info.csv.
Returns
-------
found_bool : bool
True if a correctly-formatted .csv is found, otherwise False.
output_dict : dict{str: float}
A dict with values from .csv {SAMPLE_NAME1: SCALE_FACTOR1, ...} if
usable sample_info.csv found. Otherwise, an empty dict {}.
"""
found_bool = False
output_dict = {}
if 'sample_info.csv' in os.listdir(project_dir):
csv_path = os.path.join(project_dir, 'sample_info.csv')
orig_csv = pd.read_csv(csv_path, header=0, index_col=False,
squeeze=False).to_dict('list')
orig_csv_cols = list(orig_csv.keys())
#check to make sure that necessary columns are in imported csv
if all(key in orig_csv_cols for key in ['Sample', 'Scale']):
#make sure that columns are same length
if len(orig_csv['Sample']) == len(orig_csv['Scale']):
found_bool = True
for each_idx, each_sample in enumerate(orig_csv['Sample']):
output_dict[str(each_sample)] = float(orig_csv['Scale'][each_idx])
else:
print('Error: sample_info.csv columns cannot be different lengths')
else:
print('Error: required column names not in sample_info.csv')
else:
print('Error: no sample_info.csv file found in project directory')
return found_bool, output_dict
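# Expected sample_info.csv layout (illustrative values; only the 'Sample' and
# 'Scale' columns are read by check_load_sample_info, and 'Scale' is cast to float):
#
#   Sample,Scale
#   SAMPLE_A,1.23
#   SAMPLE_B,0.98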
def check_unused_samples(loaded_csv_dict, loaded_img_dict):
"""Checks whether any samples loaded from a sample_info.csv file do not
match any samples loaded from a project folder. Prints a warning if
true.
Parameters
----------
loaded_csv_dict : dict{str: float}
A dict with values from .csv {SAMPLE_NAME1: SCALE_FACTOR1, ...}.
loaded_img_dict : dict
A dict loaded from a project folder with format:
{EACH_SAMPLE: {EACH_SPOT: {'img_file': FULL_IMG_PATH,
'Align_file': FULL_ALIGN_PATH or '',
'rel_file': IMG_FILENAME}, ...}, ...}.
Returns
-------
None.
"""
unused_samples_list = []
csv_samples = list(loaded_csv_dict.keys())
img_samples = list(loaded_img_dict.keys())
for csv_sample in csv_samples:
if csv_sample not in img_samples:
unused_samples_list.append(csv_sample)
if unused_samples_list:
print('WARNING: sample(s) from sample_info.csv do not match',
'sample(s) loaded from folder:', '\n',
unused_samples_list, '\n',
'Try checking names/capitalization in sample_info and try reloading')
def unique_scan_name(curr_scan_name, curr_sample_keys):
"""Get a unique scan name if neccesary to avoid replacing scans
with the same name in a dict.
Parameters
----------
curr_scan_name : str
Unalterned name of a scan that is being loaded into a sample dict.
curr_sample_keys : list[str]
Names of scans that have already been loaded into said dict.
Returns
-------
curr_scan_name : str
A unique name for the scan (appends a count integer to input name).
"""
if curr_scan_name not in curr_sample_keys:
return curr_scan_name
else:
orig_scan_name = curr_scan_name
count_int = 1
while curr_scan_name in curr_sample_keys:
curr_scan_name = str(orig_scan_name) + '-' + str(count_int)
count_int += 1
return curr_scan_name
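# Usage sketch (hypothetical names): if 'Spot1' is already loaded,
# unique_scan_name('Spot1', ['Spot1', 'Spot2']) returns 'Spot1-1', while an
# unseen name is returned unchanged, e.g. unique_scan_name('Spot3', ['Spot1']) -> 'Spot3'.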
def load_gen_opt_A(scans_dir, split_fxn = None, file_type = '.png'):
"""Split image files in a folder by sample, shot names extracted from image
file names and load them into a dict.
Parameters
----------
scans_dir : str
Path to a 'scans' subdirectory in a project directory.
split_fxn : function or None, optional
A function (defined outside this one) that takes an image file name
as input and returns a sample name and scan name (an illustrative
split_fxn sketch follows this function). The default is None; in this
case scan names will be the file names with file_type stripped.
file_type : str, optional
File type for loadable images in scans_dir. The default is '.png'.
Returns
-------
output_dict : dict
A dict loaded from a scans_dir with format:
{EACH_SAMPLE: {EACH_SPOT: {'img_file': FULL_IMG_PATH,
'Align_file': FULL_ALIGN_PATH or '',
'rel_file': IMG_FILENAME}, ...}, ...}.
"""
output_dict = {}
all_dir_files = os.listdir(scans_dir)
all_dir_imgs = [file for file in all_dir_files if file.endswith(file_type)]
for each_file in all_dir_imgs:
each_sample = 'UNSPECIFIED_SAMPLE'
# slice off the file suffix (str.strip would drop matching characters, not the suffix)
each_spot = each_file[:-len(file_type)]
if split_fxn is not None:
try:
each_sample, each_spot = split_fxn(each_file)
except ValueError:
print('ERROR: SPLITTING FUNCTION INCOMPATIBLE WITH IMAGE NAMES')
split_fxn = None
each_img_path = os.path.join(scans_dir, each_file)
#checks for available .Align file, returns (relative) path if possible
each_align_path = find_Align(each_file, all_dir_files, file_type)
if each_align_path:
each_align_path = os.path.join(scans_dir, each_align_path)
if each_sample not in output_dict.keys():
output_dict[each_sample] = {}
#makes sure that spots are unique to avoid losing data
each_spot = unique_scan_name(each_spot,
list(output_dict[each_sample].keys()))
output_dict[each_sample][each_spot] = {'img_file': each_img_path,
'Align_file': each_align_path,
'rel_file': each_file}
return output_dict
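# Illustrative sketch only (not part of the original module): one possible
# split_fxn of the kind load_gen_opt_A/load_gen_opt_B accept. The file-name
# pattern ('SAMPLE_SPOT.png', underscore-separated) is a hypothetical example;
# a name without an underscore raises ValueError, which the callers catch.
def _example_split_fxn(img_filename):
    """Split e.g. 'SAMPLE1_Spot-12.png' into ('SAMPLE1', 'Spot-12')."""
    stem = os.path.splitext(img_filename)[0]
    sample_name, scan_name = stem.split('_', 1)
    return sample_name, scan_name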
def load_gen_opt_B(scans_dir, split_fxn = None, file_type = '.png'):
"""Load image files from a project directory where image +/- .Align files
are organized into sample-specific sub-folders.
Parameters
----------
scans_dir : str
Path to a project folder 'scans' sub-directory.
split_fxn : function or None, optional
A function (defined outside this one) that takes an image file name
as input and returns a sample name (this can be a generic value)
and scan name. The default is None; in this case scan names will be
the file names with file_type stripped.
file_type : str, optional
File type for loadable images in scans_dir. The default is '.png'.
Returns
-------
output_dict : dict
A dict loaded from a scans_dir with format:
{EACH_SAMPLE: {EACH_SPOT: {'img_file': FULL_IMG_PATH,
'Align_file': FULL_ALIGN_PATH or '',
'rel_file': IMG_FILENAME}, ...}, ...}.
"""
output_dict = {}
# get sample subfolder paths, sample names from folders
sample_dirs = [ f.path for f in os.scandir(scans_dir) if f.is_dir() ]
#print(sample_dirs)
samples = [ os.path.basename(os.path.normpath(f)) for f in sample_dirs ]
#gets info for each file in subfolder
for each_sample_idx, each_sample_dir in enumerate(sample_dirs):
each_sample = samples[each_sample_idx]
all_dir_files = os.listdir(each_sample_dir)
all_dir_imgs = [file for file in all_dir_files if file.endswith(file_type)]
for each_file in all_dir_imgs:
# slice off the file suffix (str.strip would drop matching characters, not the suffix)
each_spot = each_file[:-len(file_type)]
if split_fxn is not None:
try:
_, each_spot = split_fxn(each_file)
except ValueError:
print('ERROR: SPLITTING FUNCTION INCOMPATIBLE WITH IMAGE NAMES')
split_fxn = None
each_img_path = os.path.join(each_sample_dir, each_file)
#checks for available .Align file, returns (relative) path if possible
each_align_path = find_Align(each_file, all_dir_files, file_type)
if each_align_path:
each_align_path = os.path.join(each_sample_dir, each_align_path)
if each_sample not in output_dict.keys():
output_dict[each_sample] = {}
#makes sure that spots are unique to avoid losing data
each_spot = unique_scan_name(each_spot,
list(output_dict[each_sample].keys()))
output_dict[each_sample][each_spot] = {'img_file': each_img_path,
'Align_file': each_align_path,
'rel_file': each_file}
return output_dict
def gen_img_scale_factors(loaded_img_dict, scale_bools, sample_csv_dict ={},
verbose = False):
"""Add scale factors to an project dict with per-scan RL zircon images.
Parameters
----------
loaded_img_dict : dict
A dict loaded from a project 'scans' subdirectory with format:
{EACH_SAMPLE: {EACH_SPOT: {'img_file': FULL_IMG_PATH,
'Align_file': FULL_ALIGN_PATH or '',
'rel_file': IMG_FILENAME}, ...}, ...}.
scale_bools : list[bool]
User-input booleans indicating how scale factors should be
found/calculated. Format is [bool, bool]. These correspond to options:
[Try calculating scales from .Align files if possible,
Try loading scales from a sample_info.csv file if possible].
These options are tried sequentially. If neither work or if both bools
are False, a default scale of 1.0 is used.
sample_csv_dict : dict{str: float}, optional
A dict with values from sample_info.csv if available:
{SAMPLE_NAME1: SCALE_FACTOR1, ...}.
Otherwise, default of {} (will not be used for scaling).
verbose : bool, optional
A bool indicating whether the function will print sample names as it
loads/calculates scale factors. This can take a while, so printing may
be useful to show that the process is still running.
Returns
-------
output_dict : dict
A copy of loaded_img_dict with additional per-scan info:
{'scale_factor': float(scale factor for each scan),
'scale_from': str(method used to calculate/find scale factor)}.
"""
#unlinked
#!/usr/bin/env python
# Copyright (c) 2004-2006 ActiveState Software Inc.
#
# Contributors:
# <NAME> (<EMAIL>)
"""
pythoncile - a Code Intelligence Language Engine for the Python language
Module Usage:
from pythoncile import scan
mtime = os.stat("foo.py")[stat.ST_MTIME]
content = open("foo.py", "r").read()
scan(content, "foo.py", mtime=mtime)
Command-line Usage:
pythoncile.py [<options>...] [<Python files>...]
Options:
-h, --help dump this help and exit
-V, --version dump this script's version and exit
-v, --verbose verbose output, use twice for more verbose output
-f, --filename <path> specify the filename of the file content
passed in on stdin, this is used for the "path"
attribute of the emitted <file> tag.
--md5=<string> md5 hash for the input
--mtime=<secs> modification time for output info, in #secs since
1/1/70.
-L, --language <name>
the language of the file being scanned
-c, --clock print timing info for scans (CIX is not printed)
One or more Python files can be specified as arguments or content can be
passed in on stdin. A directory can also be specified, in which case
all .py files in that directory are scanned.
This is a Language Engine for the Code Intelligence (codeintel) system.
It emits the Code Intelligence XML (CIX) format. See:
http://specs.activestate.com/Komodo_3.0/func/code_intelligence.html
The command-line interface will return non-zero iff the scan failed.
"""
# Dev Notes:
# <none>
#
# TODO:
# - type inferencing: asserts
# - type inferencing: return statements
# - type inferencing: calls to isinstance
# - special handling for None may be required
# - Comments and doc strings. What format?
# - JavaDoc - type hard to parse and not reliable
# (http://java.sun.com/j2se/javadoc/writingdoccomments/).
# - PHPDoc? Possibly, but not that rigorous.
# - Grouch (http://www.mems-exchange.org/software/grouch/) -- dunno yet.
# - Don't like requirement for "Instance attributes:" landmark in doc
# strings.
# - This can't be a full solution because the requirement to repeat
# the argument name doesn't "fit" with having a near-by comment when
# variable is declared.
# - Two space indent is quite rigid
# - Only allowing attribute description on the next line is limiting.
# - Seems focussed just on class attributes rather than function
# arguments.
# - Perhaps what PerlCOM POD markup uses?
# - Home grown? My own style? Dunno
# - make type inferencing optional (because it will probably take a long
# time to generate), this is tricky though b/c should the CodeIntel system
# re-scan a file after "I want type inferencing now" is turned on? Hmmm.
# - [lower priority] handle staticmethod(methname) and
# classmethod(methname). This means having to delay emitting XML until
# end of class scope and adding .visitCallFunc().
# - [lower priority] look for associated comments for variable
# declarations (as per VS.NET's spec, c.f. "Supplying Code Comments" in
# the VS.NET user docs)
import os
import sys
import getopt
from hashlib import md5
import re
import logging
import pprint
import glob
import time
import stat
import types
from cStringIO import StringIO
from functools import partial
# this particular ET is different from xml.etree and is expected
# to be returned from scan_et() by the clients of this module
import ciElementTree as et
import compiler
from compiler import ast
from compiler.visitor import dumpNode, ExampleASTVisitor
import parser
from codeintel2.common import CILEError
from codeintel2 import util
from codeintel2 import tdparser
#---- exceptions
class PythonCILEError(CILEError):
pass
#---- global data
_version_ = (0, 3, 0)
log = logging.getLogger("pythoncile")
# log.setLevel(logging.DEBUG)
util.makePerformantLogger(log)
_gClockIt = 0 # if true then we are gathering timing data
_gClock = None # if gathering timing data this is set to time retrieval fn
_gStartTime = None # start time of current file being scanned
#---- internal routines and classes
def _isclass(namespace):
return (len(namespace["types"]) == 1
and "class" in namespace["types"])
def _isfunction(namespace):
return (len(namespace["types"]) == 1
and "function" in namespace["types"])
def getAttrStr(attrs):
"""Construct an XML-safe attribute string from the given attributes
"attrs" is a dictionary of attributes
The returned attribute string includes a leading space, if necessary,
so it is safe to use the string right after a tag name. Any Unicode
attributes will be encoded into UTF8 encoding as part of this process.
"""
from xml.sax.saxutils import quoteattr
s = ''
for attr, value in attrs.items():
if not isinstance(value, basestring):
value = str(value)
elif isinstance(value, unicode):
value = value.encode("utf-8")
s += ' %s=%s' % (attr, quoteattr(value))
return s
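# Illustrative sketch (not part of the original module): the returned string
# has a leading space and XML-quoted values, e.g.
#   getAttrStr({'name': 'foo', 'line': 3}) -> ' name="foo" line="3"'
# (attribute order follows dict iteration order).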
# match 0x00-0x1f except TAB(0x09), LF(0x0A), and CR(0x0D)
_encre = re.compile('([\x00-\x08\x0b\x0c\x0e-\x1f])')
# XXX: this is not used anywhere, is it needed at all?
if sys.version_info >= (2, 3):
charrefreplace = 'xmlcharrefreplace'
else:
# Python 2.2 doesn't have 'xmlcharrefreplace'. Fallback to a
# literal '?' -- this is better than failing outright.
charrefreplace = 'replace'
def xmlencode(s):
"""Encode the given string for inclusion in a UTF-8 XML document.
Note: s must *not* be Unicode, it must be encoded before being passed in.
Specifically, illegal or unpresentable characters are encoded as
XML character entities.
"""
# As defined in the XML spec, some of the characters from 0x00 to 0x1f
# are not allowed in well-formed XML. We replace those with entity
# references here.
# http://www.w3.org/TR/2000/REC-xml-20001006#charsets
#
# Dev Notes:
# - It would be nice if Python has a codec for this. Perhaps we
# should write one.
# - Eric, at one point, had this change to '_xmlencode' for rubycile:
# p4 diff2 -du \
# //depot/main/Apps/Komodo-devel/src/codeintel/ruby/rubycile.py#7 \
# //depot/main/Apps/Komodo-devel/src/codeintel/ruby/rubycile.py#8
# but:
# My guess is that there was a bug here, and explicitly
# utf-8-encoding non-ascii characters fixed it. This was a year
# ago, and I don't recall what I mean by "avoid shuffling the data
# around", but it must be related to something I observed without
# that code.
# replace with XML decimal char entity, e.g. '&#7;'
return _encre.sub(lambda m: '&#%d;' % ord(m.group(1)), s)
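# Illustrative sketch of the substitution above (not part of the original
# module): a stray control character becomes a decimal character reference,
# e.g. xmlencode("a\x01b") -> 'a&#1;b', while TAB/LF/CR pass through untouched.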
def cdataescape(s):
"""Return the string escaped for inclusion in an XML CDATA section.
Note: Any Unicode will be encoded to UTF8 encoding as part of this process.
A CDATA section is terminated with ']]>', therefore this token in the
content must be escaped. To my knowledge the XML spec does not define
how to do that. My chosen escape (courtesy of EricP) is to split
that token into multiple CDATA sections, so that, for example:
blah...]]>...blah
becomes:
blah...]]]]><![CDATA[>...blah
and the resulting content should be copacetic:
<b><![CDATA[blah...]]]]><![CDATA[>...blah]]></b>
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
parts = s.split("]]>")
return "]]]]><![CDATA[>".join(parts)
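# Minimal sketch (illustrative; the helper below is not part of the original
# module): wraps escaped text in a CDATA section, showing the ']]>' split
# described above.
def _demo_cdata_wrap(text):
    """Return `text` wrapped in a CDATA section, e.g.
    'blah...]]>...blah' -> '<![CDATA[blah...]]]]><![CDATA[>...blah]]>'."""
    return "<![CDATA[%s]]>" % cdataescape(text)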
def _unistr(x):
if isinstance(x, unicode):
return x
elif isinstance(x, str):
return x.decode('utf8')
else:
return unicode(x)
def _et_attrs(attrs):
return dict((_unistr(k), xmlencode(_unistr(v))) for k, v in attrs.items()
if v is not None)
def _et_data(data):
return xmlencode(_unistr(data))
def _node_attrs(node, **kw):
return dict(name=node["name"],
line=node.get("line"),
doc=node.get("doc"),
attributes=node.get("attributes") or None,
**kw)
def _node_citdl(node):
max_type = None
max_score = -1
#'guesses' is a types dict: {<type guess>: <score>, ...}
guesses = node.get("types", {})
for type, score in guesses.items():
if ' ' in type:
# XXX Drop the <start-scope> part of CITDL for now.
type = type.split(None, 1)[0]
# Don't emit None types, it does not help us. Fix for bug:
# http://bugs.activestate.com/show_bug.cgi?id=71989
if type != "None":
if score > max_score:
max_type = type
max_score = score
return max_type
class AST2CIXVisitor:
"""Generate Code Intelligence XML (CIX) from walking a Python AST tree.
This just generates the CIX content _inside_ of the <file/> tag. The
prefix and suffix have to be added separately.
Note: All node text elements are encoded in UTF-8 format by the Python AST
tree processing, no matter what encoding is used for the file's
original content. The generated CIX XML will also be UTF-8 encoded.
"""
DEBUG = 0
def __init__(self, moduleName=None, content=None, lang="Python"):
self.lang = lang
if self.DEBUG is None:
self.DEBUG = log.isEnabledFor(logging.DEBUG)
self.moduleName = moduleName
if content:
self.lines = content.splitlines(0)
else:
self.lines = None
# Symbol Tables (dicts) are built up for each scope. The namespace
# stack to the global-level is maintain in self.nsstack.
self.st = { # the main module symbol table
# <scope name>: <namespace dict>
}
self.nsstack = []
self.cix = et.TreeBuilder()
def emit_start(self, s, attrs={}):
self.cix.start(s, _et_attrs(attrs))
def emit_data(self, data):
self.cix.data(_et_data(data))
def emit_end(self, s):
self.cix.end(s)
def emit_tag(self, s, attrs={}, data=None):
self.emit_start(s, _et_attrs(attrs))
if data is not None:
self.emit_data(data)
self.emit_end(s)
def cix_module(self, node):
"""Emit CIX for the given module namespace."""
# log.debug("cix_module(%s, level=%r)", '.'.join(node["nspath"]),
# level)
assert len(node["types"]) == 1 and "module" in node["types"]
attrs = _node_attrs(node, lang=self.lang, ilk="blob")
module
' '.join(filter_options)))
# if address-family and address-book-type have not been set then default
if not filter_type:
filter_type = 'mixed'
term_dup_check = set()
new_terms = []
self._FixLargePolices(terms, filter_type)
for term in terms:
if set(['established', 'tcp-established']).intersection(term.option):
logging.debug('Skipping established term %s ' +
'because SRX is stateful.', term.name)
continue
term.name = self.FixTermLength(term.name)
if term.name in term_dup_check:
raise SRXDuplicateTermError('You have a duplicate term: %s'
% term.name)
term_dup_check.add(term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info('INFO: Term %s in policy %s>%s expires '
'in less than two weeks.', term.name, self.from_zone,
self.to_zone)
if term.expiration <= current_date:
logging.warn('WARNING: Term %s in policy %s>%s is expired.',
term.name, self.from_zone, self.to_zone)
continue
# SRX address books leverage network token names for IPs.
# When excluding addresses, we lose those distinct names so we need
# to create a new unique name based off the term name before excluding.
if term.source_address_exclude:
# If we have a naked source_exclude, we need something to exclude from
if not term.source_address:
term.source_address = [nacaddr.IP('0.0.0.0/0',
term.name.upper(),
term.name.upper())]
# Use the term name as the token & parent_token
new_src_parent_token = term.name.upper() + '_SRC_EXCLUDE'
new_src_token = new_src_parent_token
for i in term.source_address_exclude:
term.source_address = nacaddr.RemoveAddressFromList(
term.source_address, i)
for j in term.source_address:
j.token = new_src_token
j.parent_token = new_src_parent_token
if term.destination_address_exclude:
if not term.destination_address:
term.destination_address = [nacaddr.IP('0.0.0.0/0',
term.name.upper(),
term.name.upper())]
new_dst_parent_token = term.name.upper() + '_DST_EXCLUDE'
new_dst_token = new_dst_parent_token
for i in term.destination_address_exclude:
term.destination_address = nacaddr.RemoveAddressFromList(
term.destination_address, i)
for j in term.destination_address:
j.token = new_dst_token
j.parent_token = new_dst_parent_token
# SRX policies are controlled by addresses that are used within, so
# policy can be at the same time inet and inet6.
if self._GLOBAL_ADDR_BOOK in self.addr_book_type:
for zone in self.addressbook:
for unused_name, ips in sorted(
six.iteritems(self.addressbook[zone])):
ips = [i for i in ips]
if term.source_address == ips:
term.source_address = ips
if term.destination_address == ips:
term.destination_address = ips
for addr in term.source_address:
if addr.version in self._AF_MAP[filter_type]:
self._BuildAddressBook(self.from_zone, addr)
for addr in term.destination_address:
if addr.version in self._AF_MAP[filter_type]:
self._BuildAddressBook(self.to_zone, addr)
new_term = Term(term, self.from_zone, self.to_zone, self.expresspath,
verbose)
new_terms.append(new_term)
# Because SRX terms can contain both inet and inet6 addresses, we have to
# be able to recover the proper AF for the ICMP type we need.
# If protocol is empty or we cannot map to inet or inet6 we insert bogus
# af_type name which will cause new_term.NormalizeIcmpTypes to fail.
if not term.protocol:
icmp_af_type = 'unknown_af_icmp'
else:
icmp_af_type = self._AF_ICMP_MAP.get(
term.protocol[0], 'unknown_af_icmp')
tmp_icmptype = new_term.NormalizeIcmpTypes(
term.icmp_type, term.protocol, icmp_af_type)
# NormalizeIcmpTypes returns [''] for empty, convert to [] for eval
normalized_icmptype = tmp_icmptype if tmp_icmptype != [''] else []
# rewrites the protocol icmpv6 to icmp6
if 'icmpv6' in term.protocol:
protocol = list(term.protocol)
protocol[protocol.index('icmpv6')] = 'icmp6'
else:
protocol = term.protocol
new_application_set = {'sport': self._BuildPort(term.source_port),
'dport': self._BuildPort(term.destination_port),
'protocol': protocol,
'icmp-type': normalized_icmptype,
'timeout': term.timeout}
for application_set in self.applications:
if all(item in list(application_set.items()) for item in
new_application_set.items()):
new_application_set = ''
term.replacement_application_name = application_set['name']
break
if (term.name == application_set['name'] and
new_application_set != application_set):
raise ConflictingApplicationSetsError(
'Application set %s has a conflicting entry' % term.name)
if new_application_set:
new_application_set['name'] = term.name
self.applications.append(new_application_set)
self.srx_policies.append((header, new_terms, filter_options))
def _FixLargePolices(self, terms, address_family):
"""Loops over all terms, finding those that exceed the SRX policy limit.
Args:
terms: List of terms from a policy.
address_family: Tuple containing address family versions.
See the following URL for more information
http://www.juniper.net/techpubs/en_US/junos12.1x44/topics/reference/
general/address-address-sets-limitations.html
"""
def Chunks(l):
"""Splits a list of IP addresses into smaller lists based on byte size."""
return_list = [[]]
counter = 0
index = 0
for i in l:
# Size is split in half due to the max size being a sum of src and dst.
if counter > (self._ADDRESS_LENGTH_LIMIT/2):
counter = 0
index += 1
return_list.append([])
if i.version == 6:
counter += self._IPV6_SIZE
else:
counter += 1
return_list[index].append(i)
return return_list
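# Illustrative walk-through of Chunks (the limit values are hypothetical):
# with _ADDRESS_LENGTH_LIMIT == 8 and _IPV6_SIZE == 4, the list
# [v4, v4, v6, v4, v6] accumulates sizes 1, 1, 4, 1, 4; a new sub-list is
# started once the running sum exceeds 8/2, giving [[v4, v4, v6], [v4, v6]].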
expanded_terms = []
for term in terms:
if (term.AddressesByteLength(
self._AF_MAP[address_family]) > self._ADDRESS_LENGTH_LIMIT):
logging.warn('LARGE TERM ENCOUNTERED')
src_chunks = Chunks(term.source_address)
counter = 0
for chunk in src_chunks:
for ip in chunk:
ip.parent_token = 'src_' + term.name + str(counter)
counter += 1
dst_chunks = Chunks(term.destination_address)
counter = 0
for chunk in dst_chunks:
for ip in chunk:
ip.parent_token = 'dst_' + term.name + str(counter)
counter += 1
src_dst_products = itertools.product(src_chunks, dst_chunks)
counter = 0
for src_dst_list in src_dst_products:
new_term = copy.copy(term)
new_term.source_address = src_dst_list[0]
new_term.destination_address = src_dst_list[1]
new_term.name = new_term.name + '_' + str(counter)
expanded_terms.append(new_term)
counter += 1
else:
expanded_terms.append(term)
if expanded_terms:
del terms[:]
terms.extend(expanded_terms)
def _BuildAddressBook(self, zone, address):
"""Create the address book configuration entries.
Args:
zone: the zone these objects will reside in
address: a naming library address object
"""
if zone not in self.addressbook:
self.addressbook[zone] = collections.defaultdict(list)
name = address.parent_token
for ip in self.addressbook[zone][name]:
if ip.supernet_of(address):
return
if address.supernet_of(ip):
for index, ip_addr in enumerate(self.addressbook[zone][name]):
if ip_addr == ip:
self.addressbook[zone][name][index] = address
return
self.addressbook[zone][name].append(address)
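# Illustrative sketch of the supernet handling above (addresses hypothetical):
# if 10.0.0.0/24 is already stored under this name, adding 10.0.0.0/25 is a
# no-op, while adding 10.0.0.0/16 replaces the stored /24 entry.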
def _SortAddressBookNumCheck(self, item):
"""Used to give a natural order to the list of acl entries.
Args:
item: string of the address book entry name
Returns:
a (name prefix, number) tuple used for natural sort ordering
"""
item_list = item.split('_')
num = item_list.pop(-1)
if isinstance(item_list[-1], int):
set_number = item_list.pop(-1)
num = int(set_number) * 1000 + int(num)
alpha = '_'.join(item_list)
if num:
return (alpha, int(num))
return (alpha, 0)
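# Worked sketch of the sort key above (the name is illustrative): for
# 'NET_CORP_12', split('_') yields ['NET', 'CORP', '12']; '12' is popped as
# num, and since the remaining last element is a string (never an int after
# split), the key returned is ('NET_CORP', 12).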
def _BuildPort(self, ports):
"""Transform specified ports into list and ranges.
Args:
ports: a policy term's list of ports
Returns:
port_list: list of ports and port ranges
"""
port_list = []
for i in ports:
if i[0] == i[1]:
port_list.append(str(i[0]))
else:
port_list.append('%s-%s' % (str(i[0]), str(i[1])))
return port_list
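# Worked sketch of _BuildPort (ports are illustrative): a term with ports
# [(80, 80), (8080, 8090)] yields ['80', '8080-8090'].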
def _GenerateAddressBook(self):
"""Creates address book."""
target = IndentList(self.INDENT)
# create address books if address-book-type set to global
if self._GLOBAL_ADDR_BOOK in self.addr_book_type:
global_address_book = collections.defaultdict(list)
target.IndentAppend(1, 'replace: address-book {')
target.IndentAppend(2, 'global {')
for zone in self.addressbook:
for group in self.addressbook[zone]:
for address in self.addressbook[zone][group]:
global_address_book[group].append(address)
names = sorted(global_address_book.keys())
for name in names:
counter = 0
ips = nacaddr.SortAddrList(global_address_book[name])
ips = nacaddr.CollapseAddrList(ips)
global_address_book[name] = ips
for ip in ips:
target.IndentAppend(4, 'address ' + name + '_' + str(counter) + ' ' +
str(ip) + ';')
counter += 1
for group in sorted(global_address_book.keys()):
target.IndentAppend(4, 'address-set ' + group + ' {')
counter = 0
for unused_addr in global_address_book[group]:
target.IndentAppend(5, 'address ' + group + '_' + str(counter) + ';')
counter += 1
target.IndentAppend(4, '}')
target.IndentAppend(2, '}')
target.IndentAppend(1, '}')
else:
target.IndentAppend(1, 'zones {')
for zone in self.addressbook:
target.IndentAppend(2, 'security-zone ' + zone + ' {')
target.IndentAppend(3, 'replace: address-book {')
# building individual addresses
groups = sorted(self.addressbook[zone])
for group in groups:
ips = nacaddr.SortAddrList(self.addressbook[zone][group])
ips = nacaddr.CollapseAddrList(ips)
self.addressbook[zone][group] = ips
count = 0
for address in self.addressbook[zone][group]:
target.IndentAppend(4, 'address ' + group + '_' + str(count) +
' ' + str(address) + ';')
count += 1
# building address-sets
for group in groups:
target.IndentAppend(4, 'address-set ' + group + ' {')
count = 0
for address in self.addressbook[zone][group]:
target.IndentAppend(5, 'address ' + group + '_' + str(count) + ';')
count += 1
target.IndentAppend(4, '}')
target.IndentAppend(3, '}')
target.IndentAppend(2, '}')
target.IndentAppend(1, '}')
return target
def _GenerateApplications(self):
target = IndentList(self.INDENT)
apps_set_list = IndentList(self.INDENT)
target.append('replace: applications {')
done_apps = []
for app in sorted(self.applications, key=lambda x: x['name']):
app_list = IndentList(self.INDENT)
if app in done_apps:
continue
if app['protocol'] or app['sport'] or app['dport'] or app['icmp-type']:
# generate ICMP statements
if app['icmp-type']:
target.IndentAppend(1, 'application ' + app['name'] + '-app {')
if app['timeout']:
timeout = app['timeout']
else:
timeout = 60
for i, code in enumerate(app['icmp-type']):
for proto in app['protocol']:
target.IndentAppend(
2,
'term t%d protocol %s %s-type %s inactivity-timeout %d;' %
(i + 1, proto, proto, str(code), int(timeout))
)
target.IndentAppend(1, '}')
# generate non-ICMP statements
else:
i = 1
apps_set_list.IndentAppend(1, 'application-set ' +
app['name'] + '-app {')
for proto in app['protocol'] or ['']:
for sport in app['sport'] or ['']:
for dport in app['dport'] or ['']:
chunks = []
if proto:
# SRX does not like proto vrrp
if proto == 'vrrp':
proto = '112'
chunks.append(' protocol %s' % proto)
if sport:
chunks.append(' source-port %s' % sport)
if dport:
chunks.append(' destination-port %s' % dport)
if app['timeout']:
chunks.append(' inactivity-timeout %d' % int(app['timeout']))
if chunks:
apps_set_list.IndentAppend(
2, 'application ' + app['name'] +
import os
import shutil
import sys
import time
import requests
import qdarkstyle
from PyQt5.QtCore import QPersistentModelIndex, Qt, QThread, QUrl, pyqtSignal
from PyQt5.QtGui import QDesktopServices, QImage, QPixmap
from PyQt5.QtWidgets import (
QAbstractItemView,
QApplication,
QFileDialog,
QMainWindow,
QTableWidgetItem,
)
import utils
from ui import UiMainWindow
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
IMG_PATH = os.path.join(BASE_PATH, "img")
UTILS_PATH = os.path.join(BASE_PATH, "utils")
class MainPage(QMainWindow, UiMainWindow):
"""Main page of the application."""
def __init__(self, parent=None):
super(MainPage, self).__init__(parent)
self.setupUi(self)
# Hide the fetching, reattempt, error label, and revert button
self.url_fetching_data_label.hide()
self.url_error_label.hide()
self.url_reattempt_load_label.hide()
self.url_poor_connection.hide()
self.revert_annotate.hide()
# Activate hyperlink on upper right
self.credit_url.linkActivated.connect(self.set_credit_url)
self.credit_url.setText('<a href="https://github.com/irahorecka/YouTube2Mp3">source code</a>')
# Connect the delete video button with the remove_selected_items fn.
self.remove_from_table_button.clicked.connect(self.remove_selected_items)
# Buttons connection with the appropriate functions
self.save_as_mp3_box.setChecked(True)
self.save_as_mp3_box.clicked.connect(self.set_check_mp3_box)
self.save_as_mp4_box.clicked.connect(self.set_check_mp4_box)
self.url_load_button.clicked.connect(self.url_loading_button_click)
self.url_input.returnPressed.connect(self.url_load_button.click)
self.url_input.mousePressEvent = lambda _: self.url_input.selectAll()
self.download_button.clicked.connect(self.download_button_click)
self.download_path.clicked.connect(self.get_download_path)
self.itunes_annotate.clicked.connect(self.itunes_annotate_click)
self.revert_annotate.clicked.connect(self.default_annotate_table)
self.video_table.cellPressed.connect(self.load_table_content)
# edit table cell with single click
self.video_table.setEditTriggers(QAbstractItemView.CurrentChanged)
# Input changes in video property text box to appropriate cell.
self.change_video_info_input.clicked.connect(self.replace_single_cell)
self.change_video_info_input_all.clicked.connect(self.replace_all_cells)
self.video_info_input.returnPressed.connect(self.change_video_info_input.click)
# Exit application
self.cancel_button.clicked.connect(self.close)
# Get download directory
self.download_dir = BASE_PATH
self.download_folder_select.setText(self._get_parent_current_dir(self.download_dir)) # get directory tail
def url_loading_button_click(self):
"""Reads input data from self.url_input and creates an instance
of the UrlLoading thread."""
# declare videos_dict upon loading url
self.videos_dict = {}
playlist_url = self._get_cell_text(self.url_input)
self._reflect_url_loading_status()
self.url_fetching_data_label.show()
self.url_load = UrlLoading(playlist_url)
self.url_load.loadStatus.connect(self._reflect_url_loading_status)
self.url_load.countChanged.connect(self._url_loading_finished)
self.url_load.start()
def _reflect_url_loading_status(self, status=None):
"""Reflect YouTube url loading status. If no status is provided,
hide all error label and keep table content."""
self.video_table.clearContents() # clear table content when loading
self.video_info_input.setText("") # clear video info input cell
self._display_artwork(None) # clear artwork display to default image
self.url_poor_connection.hide()
self.url_fetching_data_label.hide()
self.url_reattempt_load_label.hide()
self.url_error_label.hide()
if status == "success":
return
# if status obj is not null, but not "success"
if status:
if status == "invalid url":
self.url_error_label.show()
elif status == "reattempt":
self.url_reattempt_load_label.show()
elif status == "server error":
self.url_poor_connection.show()
self.revert_annotate.hide()
self.itunes_annotate.show() # refresh "Ask butler" button
def _url_loading_finished(self, videos_dict, is_executed):
"""Retrieves data from thread when complete, updates GUI table."""
# First entry of self.videos_dict in MainPage class
self.videos_dict = videos_dict
self.video_table.clearContents() # clear table for new loaded content
if is_executed:
self.default_annotate_table() # set table content
else:
self.url_error_label.show()
def itunes_annotate_click(self):
"""Load iTunes annotation info on different thread."""
self.video_info_input.setText("")
# i.e. clicked annotate button with empty table
if not self._assert_videos_dict(self.video_info_input, "Could not get information."):
return
self.annotate = iTunesLoading(self.videos_dict)
self.annotate.loadFinished.connect(self._itunes_annotate_finished)
self.annotate.start()
def _itunes_annotate_finished(self, itunes_query_tuple, query_status):
"""Populate GUI table with iTunes meta information once
iTunes annotation query complete."""
for row_index, ITUNES_META_JSON in itunes_query_tuple:
self._itunes_annotate_table(row_index, ITUNES_META_JSON)
if not query_status:
# no iTunes metadata available or poor connection
self.video_info_input.setText("Could not get information.")
else:
# show revert button if iTunes annotation loaded successfully
self.itunes_annotate.hide()
self.revert_annotate.show()
def _itunes_annotate_table(self, row_index, ITUNES_META_JSON):
"""Provide iTunes annotation guess based on video title"""
try:
song_name, song_index = ITUNES_META_JSON["track_name"], 0
album_name, album_index = ITUNES_META_JSON["album_name"], 1
artist_name, artist_index = ITUNES_META_JSON["artist_name"], 2
genre_name, genre_index = ITUNES_META_JSON["primary_genre_name"], 3
artwork_name, artwork_index = ITUNES_META_JSON["artwork_url_fullres"], 4
except TypeError: # ITUNES_META_JSON was never called.
# get video title
song_name, song_index = (
self._get_cell_text(self.video_table.item(row_index, 0)),
0,
)
if not song_name:
song_name = "Unknown"
album_name, album_index = "Unknown", 1
artist_name, artist_index = "Unknown", 2
genre_name, genre_index = "Unknown", 3
artwork_name, artwork_index = "Unknown", 4
self.video_table.setItem(row_index, song_index, QTableWidgetItem(song_name))
self.video_table.setItem(row_index, album_index, QTableWidgetItem(album_name))
self.video_table.setItem(row_index, artist_index, QTableWidgetItem(artist_name))
self.video_table.setItem(row_index, genre_index, QTableWidgetItem(genre_name))
self.video_table.setItem(row_index, artwork_index, QTableWidgetItem(artwork_name))
def default_annotate_table(self):
"""Default table annotation to video title in song columns"""
if not self.videos_dict: # i.e. an invalid playlist input
self.video_table.clearContents()
return
self.video_info_input.setText("")
for index, key in enumerate(self.videos_dict):
self.video_table.setItem(index, 0, QTableWidgetItem(key)) # part of QWidget
self.video_table.setItem(index, 1, QTableWidgetItem("Unknown"))
self.video_table.setItem(index, 2, QTableWidgetItem("Unknown"))
self.video_table.setItem(index, 3, QTableWidgetItem("Unknown"))
self.video_table.setItem(index, 4, QTableWidgetItem("Unknown"))
self.revert_annotate.hide()
self.itunes_annotate.show()
def get_download_path(self):
"""Fetch download file path"""
self.download_dir = QFileDialog.getExistingDirectory(self, "Open folder", BASE_PATH)
if not self.download_dir:
self.download_dir = BASE_PATH
self.download_folder_select.setText(self._get_parent_current_dir(self.download_dir))
def download_button_click(self):
""" Executes when the button is clicked """
# assert self.videos_dict exists
if not self._assert_videos_dict(self.download_status, "No video to download."):
return
playlist_properties = self._get_playlist_properties()
self.download_button.setEnabled(False)
self.download_status.setText("Downloading...")
self.down = DownloadingVideos(
self.videos_dict, self.download_dir, playlist_properties, self.save_as_mp4_box.isChecked(),
)
self.down.downloadCount.connect(self._download_finished)
self.down.start()
def _get_playlist_properties(self):
"""Get video information from self.video_table to reflect to
downloaded MP3 metadata."""
playlist_properties = []
for row_index, _ in enumerate(self.videos_dict.items()):
song_properties = {}
song_properties["song"] = self._get_cell_text(self.video_table.item(row_index, 0)).replace(
"/", "-"
) # will be filename -- change illegal char to legal - make func
song_properties["album"] = self._get_cell_text(self.video_table.item(row_index, 1))
song_properties["artist"] = self._get_cell_text(self.video_table.item(row_index, 2))
song_properties["genre"] = self._get_cell_text(self.video_table.item(row_index, 3))
song_properties["artwork"] = self._get_cell_text(self.video_table.item(row_index, 4))
playlist_properties.append(song_properties) # this assumes that dict will be ordered like list
return playlist_properties
def _download_finished(self, download_time):
"""Emit changes to MainPage once download is complete."""
_min = int(download_time // 60)
sec = int(download_time % 60)
self.download_status.setText(f"Download time: {_min} min. {sec} sec.")
self.download_button.setEnabled(True)
def load_table_content(self, row, column):
"""Display selected cell content into self.video_info_input
and display selected artwork on Qpixmap widget."""
# display video info in self.video_info_input
self._display_cell_content(row, column)
# load and display video artwork
artwork_file = self._get_cell_text(self.video_table.item(row, 4))
self.loaded_artwork = ArtworkLoading(artwork_file) # if populated, `artwork_file` is a url
self.loaded_artwork.loadFinished.connect(self._display_artwork)
self.loaded_artwork.start()
def _display_cell_content(self, row, column):
"""Display selected cell content in self.video_info_input"""
self.video_info_input.setText(self._get_cell_text(self.video_table.item(row, column)))
def _display_artwork(self, artwork_content):
"""Display selected artwork on Qpixmap widget."""
if not artwork_content:
qt_artwork_content = os.path.join(IMG_PATH, "default_artwork.png")
self.album_artwork.setPixmap(QPixmap(qt_artwork_content))
else:
qt_artwork_content = QImage()
qt_artwork_content.loadFromData(artwork_content)
self.album_artwork.setPixmap(QPixmap.fromImage(qt_artwork_content))
self.album_artwork.setScaledContents(True)
self.album_artwork.setAlignment(Qt.AlignCenter)
def remove_selected_items(self):
"""Removes the selected items from self.videos_table and self.videos_dict.
Table widget updates -- multiple row deletion capable."""
video_list = []
if self._assert_videos_dict():
video_list = [key_value for key_value in self.videos_dict.items()]
row_index_list = []
for model_index in self.video_table.selectionModel().selectedRows():
row = model_index.row()
row_index = QPersistentModelIndex(model_index)
row_index_list.append(row_index)
try:
current_key = video_list[row][0]
del self.videos_dict[current_key] # remove row item from self.videos_dict
except (IndexError, KeyError): # no item/key in video_list or videos_dict
pass
for index in row_index_list:
self.video_table.removeRow(index.row())
def replace_single_cell(self):
"""Change selected cell value to value in self.video_info_input."""
row = self.video_table.currentIndex().row()
column = self.video_table.currentIndex().column()
video_info_input_value = self._get_cell_text(self.video_info_input)
self._replace_cell_item(row, column, video_info_input_value)
def replace_all_cells(self):
"""Change all rows, except songs, in table to match selected cell row."""
# get row of cells to replace all others
replacement_row_index = self.video_table.currentIndex().row()
for row_index in range(self.video_table.rowCount()):
# omit first column (i.e. song)
for col_index in range(1, self.video_table.columnCount()):
# get current cell item to be deleted and cell item to replace
current_value = self._get_cell_text(self.video_table.item(row_index, col_index))
replacement_value = self._get_cell_text(self.video_table.item(replacement_row_index, col_index))
if current_value and replacement_value:
self._replace_cell_item(row_index, col_index, replacement_value)
def _replace_cell_item(self, row, column, value):
"""Replace cell with value at row / column index."""
self.video_table.setItem(row, column, QTableWidgetItem(value))
def set_check_mp3_box(self):
"""if self.save_as_mp3_box is checked, uncheck
self.save_as_mp4_box."""
self.save_as_mp3_box.setChecked(True)
self.save_as_mp4_box.setChecked(False)
def set_check_mp4_box(self):
"""if self.save_as_mp4_box is checked, uncheck
self.save_as_mp3_box."""
self.save_as_mp3_box.setChecked(False)
self.save_as_mp4_box.setChecked(True)
def _assert_videos_dict(self, qline_edit_obj=None, text=""):
"""Assert existence of `self.videos_dict` in current program state of caller.
If not, display `text` to `qline_edit_obj` if `qline_edit_obj` provided."""
try:
assert self.videos_dict
except (AttributeError, AssertionError):
if qline_edit_obj:
qline_edit_obj.setText(text)
return False
return True
@staticmethod
def set_credit_url(url_str):
"""Set source code url on upper right of table."""
QDesktopServices.openUrl(QUrl(url_str))
@staticmethod
def _get_cell_text(cell_item):
"""Get text of cell value, if empty return empty str."""
try:
cell_item = cell_item.text()
return cell_item
except AttributeError:
cell_item = ""
return cell_item
@staticmethod
def _get_parent_current_dir(current_path):
"""Get current and parent directory as str."""
parent_dir, current_dir = os.path.split(current_path)
parent_dir = os.path.split(parent_dir)[1] # get tail of parent_dir
return f"../{parent_dir}/{current_dir}"
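# Worked sketch (path is illustrative):
# _get_parent_current_dir('/home/user/music/downloads') -> '../music/downloads'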
class UrlLoading(QThread):
"""Load video data from YouTube url."""
countChanged = pyqtSignal(dict, bool)
loadStatus = pyqtSignal(str)
def __init__(self, playlist_link, parent=None):
QThread.__init__(self, parent)
self.playlist_link = playlist_link
self.reattempt_count = 0
self.override_error = False
def run(self):
"""Main function, gets all the playlist videos data, emits the info dict"""
# allow 5 reattempts if error in fetching YouTube videos
# else just get loaded videos by overriding error handling
if self.reattempt_count > 5:
self.override_error = True
try:
videos_dict = utils.get_youtube_content(self.playlist_link, self.override_error)
if not videos_dict:
# if empty videos_dict returns, throw invalid url warning.
self.loadStatus.emit("invalid url")
else:
self.loadStatus.emit("success")
self.countChanged.emit(videos_dict, True)
except RuntimeError as error: # handle error from video load fail
error_message = str(error)
if any(message in error_message for message in ["not a valid URL", "Unsupported URL", "list"]):
self.loadStatus.emit("invalid url")
elif "nodename nor servname provided" in error_message:
self.loadStatus.emit("server error")
else:
self.loadStatus.emit("reattempt")
self.reattempt_count += 1
self.run()
class iTunesLoading(QThread):
"""Get video data properties from iTunes."""
loadFinished = pyqtSignal(tuple, bool)
def __init__(self, videos_dict, parent=None):
QThread.__init__(self, parent)
self.videos_dict = videos_dict
def run(self):
"""Multithread query to iTunes - return tuple."""
try:
query_iter = ((row_index, key_value) for row_index, key_value in enumerate(self.videos_dict.items()))
except AttributeError: # i.e. no content in table -- exit early
return
itunes_query = utils.map_threads(utils.thread_query_itunes, query_iter)
itunes_query_tuple = tuple(itunes_query)
if not self.check_itunes_nonetype(itunes_query_tuple):
query_status
"""
HTTP Exception
--------------
This module processes Python exceptions that relate to HTTP exceptions
by defining a set of exceptions, all subclasses of HTTPException.
Each exception, in addition to being a Python exception that can be
raised and caught, is also a WSGI application and ``webob.Response``
object.
This module defines exceptions according to RFC 2068 [1]_ : codes with
100-300 are not really errors; 400's are client errors, and 500's are
server errors. According to the WSGI specification [2]_ , the application
can call ``start_response`` more than once only under two conditions:
(a) the response has not yet been sent, or (b) if the second and
subsequent invocations of ``start_response`` have a valid ``exc_info``
argument obtained from ``sys.exc_info()``. The WSGI specification then
requires the server or gateway to handle the case where content has been
sent and then an exception was encountered.
Exception
HTTPException
HTTPOk
* 200 - HTTPOk
* 201 - HTTPCreated
* 202 - HTTPAccepted
* 203 - HTTPNonAuthoritativeInformation
* 204 - HTTPNoContent
* 205 - HTTPResetContent
* 206 - HTTPPartialContent
HTTPRedirection
* 300 - HTTPMultipleChoices
* 301 - HTTPMovedPermanently
* 302 - HTTPFound
* 303 - HTTPSeeOther
* 304 - HTTPNotModified
* 305 - HTTPUseProxy
* 306 - Unused (not implemented, obviously)
* 307 - HTTPTemporaryRedirect
HTTPError
HTTPClientError
* 400 - HTTPBadRequest
* 401 - HTTPUnauthorized
* 402 - HTTPPaymentRequired
* 403 - HTTPForbidden
* 404 - HTTPNotFound
* 405 - HTTPMethodNotAllowed
* 406 - HTTPNotAcceptable
* 407 - HTTPProxyAuthenticationRequired
* 408 - HTTPRequestTimeout
* 409 - HTTPConflict
* 410 - HTTPGone
* 411 - HTTPLengthRequired
* 412 - HTTPPreconditionFailed
* 413 - HTTPRequestEntityTooLarge
* 414 - HTTPRequestURITooLong
* 415 - HTTPUnsupportedMediaType
* 416 - HTTPRequestRangeNotSatisfiable
* 417 - HTTPExpectationFailed
HTTPServerError
* 500 - HTTPInternalServerError
* 501 - HTTPNotImplemented
* 502 - HTTPBadGateway
* 503 - HTTPServiceUnavailable
* 504 - HTTPGatewayTimeout
* 505 - HTTPVersionNotSupported
Subclass usage notes:
---------------------
The HTTPException class is complicated by 4 factors:
1. The content given to the exception may either be plain-text or
as html-text.
2. The template may want to have string-substitutions taken from
the current ``environ`` or values from incoming headers. This
is especially troublesome due to case sensitivity.
3. The final output may either be text/plain or text/html
mime-type as requested by the client application.
4. Each exception has a default explanation, but those who
raise exceptions may want to provide additional detail.
Subclass attributes and call parameters are designed to provide an easier path
through the complications.
Attributes:
``code``
the HTTP status code for the exception
``title``
remainder of the status line (stuff after the code)
``explanation``
a plain-text explanation of the error message that is
not subject to environment or header substitutions;
it is accessible in the template via %(explanation)s
``detail``
a plain-text message customization that is not subject
to environment or header substitutions; accessible in
the template via %(detail)s
``body_template``
a content fragment (in HTML) used for environment and
header substitution; the default template includes both
the explanation and further detail provided in the
message
Parameters:
``detail``
a plain-text override of the default ``detail``
``headers``
a list of (k,v) header pairs
``comment``
a plain-text additional information which is
usually stripped/hidden for end-users
``body_template``
a string.Template object containing a content fragment in HTML
that frames the explanation and further detail
To override the template (which is HTML content) or the plain-text
explanation, one must subclass the given exception; or customize it
after it has been created. This particular breakdown of a message
into explanation, detail and template allows both the creation of
plain-text and html messages for various clients as well as
error-free substitution of environment variables and headers.
The subclasses of :class:`~_HTTPMove`
(:class:`~HTTPMultipleChoices`, :class:`~HTTPMovedPermanently`,
:class:`~HTTPFound`, :class:`~HTTPSeeOther`, :class:`~HTTPUseProxy` and
:class:`~HTTPTemporaryRedirect`) are redirections that require a ``Location``
field. Reflecting this, these subclasses have two additional keyword arguments:
``location`` and ``add_slash``.
Parameters:
``location``
to set the location immediately
``add_slash``
set to True to redirect to the same URL as the request, except with a
``/`` appended
Relative URLs in the location will be resolved to absolute.
References:
.. [1] http://www.python.org/peps/pep-0333.html#error-handling
.. [2] http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.5
"""
import re
import urlparse
import sys
import types
from string import Template
from webob import Response, Request, html_escape
newstyle_exceptions = issubclass(Exception, object)
tag_re = re.compile(r'<.*?>', re.S)
br_re = re.compile(r'<br.*?>', re.I|re.S)
comment_re = re.compile(r'<!--|-->')
def no_escape(value):
if value is None:
return ''
if not isinstance(value, basestring):
if hasattr(value, '__unicode__'):
value = unicode(value)
else:
value = str(value)
return value
def strip_tags(value):
value = value.replace('\n', ' ')
value = value.replace('\r', '')
value = br_re.sub('\n', value)
value = comment_re.sub('', value)
value = tag_re.sub('', value)
return value
class HTTPException(Exception):
"""
Exception used on pre-Python-2.5, where new-style classes cannot be used as
an exception.
"""
def __init__(self, message, wsgi_response):
Exception.__init__(self, message)
self.__dict__['wsgi_response'] = wsgi_response
def __call__(self, environ, start_response):
return self.wsgi_response(environ, start_response)
def exception(self):
return self
exception = property(exception)
# for old style exceptions
if not newstyle_exceptions: #pragma NO COVERAGE
def __getattr__(self, attr):
if not attr.startswith('_'):
return getattr(self.wsgi_response, attr)
else:
raise AttributeError(attr)
def __setattr__(self, attr, value):
if attr.startswith('_') or attr in ('args',):
self.__dict__[attr] = value
else:
setattr(self.wsgi_response, attr, value)
class WSGIHTTPException(Response, HTTPException):
## You should set in subclasses:
# code = 200
# title = 'OK'
# explanation = 'why this happens'
# body_template_obj = Template('response template')
code = None
title = None
explanation = ''
body_template_obj = Template('''\
${explanation}<br /><br />
${detail}
${html_comment}
''')
plain_template_obj = Template('''\
${status}
${body}''')
html_template_obj = Template('''\
<html>
<head>
<title>${status}</title>
</head>
<body>
<h1>${status}</h1>
${body}
</body>
</html>''')
## Set this to True for responses that should have no request body
empty_body = False
def __init__(self, detail=None, headers=None, comment=None,
body_template=None, **kw):
Response.__init__(self,
status='%s %s' % (self.code, self.title),
**kw)
Exception.__init__(self, detail)
if headers:
self.headers.extend(headers)
self.detail = detail
self.comment = comment
if body_template is not None:
self.body_template = body_template
self.body_template_obj = Template(body_template)
if self.empty_body:
del self.content_type
del self.content_length
def __str__(self):
return self.detail or self.explanation
def _make_body(self, environ, escape):
args = {
'explanation': escape(self.explanation),
'detail': escape(self.detail or ''),
'comment': escape(self.comment or ''),
}
if self.comment:
args['html_comment'] = '<!-- %s -->' % escape(self.comment)
else:
args['html_comment'] = ''
body_tmpl = self.body_template_obj
if WSGIHTTPException.body_template_obj is not self.body_template_obj:
# Custom template; add headers to args
for k, v in environ.items():
args[k] = escape(v)
for k, v in self.headers.items():
args[k.lower()] = escape(v)
t_obj = self.body_template_obj
return t_obj.substitute(args)
def plain_body(self, environ):
body = self._make_body(environ, no_escape)
body = strip_tags(body)
return self.plain_template_obj.substitute(status=self.status,
title=self.title,
body=body)
def html_body(self, environ):
body = self._make_body(environ, html_escape)
return self.html_template_obj.substitute(status=self.status,
body=body)
def generate_response(self, environ, start_response):
if self.content_length is not None:
del self.content_length
headerlist = list(self.headerlist)
accept = environ.get('HTTP_ACCEPT', '')
if accept and 'html' in accept or '*/*' in accept:
content_type = 'text/html'
body = self.html_body(environ)
else:
content_type = 'text/plain'
body = self.plain_body(environ)
extra_kw = {}
if isinstance(body, unicode):
extra_kw.update(charset='utf-8')
resp = Response(body,
status=self.status,
headerlist=headerlist,
content_type=content_type,
**extra_kw
)
resp.content_type = content_type
return resp(environ, start_response)
def __call__(self, environ, start_response):
# FIXME: ensure HEAD and GET response headers are identical
if environ['REQUEST_METHOD'] == 'HEAD':
start_response(self.status, self.headerlist)
return []
if not self.body and not self.empty_body:
return self.generate_response(environ, start_response)
return Response.__call__(self, environ, start_response)
def wsgi_response(self):
return self
wsgi_response = property(wsgi_response)
def exception(self):
if newstyle_exceptions:
return self
else:
return HTTPException(self.detail, self)
exception = property(exception)
class HTTPError(WSGIHTTPException):
"""
base class for status codes in the 400's and 500's
This is an exception which indicates that an error has occurred,
and that any work in progress should not be committed. These are
typically results in the 400's and 500's.
"""
class HTTPRedirection(WSGIHTTPException):
"""
base class for 300's status code (redirections)
This is an abstract base class for 3xx redirection. It indicates
that further action needs to be taken by the user agent in order
to fulfill the request. It does not necessarily signal an error
condition.
"""
class HTTPOk(WSGIHTTPException):
"""
Base class for the 200's status code (successful responses)
code: 200, title: OK
"""
code = 200
title = 'OK'
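# Minimal usage sketch (illustrative; not part of the original module): a
# WSGIHTTPException subclass is simultaneously an exception, a Response and a
# WSGI application, so it can be raised, returned or called directly.
def _demo_ok_app(environ, start_response):
    """Tiny WSGI app that always answers 200 OK via HTTPOk."""
    return HTTPOk(detail='everything is fine')(environ, start_response)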
############################################################
## 2xx success
############################################################
class HTTPCreated(HTTPOk):
"""
subclass of :class:`~HTTPOk`
This indicates that request has been fulfilled and resulted in a new
resource being created.
code: 201, title: Created
"""
code = 201
title = 'Created'
class HTTPAccepted(HTTPOk):
"""
subclass of :class:`~HTTPOk`
This indicates that the request has been accepted for processing, but the
processing has not been completed.
code: 202, title: Accepted
"""
| |
distance to any main track. If overlapping, they are relegated to the end of the list.
The closest unassigned track is then assigned. If all but 1 of the main tracks overlap with the gallery track, it is assigned to the non-overlapping one.
If there are M < N main tracks that don't overlap with the gallery track, only these are compared, using spatio-temporal distances and re-id.
If the gallery track overlaps with all main tracks, then the internal spatio-temporal distance is used, as well as the amount of overlap, how many frames have concurrent detections, and re-id.
Input:
galleryTracks: List of IDs of gallery tracks
mainTracks: List of IDs of main tracks
Output:
paths: List of lists, consisting of associated IDs
'''
assignLog = []
notAssigned = []
outputDict = {}
assignedCounter = 0
# Create dummy main tracks and assign data
self.mainTracks = {}
for mTrack in mainTracks:
outputDict[mTrack] = [mTrack]
self.addMainTrack(mTrack)
while len(galleryTracks) > 0:
## Find next gallery track
tempDistList, galleryTracks = self.rankTrackletTemporally(galleryTracks, mainTracks)
gTrack = tempDistList[0][0]
galleryTracks.pop(0)
# Per mainTrack, get distance values
tDistm = np.ones(self.n_fish) * -1 # temporal distance
sDistm = np.ones(self.n_fish) * -1 # spatial distance
iFramesm = np.ones(self.n_fish) * -1 # number of intersecting frames
iFramesRatiom = np.ones(self.n_fish) * -1 # ratio of intersecting frames and total frames in gallery track
iTDistm = np.ones(self.n_fish) * -1 # internal temporal distance
iSDistm = np.ones(self.n_fish) * -1 # internal spatial distance
validIndecies = [-1]*self.n_fish
validm = np.zeros(self.n_fish)
for idx, mTrack in enumerate(mainTracks):
distTuple = self.getDistances(gTrack, mTrack)
validm[idx] = distTuple[-1]
if validm[idx]:
tDistm[idx] = distTuple[0]
sDistm[idx] = distTuple[1]
iFramesm[idx] = distTuple[2]
iFramesRatiom[idx] = distTuple[3]
iTDistm[idx] = distTuple[4]
iSDistm[idx] = distTuple[5]
validIndecies[idx] = distTuple[6]
# Check if there are any valid tracks
if sum(validm) == 0:
## No valid tracks available
assignLog.append("Gallery {} - No valid main tracks".format(gTrack))
notAssigned.append(gTrack)
continue
assigned, mtID = self.costAssignment(tDistm, sDistm, iFramesm, iFramesRatiom, iTDistm, iSDistm)
if assigned:
print("Adding gallery track {} to main track {}".format(gTrack, mainTracks[mtID]))
assignLog.append("Gallery {} - Assigned to main track {}".format(gTrack, mainTracks[mtID]))
else:
assignLog.append("Gallery {} - Equal Prob".format(gTrack))
notAssigned.append(gTrack)
continue
self.combineTracklets(mainTracks[mtID], gTrack, validIndecies[mtID])
outputDict[mainTracks[mtID]].append(gTrack)
assignedCounter += 1
print()
## Statistics of the constructed main tracks
for mTrack in mainTracks:
mt = self.mainTracks[mTrack]
gap = []
sDist = []
for i in range(1, len(mt.frame)):
diff = mt.frame[i] - mt.frame[i-1]
t1 = self.frameTo3DCoord(mt, i)
t2 = self.frameTo3DCoord(mt, i-1)
sDist.append(np.linalg.norm(t1-t2))
if sDist[-1] == 0:
if mt.frame[i] == mt.frame[i-1]:
print(mt.frame[i], mt.frame[i-1])
if diff > 1:
gap.append(diff)
print("Main Track {}:".format(mTrack))
print("\tLength {} - Detections {} - Consistency {} - Start {} - End {}".format(mt.frame[-1]-mt.frame[0]+1, len(mt.frame), len(mt.frame)/(mt.frame[-1]-mt.frame[0]+1), mt.frame[0], mt.frame[-1]))
if len(gap) > 0:
print("\tLargest Gap {} - Mean Gap {} - Median Gap {} - Std Dev Gap {} - Max Gap {} - Min Gap {} - # Gaps {}".format(np.max(gap), np.mean(gap), np.median(gap), np.std(gap), np.max(gap), np.min(gap), len(gap)))
if len(sDist) > 0:
print("\tLargest Dist {} - Mean Dist {} - Median Dist {} - Std Dev Dist {} - Max Dist {} - Min Dist {}\n".format(np.max(sDist), np.mean(sDist), np.median(sDist), np.std(sDist), np.max(sDist), np.min(sDist)))
with open(os.path.join(self.path, "processed", "assigned.txt"), "w") as f:
f.write("\n".join(assignLog))
if notAssigned:
na_len = []
na_ids = []
for naTrack in notAssigned:
na = self.tracks[naTrack]
na_len.append(len(na.frame))
na_ids.append(naTrack)
print("Largest unassigned tracklet: ID {} - Mean {} - Median {} - Std Dev {} - Min {} - Max {} - # {}".format(na_ids[np.argmax(na_len)], np.mean(na_len), np.median(na_len), np.std(na_len), np.min(na_len), np.max(na_len), len(na_len)))
paths = []
for key in outputDict:
paths.append(outputDict[key])
return paths
### Initial main track selection
def getTemporalOverlap(self, mainTrack, galleryTrack):
'''
Calculates the temporal overlap between the provided tracks, based on their first and last frame number.
If there is no overlap, a value of -1 is returned
Output:
tuple: The first element indicates whether there is an overlap, and the second the overlap itself.
'''
if mainTrack.frame[0] <= galleryTrack.frame[-1] and galleryTrack.frame[0] <= mainTrack.frame[-1]:
overlap = min(galleryTrack.frame[-1] - mainTrack.frame[0], mainTrack.frame[-1]-galleryTrack.frame[0])
return (0, overlap)
else:
return (-1,-1)
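# Worked sketch of the overlap rule above (frame numbers are illustrative):
# main spans frames 100-500 and gallery spans 450-600 ->
# overlap = min(600 - 100, 500 - 450) = 50, returned as (0, 50);
# main 100-200 vs gallery 300-400 do not overlap, so (-1, -1) is returned.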
def findConccurent(self, trackIds, overlaping = True, fully_concurrent = False, verbose = True):
"""
Finds the concurrent tracks among a provided set of tracks.
Can find both partially overlapping and fully concurrent tracklets.
Input:
trackIds: List of track IDs to compare pairwise
Output:
concurrent: List of lists of track IDs that overlap each other in time
"""
tracks = self.tracks
if verbose:
print()
print(trackIds)
concurrent = []
for idx1 in range(len(trackIds)):
track1Id = trackIds[idx1]
track1 = tracks[track1Id]
if verbose:
print("Track1: {}, fStart {}, fEnd {}, # frames {}, # missing frames {}".format(track1Id, tracks[track1Id].frame[0], tracks[track1Id].frame[-1], len(tracks[track1Id].frame), (tracks[track1Id].frame[-1] - tracks[track1Id].frame[0] + 1) - len(tracks[track1Id].frame)))
assigned = []
trackList = []
for idx2 in range(idx1+1, len(trackIds)):
if idx1 == idx2:
continue
track2Id = trackIds[idx2]
track2 = tracks[track2Id]
if verbose:
print("Track2: {}, fStart {}, fEnd {}, # frames {}, # missing frames {}".format(track2Id, tracks[track2Id].frame[0], tracks[track2Id].frame[-1], len(tracks[track2Id].frame), (tracks[track2Id].frame[-1] - tracks[track2Id].frame[0] + 1) - len(tracks[track2Id].frame)))
interCheck = self.checkTrackIntersection(track1, track2)
if interCheck == -1:
if verbose:
print("Not intersecting ", track1Id, track2Id)
## If the tracks dont share the same time span
continue
if not fully_concurrent:
if interCheck == 0:
## If track2 is fully in the span of track1. Only track1 is kept
assigned.append(track2Id)
continue
if interCheck == 1:
## If track1 is fully in the span of track2. Only track2 is kept
assigned.append(track1Id)
continue
if not overlaping:
if interCheck == 2 or interCheck == 3:
continue
if verbose:
print("\tKept")
trackList.append(track2Id)
assigned.append(track2Id)
if track1Id not in assigned:
trackList.append(track1Id)
assigned.append(track1Id)
if len(trackList) > 0:
concurrent.append(trackList)
if verbose:
print()
if verbose:
print(concurrent)
print(assigned)
print()
return concurrent
def findMainTracks(self):
"""
Finds all cases where there are N tracks that are both long and overlapping. These tracks are used as seeds for the tracklet association.
The tracks have to be of a certain length and have a certain number of detections in them before they are considered.
The minimum length and number of detections are determined by taking the value of the (N*user_multiple)th top value.
All tracks are then checked for overlap, and all valid cases where N tracks overlap each other more than a user-defined value are then returned.
Output:
main_tracks: A list of lists containing the set of main track IDs which can be used as seeds
"""
detections = []
length = []
ids = []
for tr in self.tracks:
track = self.tracks[tr]
ids.append(tr)
detections.append(len(track.frame))
length.append(len(np.linspace(track.frame[0], track.frame[-1], track.frame[-1]-track.frame[0] + 1, dtype=np.int)))
print("Track {}: Start {}, End {}, Length {}, Detections {}".format(tr, track.frame[0], track.frame[-1], length[-1], detections[-1]))
sorted_length = sorted(length)
sorted_detections = sorted(detections)
candidateN = min(len(self.tracks), self.n_fish*self.main_track_search_multiple)
candidateN = len(self.tracks)
min_length = sorted_length[-candidateN]
min_detections = sorted_detections[-candidateN]
min_overlap = 1  # min_length * self.min_main_overlap
print("Minimum tracklet length {}".format(min_length))
print("Minimum tracklet detections {}".format(min_detections))
print("Minimum tracklet overlap {}".format(min_overlap))
main_tracks_init = []
for idx in range(len(self.tracks)):
if length[idx] >= min_length and detections[idx] >= min_detections:
main_tracks_init.append(ids[idx])
# find which of the initially selected tracks are concurrent
concurrent_tracks = self.findConccurent(main_tracks_init, overlaping = True, fully_concurrent = True, verbose = False)
# For all set of concurrent tracks go through and see how much they overlap
main_tracks = []
overlap_lst = []
checked_sets = []
no_overlap_dict = {x:[] for x in self.tracks}
for conc_set in concurrent_tracks:
if len(conc_set) < self.n_fish:
# If fewer tracks than fish in the set, discard it
continue
else:
# Go through all combinations of the tracks, where there are n_fish tracks in a set, and keep the ones where the overlap between all tracks is big enough
# All tracks should overlap so that e.g. for 3 fish it cannot be
# |----------------------------------------|
# |-----------------| |----------------------------|
for comb_set in combinations(conc_set, self.n_fish):
if comb_set in checked_sets:
continue
else:
checked_sets.append(comb_set)
valid_set = True
for track in comb_set:
if track in no_overlap_dict:
if set(no_overlap_dict[track]).intersection(set(comb_set)):
| |
information for
the superelement. Some or all of this info is in the `uset`
table, but if a coordinate system is not used as an output
system of any grid, it will not show up in `uset`. That is why
`cstm` is here. `cstm` has 14 columns::
cstm = [ id type xo yo zo T(1,:) T(2,:) T(3,:) ]
Note that each `cstm` always starts with the two ids 0 and -1.
The 0 is the basic coordinate system and the -1 is a dummy for
SPOINTs. Note the T is transformation between coordinate
systems as defined (not necessarily the same as the
transformation for a particular grid ... which, for
cylindrical and spherical, depends on grid location). This is
the same T as in the `uset` table.
For example, to convert coordinates from global to basic::
Rectangular (type = 1):
[x; y; z] = T*[xg; yg; zg] + [xo; yo; zo]
Cylindrical (type = 2):
% c = cos(theta); s = sin(theta)
[x; y; z] = T*[R c; R s; zg] + [xo; yo; zo]
Spherical (type = 3):
% s1 = sin(theta); c1 = cos(theta); s2 = sin(phi); c2 = cos(phi)
[x; y; z] = T*[r s1 c2; r s1 s2; r c1] + [xo; yo; zo]
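As a minimal illustration (the superelement id, row index and point
values below are assumptions, not data from any particular model),
one row of `cstm` can be applied with numpy to move a cylindrical
point to basic::
    import numpy as np
    row = nas['cstm'][100][2]          # assumed cylindrical (type 2) row
    offset = row[2:5]                  # [xo, yo, zo]
    T = row[5:14].reshape(3, 3)        # rows are T(1,:), T(2,:), T(3,:)
    R, theta, zg = 10.0, np.radians(30.0), 5.0
    xyz_basic = T @ np.array([R * np.cos(theta),
                              R * np.sin(theta), zg]) + offset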
*'cstm2' description*
Each `cstm2` is a dictionary with the same 5x3 matrix that the
'Coord_Info' listed above has (it does not include the first row,
which is the node location). The dictionary is indexed by the
coordinate id.
*'maps' description*
`maps` will be [] for superelements whose A-set dof did not
get rearranged going downstream (on the CSUPER entry). For
other superelements, `maps` will contain two columns: [order,
scale]. The first column reorders upstream A-set to be in the
order that they appear in the downstream:
``down = up[maps[:, 0]]``. The second column is typically 1.0;
if not, these routines will print an error message and
stop. Together with `dnids`, a partition vector can be formed
for the A-set of an upstream superelement (see
:func:`pyyeti.nastran.n2p.upasetpv`).
The op2 file that this routine reads is written by the Nastran
DMAP NAS2CAM. The data in the file are expected to be in this
order::
SLIST & EMAP or SUPERID
For each superelement:
USET
EQEXINS
CSTMS (if required)
BGPDTS
MAPS (if required)
Note: The 2nd bit for the DOF column of all `uset` tables is
cleared for all S-set. See
:func:`pyyeti.nastran.n2p.mkusetmask` for more information.
Example usage::
from pyyeti import nastran
# list superelement 100 DOF that are in the B set:
o2 = nastran.OP2('nas2cam.op2')
nas = o2.rdn2cop2()
bset = nastran.mksetpv(nas['uset'][100], 'p', 'b')
print('bset of se100 =', nas['uset'][100][bset, :2])
See also
--------
:func:`rdnas2cam`, :func:`pyyeti.nastran.bulk.bulk2uset`.
"""
# setup basic coordinate system info and a dummy for spoints:
bc = np.array(
[
[+0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
nas = {
"uset": {},
"cstm": {},
"cstm2": {},
"maps": {},
"dnids": {},
"upids": {},
}
self._fileh.seek(self._postheaderpos)
# read datablock (slist) header record:
name, trailer, dbtype = self.rdop2nt()
if name not in ("SUPERID", "SLIST"):
raise ValueError(
"expected 'SUPERID' or 'SLIST' as first data "
f"block in nas2cam op2 file, but got {name}"
)
if dbtype > 0:
selist = np.hstack((self.rdop2matrix(trailer), [[0]]))
selist = selist.astype(int)
name, trailer, dbtype = self.rdop2nt()
else:
selist = self._rdop2selist()
nse = np.size(selist, 0)
name, trailer, dbtype = self.rdop2nt()
if name == "EMAP":
self._rdop2emap(nas, nse, trailer)
name, trailer, dbtype = self.rdop2nt()
# read uset and eqexins tables and do some processing:
for se in selist[:, 0]:
if not name:
break
uset = self._rdop2uset()
name, trailer, dbtype = self.rdop2nt()
eqexin1, eqexin = self._rdop2eqexin()
name, trailer, dbtype = self.rdop2nt()
if name == "CSTMS":
cstm = np.vstack((bc, self._rdop2cstm()))
name, trailer, dbtype = self.rdop2nt()
else:
cstm = bc
(xyz, cid, dof, doftype, nid, upids) = self._proc_bgpdt(eqexin1, eqexin)
nas["upids"][se] = upids
Uset, cstm, cstm2 = self._buildUset(
se, dof, doftype, nid, uset, xyz, cid, cstm, None
)
nas["uset"][se] = Uset
nas["cstm"][se] = cstm
nas["cstm2"][se] = cstm2
name, trailer, dbtype = self.rdop2nt()
if name == "MAPS":
nas["maps"][se] = self._rdop2maps(trailer, se)
name, trailer, dbtype = self.rdop2nt()
else:
nas["maps"][se] = []
nas["selist"] = selist
return nas
def rdmats(filename=None, names=None):
"""
Read all matrices from Nastran output2 file.
Parameters
----------
filename : string or None; optional
Name of op2 file to read. Can also be the name of a directory
or None; in these cases, a GUI is opened for file selection.
names : list_like; optional
Iterable of names to read in. If None, read all. These can
be input in lower case.
Returns
-------
dict
Dictionary containing all matrices in the op2 file:
{'NAME1': matrix1, 'NAME2': matrix2, ...}
Notes
-----
The keys are the names as stored (upper case).
This routine is for convenience; this is what it does::
filename = guitools.get_file_name(filename, read=True)
return OP2(filename).rdop2mats(names)
"""
filename = guitools.get_file_name(filename, read=True)
return OP2(filename).rdop2mats(names)
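# Minimal usage sketch for `rdmats`; the file name and matrix names are
# placeholders, not files shipped with this package:
def _demo_rdmats():
    mats = rdmats('nas2cam.op2', names=['kaa', 'maa'])
    for name, mat in mats.items():          # keys come back upper case
        print(name, getattr(mat, 'shape', None))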
def _get_op2_op4(op2file, op4file):
if op2file is None: # pragma: no cover
op2file = guitools.get_file_name(None, read=True)
elif not os.path.exists(op2file):
op2file = op2file + ".op2"
if not op4file:
op4file = op2file.replace(".op2", ".op4")
return op2file, op4file
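# Behaviour sketch for `_get_op2_op4` (file names are hypothetical): when the
# given basename is not an existing file, ".op2" is appended and the op4 name
# is derived from it, e.g. 'nas2cam' -> ('nas2cam.op2', 'nas2cam.op4').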
def rdnas2cam(op2file="nas2cam", op4file=None):
"""
Read op2/op4 data written by the DMAP NAS2CAM.
Parameters
----------
op2file : string or None
Either the basename of the .op2 and .op4 files, or the full
name of the .op2 file. If None, a GUI is opened for file
selection.
op4file : string or None
The name of the .op4 file or, if None, builds name from the
`op2file` input.
Returns
-------
nas : dictionary
Dictionary with all members created by :func:`OP2.rdn2cop2`
(see that routine's help) and the following additional
members.
'nrb' : integer
The number of rigid-body modes for residual.
'ulvs' : dictionary indexed by SE
The ULVS matrices (row partitions of residual modes to the
A-set DOF of the SE).
'lambda' : dictionary indexed by SE
The eigenvalues for each SE.
'gm' : dictionary indexed by SE
N-set to M-set transformation matrix GM: M = GM N.
'got' : dictionary indexed by SE
constraint modes
'goq' : dictionary indexed by SE
normal modes
'rfmodes' : dictionary indexed by SE
index partition vector for res-flex modes
'maa' : dictionary indexed by SE
A-set mass
'baa' : dictionary indexed by SE
A-set damping
'kaa' : dictionary indexed by SE
A-set stiffness
'pha' : dictionary indexed by SE
A-set modes
'mdd' : dictionary indexed by SE
D-set mass
'bdd' : dictionary indexed by SE
D-set damping
'kdd' : dictionary indexed by SE
D-set stiffness
'pdt' : dictionary indexed by SE
D-set loads
'mgg' : dictionary indexed by SE
G-set mass
'kgg' : dictionary indexed by SE
G-set stiffness
'phg' : dictionary indexed by SE
G-set mode shape matrix
'rbg' : dictionary indexed by SE
G-set rigid-body modes; see also drg output and rbgeom_uset
'drg' : dictionary indexed by SE
G-set transpose of rigid-body modes; see also 'rbg' and
:func:`pyyeti.nastran.n2p.rbgeom_uset`. `drg` = `rbg.T` if
both are present.
'pg' : dictionary indexed by SE
G-set loads
'fgravh' : array
gravity on generalized dof for se 0
'fgravg' : array
gravity on G-set physical dof for se 0
Notes
-----
See :func:`OP2.rdn2cop2` for a description of what is expected of
the `op2file`. The `op4file` is expected to contain certain
marker matrices. Scalar SE_START starts each superelement and can
be followed by any matrices for that superelement. The end of the
superelement input is marked by a matrix named LOOP_END.
See also the Nastran DMAP NAS2CAM.
"""
op2file, op4file = _get_op2_op4(op2file, op4file)
# read op2 file:
with OP2(op2file) as o2:
nas = o2.rdn2cop2()
# read op4 file:
op4names, op4vars = op4.load(op4file, into="list")[:2]
# loop over superelements:
j = 0
for se in nas["selist"][:, 0]:
if op4names[j] != "se_start":
raise ValueError(
"matrices are not in understandable order. Expected 'se_start', got "
f"'{op4names[j]}'"
)
# read all matrices for this se
"some text",
"type": IOTypes.STR,
},
{
"name": "input2",
"description": "some text",
"type": IOTypes.FLOAT,
},
{
"name": "input3",
"description": "some text",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
],
"container": {"image": "test"},
}
],
}
config = DagConfig.from_dict(config_dict)
assert config.to_light_dict() == config_dict
# Trying to set op template before processing components
with self.assertRaises(PolyaxonSchemaError):
config.set_op_component("A")
config.process_components()
# Trying to set op template before processing dag
with self.assertRaises(PolyaxonSchemaError):
config.set_op_component("A")
config.process_dag()
assert config.dag["A"].op.component is None
assert config.dag["B"].op.component is None
config.set_op_component("A")
assert config.dag["B"].op.component is None
assert config.dag["A"].op.component is not None
assert (
config.dag["A"].op.component == config._components_by_names["job-template"]
)
config.set_op_component("B")
assert config.dag["B"].op.component is not None
assert (
config.dag["B"].op.component == config._components_by_names["job-template"]
)
def test_dag_with_wrong_refs(self):
config_dict = {
"kind": "dag",
"ops": [
{
"component_ref": {"name": "job-template"},
"name": "A",
"params": {"input1": "sdf", "input2": 12.0, "input3": False},
},
{
"component_ref": {"name": "job-template"},
"name": "B",
"dependencies": ["A"],
"params": {
"input1": "{{ ops.A.outputs.output1 }}",
"input2": 12.123,
},
},
],
"components": [
{
"name": "job-template",
"inputs": [
{
"name": "input1",
"description": "some text",
"type": IOTypes.STR,
},
{
"name": "input2",
"description": "some text",
"type": IOTypes.FLOAT,
},
{
"name": "input3",
"description": "some text",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
],
"container": {"image": "test"},
}
],
}
config = DagConfig.from_dict(config_dict)
assert config.to_light_dict() == config_dict
with self.assertRaises(ValidationError):
config.process_components()
def test_dag_with_correct_refs(self):
config_dict = {
"kind": "dag",
"ops": [
{
"component_ref": {"name": "job-template"},
"name": "A",
"params": {"input1": 2, "input2": "gs://bucket/path/to/blob/"},
},
{
"component_ref": {"name": "job-template"},
"name": "B",
"dependencies": ["A"],
"params": {
"input1": "{{ ops.A.outputs.output1 }}",
"input2": "gs://bucket/path/to/blob/",
},
},
],
"components": [
{
"name": "job-template",
"inputs": [
{
"name": "input1",
"description": "some text",
"type": IOTypes.INT,
},
{
"name": "input2",
"description": "some text",
"type": IOTypes.GCS_PATH,
},
{
"name": "input3",
"description": "some text",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
],
"outputs": [
{
"name": "output1",
"description": "some text",
"type": IOTypes.INT,
"is_optional": True,
"value": 123,
}
],
"container": {"image": "test"},
}
],
}
config = DagConfig.from_dict(config_dict)
assert config.to_light_dict() == config_dict
config.process_components()
def test_dag_with_correct_ref_and_wrong_ref_type(self):
config_dict = {
"kind": "dag",
"ops": [
{
"component_ref": {"name": "job-template"},
"name": "A",
"params": {
"input1": 2,
"input2": "gs://bucket/path/to/blob/",
"output1": 123,
},
},
{
"component_ref": {"name": "job-template"},
"name": "B",
"dependencies": ["A"],
"params": {
"input1": 3,
"input2": "{{ ops.A.outputs.output1 }}",
"output1": 123,
},
},
],
"components": [
{
"name": "job-template",
"inputs": [
{
"name": "input1",
"description": "some text",
"type": IOTypes.INT,
},
{
"name": "input2",
"description": "some text",
"type": IOTypes.GCS_PATH,
},
{
"name": "input3",
"description": "some text",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
],
"outputs": [
{
"name": "output1",
"description": "some text",
"type": IOTypes.INT,
}
],
"container": {"image": "test"},
}
],
}
config = DagConfig.from_dict(config_dict)
assert config.to_light_dict() == config_dict
with self.assertRaises(ValidationError):
config.process_components()
def test_dag_with_template_not_defining_inputs_and_ops_refs_params(self):
config_dict = {
"kind": "dag",
"ops": [
{"component_ref": {"name": "build-template"}, "name": "A"},
{
"component_ref": {"name": "job-template"},
"name": "B",
"dependencies": ["A"],
"params": {"param1": "{{ ops.A.outputs.x }}"},
},
],
"components": [
{"name": "job-template", "container": {"image": "test"}},
{
"name": "build-template",
"tags": ["kaniko"],
"container": {"image": "test"},
},
],
}
config = DagConfig.from_dict(config_dict)
assert config.to_light_dict() == config_dict
with self.assertRaises(ValidationError):
config.process_components()
def test_dag_with_ops_and_components(self):
config_dict = {
"kind": "dag",
"ops": [
{
"component_ref": {"name": "build-template"},
"name": "A",
"description": "description A",
"tags": ["tag11", "tag12"],
"environment": {
"resources": {"requests": {"cpu": 1}},
"node_selector": {"polyaxon": "core"},
"service_account": "service",
"image_pull_secrets": ["secret1", "secret2"],
},
"termination": {"max_retries": 2},
},
{
"component_ref": {"name": "experiment-template"},
"name": "B",
"description": "description B",
"tags": ["tag21", "tag22"],
"dependencies": ["A"],
"params": {
"input1": 11.1,
"input2": False,
"input3": "{{ runs.64332180bfce46eba80a65caf73c5396.outputs.foo }}",
"output1": "S3://foo.com",
},
"environment": {
"resources": {"requests": {"cpu": 1}},
"node_selector": {"polyaxon": "core"},
"service_account": "service",
"image_pull_secrets": ["secret1", "secret2"],
},
"termination": {"max_retries": 3},
},
{
"component_ref": {"name": "group-template"},
"name": "C",
"description": "description C",
"tags": ["tag31", "tag32"],
"params": {
"input1": "{{ ops.B.outputs.output1 }}",
"input2": "{{ ops.B.outputs.output2 }}",
"output1": "S3://foo.com",
},
"environment": {
"resources": {"requests": {"cpu": 1}},
"node_selector": {"polyaxon": "core"},
"service_account": "service",
"image_pull_secrets": ["secret1", "secret2"],
},
"termination": {"max_retries": 5},
},
],
"components": [
{
"name": "experiment-template",
"description": "description experiment",
"tags": ["tag11", "tag12"],
"inputs": [
{
"name": "input1",
"description": "some text",
"type": IOTypes.FLOAT,
},
{
"name": "input2",
"description": "some text",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
{
"name": "input3",
"description": "some text",
"type": IOTypes.INT,
"is_optional": True,
"value": True,
},
],
"outputs": [
{
"name": "output1",
"description": "some text",
"type": IOTypes.S3_PATH,
},
{
"name": "output2",
"description": "some text",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
],
"environment": {
"resources": {"requests": {"cpu": 1}},
"node_selector": {"polyaxon": "core"},
"service_account": "service",
"image_pull_secrets": ["secret1", "secret2"],
},
"termination": {"max_retries": 2},
"container": {"image": "test"},
},
{
"name": "group-template",
"description": "description group",
"tags": ["tag11", "tag12"],
"inputs": [
{
"name": "input1",
"description": "some text",
"type": IOTypes.S3_PATH,
},
{
"name": "input2",
"description": "some text",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
],
"outputs": [
{
"name": "output1",
"description": "some text",
"type": IOTypes.S3_PATH,
}
],
"environment": {
"resources": {"requests": {"cpu": 1}},
"node_selector": {"polyaxon": "core"},
"service_account": "service",
"image_pull_secrets": ["secret1", "secret2"],
},
"termination": {"max_retries": 2},
"container": {"image": "test"},
},
{
"name": "build-template",
"description": "description build",
"tags": ["tag11", "tag12"],
"container": {"image": "test"},
},
{
"name": "build-template2",
"description": "description build",
"tags": ["tag11", "tag12", "kaniko"],
"environment": {
"resources": {"requests": {"cpu": 1}},
"node_selector": {"polyaxon": "core"},
"service_account": "service",
"image_pull_secrets": ["secret1", "secret2"],
},
"termination": {"max_retries": 2},
"container": {"image": "test"},
},
{
"name": "job-template",
"description": "description job",
"tags": ["tag11", "tag12"],
"inputs": [
{
"name": "input1",
"description": "some text",
"type": IOTypes.S3_PATH,
"is_optional": True,
"value": "s3://foo",
}
],
"outputs": [
{
"name": "output1",
"description": "some text",
"type": IOTypes.S3_PATH,
}
],
"environment": {
"resources": {"requests": {"cpu": 1}},
"node_selector": {"polyaxon": "core"},
"service_account": "service",
"image_pull_secrets": ["secret1", "secret2"],
},
"termination": {"max_retries": 2},
"container": {"image": "test"},
},
],
}
config = DagConfig.from_dict(config_dict)
assert config.to_light_dict() == config_dict
config.process_dag()
config.validate_dag()
config.process_components()
dag = config.dag
assert len(dag) == 3
assert config.get_independent_ops(dag=dag) == {"A"}
assert dags.get_independent_ops(dag=dag) == {"A"}
assert config.get_orphan_ops(dag=dag) == set([])
assert dags.get_orphan_ops(dag=dag) == set([])
sorted_dag = config.sort_topologically(dag=dag)
assert config.sort_topologically(dag=dag) == sorted_dag
assert sorted_dag[0] == ["A"]
assert sorted_dag[1] == ["B"]
assert sorted_dag[2] == ["C"]
# op upstreams
op_upstream_by_names = ops_params.get_upstream_op_params_by_names(
params=config.dag["A"].op.params
)
assert op_upstream_by_names == {}
op_upstream_by_names = ops_params.get_upstream_op_params_by_names(
params=config.dag["B"].op.params
)
assert op_upstream_by_names == {}
op_upstream_by_names = ops_params.get_upstream_op_params_by_names(
params=config.dag["C"].op.params
)
assert len(op_upstream_by_names["B"]) == 2
if op_upstream_by_names["B"][0].name == "input1":
assert op_upstream_by_names["B"][0] == ops_params.ParamSpec(
name="input1",
iotype=None,
value="ops.B.outputs.output1",
entity="ops",
entity_ref="B",
entity_value="output1",
is_flag=None,
)
assert op_upstream_by_names["B"][1] == ops_params.ParamSpec(
name="input2",
iotype=None,
value="ops.B.outputs.output2",
entity="ops",
entity_ref="B",
entity_value="output2",
is_flag=None,
)
else:
assert op_upstream_by_names["B"][1] == ops_params.ParamSpec(
name="input1",
iotype=None,
value="ops.B.outputs.output1",
entity="ops",
entity_ref="B",
entity_value="output1",
is_flag=None,
)
assert op_upstream_by_names["B"][0] == ops_params.ParamSpec(
name="input2",
iotype=None,
value="ops.B.outputs.output2",
entity="ops",
entity_ref="B",
entity_value="output2",
is_flag=None,
)
# run upstreams
run_upstream_by_names = ops_params.get_upstream_run_params_by_names(
params=config.dag["A"].op.params
)
assert run_upstream_by_names == {}
run_upstream_by_names = ops_params.get_upstream_run_params_by_names(
params=config.dag["B"].op.params
)
assert run_upstream_by_names["64332180bfce46eba80a65caf73c5396"] == [
ops_params.ParamSpec(
name="input3",
iotype=None,
value="runs.64332180bfce46eba80a65caf73c5396.outputs.foo",
entity="runs",
entity_ref="64332180bfce46eba80a65caf73c5396",
entity_value="foo",
is_flag=None,
)
]
run_upstream_by_names = ops_params.get_upstream_run_params_by_names(
params=config.dag["C"].op.params
)
assert run_upstream_by_names == {}
# pipeline upstreams
pipeline_by_names = ops_params.get_pipeline_params_by_names(
params=config.dag["A"].op.params
)
assert pipeline_by_names == {}
pipeline_by_names = ops_params.get_pipeline_params_by_names(
params=config.dag["B"].op.params
)
assert pipeline_by_names == {}
pipeline_by_names = ops_params.get_pipeline_params_by_names(
params=config.dag["C"].op.params
)
assert pipeline_by_names == {}
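# For reference (as the assertions above show): a param value such as
# "{{ ops.A.outputs.output1 }}" resolves to a ParamSpec with entity "ops",
# entity_ref "A" and entity_value "output1", while
# "{{ runs.<uuid>.outputs.foo }}" resolves the same way with the run uuid
# as the entity_ref.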
def test_pipeline_context(self):
config_dict = {
"kind": "dag",
"ops": [
{
"component_ref": {"name": "A"},
"name": "A",
"params": {
"input1": 11.1,
"input2": False,
"input3": "{{ runs.64332180bfce46eba80a65caf73c5396.outputs.foo }}",
"input4": "s3://foo",
},
},
{
"component_ref": {"name": "B"},
"name": "B",
"dependencies": ["A"],
"params": {
"input1": "{{ ops.A.inputs.input4 }}",
"input2": "{{ ops.A.outputs.output1 }}",
},
},
{
"component_ref": {"name": "B"},
"name": "C",
"params": {
"input1": "{{ dag.inputs.input_pipe }}",
"input2": "{{ ops.B.outputs.output1 }}",
},
},
],
"components": [
{
"name": "A",
"inputs": [
{"name": "input1", "type": IOTypes.FLOAT},
{
"name": "input2",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
{
"name": "input3",
"type": IOTypes.INT,
"is_optional": True,
"value": True,
},
{"name": "input4", "type": IOTypes.S3_PATH},
],
"outputs": [
{"name": "output1", "type": IOTypes.S3_PATH},
{
"name": "output2",
"type": IOTypes.BOOL,
"is_optional": True,
"value": True,
},
],
"container": {"image": "test"},
},
{
"name": "B",
"inputs": [
{"name": "input1", "type": IOTypes.S3_PATH},
{"name": "input2", "type": IOTypes.S3_PATH},
],
"outputs": [{"name": "output1", "type": IOTypes.S3_PATH}],
"container": {"image": "test"},
},
],
}
config = DagConfig.from_dict(config_dict)
assert config.to_light_dict() == config_dict
config.process_dag()
config.validate_dag()
config.process_components(
inputs=[IOConfig.from_dict({"name": "input_pipe", "type": IOTypes.S3_PATH})]
)
dag = config.dag
assert len(dag) == 3
assert config.get_independent_ops(dag=dag) == {"A"}
assert dags.get_independent_ops(dag=dag) == {"A"}
assert config.get_orphan_ops(dag=dag) == set([])
assert dags.get_orphan_ops(dag=dag) == set([])
sorted_dag = config.sort_topologically(dag=dag)
assert config.sort_topologically(dag=dag) == sorted_dag
"""
A class for the OKR graph structure
Author: <NAME> and <NAME>
"""
import os
import copy
import logging
import itertools
import xml.etree.ElementTree as ET
import re
import stop_words
from collections import defaultdict
NULL_VALUE = 0
STOP_WORDS = stop_words.get_stop_words('en')
class MentionType:
"""
Enum for mention type
"""
Entity = 0
Proposition = 1
class OKR:
"""
A class for the OKR graph structure
"""
def __init__(self, name, sentences, ignored_indices, tweet_ids, entities, propositions):
self.name = name # XML file name
self.sentences = sentences # Dictionary of sentence ID (starts from 1) to tokenized sentence
self.ignored_indices = ignored_indices # set of words to ignore, in format sentence_id[index_id]
self.tweet_ids = tweet_ids # Dictionary of sentence ID to tweet ID
self.entities = entities # Dictionary of entity ID to Entity object
self.propositions = propositions # Dictionary of proposition id to Proposition object
# Set arguments original indices and name
for prop in self.propositions.values():
for prop_mention in prop.mentions.values():
for argument in prop_mention.argument_mentions.values():
set_parent_indices(argument, self)
# Set template for predicate mentions and use it to create mention entailment graph
for p_id, prop in self.propositions.iteritems():
for m_id, prop_mention in prop.mentions.iteritems():
set_template(prop_mention, self.entities, self.propositions)
prop.entailment_graph.mentions_graph = from_term_id_to_mention_id(prop.entailment_graph.graph,
prop.mentions, MentionType.Proposition)
prop.entailment_graph.contradictions_mention_graph = from_term_id_to_mention_id(
prop.entailment_graph.contradictions_graph, prop.mentions, MentionType.Proposition)
# Create dictionaries to get mentions by their string ID
self.prop_mentions_by_key = {str(mention): mention
for prop in self.propositions.values() for mention in prop.mentions.values()}
self.ent_mentions_by_key = {str(mention): mention
for ent in self.entities.values() for mention in ent.mentions.values()}
def clone(self):
"""
Returns a deep copy of the graph
"""
return copy.deepcopy(self)
def get_sentence_by_id(self, sent_id_str):
"""
Receives a sentence ID string with token indices and returns the corresponding words, joined by spaces
:param sent_id_str: The sentence ID string with token indices
:return: the selected words of the sentence
"""
sent_id = int(sent_id_str.split('[')[0])
sentence = self.sentences[sent_id]
indices = [int(index) for index in sent_id_str.split('[')[1][:-1].split(',')]
indices_new = [int(index) for index in sent_id_str.split('[')[1][:-1].split(',') if int(index) < len(sentence)]
if len(indices) != len(indices_new):
logging.warning('Error in the length of sentence id %s' % sent_id_str)
return ' '.join([sentence[i] for i in indices_new])
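# Usage sketch for get_sentence_by_id (the ID string is illustrative): the
# argument encodes a sentence id followed by token indices, so '3[0,2,5]'
# returns tokens 0, 2 and 5 of sentence 3 joined by spaces.
def _demo_get_sentence_by_id(okr_graph):
    return okr_graph.get_sentence_by_id('3[0,2,5]')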
class AbstractNode:
"""
A class for either a proposition or an entity in the graph
"""
def __init__(self, id, name, mentions, terms, entailment_graph):
self.id = id
self.name = name
self.mentions = mentions
self.terms = terms
self.entailment_graph = entailment_graph
def __str__(self):
"""
Use this as a unique id for a node which is comparable among graphs
"""
return '#'.join(sorted(list(set(map(str, self.mentions.values())))))
class Entity(AbstractNode):
"""
A class for an entity in the graph
"""
def __init__(self, id, name, mentions, terms, entailment_graph):
AbstractNode.__init__(self, id, name, mentions, terms, entailment_graph)
class Proposition(AbstractNode):
"""
A class for a proposition in the graph
"""
def __init__(self, id, name, mentions, attributor, terms, entailment_graph):
AbstractNode.__init__(self, id, name, mentions, terms, entailment_graph)
self.attributor = attributor
class Mention:
"""
An abstract class for a mention in the graph
"""
def __init__(self, id, sentence_id, indices, terms, parent):
self.id = id
self.sentence_id = sentence_id
self.indices = indices
self.terms = terms
self.parent = parent
def __str__(self):
"""
Use this as a unique id for a mention which is comparable among graphs
"""
return str(self.sentence_id) + str(self.indices)
class Entailment_graph:
"""
A class representing the entailment graph (for propositions, entities or arguments)
"""
def __init__(self, graph, mentions_graph, contradictions_graph, contradictions_mention_graph):
self.graph = graph # graph of terms
self.mentions_graph = mentions_graph # graph of mention IDs (each term is connected to one or more mention IDs)
self.contradictions_graph = contradictions_graph # graph of contradictions (terms)
self.contradictions_mention_graph = contradictions_mention_graph # graph of contradictions (mention IDs)
class EntityMention(Mention):
"""
A class for an entity mention in the graph
"""
def __init__(self, id, sentence_id, indices, terms, parent):
Mention.__init__(self, id, sentence_id, indices, terms, parent)
class PropositionMention(Mention):
"""
A class for a proposition mention in the graph
"""
def __init__(self, id, sentence_id, indices, terms, parent, argument_mentions, is_explicit):
Mention.__init__(self, id, sentence_id, indices, terms, parent)
self.argument_mentions = argument_mentions
self.template = None # template with argument IDs
self.is_explicit = is_explicit
def __str__(self):
"""
Use this as a unique id for a mention which is comparable among graphs
override inherited function in order to implement str for implicit mentions and remove prepositions
"""
# Implicit proposition
if self.indices == [-1]:
new_indices = [item for sublist in [arg.parent_indices[1] for arg in self.argument_mentions.values()] for
item in sublist]
new_indices.sort()
return str(self.sentence_id) + str(new_indices)
# TODO: Rachel - replace with POS looking for nouns and verbs
terms_lst = self.terms.split()
verb_noun_indices = [self.indices[i] for i in range(0, len(self.indices) - 1) if terms_lst[i] not in STOP_WORDS]
# Predicate with noun or a verb
if len(verb_noun_indices) > 0:
return str(self.sentence_id) + str(verb_noun_indices)
return str(self.sentence_id) + str(self.indices)
# __repr__ = __str__
class ArgumentMention:
"""
A class for an argument mention in the graph
"""
def __init__(self, id, desc, mention_type, parent_id, parent_mention_id):
self.id = id
self.desc = desc
self.mention_type = mention_type
self.parent_id = parent_id
self.parent_mention_id = parent_mention_id
# These fields are set when the graph loading is done
self.parent_indices = None
self.parent_name = None
def __str__(self):
"""
Use this as a unique id for a mention which is comparable among graphs
Returns unique ID for argument mention only
"""
if self.parent_indices is None:
return 'NONE'
return str(self.parent_indices[0]) + str(self.parent_indices[1])
def str_p(self, proposition_mention):
"""
Use this as a unique id for a mention which is comparable among graphs
Returns unique ID of proposition_mention + argument_mention
"""
return str(proposition_mention) + '_' + str(self)
def load_graphs_from_folder(input_folder):
"""
Load OKR files from a given folder
:param input_folder: the folder path
:return: a list of OKR objects
"""
return [load_graph_from_file(input_folder + "/" + f) for f in os.listdir(input_folder)]
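# Minimal usage sketch (the folder path is a placeholder):
def _demo_load_graphs(input_folder='data/dev'):
    graphs = load_graphs_from_folder(input_folder)
    for g in graphs:
        print(g.name, len(g.sentences), len(g.entities), len(g.propositions))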
def load_graph_from_file(input_file):
"""
Loads an OKR object from an xml file
:param input_file: the xml file
:return: an OKR object
"""
mention_types = {'Entity': MentionType.Entity, 'Proposition': MentionType.Proposition}
# Load the xml to a tree object
tree = ET.parse(input_file)
# Load the sentences
root = tree.getroot()
sentences_node = root.find('sentences')[1:]
# Handle different versions - old version:
if sentences_node[0].find('str') is not None:
sentences = { int(sentence.find('id').text): sentence.find('str').text.split() for sentence in sentences_node }
ignored_indices = None
tweet_ids = {}
# New version
else:
sentences = { int(sentence.find('id').text) : [token.find('str').text for token in sentence.find('tokens')]
for sentence in sentences_node }
ignored_indices = set(
[sentence.find('id').text + '[' + token.find('id').text + ']' for sentence in sentences_node
for token in sentence.find('tokens') if token.find('isIrrelevant').text == 'true'])
tweet_ids = {int(sentence.find('id').text): sentence.find('name').text for sentence in sentences_node}
# Load the entities
entities_node = root.find('typeManagers').findall('typeManager')[1].find('types')
entities = {}
for entity in entities_node:
# Entity mentions
mentions = {int(mention[0].text): # mention id
EntityMention(int(mention[0].text), # mention id
int(mention[1].text), # sentence id
[int(index[0].text) for index in mention[3]], # mention indices
' '.join([index[1].text.lower() for index in mention[3]]), # mention terms
int(entity[0].text) # parent
) for mention in entity.find('mentions')}
# Check for empty mentions
empty_mentions = [(mention.parent, m_id) for m_id, mention in mentions.iteritems() if len(mention.indices) == 0]
if len(empty_mentions) > 0:
logging.warning('Empty mentions in entity %s' % entity[0].text)
# Entity entailment graph
entailment_info = entity[3]
ent_terms = entailment_info[0]
term_dic = {int(term[0].text): term[1].text.lower() for term in ent_terms}
graph = []
contradictions_graph = []
ent_connections = entailment_info[1]
for connection in ent_connections:
# The second entails the first or they are equal
if connection[0].text == '1' or connection[0].text == '0':
graph.append((term_dic[int(connection[2].text)], term_dic[int(connection[1].text)]))
# The first entails the second or they are equal
if connection[0].text == '2' or connection[0].text == '0':
graph.append((term_dic[int(connection[1].text)], term_dic[int(connection[2].text)]))
# Contradiction
if connection[0].text == '3':
contradictions_graph.append((term_dic[int(connection[1].text)], term_dic[int(connection[2].text)]))
# Create the transitive closure of the entailment graph
final_graph = transitive_closure(graph)
mentions_graph = from_term_id_to_mention_id(final_graph, mentions, MentionType.Entity)
contradictions_mentions_graph = from_term_id_to_mention_id(contradictions_graph, mentions, MentionType.Entity)
entity_entailment = Entailment_graph(final_graph, mentions_graph, contradictions_graph,
contradictions_mentions_graph)
# Entity terms
terms = set([mention.terms for mention in mentions.values()])
entities[int(entity[0].text)] = Entity(int(entity[0].text), # id
entity[1].text, # name
mentions, # entity mentions
terms, # entity terms
entity_entailment) # entity entailment graph
# Load the propositions
propositions_node = root.find('typeManagers').findall('typeManager')[0].find('types')
propositions = {}
for proposition in propositions_node:
# Proposition mentions
mentions = {int(mention.find('id').text): # mention id
PropositionMention(int(mention.find('id').text), # mention id
int(mention.find('sentenceId').text), # sentence id
[int(index.find('ind').text) for index in mention.find('tokens')], # mention indices
# mention terms
' '.join([index.find('word').text.lower() for index in mention.find('tokens')]),
int(proposition[0].text), # parent
# Argument mentions
{arg[0].text: ArgumentMention(arg[0].text, # argument id
arg[1].text, # argument description
mention_types[arg[2][0][0].text],
# mention type (entity/proposition)
int(arg[2][0][1].text),
# entity/proposition id
int(arg[2][0][2].text))
# entity/proposition mention id
for arg in mention.find('args')},
| |
# File: turbogears/widgets/tests/test_widgets.py
import itertools
import cherrypy
from turbogears import controllers, expose, widgets, validators, view
from turbogears.testutil import call, catch_validation_errors
try:
set
except NameError: # Python 2.3
from sets import Set as set
class Request:
input_values = {}
validation_errors = {}
oldrequest = None
def setup_module():
global oldrequest
oldrequest = cherrypy.request
cherrypy.request = Request()
if not view.engines:
view.load_engines()
def teardown_module():
global oldrequest
cherrypy.request = oldrequest
def test_rendering_without_engine():
"""Helpful error when rendering widgets with no templating engine loaded"""
from turbogears import view
engines = view.engines
view.engines = {}
try:
widgets.CSSLink("foo")()
except Exception, msg:
msg = str(msg)
else:
msg = 'No error'
view.engines = engines
assert 'templating engine is not yet loaded' in msg
def test_label():
"""Tests simple labels"""
label = widgets.Label("foo")
rendered = label.render("The Foo", format='xhtml')
assert """<label id="foo" class="label">The Foo</label>""" == rendered
def test_default_value():
"""Widgets can have a default value"""
textfield = widgets.TextField("name")
output = textfield.render(format='xhtml')
assert 'value' not in output
textfield = widgets.TextField("name", default="<NAME>")
output = textfield.render(format='xhtml')
assert 'value="ed kowalczyk"' in output
def test_callable_default_value():
"""Widgets can have a callable default value"""
textfield = widgets.TextField("name", default=lambda: "<NAME>")
output = textfield.render(format='xhtml')
assert 'value="<NAME>"' in output
def test_false_default_value():
"""Widgets can have a default value that evaluates to False"""
textfield = widgets.TextField("name")
assert textfield.default is None
output = textfield.render(format='xhtml')
assert 'value' not in output
textfield = widgets.TextField("name", default=0)
assert textfield.default == 0
output = textfield.render(format='xhtml')
assert 'value="0"' in output
textfield = widgets.TextField("name", default="")
assert textfield.default == ""
output = textfield.render(format='xhtml')
assert 'value=""' in output
msfield = widgets.MultipleSelectField("name", default=[], options=(1,2,3))
assert msfield.default == []
output = msfield.render(format='xhtml')
assert 'selected' not in output
def test_labeltext():
"""Label text defaults to the capitalized name"""
textfield = widgets.TextField("name")
assert textfield.label == "Name"
def test_validation():
"""Values can be converted to/from Python values"""
textfield = widgets.TextField("age", validator=validators.Int())
output = textfield.render(2, format="xhtml")
assert 'value="2"' in output
value = "2"
value = textfield.validator.to_python(value)
assert value == 2
def test_unicode_input():
"""Unicode values are rendered correctly"""
tf = widgets.TextField("name", validator=validators.UnicodeString())
output = tf.render(u'Pete \u011C', format='xhtml')
assert 'value="Pete \xc4\x9c"' in output
return # XXX: the following causes OTHER(!) tests to fail!
try:
print tf.render('Pete \xfe\xcd')
except ValueError, e:
pass
else:
assert False, "ValueError not raised: non-unicode input not detected"
#tf2 = widgets.TextField("name", validator=validators.String())
# simon: failed inputs are no longer being removed.
#
#def test_failed_validation():
# "If validation fails, the bad value should be removed from the input"
# textfield = widgets.TextField("age", validator=validators.Int())
# values = dict(age="ed")
# try:
# textfield.validate(values)
# except validators.Invalid:
# pass
# assert not values.has_key("age")
def test_widget_css():
"""Widgets can require CSS resources"""
css = widgets.CSSLink(mod=widgets.static, name="foo.css")
css2 = widgets.CSSLink(mod=widgets.static, name="foo.css")
assert css == css2
cssset = set()
cssset.add(css)
cssset.add(css2)
assert len(cssset) == 1
css3 = widgets.CSSLink(mod=widgets.static, name="bar.css")
assert css3 != css2
css4 = widgets.CSSSource(src="foo.css")
assert css != css4
rendered = css.render(format='xhtml')
assert 'link' in rendered
assert 'href="/tg_widgets/turbogears.widgets/foo.css"' in rendered
assert 'type="text/css"' in rendered
assert 'rel="stylesheet"' in rendered
assert 'media="all"' in rendered
rendered = css.render(media="printer", format='xhtml')
assert 'media="printer"' in rendered
css = widgets.CSSSource("h1 { color: black }")
rendered = css.render(format='xhtml')
assert 'h1 { color: black }' in rendered
def test_widget_js():
"""Widgets can require JavaScript resources"""
js = widgets.JSLink(mod=widgets.static, name="foo.js")
js2 = widgets.JSLink(mod=widgets.static, name="foo.js")
assert js2 == js
js2 = widgets.JSLink(mod=widgets.static, name="bar.js")
assert js2 != js
js2 = widgets.CSSLink(mod=widgets.static, name="foo.js")
assert js2 != js
js2 = widgets.JSSource(src="foo.js")
assert js2 != js
rendered = js.render(format='xhtml')
expected = '<script src="/tg_widgets/turbogears.widgets/foo.js"' \
' type="text/javascript"></script>'
assert rendered == expected
js3 = widgets.JSLink(mod=widgets.static, name="foo.js",
defer=False, charset=None)
assert js3 == js
rendered = js3.render(format='xhtml')
assert rendered == expected
js3 = widgets.JSLink(mod=widgets.static, name="foo.js", defer=True)
assert js3 != js
rendered = js3.render(format='html').lower()
assert ' defer' in rendered \
and expected == rendered.replace(' defer', '', 1)
rendered = js3.render(format='xhtml')
assert ' defer="defer"' in rendered \
and expected == rendered.replace(' defer="defer"', '', 1)
js3 = widgets.JSLink(mod=widgets.static, name="foo.js", charset='Latin-1')
assert js3 != js
rendered = js3.render(format='xhtml')
assert ' charset="Latin-1"' in rendered \
and expected == rendered.replace(' charset="Latin-1"', '', 1)
js3 = widgets.JSSource("alert('hello');")
assert js3 != js and js3 != js2
rendered = js3.render(format='xhtml')
expected = '<script type="text/javascript">alert(\'hello\');</script>'
assert rendered == expected
js4 = widgets.JSSource("alert('hello');", defer=False)
assert js4 == js3
rendered = js4.render(format='xhtml')
assert rendered == expected
js4 = widgets.JSSource("alert('hello');", defer=True)
assert js4 != js3
rendered = js4.render(format='html').lower()
assert ' defer' in rendered \
and expected == rendered.replace(' defer', '', 1)
rendered = js4.render(format='xhtml')
assert ' defer="defer"' in rendered \
and expected == rendered.replace(' defer="defer"', '', 1)
def test_widgetslist_init():
"""Widget lists can be declared in various ways."""
w = widgets.Widget(name="foo")
widgetlist = widgets.WidgetsList(w)
assert len(widgetlist) == 1
assert widgetlist[0] == w
widgetlist = widgets.WidgetsList([w])
assert len(widgetlist) == 1
assert widgetlist[0] == w
w2 = widgets.Widget(name="bar")
widgetlist = widgets.WidgetsList(w, w2)
assert len(widgetlist) == 2
assert widgetlist[0] == w
assert widgetlist[1] == w2
widgetlist = widgets.WidgetsList([w, w2])
assert len(widgetlist) == 2
assert widgetlist[0] == w
assert widgetlist[1] == w2
class W(widgets.WidgetsList):
foo = w
bar = w2
widgetlist = W()
assert len(widgetlist) == 2
assert widgetlist[0] == w
assert widgetlist[1] == w2
def test_widget_url():
"""It might be needed to insert an URL somewhere"""
url = widgets.URLLink(link='http://www.turbogears.org')
rendered = url.render(format='xhtml')
expected = """<a href="http://www.turbogears.org"></a>"""
assert rendered == expected
url = widgets.URLLink(link='http://www.turbogears.org', text='TurboGears Website')
rendered = url.render(format='xhtml')
expected = """<a href="http://www.turbogears.org">TurboGears Website</a>"""
assert rendered == expected
url = widgets.URLLink(link='http://www.turbogears.org', text='TurboGears Website', target="_blank")
rendered = url.render(format='xhtml')
expected = """<a href="http://www.turbogears.org" target="_blank">TurboGears Website</a>"""
assert rendered == expected
def test_submit():
sb = widgets.SubmitButton()
r = sb.render(format='xhtml')
assert 'name' not in r
assert 'id' not in r
r = sb.render('Krakatoa', format='xhtml')
assert 'id' not in r
assert 'name' not in r
sb = widgets.SubmitButton(name='blink')
r = sb.render(format='xhtml')
assert 'name="blink"' in r
assert 'id="blink"' in r
r = sb.render('Krakatoa', format='xhtml')
assert 'name="blink"' in r
assert 'id="blink"' in r
sb = widgets.SubmitButton(name='submit')
r = sb.render(format='xhtml')
assert 'name="submit"' in r
assert 'id="submit"' in r
r = sb.render('Krakatoa', format='xhtml')
assert 'name="submit"' in r
assert 'id="submit"' in r
sb = widgets.SubmitButton(default='Save')
r = sb.render(format='xhtml')
assert 'value="Save"' in r
r = sb.render(value='Discard', format='xhtml')
assert 'value="Discard"' in r
def test_threadsafety():
"""Widget attributes can't be changed after init, for threadsafety"""
w = widgets.TextField("bar")
w.display()
try:
w.name = "foo"
assert False, "should have gotten an exception"
except ValueError:
pass
def test_checkbox():
"""A CheckBox has not a value and is not checked by default"""
w = widgets.CheckBox("foo")
output = w.render(format='xhtml')
assert 'name="foo"' in output
assert 'value' not in output
assert 'checked' not in output
output = w.render(value=True, format='xhtml')
assert 'checked' in output
w = widgets.CheckBox("foo", default=True)
output = w.render(format='xhtml')
assert 'checked' in output
output = w.render(value=False, format='xhtml')
assert 'checked' not in output
#CheckBox should accept alternate validators
value = w.validator.to_python('checked')
assert value == True
w = widgets.CheckBox("foo", validator=validators.NotEmpty())
value = w.validator.to_python('checked')
assert value == 'checked'
def test_field_class():
"""The class of a field corresponds to the name of its Python class"""
w = widgets.TextField("bar")
output = w.render(format='xhtml')
assert 'class="%s"' % w.__class__.__name__.lower() in output
def test_field_id():
"""The id of a field corresponds to the name of the field"""
w = widgets.TextField("bar")
output = w.render(format='xhtml')
assert 'id="bar"' in output
def test_override_name():
"""The name of a widget can be overridden when displayed"""
w = widgets.DataGrid(name="bar", fields=[])
output = w.render([], name='foo', format='xhtml')
assert 'id="foo"' in output
def test_selection_field():
"""A selection field presents a list of options that can be changed
dynamically. One or more options can be selected/checked by default
or dynamically."""
options = [(1, "python"), (2, "java"), (3, "pascal")]
w = widgets.SingleSelectField(options=options)
output = w.render(format='xhtml')
assert 'python' in output
assert 'java' in output
assert 'pascal' in output
output = w.render(value=2, format='xhtml')
assert '<option value="1">' in output
assert '<option selected="selected" value="2">' in output
assert '<option value="3">' in output
w = widgets.SingleSelectField(options=options, default=3)
output = w.render(format='xhtml')
assert '<option value="1">' in output
assert '<option value="2">' in output
assert '<option selected="selected" value="3">' in output
output = w.render(options=options + [(4, "cobol"), (5, "ruby")],
format='xhtml')
assert 'python' in output
assert 'java' in output
assert 'pascal' in output
assert 'cobol' in output
assert 'ruby' in output
output = w.render(options=options
+ [(4, "cobol"), (5, "ruby")], value=5, format='xhtml')
assert '<option value="1">' in output
assert '<option value="2">' in output
assert '<option value="3">' in output
assert '<option value="4">' in output
assert '<option selected="selected" value="5">' in output
w = widgets.MultipleSelectField(options=options, | |
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import glob
import os
from oslo_log import log as logging
from oslo_policy import policy
import pkg_resources
import stevedore
from tempest import config
from patrole_tempest_plugin.rbac_authority import RbacAuthority
from patrole_tempest_plugin import rbac_exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
class PolicyAuthority(RbacAuthority):
"""A class that uses ``oslo.policy`` for validating RBAC."""
os_admin = None
def __init__(self, project_id, user_id, service, extra_target_data=None):
"""Initialization of Policy Authority class.
Validates whether a test role can perform a policy action by querying
``oslo.policy`` with necessary test data.
If a policy file does not exist, checks whether the policy file is
registered as a namespace under "oslo.policy.policies". Nova, for
example, doesn't use a policy file by default; its policies are
implemented in code and registered as "nova" under
"oslo.policy.policies".
If the policy file is not found in either code or in a policy file,
then an exception is raised.
Additionally, if a custom policy file exists along with the default
policy in code implementation, the custom policy is prioritized.
:param uuid project_id: project_id of object performing API call
:param uuid user_id: user_id of object performing API call
:param string service: service of the policy file
:param dict extra_target_data: dictionary containing additional object
data needed by oslo.policy to validate generic checks
Example:
.. code-block:: python
# Below is the default policy implementation in code, defined in
# a service like Nova.
test_policies = [
policy.DocumentedRuleDefault(
'service:test_rule',
base.RULE_ADMIN_OR_OWNER,
"This is a description for a test policy",
[
{
'method': 'POST',
'path': '/path/to/test/resource'
}
]),
policy.DocumentedRuleDefault(
'service:another_test_rule',
base.RULE_ADMIN_OR_OWNER,
"This is a description for another test policy",
[
{
'method': 'GET',
'path': '/path/to/test/resource'
}
]),
]
.. code-block:: yaml
# Below is the custom override of the default policy in a YAML
# policy file. Note that the default rule is "rule:admin_or_owner"
# and the custom rule is "rule:admin_api". The `PolicyAuthority`
# class will use the "rule:admin_api" definition for this policy
# action.
"service:test_rule" : "rule:admin_api"
# Note below that no override is provided for
# "service:another_test_rule", which means that the default policy
# rule is used: "rule:admin_or_owner".
"""
if extra_target_data is None:
extra_target_data = {}
self.service = self.validate_service(service)
# Prioritize dynamically searching for policy files over relying on
# deprecated service-specific policy file locations.
if CONF.patrole.custom_policy_files:
self.discover_policy_files()
self.rules = self.get_rules()
self.project_id = project_id
self.user_id = user_id
self.extra_target_data = extra_target_data
@classmethod
def validate_service(cls, service):
"""Validate whether the service passed to ``__init__`` exists."""
service = service.lower().strip() if service else None
# Cache the list of available services in memory to avoid needlessly
# doing an API call every time.
if not hasattr(cls, 'available_services') and cls.os_admin:
services_client = (cls.os_admin.identity_services_v3_client
if CONF.identity_feature_enabled.api_v3
else cls.os_admin.identity_services_client)
services = services_client.list_services()['services']
cls.available_services = [s['name'] for s in services]
if not service or service not in cls.available_services:
LOG.debug("%s is NOT a valid service.", service)
raise rbac_exceptions.RbacInvalidServiceException(
"%s is NOT a valid service." % service)
return service
@classmethod
def discover_policy_files(cls):
"""Dynamically discover the policy file for each service in
``cls.available_services``. Pick all candidate paths found
out of the potential paths in ``[patrole] custom_policy_files``.
"""
if not hasattr(cls, 'policy_files'):
cls.policy_files = collections.defaultdict(list)
for service in cls.available_services:
for candidate_path in CONF.patrole.custom_policy_files:
path = candidate_path % service
for filename in glob.iglob(path):
if os.path.isfile(filename):
cls.policy_files[service].append(filename)
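# For example (paths are illustrative, not defaults): with
#   [patrole] custom_policy_files = /etc/%s/policy.json,/etc/%s/policies/*.yaml
# and the service "nova", the candidate patterns become
# "/etc/nova/policy.json" and "/etc/nova/policies/*.yaml"; every existing file
# matched by glob is appended to cls.policy_files["nova"].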
def allowed(self, rule_name, roles):
"""Checks if a given rule in a policy is allowed with given role.
:param string rule_name: Policy name to pass to``oslo.policy``.
:param List[string] roles: List of roles to validate for authorization.
:raises RbacParsingException: If ``rule_name`` does not exist in the
cloud (in policy file or among registered in-code policy defaults).
"""
is_admin_context = self._is_admin_context(roles)
is_allowed = self._allowed(
access=self._get_access_token(roles),
apply_rule=rule_name,
is_admin=is_admin_context)
return is_allowed
def _handle_deprecated_rule(self, default):
deprecated_rule = default.deprecated_rule
deprecated_msg = (
'Policy "%(old_name)s":"%(old_check_str)s" was deprecated in '
'%(release)s in favor of "%(name)s":"%(check_str)s". Reason: '
'%(reason)s. Either ensure your deployment is ready for the new '
'default or copy/paste the deprecated policy into your policy '
'file and maintain it manually.' % {
'old_name': deprecated_rule.name,
'old_check_str': deprecated_rule.check_str,
'release': default.deprecated_since,
'name': default.name,
'check_str': default.check_str,
'reason': default.deprecated_reason
}
)
LOG.warn(deprecated_msg)
oslo_policy_version = pkg_resources.parse_version(
pkg_resources.get_distribution("oslo.policy").version)
# NOTE(gmann): oslo policy 3.7.0 onwards does not allow to modify
# the Rule object check attribute.
required_version = pkg_resources.parse_version('3.7.0')
if oslo_policy_version >= required_version:
return policy.OrCheck([default.check, deprecated_rule.check])
else:
default.check = policy.OrCheck(
[policy._parser.parse_rule(cs) for cs in
[default.check_str,
deprecated_rule.check_str]])
return default.check
def get_rules(self):
rules = policy.Rules()
# Check whether policy file exists and attempt to read it.
for path in self.policy_files[self.service]:
try:
with open(path, 'r') as fp:
for k, v in policy.Rules.load(fp.read()).items():
if k not in rules:
rules[k] = v
# If the policy name and rule are the same, no
# ambiguity, so no reason to warn.
elif str(v) != str(rules[k]):
msg = ("The same policy name: %s was found in "
"multiple policies files for service %s. "
"This can lead to policy rule ambiguity. "
"Using rule: %s; Rule from file: %s")
LOG.warning(msg, k, self.service, rules[k], v)
except (ValueError, IOError):
LOG.warning("Failed to read policy file '%s' for service %s.",
path, self.service)
# Check whether policy actions are defined in code. Nova and Keystone,
# for example, define their default policy actions in code.
mgr = stevedore.named.NamedExtensionManager(
'oslo.policy.policies',
names=[self.service],
invoke_on_load=True,
warn_on_missing_entrypoint=False)
if mgr:
policy_generator = {plc.name: plc.obj for plc in mgr}
if self.service in policy_generator:
for rule in policy_generator[self.service]:
if rule.name not in rules:
if CONF.patrole.validate_deprecated_rules:
# NOTE (sergey.vilgelm):
# The `DocumentedRuleDefault` object has no
# `deprecated_rule` attribute in Pike
check = rule.check
if getattr(rule, 'deprecated_rule', False):
check = self._handle_deprecated_rule(rule)
rules[rule.name] = check
elif str(rule.check) != str(rules[rule.name]):
msg = ("The same policy name: %s was found in the "
"policies files and in the code for service "
"%s. This can lead to policy rule ambiguity. "
"Using rule: %s; Rule from code: %s")
LOG.warning(msg, rule.name, self.service,
rules[rule.name], rule.check)
if not rules:
msg = (
'Policy files for {0} service were not found among the '
'registered in-code policies or in any of the possible policy '
'files: {1}.'.format(
self.service,
[loc % self.service
for loc in CONF.patrole.custom_policy_files]))
raise rbac_exceptions.RbacParsingException(msg)
return rules
def _is_admin_context(self, roles):
"""Checks whether a role has admin context.
If context_is_admin is contained in the policy file, then checks
whether the given role is contained in context_is_admin. If it is not
in the policy file, then default to context_is_admin: admin.
"""
if 'context_is_admin' in self.rules:
return self._allowed(
access=self._get_access_token(roles),
apply_rule='context_is_admin')
return CONF.identity.admin_role in roles
def _get_access_token(self, roles):
access_token = {
"token": {
"roles": [{'name': r} for r in roles],
"project_id": self.project_id,
"tenant_id": self.project_id,
"user_id": self.user_id
}
}
return access_token
def _allowed(self, access, apply_rule, is_admin=False):
"""Checks if a given rule in a policy is allowed with given ``access``.
:param dict access: Dictionary from ``_get_access_token``.
:param string apply_rule: Rule to be checked using ``oslo.policy``.
:param bool is_admin: Whether admin context is used.
"""
access_data = copy.copy(access['token'])
access_data['roles'] = [role['name'] for role in access_data['roles']]
access_data['is_admin'] = is_admin
# TODO(felipemonteiro): Dynamically calculate is_admin_project rather
# than hard-coding it to True. is_admin_project cannot be determined
# from the role, but rather from project and domain names. For more
# information, see:
# https://git.openstack.org/cgit/openstack/keystone/tree/keystone/token/providers/common.py?id=37ce5417418f8acbd27f3dacb70c605b0fe48301#n150
access_data['is_admin_project'] = True
class Object(object):
pass
o = Object()
o.rules = self.rules
target = {"project_id": access_data['project_id'],
"tenant_id": access_data['project_id'],
"network:tenant_id": access_data['project_id'],
"user_id": access_data['user_id']}
if self.extra_target_data:
target.update(self.extra_target_data)
result = self._try_rule(apply_rule, target, access_data, o)
return result
def _try_rule(self, apply_rule, target, access_data, o):
if apply_rule not in self.rules:
message = ('Policy action "{0}" not found in policy files: '
'{1} or among registered policy in code defaults for | |
# Repository: hypergravity/starlight_wrapper
# -*- coding: utf-8 -*-
"""
Author
------
<NAME>
Email
-----
<EMAIL>
Created on
----------
- Sat Jul 16 22:30:00 2016
Modifications
-------------
- Sat Jul 16 22:30:00 2016 framework
- Sun Jul 17 23:00:00 2016 StarlightGrid
- Mon Jul 18 09:27:00 2016
Aims
----
- to generate config files of STARLIGHT
"""
from __future__ import print_function, division
import os
import copy
import glob
import numpy as np
from astropy.table import Table, Column
from astropy.io import fits
from .config import EXTRA_COMMENTS, PACKAGE_PATH
# ################
# Starlight Grid #
# ################
class StarlightGrid(object):
""" StarlightGrid class is to represent the Grid file object for STARLIGHT
"""
# specified order of meta data
meta_order = ['num_of_fits_to_run',
'base_dir',
'obs_dir',
'mask_dir',
'out_dir',
'phone_number',
'llow_SN',
'lupp_SN',
'Olsyn_ini',
'Olsyn_fin',
'Odlsyn',
'fscale_chi2',
'fit_fxk',
'IsErrSpecAvailable',
'IsFlagSpecAvailable']
# default values for StarlightGrid instance
meta = dict(num_of_fits_to_run=2,
base_dir='/pool/projects/starlight/STARLIGHTv04/BasesDir/',
obs_dir='/pool/projects/starlight/STARLIGHTv04/',
mask_dir='/pool/projects/starlight/STARLIGHTv04/',
out_dir='/pool/projects/starlight/STARLIGHTv04/',
phone_number='-2007200',
llow_SN=4730.0,
lupp_SN=4780.0,
Olsyn_ini=3400.0,
Olsyn_fin=8900.0,
Odlsyn=1.0,
fit_fxk='FIT',
fscale_chi2=1.0,
IsErrSpecAvailable=1,
IsFlagSpecAvailable=1)
# specified order of arq
arq_order = ['arq_obs',
'arq_config',
'arq_base',
'arq_masks',
'red_law_option',
'v0_start',
'vd_start',
'arq_out']
# default arq data
arq_obs = []
arq_config = []
arq_base = []
arq_masks = []
red_law_option = []
v0_start = []
vd_start = []
arq_out = []
# extra comments
extra = EXTRA_COMMENTS
def __init__(self, **kwargs):
""" initialize instance using arq data """
for key in kwargs.keys():
self.__setattr__(key, kwargs[key])
def sync_nobs(self):
""" should sync the n_obs to meta """
self.meta['num_of_fits_to_run'] = len(self.arq_obs)
def set_meta(self, **kwargs):
""" set meta data """
for key in kwargs.keys():
self.meta[key] = kwargs[key]
def is_arq_validate(self):
""" check if the arq data length validate """
try:
n_obs = len(self.arq_obs)
assert n_obs == len(self.arq_config) \
or np.isscalar(self.arq_config)
assert n_obs == len(self.arq_base) \
or np.isscalar(self.arq_base)
assert n_obs == len(self.arq_masks) \
or np.isscalar(self.arq_masks)
assert n_obs == len(self.red_law_option) \
or np.isscalar(self.red_law_option)
assert n_obs == len(self.v0_start) \
or np.isscalar(self.v0_start)
assert n_obs == len(self.vd_start) \
or np.isscalar(self.vd_start)
assert n_obs == len(self.arq_out)
return True
except AssertionError:
return False
def pprint_meta(self):
""" print meta data """
for key in self.meta_order:
print("%s: %s" % (key, self.meta[key]))
def _print_arq_(self, arq_key):
""" print single arq field data """
assert arq_key in self.arq_order
arq_val = self.__getattribute__(arq_key)
if np.isscalar(arq_val) or len(arq_val) <= 3:
print(arq_val)
else:
print('[%s, %s, ... %s]' % (arq_val[0], arq_val[1], arq_val[-1]))
def pprint_arq(self):
""" print all arq data"""
for key in self.arq_order:
print("%s:" % key),
self._print_arq_(key)
def pprint(self):
""" print a summary of the instance """
print("")
print('StarlightGrid summary:')
print('############### [meta] ###############')
self.pprint_meta()
print('############### [arq] ###############')
self.pprint_arq()
print(self.extra)
print('######################################')
def _meta_to_string(self, val_width=50):
""" convert meta data to string list """
fmt_str = "%%%ds" % (-val_width)
return [(fmt_str + "[%s]\n") % (self.meta[key], key)
for key in self.meta_order]
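# Hedged example (added for illustration, not in the original source): with
# val_width=50 and the default meta above, each header line is the value
# left-justified in a 50-character field followed by the key in brackets,
# e.g. "2" padded with spaces to width 50 and then "[num_of_fits_to_run]".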
def _arq_scalar_to_list(self):
# 1. arq data to list
n_obs = len(self.arq_obs)
for key in self.arq_order:
val = self.__getattribute__(key)
if np.isscalar(val):
self.__setattr__(key, [val for _ in range(n_obs)])
else:
assert len(val) == n_obs
# convert to np.array
self.__setattr__(key, np.array(self.__getattribute__(key)))
def _arq_to_string(self, sep=' '):
""" convert arq data to string list """
# 1. arq data to list
n_obs = len(self.arq_obs)
self._arq_scalar_to_list()
# 2. to string
str_list = []
for i in range(n_obs):
arq_data_i = ["%s" % self.__getattribute__(key)[i]
for key in self.arq_order]
str_list.append(sep.join(arq_data_i) + "\n")
return str_list
def write(self, filepath, meta_val_width=50, sep=' '):
""" write to filepath in the form of STARLIGHT grid file """
self.sync_nobs()
f = open(filepath, "w+")
f.writelines(self._meta_to_string(meta_val_width))
f.write('\n')
f.writelines(self._arq_to_string(sep=sep))
f.write('\n')
f.write(self.extra)
f.close()
def write_split(self, n_split=24,
fmt_str='StarlightGrid%03d.sw', **kwargs):
""" split self into *n_split* pieces and write to StarlightGrid files
"""
self.sync_nobs()
n_obs = self.meta['num_of_fits_to_run']
# assert n_split > self.meta['num_of_fits_to_run']
# determine ind
n_splited = int(n_obs) // int(n_split) + 1
n_list = [n_splited for _ in range(n_split)]
n_list_cs = np.array(np.cumsum(n_list))
n_list_cs[n_list_cs > n_obs] = n_obs
# generate multi instance of StarlightGrid
self._arq_scalar_to_list()
filepaths = []
# split self
for i in range(n_split):
# filepath
filepath = fmt_str % (i+1)
# determine start & stop
if i == 0:
ind0 = 0
else:
ind0 = n_list_cs[i-1]
ind1 = n_list_cs[i]
# possibly empty StarlightGrid
if ind0 == ind1:
print('@Cham: [empty!]: %s' % filepath)
else:
filepaths.append(filepath)
# shallow copy (the arq attributes are re-assigned below)
sg_ = copy.copy(self)
# set arq
for arq_key in self.arq_order:
sg_.__setattr__(arq_key,
sg_.__getattribute__(arq_key)[ind0:ind1])
# write to file
sg_.write(filepath, **kwargs)
return filepaths
def write_split_advanced(self,
n_max=None,
fmt_str='StarlightGrid_RUN%03d_SUBSET%03d.sw',
check_result_existence=False,
create_outdir=False,
**kwargs):
""" Split StarlightGrid file to several pieces
Parameters
----------
n_max: int
the maximum files will be run
fmt_str: string
the format string of the splited StarlightGrid file
kwargs:
Returns
-------
filepaths: list
list of splited StarlightGrid files
"""
# 0. preparation
self.sync_nobs()
self._arq_scalar_to_list()
if check_result_existence:
ind_exist = [
os.path.exists(''.join([self.meta['out_dir'], arq_out_]))
for arq_out_ in self.arq_out]
ind_use = np.logical_not(np.array(ind_exist))
print('@Cham: [%s/%s] files will be used ...'
% (np.sum(ind_use), len(ind_use)))
print('@Cham: setting arqs (used only) ...')
for arq_key in self.arq_order:
self.__setattr__(
arq_key, np.array(self.__getattribute__(arq_key))[ind_use])
# 1. check dir_obs and dir_out
file_obs = np.array(
[self.meta['obs_dir'] + arq_obs_ for arq_obs_ in self.arq_obs])
file_out = np.array(
[self.meta['out_dir'] + arq_out_ for arq_out_ in self.arq_out])
dirname_obs = np.array(
[os.path.dirname(file_obs_) for file_obs_ in file_obs])
basename_obs = np.array(
[os.path.basename(file_obs_) for file_obs_ in file_obs])
dirname_out = np.array(
[os.path.dirname(file_out_) for file_out_ in file_out])
basename_out = np.array(
[os.path.basename(file_out_) for file_out_ in file_out])
dirname_obs_out = np.array(
[dirname_obs_ + dirname_out_
for dirname_obs_, dirname_out_ in zip(dirname_obs, dirname_out)])
dirname_obs_out_u, dirname_obs_out_u_id, dirname_obs_out_u_counts \
= np.unique(dirname_obs_out, return_index=True, return_counts=True)
# n_dirname_obs_out_u = len(dirname_obs_out_u)
n_obs = len(file_obs)
# 2. determine run_id
run_id = np.zeros(n_obs)
run_dict = {}
for i, dirname_obs_out_u_ in enumerate(dirname_obs_out_u):
run_dict[dirname_obs_out_u_] = i
for i in range(len(file_obs)):
run_id[i] = run_dict[dirname_obs_out[i]]
# 3. write SGs
filepaths = []
for i_run, dirname_obs_out_u_ in enumerate(dirname_obs_out_u):
this_run_ind = np.where(dirname_obs_out==dirname_obs_out_u_)
# print('this_run_ind')
# print(this_run_ind)
# this_run_file_obs = file_obs[this_run_ind]
# this_run_file_out = file_out[this_run_ind]
this_run_subs = splitdata_n_max(this_run_ind, n_max)
# print('this_run_subs')
# print(this_run_subs)
this_obs_dir = dirname_obs[this_run_ind[0][0]]
this_out_dir = dirname_out[this_run_ind[0][0]]
# mkdir for plate
if create_outdir:
if not os.path.exists(this_out_dir):
os.mkdir(this_out_dir)
print('@Cham: mkdir [%s]' % this_out_dir)
# write SG for each sub
for i_sub in range(len(this_run_subs)):
this_sub_ind = this_run_subs[i_sub]
filepath = fmt_str % (i_run, i_sub)
filepaths.append(filepath)
# shallow copy (the arq attributes are re-assigned below)
sg_ = copy.copy(self)
# set meta
sg_.set_meta(obs_dir=this_obs_dir+os.path.sep,
out_dir=this_out_dir+os.path.sep)
# set arq
for arq_key in self.arq_order:
sg_.__setattr__(
arq_key, np.array(sg_.__getattribute__(arq_key))[this_sub_ind])
sg_.arq_obs = basename_obs[this_sub_ind]
sg_.arq_out = basename_out[this_sub_ind]
# write to file
sg_.write(filepath, **kwargs)
# verbose
print('@Cham: writing StarlightGrid [%s] (dir=%s, nobs=%d) ...'
% (filepath, dirname_obs_out_u_, len(this_sub_ind)))
print('@Cham: len(self.arq_out) = %s' % len(self.arq_out))
return filepaths
def splitdata_n_max(data, n_max):
""" split N into n_max - sized bins """
data = np.array(data).flatten()
N = len(data)
data_splited = []
for i in range(np.int64(np.ceil(N/n_max))):
start = i*n_max
stop = np.min([(i+1)*n_max, N])
data_splited.append(data[start:stop])
return data_splited
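# Hedged example (illustrative only): splitting 7 indices into bins of at most 3.
# >>> splitdata_n_max(np.arange(7), 3)
# [array([0, 1, 2]), array([3, 4, 5]), array([6])]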
def _test_starlight_grid():
sg = StarlightGrid(arq_obs=['0414.51901.393.cxt',
'0784.52327.478.cxt'],
arq_config='StCv04.C99.config',
arq_base='Base.BC03.N',
arq_masks=['Mask.0414.51901.393.cxt.sc1.CRAP.gm.BN',
'Mask.0784.52327.478.cxt.sc2.CRAP.gm.BN'],
red_law_option='CCM',
v0_start=0.0,
vd_start=150.0,
arq_out=['0414.51901.393.cxt.sc4.C99.im.CCM.BN',
'0784.52327.478.cxt.sc4.C99.im.CCM.BN']
)
sg.pprint_meta()
sg.pprint_arq()
sg.pprint()
for s in sg._meta_to_string():
print(s)
for s in sg._arq_to_string(',:'):
print(s)
sg.write('/pool/projects/starlight/STARLIGHTv04/grid_example1.in_')
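def _example_write_split():
    """ Hedged usage sketch (not part of the original module): break a
    10-observation grid into 4 StarlightGrid files with write_split.
    All file/base names below are illustrative placeholders.
    """
    sg = StarlightGrid(arq_obs=['spec%04d.cxt' % i for i in range(10)],
                       arq_config='StCv04.C99.config',
                       arq_base='Base.BC03.N',
                       arq_masks='Mask.generic.BN',
                       red_law_option='CCM',
                       v0_start=0.0,
                       vd_start=150.0,
                       arq_out=['spec%04d.cxt.out' % i for i in range(10)])
    # writes StarlightGrid001.sw ... StarlightGrid004.sw in the working directory
    return sg.write_split(n_split=4, fmt_str='StarlightGrid%03d.sw')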
# ################
# Starlight Base #
# ################
class StarlightBase(object):
""" StarlightBase class is to represent the Base file object for STARLIGHT
"""
# specified order of meta data
meta_order = ['n_base']
# default values for StarlightBase instance
meta = dict(n_base=45)
# specified order of arq data
arq_order = ['spec_file',
'age',
'z',
'code',
'mstar',
'yav',
'afe']
# default values for bases
spec_file = []
age = []
z = []
code = []
mstar = []
yav = []
afe = []
# extra comments
extra = EXTRA_COMMENTS
def __init__(self, **kwargs):
""" initialize instance using arq data """
for key in kwargs.keys():
self.__setattr__(key, kwargs[key])
# count spec_file
self.meta['n_base'] = len(self.spec_file)
def _sync_nbase(self):
""" synchronize nbase """
self.meta['n_base'] = len(self.spec_file)
def _meta_to_string(self, val_width=50):
""" convert meta data to string list """
fmt_str = "%%%ds" % (-val_width)
return [(fmt_str + "[%s]\n") % (self.meta[key], key)
for key in self.meta_order]
def _arq_to_string(self, sep=' '):
""" convert arq data to string list """
# 1. arq data to list
n_obs = len(self.spec_file)
for key in self.arq_order:
val = self.__getattribute__(key)
if np.isscalar(val):
self.__setattr__(key, [val for _ in range(n_obs)])
else:
assert len(val) == n_obs
# 2. to string
str_list = []
for i in range(n_obs):
arq_data_i = ["%s" % self.__getattribute__(key)[i]
for key in self.arq_order]
str_list.append(sep.join(arq_data_i) + "\n")
return str_list
def write(self, filepath, meta_val_width=50, sep=' '):
self._sync_nbase()
f = open(filepath, "w+")
f.writelines(self._meta_to_string(meta_val_width))
# f.write('\n')
f.writelines(self._arq_to_string(sep))
f.write('\n')
f.write(self.extra)
f.close()
def quick_set(self,
template='Base.BC03.N.dat',
template_dir='Base.BC03',
copy_base_to=None,
**kwargs):
""" a quick set of StarlightBase """
# integrated templates
integrated_templates = ['Base.BC03.N.dat',
'Base.BC03.S.dat',
'Base.SED.39.dat',
'Base.SED.Geneva.sw',
'Base.SED.Padova.sw']
# assert
= cc.half()
# NOTE: test TorchScript-compatible!
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
torch.testing.assert_allclose(
fc2.float(),
f.float(),
atol=5.0e-3 if fp16 else 1.0e-5,
rtol=5.0e-3 if fp16 else 1.0e-5,
)
goc = torch.cat([go.view(B, -1) for go in gos], dim=1).contiguous()
fc2.backward(goc)
torch.testing.assert_allclose(
cc.weights.grad,
grad_weights,
atol=5.0e-3 if fp16 else 1.0e-4,
rtol=5.0e-3 if fp16 else 1.0e-4,
)
cc = split_table_batched_embeddings_ops.DenseTableBatchedEmbeddingBagsCodegen(
[(E, D) for (E, D) in zip(Es, Ds)],
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=split_table_batched_embeddings_ops.PoolingMode.SUM,
use_cpu=use_cpu,
).double()
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu).double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(cc, (indices, offsets, per_sample_weights))
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
fp16=st.booleans(),
weighted=st.booleans(),
exact=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
split_table_batched_embeddings_ops.PoolingMode
),
use_cpu=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_backward_sgd( # noqa C901
self,
T,
D,
B,
log_E,
L,
fp16,
weighted,
exact,
mixed,
use_cache,
cache_algorithm,
long_segments,
pooling_mode,
use_cpu,
):
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
mode = (
"sum"
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
else "mean"
)
# only non-exact supports caching
assume(not exact or not use_cache)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.HOST
] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
table_to_replicate = T // 2
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
xs = [
to_device(torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L), replace=True).astype(
np.int64
)
), use_cpu)
for t in feature_table_map
]
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[b_indices(b, x, use_cpu=use_cpu) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1), use_cpu=use_cpu)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# do SGD update
lr = 0.05
del bs[table_to_replicate]
new_weights = [(b.weight - b.weight.grad * lr) for b in bs]
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)],
optimizer=OptimType.EXACT_SGD,
feature_table_map=feature_table_map,
learning_rate=0.05,
fp16=fp16,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
goc = torch.cat([go.view(B, -1) for go in gos], dim=1).contiguous()
fc2.backward(goc)
if use_cache:
cc.flush()
for t in range(T):
torch.testing.assert_allclose(
cc.split_embedding_weights()[t],
new_weights[t].half() if fp16 and use_cpu else new_weights[t],
atol=(1.0e-2 if long_segments else 5.0e-3) if fp16 else 1.0e-5,
rtol=(1.0e-2 if long_segments else 5.0e-3) if fp16 else 1.0e-5,
)
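# Hedged note (helper not shown in this excerpt): get_table_batched_offsets_from_dense(x, use_cpu)
# presumably flattens the dense (num_tables, B, L) index tensor into a 1-D indices
# vector and builds a matching offsets vector of length num_tables * B + 1, since
# every bag holds exactly L indices, roughly:
#     indices = x.contiguous().view(-1)
#     offsets = torch.tensor([0] + np.cumsum([L] * (num_tables * B)).tolist())
# with both tensors then moved via to_device(..., use_cpu).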
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
fp16=st.booleans(),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
exact=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
pooling_mode=st.sampled_from(
split_table_batched_embeddings_ops.PoolingMode
),
use_cpu=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_backward_adagrad( # noqa C901
self,
T,
D,
B,
log_E,
L,
D_gradcheck,
fp16,
stochastic_rounding,
weighted,
row_wise,
exact,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
):
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 1024)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
mode = (
"sum"
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
else "mean"
)
# stochastic rounding only implemented for rowwise
assume(not stochastic_rounding or row_wise)
# exact only implemented for rowwise non-weighted
assume(not exact or (row_wise and not weighted))
# need unique indices for non-exact tests
assume(exact or int(10 ** log_E) > int(2.1 * B * L))
# only row-wise supports caching
assume(row_wise or not use_cache)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.HOST
] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
if exact:
# autograd with shared embedding only works for exact
table_to_replicate = T // 2
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
xs = [
to_device(torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L), replace=exact).astype(
np.int64
)
), use_cpu)
for t in feature_table_map
]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[b_indices(b, x, use_cpu=use_cpu) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1), use_cpu=use_cpu)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# set up the Adagrad reference update
lr = 0.5
eps = 0.2
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)],
feature_table_map=feature_table_map,
optimizer=OptimType.EXACT_ROWWISE_ADAGRAD
if row_wise
else OptimType.EXACT_ADAGRAD,
learning_rate=lr,
eps=eps,
fp16=fp16,
stochastic_rounding=stochastic_rounding,
pooling_mode=pooling_mode,
)
if exact:
del bs[table_to_replicate]
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
fc2.backward(torch.cat([go.view(B, -1) for go in gos], dim=1))
cc.flush()
split_optimizer_states = [s for (s,) in cc.split_optimizer_states()]
for t in range(T):
ref_optimizer_state = bs[t].weight.grad.float().to_dense().pow(2)
torch.testing.assert_allclose(
split_optimizer_states[t].float(),
ref_optimizer_state.mean(dim=1) if row_wise else ref_optimizer_state,
atol=5.0e-3 if fp16 else 1.0e-4,
rtol=5.0e-3 if fp16 else 1.0e-4,
)
for t in range(T):
# optimizer_state = squares (no row-wise) or sum squares (row-wise)
torch.testing.assert_allclose(
cc.split_embedding_weights()[t].float(),
torch.addcdiv(
bs[t].weight.float(),
value=-lr,
user to share with selected')
else:
for selected_pe_result in pe_results:
for selectedUser in users:
try:
# print "Try barruan"
# code that produces error
post = m.share_pe_results.objects.create(user=User.objects.get(pk=selectedUser),
pe_results=m.PE_results.objects.get(
pk=selected_pe_result), )
except IntegrityError as e:
print {"IntegrityError": e.message}
messages.warning(request,
'The given share relationship is already defined: User: ' + User.objects.get(
pk=selectedUser).username
+ ", Pe result: " + unicode(m.PE_results.objects.get(
uuid=selected_pe_result).uuid) + ", Exception message: " + str(
e.message))
return render(request, "booking/share_PE_results.html",
dict(pe_results=m.PE_results.objects.all(), users=User.objects.all(),
share=m.share_pe_results.objects.all())
)
messages.success(request, 'Thank you for sharing new Pe results. ' +
'User: ' + User.objects.get(pk=selectedUser).username
+ ", Pe result: " + unicode(
m.PE_results.objects.get(uuid=selected_pe_result).uuid))
sorted_pe_results = []
sorted_share = []
pe_results_secure = []  # this list will contain the pe_results that have a sources.list file (file_order=0) and a reservation
pe_results = m.PE_results.objects.all()
share = m.share_pe_results.objects.all()
share_secure = []  # this list will contain the shares whose pe_results have a sources.list file (file_order=0) and a reservation
if len(pe_results) > 0:
for pe_result in pe_results:
try:
pe_result.pe_results_files.filter(file_order=0)[
0]  # if sources.list is missing this raises an IndexError; we only use it to filter out pe_results without a valid sources.list
try:
reservation_instance = Reservation.objects.filter(pe_result=pe_result)[0]  # check that the pe_result has a reservation, to avoid errors in the template
pe_results_secure.append(pe_result)
except (ObjectDoesNotExist, IndexError) as e:
logger.error(
"ObjectDoesNotExist, Seems that some of the pe_result don't have reservation; pe_result:" + str(pe_result) + ', exception:' + secure_exception_to_str(
e))
except (ObjectDoesNotExist, IndexError) as e:
logger.error("Error: indexError, " + secure_exception_to_str(
e) + ". pe_result that has missing file_order=0 is: " + unicode(pe_result.uuid))
if len(pe_results_secure) > 0:
sorted_pe_results = sorted(pe_results_secure,
key=lambda pe_result: pe_result.pe_results_files.filter(file_order=0)[0].created,
reverse=True)
if len(share) > 0:
for share_iterator in share:
try:
share_iterator.pe_results.pe_results_files.filter(file_order=0)[0]  # if sources.list is missing this raises an IndexError; we only use it
# to filter out shares whose pe_results lack a valid sources.list
try:
reservation_instance = Reservation.objects.filter(pe_result=share_iterator.pe_results)[0]  # check that the pe_result has a reservation, to avoid errors in the template
share_secure.append(share_iterator)
except (ObjectDoesNotExist, IndexError) as e:
logger.error(
"ObjectDoesNotExist, Seems that some of the shared pe_result don't have reservation; pe_result:" + str(pe_result) + ', exception:' + secure_exception_to_str(
e))
except (ObjectDoesNotExist, IndexError) as e:
logger.error("Error: indexError, " + secure_exception_to_str(
e) + ". share_pe_results that has missing file_order=0 is:" + unicode(
share_iterator.pe_results.uuid))
if len(share_secure):
sorted_share = sorted(share_secure, key=lambda share_instance:
share_instance.pe_results.pe_results_files.filter(file_order=0)[0].created, reverse=True)
return render(request, "booking/share_PE_results.html",
dict(pe_results=sorted_pe_results, users=User.objects.all(), share=sorted_share)
)
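# Hedged refactoring sketch (not part of the original views): the check repeated
# above -- "has a sources.list entry (file_order=0) and a Reservation" -- could be
# factored into a helper; model and field names are taken from the surrounding code.
def _pe_result_is_displayable(pe_result):
    """Return True when pe_result has a file_order=0 file and a reservation."""
    try:
        pe_result.pe_results_files.filter(file_order=0)[0]
        Reservation.objects.filter(pe_result=pe_result)[0]
        return True
    except (ObjectDoesNotExist, IndexError):
        return False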
@login_required
def view_NeuronModels(request):
neuronModels_my = m_neural.get_lems_models(request.user, MODEL_TYPES.NEURON)
synapseModels_my = m_neural.get_lems_models(request.user, MODEL_TYPES.SYN)
neuronModels_curated = m_neural.get_curated_lems_models(MODEL_TYPES.NEURON)
synapseModels_curated = m_neural.get_curated_lems_models(MODEL_TYPES.SYN)
neuronModels_shared = m_neural.get_shared_lems_models(request.user,
MODEL_TYPES.NEURON)
synapseModels_shared = m_neural.get_shared_lems_models(request.user,
MODEL_TYPES.SYN)
return render(request, "booking/view_neuronModels.html",
dict(neuronModels_my=neuronModels_my,
synapseModels_my=synapseModels_my,
neuronModels_curated=neuronModels_curated,
synapseModels_curated=synapseModels_curated,
neuronModels_shared=neuronModels_shared,
synapseModels_shared=synapseModels_shared))
@login_required
def share_NeuronModels(request):
neuronModels_my = m_neural.get_lems_models(request.user, MODEL_TYPES.NEURON)
synapseModels_my = m_neural.get_lems_models(request.user, MODEL_TYPES.SYN)
neuronModels_curated = m_neural.get_curated_lems_models(MODEL_TYPES.NEURON)
synapseModels_curated = m_neural.get_curated_lems_models(MODEL_TYPES.SYN)
neuronModels_public = m_neural.get_public_lems_models(MODEL_TYPES.NEURON)
synapseModels_public = m_neural.get_public_lems_models(MODEL_TYPES.SYN)
neuronModels_shared = m_neural.get_shared_lems_models(request.user,
MODEL_TYPES.NEURON)
synapseModels_shared = m_neural.get_shared_lems_models(request.user,
MODEL_TYPES.SYN)
return render(request, "booking/share_NeuronModels.html",
dict(neuronmodels_my=neuronModels_my,
synapsemodels_my=synapseModels_my,
neuronModels_curated = neuronModels_curated,
synapseModels_curated = synapseModels_curated,
neuronModels_public = neuronModels_public,
synapseModels_public = synapseModels_public,
neuronModels_shared=neuronModels_shared,
synapseModels_shared=synapseModels_shared,
allModels_shared=m_neural.share_LemsModel.objects.all(),
users=User.objects.all()))
@login_required
def networkModel_make_public(request):
if request.method == 'POST':
payload = json.loads(request.body)
networkModel = payload.get('networkModel', '')
public_check = payload.get('public_check', '')
if public_check != '':
try:
ceNetwork = m_net.CENetwork.objects.get(pk=networkModel)
except ObjectDoesNotExist:
resp_data = {'response': -1,
'msg': 'User has sent a networkModel id which does not exist'}  # the requested CENetwork does not exist
return JsonResponse(resp_data)
if public_check == True:
ceNetwork.public = True
ceNetwork.save()
else:
ceNetwork.public = False
ceNetwork.save()
resp_data = {'response': 1, 'msg': 'ceNetwork public field set correctly'}
return JsonResponse(resp_data)
resp_data = {'response': -1, 'msg': 'Bad request'} # Bad request
return JsonResponse(resp_data)
@login_required
def view_NeuralNetworks(request):
neuralNetworks_orig_my = m_net.get_owned_nets(request.user)
neuralNetworks_orig_curated = m_net.get_curated_nets()
neuralNetworks_shared = m_net.share_CENetwork.objects.all()
# for model in neuralNetworks_shared:
# if model.user == request.user:
# if model.ceNetwork not in neuralNetworks_orig:
# neuralNetworks_orig.add(model.ceNetwork)
return render(request, "booking/view_neuralNetworks.html",
dict(neuralNetworks_my=neuralNetworks_orig_my,
neuralNetworks_curated=neuralNetworks_orig_curated,
share=neuralNetworks_shared,
users=User.objects.all()))
@login_required
def share_NetworkModels(request):
neuralNetworks_orig_my = m_net.get_owned_nets(request.user)
neuralNetworks_orig_curated = m_net.get_curated_nets()
neuralNetworks_orig_public = m_net.get_public_nets(request.user)
return render(request, "booking/share_NetworkModels.html",
dict(neuralNetworks_my=neuralNetworks_orig_my,
neuralNetworks_curated = neuralNetworks_orig_curated,
neuralNetworks_public = neuralNetworks_orig_public,
users=User.objects.all(),
models_shared=m_net.share_CENetwork.objects.all())
)
@login_required
def view_RTWs(request):
rtws_orig = m_rtw.RTW_CONF.objects.filter(Q(owner=request.user)).order_by('name')
return render(request, "booking/view_RTWs.html",
dict(rtws=rtws_orig, users=User.objects.all())
)
@login_required
def reserve(request):
if request.method == 'POST':
reservationDelete = request.POST.getlist("reservationDelete", "")
behavExpAdd = request.POST.get("behavExpAdd", "")
wormConfAdd = request.POST.get("wormConfAdd", "")
description = request.POST.get("description", "")
if 'deleteReservation' in request.POST:
if (not reservationDelete):
messages.error(request, 'No reservation selected to delete')
else:
# Delete reservation
# If the experiment has more than one creator, only this creator is removed
# If the experiment has only one creator left, do the following:
# The experiment must be in WAITING, ABORTED or ERROR state
# If experiment has a PE_result or RB_results, it cannot be deleted
for reservation in reservationDelete:
reservation_object_instance = m.Reservation.objects.get(uuid=reservation)
if (len(reservation_object_instance.creator.all()))>1:
reservation_object_instance.creator.remove(request.user)
elif (reservation_object_instance.status == "WAITING" or reservation_object_instance.status == "ABORTED" or reservation_object_instance.status == "ERROR") and reservation_object_instance.pe_result == None:
try:
m.Reservation.objects.filter(uuid=reservation).delete()
except Exception as e:
messages.warning(request, 'Cannot delete the Reservation' + secure_exception_to_str(e))
logger.error(
'Strange error while deleting Reservation:' + secure_exception_to_str(
e))
return render(request, "booking/reservation.html",
dict(reservations=m.Reservation.objects.all(),
behaviourExperiments=behaviourExperimentType_model.objects.all()))
messages.success(request, 'Reservation deleted correctly')
else:
messages.warning(request, 'Cannot delete the Reservation: its status is not WAITING, ABORTED or ERROR, and/or it has locomotion or readback results')
elif 'addReservation' in request.POST:
if (not behavExpAdd or not wormConfAdd):
messages.error(request, 'No behavioral experiment or worm conf selected to add')
else:
try:
behaviouralExperiment = behaviourExperimentType_model.objects.get(pk=behavExpAdd)
if (m.Reservation.objects.filter(experiment=behaviouralExperiment, worm_conf=wormConfAdd)):
previousReservation = m.Reservation.objects.get(experiment=behaviouralExperiment,
worm_conf=wormConfAdd)
if previousReservation.creator.filter(id=request.user.pk):
warning_message = ('The given experiment is already in the reservation list by the same creator => behaviouralExperiment => %s; worm_conf => %s' % (previousReservation.experiment, previousReservation.worm_conf))
messages.warning(request, warning_message)
else:
previousReservation.creator.add(request.user)
previousReservation.save()
success_message = 'The given experiment was reserved previously by another user, you have been ' \
'added as creator to it'
messages.success(request, success_message)
else:
rb_result = m.RB_results()
post = rb_result.save()
if description == "":
description = 'BehavExp description:' + str(behaviouralExperiment.description) + '; network_name:' + str(m_rtw.RTW_CONF.objects.filter(pk=int(wormConfAdd))[0].network.name) + '; Recording_profile_name: ' + str (m_rtw.RTW_CONF.objects.filter(pk=int(wormConfAdd))[0].name)
reservation = m.Reservation(experiment=behaviouralExperiment, worm_conf=wormConfAdd,
rb_result=rb_result, description=description)
if m.Reservation.objects.all().aggregate(Max('sim_id')).get('sim_id__max') is None:
reservation.sim_id = 1
else:
reservation.sim_id = int(m.Reservation.objects.all().aggregate(Max('sim_id')).get('sim_id__max'))+1
post = reservation.save()
reservation.creator.add(request.user)
messages.success(request, 'New reservation added=>' + str(reservation.uuid))
except IntegrityError as e:
logger.error("IntegrityError" + secure_exception_to_str(e))
messages.warning(request, 'The given reservation is already defined' + secure_exception_to_str(e))
except ObjectDoesNotExist as e:
logger.error("ObjectDoesNotExist, Seems that some of the components of the reservation was deleted on the meantime, please try again" + secure_exception_to_str(e))
messages.warning(request, 'Seems that some of the components of the reservation was deleted on the meantime, please try again')
# Query restful to get the list of worm conf
# Code disabled for uploading to GitHub
# CA_BUNDLE = os.path.join(BASE_DIR,"https-certificates/1.crt")
# csrftoken = request.META.get('CSRF_COOKIE', None)
# payload = {'user': request.user}
# # print 'payload=>', payload
# url_referer = 'https://' + request.get_host() + reverse('reserve')
# headers = {'Content-Type': 'application/json', "X-CSRFToken": csrftoken, 'Referer': url_referer}
# #################################################################################
# #For NUIG integrations - change the url name in reverse('worm_conf_per_user') below
# #################################################################################
# url = 'https://' + request.get_host() + reverse('worm_conf_per_user')
# # print 'url', url
# try:
# cookies = dict(request.COOKIES)
# #################################################################################
# # For NUIG integrations - Final deployment, use a self-signed / paid certificate and enable the line
# # with verify=CA_BUNDLE. Right now is commented to work with runserver_plus (FOR DEVELOPMENT),
# # since we (GE and RA) were not able to get it working with any self-signed certificate
# #################################################################################
# # response = requests.post(url, data=payload, verify=CA_BUNDLE, headers=headers, cookies=cookies)
# response = requests.post(url, data=payload, verify=False, headers=headers, cookies=cookies)
#
# worm_confs_user = response.json
#
# except Exception as inst:
# print 'except request to NUIG', sys.exc_info()[0]
# print 'inst args: ', inst.args
# worm_confs_user = []
worm_confs_user = []
return render(request, "booking/reservation.html", dict(reservations=m.Reservation.objects.all(),
behaviourExperiments=behaviourExperimentType_model.objects.all(),
worm_confs_user=worm_confs_user))
@login_required
def jointExperimentReview(request, pe_result_uuid):
PE_result = m.PE_results.objects.get(uuid=pe_result_uuid)
PE_result_files = PE_result.pe_results_files.all()
PE_results_path = ""
for result_file in PE_result_files:
if result_file.file_order == 0:
PE_results_path = result_file.results_file
PE_results_path = settings.MEDIA_URL + str(PE_results_path)
last_slash = PE_results_path.rfind("/")
PE_results_path = PE_results_path[:last_slash + 1]
if PE_results_path == "":
PE_results_path = "-1"
return render(request, "booking/jointExperimentReview.html", {
'PE_results_path': PE_results_path, 'behavExp': PE_result.reservation_for_pe_result.experiment.uuid, 'neurons': Neuron.objects.order_by('name'), 'sim_id' : PE_result.reservation_for_pe_result.sim_id})
@login_required
def experimentReview(request, pe_result_uuid):
PE_result = m.PE_results.objects.get(uuid=pe_result_uuid)
PE_result_files = PE_result.pe_results_files.all()
PE_results_path = ""
for result_file in PE_result_files:
if result_file.file_order == 0:
PE_results_path = result_file.results_file
PE_results_path = settings.MEDIA_URL + str(PE_results_path)
last_slash = PE_results_path.rfind("/")
PE_results_path = PE_results_path[:last_slash + 1]
if PE_results_path == "":
PE_results_path = "-1"
return render(request, "booking/experimentReview.html", {
:ivar gtk.Entry txtProgramProb: the gtk.Entry() to enter and display the
average program probability of observing
a failure.
:ivar gtk.Entry txtTTFF: the gtk.Entry() to enter and display the length of
the first test phase.
"""
def __init__(self, controller, listbook):
"""
Method to initialize the Work Book view for the Reliability Growth Test
Planning.
:param controller: the :py:class:`rtk.testing.growth.Growth.Growth`
data controller.
:param listbook: the :py:class:`rtk.testing.ListBook.ListView`
associated with the WorkBook this is embedded in.
"""
gtk.HPaned.__init__(self)
# Initialize private dictionary attributes.
# Initialize private list attributes.
self._lst_handler_id = []
# Initialize private scalar attributes.
self.dtcGrowth = controller
self._listview = listbook
self._testing_model = None
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.btnFindMTBFI = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnFindMTBFGP = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnFindt1 = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnFindTTT = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnFindAlpha = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnFindFEF = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnFindMS = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnFindProb = Widgets.make_button(height=25, width=25,
image='calculate')
self.btnCalculate = Widgets.make_button(width=35, image='calculate')
self.btnSave = Widgets.make_button(width=35, image='save')
self.cmbPlanModel = Widgets.make_combo()
self.cmbAssessModel = Widgets.make_combo()
self.figFigure1 = Figure()
self.pltPlot1 = FigureCanvas(self.figFigure1)
self.axAxis1 = self.figFigure1.add_subplot(111)
self.optMTBF = gtk.RadioButton(label=_(u"Display results as MTBF"))
self.optMTBF.set_name('mtbf')
self.optMTBF.set_active(True)
self.optFailureIntensity = gtk.RadioButton(group=self.optMTBF,
label=_(u"Display results "
u"as failure "
u"intensity"))
self.optFailureIntensity.set_name('failureintensity')
self.optLinear = gtk.RadioButton(label=_(u"Use Linear Scales"))
self.optLinear.set_name('linear')
self.optLinear.set_active(True)
self.optLogarithmic = gtk.RadioButton(group=self.optLinear,
label=_(u"Use Logarithmic "
u"Scales"))
self.optLogarithmic.set_name('log')
self.spnNumPhases = gtk.SpinButton()
self.spnNumPhases.set_digits(0)
self.spnNumPhases.set_increments(1, 5)
self.spnNumPhases.set_range(0, 100)
self.txtTechReq = Widgets.make_entry(width=100)
self.txtMTBFG = Widgets.make_entry(width=100)
self.txtMTBFGP = Widgets.make_entry(width=100)
self.txtMTBFI = Widgets.make_entry(width=100)
self.txtTTT = Widgets.make_entry(width=100)
self.txtAverageGR = Widgets.make_entry(width=100)
self.txtProgramMS = Widgets.make_entry(width=100)
self.txtAverageFEF = Widgets.make_entry(width=100)
self.txtProgramProb = Widgets.make_entry(width=100)
self.txtt1 = Widgets.make_entry(width=100)
# Set gtk.Widget() tooltip text.
self.btnFindMTBFI.set_tooltip_text(_(u"Launches the initial MTBF "
u"calculator."))
self.btnFindMTBFGP.set_tooltip_text(_(u"Calculates the program growth "
u"potential MTBF."))
self.btnFindt1.set_tooltip_text(_(u"Calculates the minimum required "
u"length of the first test phase."))
self.btnFindTTT.set_tooltip_text(_(u"Calculates the minimum required "
u"time for the test program to "
u"achieve the goal MTBF."))
self.btnFindAlpha.set_tooltip_text(_(u"Calculates the minimum "
u"required growth rate to "
u"sustain over the test program "
u"to achieve the goal MTBF."))
self.btnFindFEF.set_tooltip_text(_(u"Calculates the average required "
u"fix effectiveness factor (FEF) "
u"to sustain over the test program "
u"to achieve the goal MTBF."))
self.btnFindMS.set_tooltip_text(_(u"Calculates the average required "
u"management strategy (MS) to "
u"sustain over the test program to "
u"achieve the goal MTBF."))
self.btnFindProb.set_tooltip_text(_(u"Calculates the probability of "
u"observing a failure during the "
u"first phase of the test "
u"program."))
self.btnCalculate.set_tooltip_text(_(u"Calculate the missing test "
u"planning inputs."))
self.btnSave.set_tooltip_text(_(u"Saves changes to the test planning "
u"inputs."))
self.cmbPlanModel.set_tooltip_text(_(u"Selects and displays the "
u"reliability growth planning "
u"model to be used."))
self.cmbAssessModel.set_tooltip_text(_(u"Selects and displays the "
u"reliability growth "
u"assessment model to be "
u"used."))
self.optMTBF.set_tooltip_text(_(u"If selected, test results will be "
u"displayed as MTBF. This is the "
u"default."))
self.optFailureIntensity.set_tooltip_text(_(u"If selected, test "
u"results will be "
u"displayed as failure "
u"intensity (failure "
u"rate)."))
self.optLinear.set_tooltip_text(_(u"Select this option to use linear "
u"scales on the reliability growth "
u"plot."))
self.optLogarithmic.set_tooltip_text(_(u"Select this option to use "
u"logarithmic scales on the "
u"reliability growth plot."))
self.pltPlot1.set_tooltip_text(_(u"Displays the selected test plan "
u"and observed results."))
self.spnNumPhases.set_tooltip_text(_(u"Sets the number of test phases "
u"for the selected test."))
self.spnNumPhases.set_tooltip_text(_(u"The number of reliability "
u"growth phases."))
self.txtMTBFI.set_tooltip_text(_(u"The average MTBF of the first test "
u"phase for the seleceted "
u"reliability growth plan."))
self.txtMTBFG.set_tooltip_text(_(u"The goal MTBF for the selected "
u"reliability growth plan."))
self.txtMTBFGP.set_tooltip_text(_(u"The potential MTBF at maturity "
u"for the assembly associated with "
u"the selected reliability growth "
u"plan."))
self.txtTechReq.set_tooltip_text(_(u"The MTBF require by the "
u"developmental program associated "
u"with the selected reliability "
u"growth plan."))
self.txtt1.set_tooltip_text(_(u"The estimated time to the first fix "
u"during the reliability growth "
u"program."))
self.txtTTT.set_tooltip_text(_(u"The total test time."))
self.txtAverageGR.set_tooltip_text(_(u"The average growth rate over "
u"the entire reliability growth "
u"program."))
self.txtAverageFEF.set_tooltip_text(_(u"The average fix effectiveness "
u"factor (FEF) over the entire "
u"reliability growth program."))
self.txtProgramMS.set_tooltip_text(_(u"The percentage of failures "
u"that will be addressed by "
u"corrective action over the "
u"entire reliability growth "
u"program."))
self.txtProgramProb.set_tooltip_text(_(u"The probability of seeing a "
u"failure during the first "
u"phase of the reliability "
u"growth program."))
# Connect gtk.Widget() signals to callback methods.
self._lst_handler_id.append(
self.btnFindMTBFI.connect('button-release-event',
self._on_button_clicked, 0))
self._lst_handler_id.append(
self.btnFindMTBFGP.connect('button-release-event',
self._on_button_clicked, 1))
self._lst_handler_id.append(
self.btnFindt1.connect('button-release-event',
self._on_button_clicked, 2))
self._lst_handler_id.append(
self.btnFindTTT.connect('button-release-event',
self._on_button_clicked, 3))
self._lst_handler_id.append(
self.btnFindAlpha.connect('button-release-event',
self._on_button_clicked, 4))
self._lst_handler_id.append(
self.btnFindFEF.connect('button-release-event',
self._on_button_clicked, 5))
self._lst_handler_id.append(
self.btnFindMS.connect('button-release-event',
self._on_button_clicked, 6))
self._lst_handler_id.append(
self.btnFindProb.connect('button-release-event',
self._on_button_clicked, 7))
self._lst_handler_id.append(
self.btnCalculate.connect('button-release-event',
self._on_button_clicked, 8))
self._lst_handler_id.append(
self.btnSave.connect('button-release-event',
self._on_button_clicked, 9))
self._lst_handler_id.append(
self.cmbPlanModel.connect('changed', self._on_combo_changed, 10))
self._lst_handler_id.append(
self.cmbAssessModel.connect('changed', self._on_combo_changed, 11))
self.optMTBF.connect('toggled', self._load_plot)
self.optFailureIntensity.connect('toggled', self._load_plot)
self.optLinear.connect('toggled', self._load_plot)
self.optLogarithmic.connect('toggled', self._load_plot)
self._lst_handler_id.append(
self.spnNumPhases.connect('focus-out-event',
self._on_focus_out, 12))
self._lst_handler_id.append(
self.spnNumPhases.connect('value-changed',
self._on_spin_value_changed, 13))
self._lst_handler_id.append(
self.txtMTBFI.connect('focus-out-event', self._on_focus_out, 14))
self._lst_handler_id.append(
self.txtMTBFG.connect('focus-out-event', self._on_focus_out, 15))
self._lst_handler_id.append(
self.txtTechReq.connect('focus-out-event', self._on_focus_out, 16))
self._lst_handler_id.append(
self.txtMTBFGP.connect('focus-out-event', self._on_focus_out, 17))
self._lst_handler_id.append(
self.txtt1.connect('focus-out-event', self._on_focus_out, 18))
self._lst_handler_id.append(
self.txtTTT.connect('focus-out-event', self._on_focus_out, 19))
self._lst_handler_id.append(
self.txtAverageGR.connect('focus-out-event',
self._on_focus_out, 20))
self._lst_handler_id.append(
self.txtAverageFEF.connect('focus-out-event',
self._on_focus_out, 21))
self._lst_handler_id.append(
self.txtProgramMS.connect('focus-out-event',
self._on_focus_out, 22))
self._lst_handler_id.append(
self.txtProgramProb.connect('focus-out-event',
self._on_focus_out, 23))
self.pltPlot1.mpl_connect('button_press_event', _expand_plot)
self.show_all()
def create_page(self):
"""
Method to create the page for displaying the Reliability Growth Test
Phase details for the selected Growth Test.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Build-up the containers for the tab. #
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
_hbox = gtk.HBox()
_bbox = gtk.VButtonBox()
_bbox.set_layout(gtk.BUTTONBOX_START)
_hbox.pack_start(_bbox, False, True)
_bbox.pack_start(self.btnCalculate, False, False)
_bbox.pack_start(self.btnSave, False, False)
_fixed = gtk.Fixed()
_frame = Widgets.make_frame(_(u"Program Planning Inputs"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_fixed)
_hbox.pack_end(_frame)
self.pack1(_hbox, False, True)
# Load the gtk.ComboBox()
_results = [["AMSAA-Crow"], ["SPLAN"], ["SSPLAN"]]
Widgets.load_combo(self.cmbPlanModel, _results)
_results = [[_(u"AMSAA/Crow Continuous")], [_(u"AMSAA/Crow Discrete")],
["SSTRACK"], [_(u"AMSAA/Crow Projection")],
[_(u"Crow Extended")]]
Widgets.load_combo(self.cmbAssessModel, _results)
# Create the labels.
_labels = [_(u"RG Planning Model:"), _(u"RG Assessment Model:"),
_(u"Phase 1 Average MTBF (MTBF<sub>I</sub>):"),
_(u"Program Required MTBF (MTBF<sub>TR</sub>):"),
_(u"Program Goal MTBF (MTBF<sub>G</sub>):"),
_(u"Potential Mature MTBF (MTBF<sub>GP</sub>):"),
_(u"Number of Phases:"),
_(u"Time to First Fix (t<sub>1</sub>):"),
_(u"Total Test Time:"), _(u"Average Program Growth Rate:"),
_(u"Average Program FEF:"), _(u"Average Program MS:"),
_(u"Average Program Probability:")]
(_x_pos, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, 30)
_x_pos += 50
# Position the gtk.Widget() on the page.
_fixed.put(self.cmbPlanModel, _x_pos, _y_pos[0])
_fixed.put(self.cmbAssessModel, _x_pos, _y_pos[1])
_fixed.put(self.txtMTBFI, _x_pos, _y_pos[2])
_fixed.put(self.btnFindMTBFI, _x_pos + 125, _y_pos[2])
_fixed.put(self.txtTechReq, _x_pos, _y_pos[3])
_fixed.put(self.txtMTBFG, _x_pos, _y_pos[4])
_fixed.put(self.txtMTBFGP, _x_pos, _y_pos[5])
_fixed.put(self.btnFindMTBFGP, _x_pos + 125, _y_pos[5])
_fixed.put(self.spnNumPhases, _x_pos, _y_pos[6])
_fixed.put(self.txtt1, _x_pos, _y_pos[7])
_fixed.put(self.btnFindt1, _x_pos + 125, _y_pos[7])
_fixed.put(self.txtTTT, _x_pos, _y_pos[8])
_fixed.put(self.btnFindTTT, _x_pos + 125, _y_pos[8])
_fixed.put(self.txtAverageGR, _x_pos, _y_pos[9])
_fixed.put(self.btnFindAlpha, _x_pos + 125, _y_pos[9])
_fixed.put(self.txtAverageFEF, _x_pos, _y_pos[10])
_fixed.put(self.btnFindFEF, _x_pos + 125, _y_pos[10])
_fixed.put(self.txtProgramMS, _x_pos, _y_pos[11])
_fixed.put(self.btnFindMS, _x_pos + 125, _y_pos[11])
_fixed.put(self.txtProgramProb, _x_pos, _y_pos[12])
_fixed.put(self.btnFindProb, _x_pos + 125, _y_pos[12])
# Create the Reliability Growth Plot (right half of page).
_vbox = gtk.VBox()
_fixed = gtk.Fixed()
_vbox.pack_start(_fixed, False, True)
_frame = Widgets.make_frame(label=_(u"Program Planning Curves"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(self.pltPlot1)
_frame.show_all()
_vbox.pack_start(_frame, True, True)
self.pack2(_vbox, True, False)
_y_pos = 5
_fixed.put(self.optMTBF, 5, _y_pos)
_fixed.put(self.optFailureIntensity, 205, _y_pos)
_y_pos += 30
_fixed.put(self.optLinear, 5, _y_pos)
_fixed.put(self.optLogarithmic, 205, _y_pos)
return False
def load_page(self, model):
"""
Method to load the Reliability Growth Test Plan gtk.Notebook() page.
:param model: the :py:class:`rtk.testing.Testing.Model` to load.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
self._testing_model = model
fmt = '{0:0.' + str(Configuration.PLACES) + 'g}'
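# Hedged example: assuming Configuration.PLACES == 6, fmt becomes
# '{0:0.6g}' and fmt.format(123.456789) renders as '123.457'.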
# Load the individual widgets.
self.cmbPlanModel.set_active(model.rg_plan_model)
self.cmbAssessModel.set_active(model.rg_assess_model)
self.txtMTBFI.set_text(str(fmt.format(model.lst_i_mtbfa[0])))
self.txtMTBFG.set_text(str(fmt.format(model.mtbfg)))
self.txtTechReq.set_text(str(fmt.format(model.tr)))
self.txtMTBFGP.set_text(str(fmt.format(model.mtbfgp)))
self.spnNumPhases.set_value(model.n_phases)
self.txtTTT.set_text(str(fmt.format(model.ttt)))
self.txtAverageGR.set_text(str(fmt.format(model.avg_growth)))
self.txtAverageFEF.set_text(str(fmt.format(model.avg_fef)))
self.txtProgramMS.set_text(str(fmt.format(model.avg_ms)))
self.txtProgramProb.set_text(str(fmt.format(model.probability)))
self.txtt1.set_text(str(fmt.format(model.lst_p_test_time[0])))
# Load the Reliability Growth Plan plot.
self._load_plot()
# (Re-)load the List Book.
self._listview.load(self._testing_model)
return False
def _load_plot(self, __button=None, ideal=None, plan=None): # pylint: disable=R0914
"""
Method to load the Reliability Growth planning plot.
:keyword gtk.RadioButton __button: the gtk.RadioButton() that called
this method when it is called by a
gtk.RadioButton().
:keyword list ideal: the idealized growth curve values; calculated when
not provided.
:keyword list plan: the planned phase values; calculated when not
provided.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# WARNING: Refactor Planning._load_plot; current McCabe Complexity metric=12.
_log = False
if(self._testing_model.ttt <= 0.0 and
len(self._testing_model.dic_test_data.values()) > 0):
self._testing_model.ttt = [x[3] for x in
self._testing_model.dic_test_data.values()][-1]
_times = [_t for _t in range(int(self._testing_model.ttt))]
# If the ideal curve hasn't been calculated, then calculate it's
# values.
if ideal is None or ideal == []:
ideal = self._testing_model.calculate_idealized_growth_curve()
# If the planned curves haven't been calculated, then calculate their
# values.
if plan is None or plan == []:
plan = self._testing_model.create_planned_values()
_xlabel = _(u"Cumulative Test Time")
if self.optMTBF.get_active():
_targets =
"""
Flex bond message channels
"""
#
# Copyright 2020 The FLEX Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
from typing import Any, Union, List, Dict, Callable
from . import commu
class VariableChannel():
"""
A VariableChannel can be used to communicate between local and
remote[single/group] machines.
It can be treated as a dual-direction (bidirectional) channel.
"""
def __init__(self,
name: str,
remote_id: str,
auto_offset: bool = True):
"""
Args:
name: str, Base name for variable
remote_id: str, Remote ID which is configured in the route table.
auto_offset: bool, Whether to use an automatically incremented offset
in the full variable name.
"""
self.check(remote_id)
self.name: str = name
self.remote_id: str = remote_id
self.local_id: str = commu.get_local_id()
self.job_id: str = commu.get_job_id()
self.auto_offset: bool = auto_offset
self.send_offset: int = 0
self.recv_offset: int = 0
@staticmethod
def check(remote_id):
"""
Check init params
"""
if commu.get_local_id() == remote_id:
print(
f"Warning, remote_id={remote_id} should be diferent with"+
f" local_id={commu.get_local_id()}. ")
if remote_id not in commu.get_federation_members():
raise ValueError(
f"Remote_id={remote_id} is not in federation,"
" check your config!")
def __send_name(self, tag: str = '*') -> str:
"""
Define the full name of Variable to be send.
"""
return f"{self.job_id}.{self.name}.{self.local_id}->{self.remote_id}" \
f".offset={self.send_offset}.tag={tag}"
def __recv_name(self, tag: str = '*') -> str:
"""
Define the full name of Variable to be receive.
"""
return f"{self.job_id}.{self.name}.{self.remote_id}->{self.local_id}" \
f".offset={self.recv_offset}.tag={tag}"
def send(self, var: Any, tag: str = '*') -> None:
"""
Send local variable.
Args:
var: Any, Local variable to be sent.
tag: str, Optional, if you want to customize your variable tag.
Return:
None.
Example:
>>> var_chan = VariableChannel(name="Exchange_secret_key",
remote_id=remote_id)
>>> var_chan.send(MyVar)
"""
commu.send(value=var,
key=self.__send_name(tag),
dst=self.remote_id)
if self.auto_offset:
self.send_offset += 1
def recv(self, tag: str = '*') -> Any:
"""
Get remote variable.
Args:
tag: str, Optional, if you want to customize your variable tag.
Return:
Any, Receive variable from remote endpoint.
Example:
>>> var_chan = VariableChannel(name="Exchange_secret_key",
remote_id=remote_id)
>>> RemoteVar = var_chan.recv()
"""
result = commu.recv(
key=self.__recv_name(tag),
src=self.remote_id)
if self.auto_offset:
self.recv_offset += 1
return result
def swap(self, var: Any, tag: str = '*') -> Any:
"""
Swap local and remote variable.
Args:
var: Any, Local variable to be sent.
tag: str, Optional, if you want to customize your variable tag.
Return:
Any, Receive variable from remote endpoint.
Example:
>>> var_chan = VariableChannel(name="Exchange_secret_key",
remote_id=remote_id)
>>> RemoteVar = var_chan.swap(MyVar)
"""
self.send(var, tag)
return self.recv(tag)
def __enter__(self) -> "__class__":
"""
A context manager, intended to make sure all remote members sync
before and after (currently a no-op that just returns the channel).
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
"""
Exit the context (no additional synchronization is performed).
"""
return
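# Hedged usage sketch ("host_b" is an assumed remote ID from the route table):
# the context-manager form simply returns the channel itself, so send/recv/swap
# calls can be grouped in a with-block.
#
# >>> with VariableChannel(name="Exchange_secret_key", remote_id="host_b") as chan:
# ...     their_key = chan.swap(my_public_key)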
def make_variable_channel(name: str, endpoint1: str, endpoint2: str, auto_offset: bool = True) -> VariableChannel:
"""
Args:
name: str, Base name for variable
endpoint1: str, ID for one endpoint in communication pair.
endpoint2: str, ID for the other endpoint in communication pair.
auto_offset: bool, Whether to use an automatically incremented offset in the full variable name.
"""
def check(endpoint1, endpoint2):
"""
Check init params
"""
for endpoint in (endpoint1, endpoint2):
if endpoint not in commu.get_federation_members():
raise ValueError(
f"Endpoint={endpoint} is not in federation, check your config!")
if endpoint1 == endpoint2:
raise ValueError("Endpoint1 and endpoint2 should not be the same.")
# if endpoint1 != commu.get_local_id() and endpoint2 != commu.get_local_id():
# raise ValueError(
# f"local_id={commu.get_local_id()} is neither endpoint1 or endpoint2, check your config!")
check(endpoint1, endpoint2)
if commu.get_local_id() == endpoint1:
return VariableChannel(name, remote_id=endpoint2, auto_offset=auto_offset)
elif commu.get_local_id() == endpoint2:
return VariableChannel(name, remote_id=endpoint1, auto_offset=auto_offset)
else:
# This is used for symmetry.
return None
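# Hedged usage sketch ("guest"/"host" are assumed IDs): every party issues the
# same call; the two endpoints each get a VariableChannel pointing at the other,
# while any third party gets None and simply skips the exchange.
#
# >>> chan = make_variable_channel("Exchange_secret_key", "guest", "host")
# >>> if chan is not None:
# ...     remote_var = chan.swap(local_var)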
class RemoteVariableBroadcastChannel():
"""
BroadcastChannel is used to build a broadcast-like way of communication.
"""
def __init__(self,
name: str,
root: str,
remote_group: List[str],
auto_offset: bool = True):
"""
Args:
name: str, Base name for variable
root: str, ID for the root process,
which is configed in route table
remote_group: List[str], List of remote ID
which is configed in route table.
auto_offset: bool, Whether to use a automatic increment offset
in full variable name.
"""
self.root = root
self.group = remote_group
self.root_channel = VariableChannel(
"Broadcast_"+name, self.root, auto_offset)
def size(self):
"""remote group size
"""
return len(self.group)
def scatter(self, tag: str = '*') -> Any:
"""
        Receive this member's share of the list scattered by root
        [root sends one item from the original list to each remote group member].
        Args:
            tag: str, Optional, if you want to customize your variable tag.
        Return:
            Any, The item received from root [for remote group members].
            None for root.
Example:
In root code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key",
root='local_id',
remote_group=['remote_id1','remote_id2'])
>>> var_chan.scatter(['A', 'B'])
In remote group code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key",
root='local_id',
remote_group=['remote_id1','remote_id2'])
>>> MyVar = var_chan.scatter() # If 'remote_id1' then MyVar == 'A'
"""
return self.root_channel.recv(tag)
def broadcast(self, tag: str = '*') -> Any:
"""
Send root's local variable to all members of remote group.
Args:
            tag: str, Optional, if you want to customize your variable tag.
        Return:
            Any, The variable received from root [for remote group members].
None for root.
Example:
In root code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key",
root='local_id',
remote_group=['remote_id1','remote_id2'])
>>> var_chan.broadcast(MyVar) # Send var to remote_group members.
In remote group code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key",
root='local_id',
remote_group=['remote_id1','remote_id2'])
>>> MyVar = var_chan.broadcast() # Receive var from root.
"""
return self.root_channel.recv(tag)
def gather(self, var: Any, tag: str = '*') -> Any:
"""
Get data from all remote group members into root's local variable.
Args:
var: Any, Local variable to be sent [only remote group].
            tag: str, Optional, if you want to customize your variable tag.
        Return:
            Any, The variables received from remote group members [for root].
            None for remote group members.
Example:
In root code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key",
root='local_id',
remote_group=['remote_id1','remote_id2'])
            >>> MyVar = var_chan.gather() # Gather the vars sent by remote_group members.
In remote group code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key",
root='local_id',
remote_group=['remote_id1','remote_id2'])
            >>> var_chan.gather(MyVar) # Send var to root.
"""
self.root_channel.send(var, tag)
def map(self, var: Any, tag: str = '*') -> Any:
"""
        This map function applies a function at root to every item gathered from the remote group, and sends the results back.
        Args:
            var: Any, Local variable to be sent [only remote group].
            tag: str, Optional, if you want to customize your variable tag.
        Return:
            Any, The mapped result received back from root [for remote group members].
        Example:
            In root code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key", root='local_id', remote_group=['remote_id1','remote_id2'])
>>> var_chan.map(lambda x: x+1 )
In remote group code.
>>> var_chan = make_broadcast_channel(name="Exchange_secret_key", root='local_id', remote_group=['remote_id1','remote_id2'])
>>> MyVar_plus1 = var_chan.map(MyVar)
"""
self.gather(var, tag=tag)
return self.scatter(tag=tag)
def allreduce(self, var: Any, tag: str = '*') -> Any:
"""
        This allreduce function applies a reduce function at root to all items gathered from the remote group, and broadcasts the result back.
        Args:
            var: Any, Local variable to be sent [only remote group].
            tag: str, Optional, if you want to customize your variable tag.
        Return:
            Any, The reduced result broadcast back from root [for remote group members].
Example:
>>> var_chan = make_broadcast_channel(name="Example", root='local_id', remote_group=['remote_id1','remote_id2'])
>>> var_chan.allreduce(lambda x: sum(x))
In remote group code.
>>> var_chan = make_broadcast_channel(name="Example", root='local_id', remote_group=['remote_id1','remote_id2'])
>>> MyVar_sum = var_chan.allreduce(MyVar)
"""
self.gather(var, tag=tag)
return self.broadcast(tag=tag)
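# A minimal usage sketch of the remote-group side (assuming this process is
# 'remote_id1' and that the make_broadcast_channel factory referenced in the
# docstrings above wires it to a RemoteVariableBroadcastChannel; all IDs are
# placeholders, not values defined in this module).
def _example_remote_allreduce(local_value):
    var_chan = make_broadcast_channel(name="Example",
                                      root='root_id',
                                      remote_group=['remote_id1', 'remote_id2'])
    # Root applies its reduce function to the gathered values and broadcasts
    # the result, so every remote member receives the same reduced value.
    reduced = var_chan.allreduce(local_value)
    return reduced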
class RootVariableBroadcastChannel():
"""
    BroadcastChannel is used to build a broadcast-like way of communication.
"""
def __init__(self,
name: str,
root: str,
remote_group: List[str],
auto_offset: bool = True):
"""
Args:
name: str, Base name for variable
            root: str, ID for the root process,
                which is configured in the route table.
            remote_group: List[str], List of remote IDs,
                which are configured in the route table.
            auto_offset: bool, Whether to use an automatically incremented offset
                in the full variable name.
"""
self.root = root
self.group = remote_group
self.my_channels = {
remote_id: VariableChannel(
"Broadcast_"+name,
remote_id,
auto_offset)
for remote_id in self.group}
def size(self):
"""remote group size
"""
return len(self.group)
def scatter(self, variables: List[Any], tag: str = '*') -> Any:
second to complete
return True
def restart(self) -> bool:
response = self.send_message("restartController") # Restart the controller
time.sleep(1) # Give it 1 second to complete
return True
def get_control_constants(self):
return json.loads(self.send_message("getControlConstants", read_response=True))
def set_parameters(self, parameters):
return self.send_message("setParameters", json.dumps(parameters))
def get_dashpanel_info(self):
try: # This is apparently failing when being called in a loop for external_push - Wrapping in a try/except so the loop doesn't die
return json.loads(self.send_message("getDashInfo", read_response=True))
except TypeError:
return None
def circus_parameter(self) -> int:
"""Returns the parameter used by Circus to track this device's processes"""
return self.id
def _get_circusmgr(self) -> CircusMgr:
if USE_DOCKER:
return CircusMgr(circus_endpoint="tcp://127.0.0.1:7555")
else:
return CircusMgr()
def start_process(self):
"""Start this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.start(name=circus_process_name)
def remove_process(self):
"""Remove this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.remove(name=circus_process_name)
def stop_process(self):
"""Stop this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.stop(name=circus_process_name)
def restart_process(self):
"""Restart the device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
fc.restart(name=circus_process_name)
def status_process(self):
"""Status this device process, raises CircusException if error"""
fc = self._get_circusmgr()
circus_process_name = u"dev-{}".format(self.circus_parameter())
status = fc.application_status(name=circus_process_name)
return status
def get_cached_ip(self):
# This only gets called from within BrewPi-script
# I really hate the name of the function, but I can't think of anything else. This basically does three things:
# 1. Looks up the mDNS hostname (if any) set as self.wifi_host and gets the IP address
# 2. Saves that IP address to self.wifi_host_ip (if we were successful in step 1)
# 3. Returns the found IP address (if step 1 was successful), the cached (self.wifi_host_ip) address if it
# wasn't, or 'None' if we don't have a cached address and we weren't able to resolve the hostname
if len(self.wifi_host) > 4:
try:
ip_list = []
ipv6_list = []
ais = socket.getaddrinfo(self.wifi_host, 0, 0, 0, 0)
for result in ais:
if result[0] == socket.AddressFamily.AF_INET:
# IPv4 only
ip_list.append(result[-1][0])
elif result[0] == socket.AddressFamily.AF_INET6:
ipv6_list.append(result[-1][0])
ip_list = list(set(ip_list))
                ipv6_list = list(set(ipv6_list))
if len(ip_list) > 0:
resolved_address = ip_list[0]
else:
resolved_address = ipv6_list[0]
# If we were able to find an IP address, save it to the cache
self.wifi_host_ip = resolved_address
self.save()
return resolved_address
except:
# TODO - Add an error message here
if len(self.wifi_host_ip) > 6:
# We weren't able to resolve the hostname (self.wifi_host) but we DID have a cached IP address.
# Return that.
return self.wifi_host_ip
else:
return None
# In case of error (or we have no wifi_host)
return None
def get_port_from_udev(self):
# This only gets called from within BrewPi-script
# get_port_from_udev() looks for a USB device connected which matches self.udev_serial_number. If one is found,
# it returns the associated device port. If one isn't found, it returns None (to prevent the cached port from
# being used, and potentially pointing to another, unrelated device)
if self.connection_type != self.CONNECTION_SERIAL:
return self.serial_port # If we're connecting via WiFi, don't attempt autodetection
# If the user elected to not use udev to get the port, just return self.serial_port
if not self.prefer_connecting_via_udev:
return self.serial_port
# If the platform doesn't support udev (isn't Linux) then return self.serial_port as well.
if not udev_integration.valid_platform_for_udev():
return self.serial_port
# TODO - Detect if this is a Fuscus board and return self.serial_port (as well as setting prefer_connecting_via_udev)
# If the udev_serial_number isn't yet set, try setting it
if self.udev_serial_number == "":
if not self.set_udev_from_port():
# If we can't set it (device isn't connected, etc.) then return None
return None
udev_node = udev_integration.get_node_from_serial(self.udev_serial_number)
if udev_node is not None:
# The udev lookup found a device! Return the appropriate serial port.
if self.serial_port != udev_node:
# If the serial port changed, cache it.
self.serial_port = udev_node
self.save()
return udev_node
else:
# The udev lookup failed - return None
return None
def set_udev_from_port(self):
# set_udev_from_port() quickly scans the device connected at self.serial_port and - if found - saves the
# associated udev serial number to the object.
udev_serial_number = udev_integration.get_serial_from_node(self.serial_port)
if udev_serial_number is not None:
self.udev_serial_number = udev_serial_number
self.save()
return True
# We failed to look up the udev serial number.
return False
class Beer(models.Model):
# Beers are unique based on the combination of their name & the original device
name = models.CharField(max_length=255, db_index=True,
help_text='Name of the beer being logged (must be unique)')
device = models.ForeignKey(BrewPiDevice, db_index=True, on_delete=models.SET_NULL, null=True,
help_text='The linked temperature control device from which data is logged')
created = models.DateTimeField(default=timezone.now, help_text='When the beer log was initially created')
# format generally should be equal to device.temp_format. We're caching it here specifically so that if the user
# updates the device temp format somehow we will continue to log in the OLD format. We'll need to make a giant
# button that allows the user to convert the log files to the new format if they're different.
format = models.CharField(max_length=1, default='F', help_text='Temperature format to write the logs in')
# model_version is the revision number of the "Beer" and "BeerLogPoint" models, designed to be iterated when any
# change is made to the format/content of the flatfiles that would be written out. The idea is that a separate
# converter could then be written moving between each iteration of model_version that could then be sequentially
# applied to bring a beer log in line with what the model then expects.
# Version 1: Original version
# Version 2: Adds 'state' to 'base_csv' for state plotting
model_version = models.IntegerField(default=2, help_text='Version # used for the logged file format')
gravity_enabled = models.BooleanField(default=False, help_text='Is gravity logging enabled for this beer log?')
def __str__(self):
return self.name
def __unicode__(self):
return self.__str__()
def column_headers(self, which='base_csv', human_readable=False):
if which == 'base_csv':
if human_readable:
headers = ['Log Time', 'Beer Temp', 'Beer Setting', 'Fridge Temp', 'Fridge Setting', 'Room Temp']
else:
headers = ['log_time', 'beer_temp', 'beer_set', 'fridge_temp', 'fridge_set', 'room_temp']
elif which == 'full_csv':
if human_readable:
# Currently unused
headers = ['log_time', 'beer_temp', 'beer_set', 'beer_ann', 'fridge_temp', 'fridge_set', 'fridge_ann',
'room_temp', 'state', 'temp_format', 'associated_beer_id']
else:
headers = ['log_time', 'beer_temp', 'beer_set', 'beer_ann', 'fridge_temp', 'fridge_set', 'fridge_ann',
'room_temp', 'state', 'temp_format', 'associated_beer_id']
else:
return None
# This works because we're appending the gravity data to both logs
if self.gravity_enabled:
if human_readable:
headers.append('Gravity')
headers.append('Gravity Sensor Temp')
else:
headers.append('gravity')
headers.append('grav_temp')
if which == 'base_csv' and self.model_version > 1:
# For model versions 2 and greater, we are appending "state" to the base CSV.
if human_readable:
headers.append('State') # I don't think this gets used anywhere...
else:
headers.append('state')
return headers
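    # A small illustrative sketch of the resulting header layout (assuming a
    # gravity-enabled beer at model_version 2; the values are hypothetical):
    #   >>> beer.column_headers('base_csv')
    #   ['log_time', 'beer_temp', 'beer_set', 'fridge_temp', 'fridge_set',
    #    'room_temp', 'gravity', 'grav_temp', 'state']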
def base_column_visibility(self):
# TODO - Determine if we want to take some kind of user setting into account (auto-hide room temp, for example)
# headers = [x, 'beer_temp', 'beer_set', 'fridge_temp', 'fridge_set', 'room_temp']
visibility = "[true, true, true, true, true"
# This works because we're appending the gravity data to both logs
if self.gravity_enabled:
visibility += ", true, true"
        if self.model_version > 1:  # "state" is only appended to base_csv for model_version > 1
            visibility += ", false"  # Literally the whole point of this code block is to hide "state"
visibility += "]"
return visibility
def column_headers_to_graph_string(self, which='base_csv'):
col_headers = self.column_headers(which, True)
graph_string = ""
for this_header in col_headers:
graph_string += "'" + this_header + "', "
        if len(graph_string) > 2:
return graph_string[:-2]
else:
return ""
@staticmethod
def name_is_valid(proposed_name):
# Since we're using self.name in a file path, want to make sure no injection-type attacks can occur.
        return bool(re.match("^[a-zA-Z0-9 _-]*$", proposed_name))
def base_filename(self): # This is the "base" filename used in all the files saved out
# Including the beer ID in the file name to ensure uniqueness (if the user duplicates the name, for example)
if self.name_is_valid(self.name):
return "Device " + str(self.device_id) + " - B" + str(self.id) + " - " + self.name
else:
return "Device " + str(self.device_id) + " - B" + str(self.id) + " - NAME ERROR - "
def full_filename(self, which_file):
base_name = self.base_filename()
if which_file == 'base_csv':
return base_name + "_graph.csv"
elif which_file == 'full_csv':
            return
<filename>tools/mo/openvino/tools/mo/back/add_outputs_recursive.py
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from math import ceil
import numpy as np
from openvino.tools.mo.ops.If import If
from openvino.tools.mo.ops.loop import Loop
from openvino.tools.mo.ops.tensor_iterator import TensorIterator
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph, Node
from openvino.tools.mo.ops.result import Result
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
def ti_set_output_port_shape(cycle_node, internal_id, port_num, iterations_count, axis):
int_node_name = TensorIterator.find_internal_layer_id(cycle_node.body, internal_id)
int_node = Node(cycle_node.body, int_node_name)
assert int_node.op == 'Result'
out_shape = int_node.in_port(0).data.get_shape().copy()
# inside cycle node Unsqueeze was added to have the first dimension for concatenating results along it
assert len(out_shape) >= 1
if axis is not None:
out_shape[axis] = iterations_count
assert port_num in cycle_node.out_ports()
cycle_node.out_port(port_num).data.set_shape(out_shape)
def get_iterations_count_from_output_record(output_rec):
def check_field(record, field):
return field in record and record[field] is not None
# 1. check if we need to concatenate iteration results for given output
if not check_field(output_rec, 'axis'):
# in this case we do not concatenate outputs, so iterations count is not needed really
return None
# 2. check if given output record contains values for 'end', so iterations count can be calculated from this record
if check_field(output_rec, 'end') and check_field(output_rec, 'start') and \
((output_rec['start'] >= 0 and output_rec['end'] >= 0) or
(output_rec['start'] < 0 and output_rec['end'] < 0)):
stride = output_rec['stride'] if check_field(output_rec, 'stride') else 1
# get iterations count from output record
iterations_count = ceil((output_rec['end'] - output_rec['start']) / stride)
return iterations_count
return dynamic_dimension_value
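# A minimal sanity-check sketch for the helper above (the record values are
# hypothetical, not taken from a real TensorIterator port map): with start=0,
# end=10 and stride=2 the iterations count is ceil((10 - 0) / 2) = 5.
def _example_iterations_count():
    example_rec = {'axis': 0, 'start': 0, 'end': 10, 'stride': 2}
    assert get_iterations_count_from_output_record(example_rec) == 5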
# shape inference for TensorIterator
# copy shapes from internal nodes + insert correct iterations count where needed
def ti_infer(step_node, port_num):
out_port_map = step_node.output_port_map
port_num = port_num + len(step_node.in_ports())
# find out which internal layer maps to port_num
found_rec = None
for record in out_port_map:
if record['external_port_id'] == port_num:
found_rec = record
break
assert found_rec is not None, \
"External port {} is not connected with body in node {}".format(port_num,
step_node.soft_get('name', step_node.id))
port_num = port_num - len(step_node.in_ports())
# find out iterations count for TensorIterator to set output shape correctly
iterations_count = get_iterations_count_from_output_record(found_rec)
if iterations_count is dynamic_dimension_value:
iterations_count = TensorIterator.find_iterations_count_for_output(step_node)
ti_set_output_port_shape(step_node, found_rec['internal_layer_id'], port_num, iterations_count,
found_rec['axis'])
# shape inference for Loop
# copy shapes from internal nodes + insert correct iterations count where needed
# iterations count always in the first dimension
def loop_infer(step_node, port_num):
out_port_map = step_node.output_port_map
int_layer_id = None
iterations_count = Loop.iterations_count(step_node)
for record in out_port_map:
if record['external_port_id'] == port_num:
int_layer_id = record['internal_layer_id']
ti_set_output_port_shape(step_node, int_layer_id, port_num, iterations_count, 0)
def max_internal_layer_id(graph):
max_int_layer_id = 0
for n in graph.get_op_nodes():
if n.has_and_set('internal_layer_id') and n.internal_layer_id > max_int_layer_id:
max_int_layer_id = n.internal_layer_id
return max_int_layer_id
# Add Result (and Unsqueeze if add_unsqueeze=True) to node port port_num in graph cur_graph.
# New nodes will have internal id equal to cur_max_layer_id + 1 (and cur_max_layer_id + 2 if 2 nodes were added)
# New nodes will be inserted into tracks at position track_index.
def add_output_in_body(node, port_num, cur_graph, cur_max_layer_id, tracks, track_index, add_unsqueeze=True):
port = node.out_port(port_num)
if add_unsqueeze:
unsq_name = port.node.soft_get('name', port.node.id) + "/Unsqueeze"
unsq_node = create_op_node_with_second_input(cur_graph, Unsqueeze,
int64_array([0]),
{'name': unsq_name})
port.connect(unsq_node.in_port(0))
unsq_node['internal_layer_id'] = cur_max_layer_id + 1
cur_max_layer_id += 1
tracks.insert(track_index, {'node': unsq_node, 'graph': cur_graph})
port = unsq_node.out_port(0)
out_name = port.node.soft_get('name', port.node.id) + ":" + str(port_num)
res_node = Result(cur_graph, {'name': out_name}).create_node()
port.connect(res_node.in_port(0))
res_node['internal_layer_id'] = cur_max_layer_id + 1
cur_max_layer_id += 1
tracks.insert(track_index, {'node': res_node, 'graph': cur_graph})
return res_node, tracks, cur_max_layer_id
class AddOutputRecursive(BackReplacementPattern):
"""
    Add outputs to nodes inside loops. The path to the node is set in the 'additional_outputs' attribute of the graph.
    Path structure: [node_loop_1, loop_2_in_loop_1,.., if_node, [then_list, else_list]]
    After an If operation there should be a sub-list with 2 elements, then_list and else_list, where each is one node
    or a list of nodes on the corresponding path.
    For cycles, results from all iterations will be concatenated along dimension 0.
"""
enabled = False
run_not_recursively = True
@staticmethod
def add_output_for_path(graphs_nodes_path):
# add output to nodes according to path
step_node = graphs_nodes_path[-1]['node']
cur_graph = graphs_nodes_path[-1]['graph']
ports_to_add_nodes = []
for o_p in step_node.out_ports():
ports_to_add_nodes.append(o_p)
# update internal_layer_id for new Results
for i in range(len(graphs_nodes_path)-1, 0, -1):
cur_max_layer_id = max_internal_layer_id(cur_graph) + 1
cur_loop_node = graphs_nodes_path[i-1]['node']
new_out_ports = []
            if cur_loop_node.op != 'If':
# add Unsqueeze and Result for TensorIterator and Loop and update output_port_map
for p_num in ports_to_add_nodes:
res_node, graphs_nodes_path, cur_max_layer_id = add_output_in_body(step_node, p_num, cur_graph,
cur_max_layer_id,
graphs_nodes_path, i)
                    # The IR reader fixes the output port map for Loop, but does not change it for TensorIterator
new_port_id = len(cur_loop_node.out_ports())
if cur_loop_node.op == 'TensorIterator':
new_port_id = new_port_id + len(cur_loop_node.in_ports())
cur_loop_node.output_port_map.append({'axis': 0, 'stride': 1, 'part_size': 1, 'start': 0,
'end': -1, 'external_port_id': new_port_id,
'internal_layer_id': res_node['internal_layer_id']})
port_id = new_port_id
if cur_loop_node.op == 'TensorIterator':
port_id = port_id - len(cur_loop_node.in_ports())
new_out_ports.append(port_id)
cur_loop_node.add_output_port(port_id)
else:
# add Result nodes for If and update output_id
for p_num in ports_to_add_nodes:
res_node, graphs_nodes_path, cur_max_layer_id = add_output_in_body(step_node, p_num, cur_graph,
cur_max_layer_id,
graphs_nodes_path, i,
add_unsqueeze=False)
if cur_loop_node.then_graph == cur_graph:
new_port_id = len(cur_loop_node.out_ports())
res_node['output_id'] = new_port_id
cur_loop_node.add_output_port(new_port_id)
new_out_ports.append(new_port_id)
else:
res_node['output_id'] = list(cur_loop_node.out_ports().keys())[-1]
ports_to_add_nodes = new_out_ports
step_node = cur_loop_node
cur_graph = graphs_nodes_path[i-1]['graph']
i = 0
for p_num in ports_to_add_nodes:
port = step_node.out_port(p_num)
out_name = step_node.soft_get('name', step_node.id) + "." + str(p_num)
res_node = Result(cur_graph, {'name': out_name}).create_node()
port.connect(res_node.in_port(0))
# add name of Result to fw_tensor_debug_info to avoid renaming
if step_node.out_nodes()[p_num].has_and_set('fw_tensor_debug_info'):
step_node.out_nodes()[p_num]['fw_tensor_debug_info'].append(out_name)
else:
step_node.out_nodes()[p_num]['fw_tensor_debug_info'] = [[out_name, out_name]]
if step_node.op == 'TensorIterator':
step_node.out_edges()[len(step_node.out_edges())-1]['external_port_id'] = p_num + \
len(step_node.in_ports())
graphs_nodes_path.insert(0, {'node': res_node, 'graph': cur_graph})
i += 1
return graphs_nodes_path
@staticmethod
def infer_shapes_of_nodes_in_path(graphs_nodes_path):
# update shape for new or updated nodes
for i in range(len(graphs_nodes_path) - 1, -1, -1):
step_node = graphs_nodes_path[i]['node']
# update shapes for Loop, TI, If, Unsqueeze
# Result to end node in path added to existing port with already calculated shapes
for p_num in step_node.out_ports():
if not step_node.out_port(p_num).disconnected():
if step_node.op == 'TensorIterator':
ti_infer(step_node, p_num)
elif step_node.op == 'Loop':
loop_infer(step_node, p_num)
elif step_node.op == 'Unsqueeze':
assert step_node.in_port(1).get_source().node.has('value')
axis = step_node.in_port(1).get_source().node.value[0]
out_shape = list(step_node.in_port(0).get_source().data.get_shape())
out_shape.insert(axis, 1)
step_node.out_port(p_num).data.set_shape(out_shape)
elif step_node.op == 'If':
If.update_if_output_ports_shape(step_node)
@staticmethod
def split_path_to_simple_tracks(graph, path):
        # Split a complex path into simple linear tracks.
        # In the path, after an If node there should be a list with 2 sub-lists. In this function such a path is
        # split into 2 tracks: one for each sub-list, each with a linear structure.
        # The number of tracks obtained from the path is 2 * the number of If operations in the path.
        # A track is a list of pairs with 2 fields: the list of nodes on the current path and the list of
        # corresponding graphs.
# Example:
# input path : [loop_1, loop_2, if_1, [[loop3_1, node_1], [node_2]]]
# output track: [{'nodes': [loop_1, loop_2, if_1, loop3_1, node_1],
# 'graphs':[graph, loop_1.body, loop_2.body, if.then_graph, loop3_1.body]},
# {'nodes': [loop_1, loop_2, if_1, node_2],
# 'graphs':[graph, loop_1.body, loop_2.body, if.else_graph]}]
# structure to save tracks
# list with tracks, each track is list of pairs {'node', 'graph'}
paths_nodes_graphs = list()
paths_nodes_graphs.append([])
# stack for sub-graphs that will be traversed in future
future_graphs_stack = [graph]
# index for track that we currently fill
track_idx = 0
# save lists that were started but not finished during processing
lists_stack = [{'list': path, 'pos': -1}]
while len(lists_stack) != 0:
cur_list_pos = lists_stack.pop(-1)
# current list to process
cur_list = cur_list_pos['list']
# index in current list/sub-list
list_idx = cur_list_pos['pos'] + 1
while list_idx < len(cur_list):
el = cur_list[list_idx]
if isinstance(el, (list, np.ndarray)):
lists_stack.append({'list': cur_list, 'pos': list_idx})
# if we have previous node non-list then current sublist is for If node
# and new tracks should be added for sub-graphs (the first subgraph will continue current track)
if list_idx != 0 and isinstance(cur_list[list_idx - 1], str):
for i in range(len(el) - 1):
# copy all nodes from existing track to new one
paths_nodes_graphs.append(paths_nodes_graphs[-1][:])
# new sublist started, so reset index
cur_list = el
list_idx = 0
else:
assert isinstance(el, str)
cur_graph = future_graphs_stack.pop(-1)
step_node = Node(cur_graph, el)
paths_nodes_graphs[track_idx].append({'node': step_node, 'graph': cur_graph})
# if node is not last, check that next node will be on current track or not
if list_idx != len(cur_list) - 1:
# so detect if we are in sublist with branches for If
# then in stack sublist is not the first node of list
# and have previous node with If operation name
if len(lists_stack) != 0 and lists_stack[-1]['pos'] != 0 and \
isinstance(lists_stack[-1]['list'][lists_stack[-1]['pos']-1], str):
# switch to next track
if list_idx != len(cur_list) - | |
"""
Copyright 2020 XuaTheGrate
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import inspect
from enum import Enum
from functools import wraps
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union
)
__all__ = [
'Language',
'CountryCode',
# 'Geolocation'
]
T = TypeVar('T', bound='BaseFlags')
class _flag_property:
__slots__ = ('_original_func', '__doc__')
def __init__(self, func: Union[Callable[[T], bool], Callable[[None], bool]]) -> None:
self._original_func = func
self.__doc__ = func.__doc__
def __get__(self, instance: Optional[T], owner: Optional[Type[T]]=None) -> Union[_flag_property, bool]:
if instance is None:
# return super().__get__(instance, owner)
# raise AttributeError(f'"{self._original_func.__name__}"')
return self
return instance._has_flag(self._original_func(None)) # type: ignore
def __set__(self, instance: T, value: bool) -> None:
instance._set_flag(self._original_func(None), value) # type: ignore
class BaseFlags:
__slots__ = ('value',)
flag_values: List[str]
value: int
def __repr__(self) -> str:
return "<{0.__class__.__name__}: {0.value:#x}>".format(self)
def __init_subclass__(cls) -> None:
for name, method in inspect.getmembers(cls):
if isinstance(method, _flag_property):
cls.flag_values.append(name)
def __setattr__(self, name: str, value: Any) -> None:
if not hasattr(self, name) and name != 'value':
raise AttributeError(name)
super(BaseFlags, self).__setattr__(name, value)
@classmethod
def _from_value(cls: Type[T], value: int) -> T:
self = cls()
self.value = value
return self
def _has_flag(self: T, flag: int) -> bool:
return (self.value & flag) == flag
def _set_flag(self: T, flag: int, toggle: bool) -> None:
if toggle:
self.value |= flag
else:
self.value &= ~flag
def __iter__(self) -> Generator[Tuple[str, bool], None, None]:
return ((k, v) for k, v in self.__dict__.items())
@classmethod
def all(cls: Type[T]) -> T:
"""Factory method that returns the flags with all flags enabled."""
return cls._from_value(int('1'*len(cls.flag_values), 2))
@classmethod
def none(cls: Type[T]) -> T:
"""Factory method that returns the flags with all flags disabled."""
return cls._from_value(0)
@classmethod
    def from_flags(cls: Type[T], **flags: bool) -> T:
"""Factory method that returns the flags with specific flags enabled."""
self = cls._from_value(0)
for name, value in flags.items():
if not hasattr(self, name):
raise AttributeError(name)
setattr(self, name, value)
return self
@property
def _all_flags_enabled(self: T) -> bool:
return self.value == (1 << len(self.flag_values)) - 1
def to_google_flags(self: T) -> Optional[str]:
"""Converts these flags to a url-safe parameter value."""
val = bin(self.value)
if self.value == 0 or self._all_flags_enabled:
return None
if val.count('1') > (len(self.flag_values) / 2):
return '-(' + '|'.join(getattr(self.__class__, k).__doc__ for k, v in self if not v) + ')'
return '|'.join(getattr(self.__class__, k).__doc__ for k, v in self if v)
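# A minimal usage sketch of the flags API (a hedged example, not part of the
# original module; it relies only on the Language subclass defined below):
def _example_language_flags():
    langs = Language.from_flags(english=True, french=True)
    param = langs.to_google_flags()
    # Only the enabled flags are emitted, joined by '|', e.g. 'lang_en|lang_fr'.
    assert set(param.split('|')) == {'lang_en', 'lang_fr'}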
class Language(BaseFlags):
"""
Bit flags for the ``lr`` and ``cr`` parameters.
"""
__slots__ = ('value',)
flag_values: List[str] = []
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
return {name: getattr(self, name) for name in self.flag_values}
@_flag_property
def arabic(self) -> bool:
'lang_ar'
return 1 # type: ignore
@_flag_property
def bulgarian(self) -> bool:
'lang_bg'
return 1 << 1 # type: ignore
@_flag_property
def catalan(self) -> bool:
'lang_ca'
return 1 << 2 # type: ignore
@_flag_property
def czech(self) -> bool:
'lang_cs'
return 1 << 3 # type: ignore
@_flag_property
def danish(self) -> bool:
'lang_da'
return 1 << 4 # type: ignore
@_flag_property
def german(self) -> bool:
'lang_de'
return 1 << 5 # type: ignore
@_flag_property
def greek(self) -> bool:
'lang_el'
return 1 << 6 # type: ignore
@_flag_property
def english(self) -> bool:
'lang_en'
return 1 << 7 # type: ignore
@_flag_property
def spanish(self) -> bool:
'lang_es'
return 1 << 8 # type: ignore
@_flag_property
def estonian(self) -> bool:
'lang_et'
return 1 << 9 # type: ignore
@_flag_property
def finnish(self) -> bool:
'lang_fi'
return 1 << 10 # type: ignore
@_flag_property
def french(self) -> bool:
'lang_fr'
return 1 << 11 # type: ignore
@_flag_property
def croatian(self) -> bool:
'lang_hr'
return 1 << 12 # type: ignore
@_flag_property
def hungarian(self) -> bool:
'lang_hu'
return 1 << 13 # type: ignore
@_flag_property
def indonesian(self) -> bool:
'lang_id'
return 1 << 14 # type: ignore
@_flag_property
def icelandic(self) -> bool:
'lang_is'
return 1 << 15 # type: ignore
@_flag_property
def italian(self) -> bool:
'lang_it'
return 1 << 16 # type: ignore
@_flag_property
def hebrew(self) -> bool:
'lang_iw'
return 1 << 17 # type: ignore
@_flag_property
def japanese(self) -> bool:
'lang_ja'
return 1 << 18 # type: ignore
@_flag_property
def korean(self) -> bool:
'lang_ko'
return 1 << 19 # type: ignore
@_flag_property
def lithuanian(self) -> bool:
'lang_lt'
return 1 << 20 # type: ignore
@_flag_property
def latvian(self) -> bool:
'lang_lv'
return 1 << 21 # type: ignore
@_flag_property
def dutch(self) -> bool:
'lang_nl'
return 1 << 22 # type: ignore
@_flag_property
def norwegian(self) -> bool:
'lang_no'
return 1 << 23 # type: ignore
@_flag_property
def polish(self) -> bool:
'lang_pl'
return 1 << 24 # type: ignore
@_flag_property
def portuguese(self) -> bool:
'lang_pt'
return 1 << 25 # type: ignore
@_flag_property
def romanian(self) -> bool:
'lang_ro'
return 1 << 26 # type: ignore
@_flag_property
def russian(self) -> bool:
'lang_ru'
return 1 << 27 # type: ignore
@_flag_property
def slovak(self) -> bool:
'lang_sk'
return 1 << 28 # type: ignore
@_flag_property
def slovenian(self) -> bool:
'lang_sl'
return 1 << 29 # type: ignore
@_flag_property
def serbian(self) -> bool:
'lang_sr'
return 1 << 30 # type: ignore
@_flag_property
def swedish(self) -> bool:
'lang_sv'
return 1 << 31 # type: ignore
@_flag_property
def turkish(self) -> bool:
'lang_tr'
return 1 << 32 # type: ignore
@_flag_property
def chinese_simplified(self) -> bool:
'lang_zh-CN'
return 1 << 33 # type: ignore
@_flag_property
def chinese_traditional(self) -> bool:
'lang_zh-TW'
return 1 << 34 # type: ignore
class CountryCode(BaseFlags):
__slots__ = ('value',)
flag_values: List[str] = []
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
return {name: getattr(self, name) for name in self.flag_values}
@_flag_property
def afghanistan(self) -> bool:
'countryAF'
return 1 << 0 # type: ignore
@_flag_property
def albania(self) -> bool:
'countryAL'
return 1 << 1 # type: ignore
@_flag_property
def algeria(self) -> bool:
'countryDZ'
return 1 << 2 # type: ignore
@_flag_property
def american_samoa(self) -> bool:
'countryAS'
return 1 << 3 # type: ignore
@_flag_property
def andorra(self) -> bool:
'countryAD'
return 1 << 4 # type: ignore
@_flag_property
def angola(self) -> bool:
'countryAO'
return 1 << 5 # type: ignore
@_flag_property
def anguilla(self) -> bool:
'countryAI'
return 1 << 6 # type: ignore
@_flag_property
def antarctica(self) -> bool:
'countryAQ'
return 1 << 7 # type: ignore
@_flag_property
def antigua_and_barbuda(self) -> bool:
'countryAG'
return 1 << 8 # type: ignore
@_flag_property
def argentina(self) -> bool:
'countryAR'
return 1 << 9 # type: ignore
@_flag_property
def armenia(self) -> bool:
'countryAM'
return 1 << 10 # type: ignore
@_flag_property
def aruba(self) -> bool:
'countryAW'
return 1 << 11 # type: ignore
@_flag_property
def australia(self) -> bool:
'countryAU'
return 1 << 12 # type: ignore
@_flag_property
def austria(self) -> bool:
'countryAT'
return 1 << 13 # type: ignore
@_flag_property
def azerbaijan(self) -> bool:
'countryAZ'
return 1 << 14 # type: ignore
@_flag_property
def bahamas(self) -> bool:
'countryBS'
return 1 << 15 # type: ignore
@_flag_property
def bahrain(self) -> bool:
'countryBH'
return 1 << 16 # type: ignore
@_flag_property
def bangladesh(self) -> bool:
'countryBD'
return 1 << 17 # type: ignore
@_flag_property
def barbados(self) -> bool:
'countryBB'
return 1 << 18 # type: ignore
@_flag_property
def belarus(self) -> bool:
'countryBY'
return 1 << 19 # type: ignore
@_flag_property
def belgium(self) -> bool:
'countryBE'
return 1 << 20 # type: ignore
@_flag_property
def belize(self) -> bool:
'countryBZ'
return 1 << 21 # type: ignore
@_flag_property
def benin(self) -> bool:
'countryBJ'
return 1 << 22 # type: ignore
@_flag_property
def bermuda(self) -> bool:
'countryBM'
return 1 << 23 # type: ignore
@_flag_property
def bhutan(self) -> bool:
'countryBT'
return 1 << 24 # type: ignore
@_flag_property
def bolivia(self) -> bool:
'countryBO'
return 1 << 25 # type: ignore
@_flag_property
def bosnia_and_herzegovina(self) -> bool:
'countryBA'
return 1 << 26 # type: ignore
@_flag_property
def botswana(self) -> bool:
'countryBW'
return 1 << 27 # type: ignore
@_flag_property
def bouvet_island(self) -> bool:
'countryBV'
return 1 << 28 # type: ignore
@_flag_property
def brazil(self) -> bool:
'countryBR'
return 1 << 29 # type: ignore
@_flag_property
def british_indian_ocean_territory(self) -> bool:
'countryIO'
return 1 << 30 # type: ignore
@_flag_property
def brunei_darussalam(self) -> bool:
'countryBN'
return 1 << 31 # type: ignore
@_flag_property
def bulgaria(self) -> bool:
'countryBG'
return 1 << 32 # type: ignore
@_flag_property
def burkina_faso(self) -> bool:
'countryBF'
return 1 << 33 # type: ignore
@_flag_property
def burundi(self) -> bool:
'countryBI'
return 1 << 34 # type: ignore
@_flag_property
def cambodia(self) -> bool:
'countryKH'
return 1 << 35 # type: ignore
@_flag_property
def cameroon(self) -> bool:
'countryCM'
return 1 << 36 # type: ignore
@_flag_property
def canada(self) -> bool:
'countryCA'
return 1 << 37 # type: ignore
@_flag_property
def cape_verde(self) -> bool:
'countryCV'
return 1 << 38 # type: ignore
@_flag_property
def cayman_islands(self) -> bool:
'countryKY'
return 1 << 39 # type: ignore
@_flag_property
def central_african_republic(self) -> bool:
'countryCF'
return 1 << 40 # type: ignore
@_flag_property
def chad(self) -> bool:
'countryTD'
return 1 << 41 # type: ignore
@_flag_property
def chile(self) -> bool:
'countryCL'
return 1 << 42 # type: ignore
@_flag_property
def china(self) -> bool:
'countryCN'
return 1 << 43 # type: ignore
@_flag_property
def christmas_island(self) -> bool:
'countryCX'
return 1 << 44 # type: ignore
@_flag_property
def cocos_keeling_islands(self) -> bool:
'countryCC'
return 1 << 45 # type: ignore
@_flag_property
def colombia(self) -> bool:
'countryCO'
return 1 << 46 # type: ignore
@_flag_property
def comoros(self) -> bool:
'countryKM'
return 1 << 47 # type: ignore
@_flag_property
def congo(self) -> bool:
'countryCG'
return 1 << 48 # type: ignore
@_flag_property
def _the_democratic_republic_of_the_congo(self) -> bool:
'countryCD'
return 1 << 49 # type: ignore
@_flag_property
def cook_islands(self) -> bool:
'countryCK'
return 1 << 50 # type: ignore
@_flag_property
def costa_rica(self) -> bool:
'countryCR'
return 1 << 51 # type: ignore
@_flag_property
def cote_divoire(self) -> bool:
'countryCI'
return 1 << 52 # type: ignore
@_flag_property
def croatia_hrvatska(self) -> bool:
'countryHR'
return 1 << 53 # type: ignore
@_flag_property
def cuba(self) -> bool:
'countryCU'
return 1 << 54 # type: ignore
@_flag_property
def cyprus(self) -> bool:
'countryCY'
return 1 << 55 # type: ignore
@_flag_property
def czech_republic(self) -> bool:
'countryCZ'
return 1 << 56 # type: ignore
@_flag_property
def denmark(self) -> bool:
'countryDK'
return 1 << 57 # type: ignore
@_flag_property
def djibouti(self) -> bool:
'countryDJ'
return 1 << 58 # type: ignore
@_flag_property
def dominica(self) -> bool:
'countryDM'
return 1 << 59 # type: ignore
@_flag_property
def dominican_republic(self) -> bool:
'countryDO'
return 1 << 60 # type: ignore
@_flag_property
def east_timor(self) -> bool:
'countryTP'
return 1 << 61 # type: ignore
@_flag_property
def ecuador(self) -> bool:
'countryEC'
return 1 << 62 # type: ignore
@_flag_property
def egypt(self) -> bool:
'countryEG'
return 1 << 63 # type: ignore
@_flag_property
def el_salvador(self) -> bool:
'countrySV'
        return 1 << 64 # type: ignore
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from ..utils import per_example_grad_conv
from ..object.vector import PVector, FVector
from ..layercollection import LayerCollection
class Jacobian:
"""
    Computes jacobians :math:`\mathbf{J}_{ijk}=\\frac{\partial f\left(x_{j}\\right)_{i}}{\partial\mathbf{w}_{k}}`,
    FIM matrices :math:`\mathbf{F}_{k,k'}=\\frac{1}{n}\sum_{i,j}\mathbf{J}_{ijk}\mathbf{J}_{ijk'}`
    and NTK matrices :math:`\mathbf{K}_{iji'j'}=\sum_{k}\mathbf{J}_{ijk}\mathbf{J}_{i'j'k}`.
This generator is written in pure PyTorch and exploits some tricks in order to make computations
more efficient.
:param layer_collection:
:type layer_collection: :class:`.layercollection.LayerCollection`
:param model:
:type model: Pytorch `nn.Module`
    :param function: A function :math:`f\left(X,Y,Z\\right)` where :math:`X,Y,Z` are minibatches
returned by the dataloader (Note that in some cases :math:`Y,Z` are not required). If None,
it defaults to `function = lambda *x: model(x[0])`
:type function: python function
    :param n_output: How many outputs there are for each example of your function. E.g. in 10 class
classification this would probably be 10.
:type n_output: integer
"""
def __init__(self, model, function=None, n_output=1,
centering=False, layer_collection=None):
self.model = model
self.handles = []
self.xs = dict()
self.n_output = n_output
self.centering = centering
if function is None:
function = lambda *x: model(x[0])
self.function = function
if layer_collection is None:
self.layer_collection = LayerCollection.from_model(model)
else:
self.layer_collection = layer_collection
# maps parameters to their position in flattened representation
self.l_to_m, self.m_to_l = \
self.layer_collection.get_layerid_module_maps(model)
def get_device(self):
return next(self.model.parameters()).device
def get_covariance_matrix(self, examples):
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_flat_grad,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
n_parameters = self.layer_collection.numel()
bs = loader.batch_size
G = torch.zeros((n_parameters, n_parameters), device=device)
self.grads = torch.zeros((1, bs, n_parameters), device=device)
if self.centering:
grad_mean = torch.zeros((self.n_output, n_parameters),
device=device)
self.start = 0
self.i_output = 0
for d in loader:
inputs = d[0]
inputs.requires_grad = True
bs = inputs.size(0)
output = self.function(*d).view(bs, self.n_output) \
.sum(dim=0)
for i in range(self.n_output):
self.grads.zero_()
torch.autograd.grad(output[i], [inputs],
retain_graph=i < self.n_output - 1,
only_inputs=True)
G += torch.mm(self.grads[0].t(), self.grads[0])
if self.centering:
grad_mean[i].add_(self.grads[0].sum(dim=0))
G /= n_examples
if self.centering:
grad_mean /= n_examples
G -= torch.mm(grad_mean.t(), grad_mean)
# remove hooks
del self.grads
self.xs = dict()
for h in self.handles:
h.remove()
return G
def get_covariance_diag(self, examples):
if self.centering:
raise NotImplementedError
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_diag,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
n_parameters = self.layer_collection.numel()
self.diag_m = torch.zeros((n_parameters,), device=device)
self.start = 0
for d in loader:
inputs = d[0]
inputs.requires_grad = True
bs = inputs.size(0)
output = self.function(*d).view(bs, self.n_output) \
.sum(dim=0)
for i in range(self.n_output):
torch.autograd.grad(output[i], [inputs],
retain_graph=i < self.n_output - 1,
only_inputs=True)
diag_m = self.diag_m / n_examples
# remove hooks
del self.diag_m
self.xs = dict()
for h in self.handles:
h.remove()
return diag_m
def get_covariance_quasidiag(self, examples):
if self.centering:
raise NotImplementedError
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_quasidiag,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
self._blocks = dict()
for layer_id, layer in self.layer_collection.layers.items():
s = layer.numel()
if layer.bias is None:
self._blocks[layer_id] = (torch.zeros((s, ), device=device),
None)
else:
cross_s = layer.weight.size
self._blocks[layer_id] = (torch.zeros((s, ), device=device),
torch.zeros(cross_s, device=device))
for d in loader:
inputs = d[0]
inputs.requires_grad = True
bs = inputs.size(0)
output = self.function(*d).view(bs, self.n_output) \
.sum(dim=0)
for i in range(self.n_output):
torch.autograd.grad(output[i], [inputs],
retain_graph=i < self.n_output - 1,
only_inputs=True)
for d, c in self._blocks.values():
d.div_(n_examples)
if c is not None:
c.div_(n_examples)
blocks = self._blocks
# remove hooks
del self._blocks
self.xs = dict()
for h in self.handles:
h.remove()
return blocks
def get_covariance_layer_blocks(self, examples):
if self.centering:
raise NotImplementedError
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_layer_blocks,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
self._blocks = dict()
for layer_id, layer in self.layer_collection.layers.items():
s = layer.numel()
self._blocks[layer_id] = torch.zeros((s, s), device=device)
for d in loader:
inputs = d[0]
inputs.requires_grad = True
bs = inputs.size(0)
output = self.function(*d).view(bs, self.n_output) \
.sum(dim=0)
for i in range(self.n_output):
torch.autograd.grad(output[i], [inputs],
retain_graph=i < self.n_output - 1,
only_inputs=True)
blocks = {m: self._blocks[m] / n_examples for m in self._blocks.keys()}
# remove hooks
del self._blocks
self.xs = dict()
for h in self.handles:
h.remove()
return blocks
def get_kfac_blocks(self, examples):
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_kfac_blocks,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
self._blocks = dict()
for layer_id, layer in self.layer_collection.layers.items():
layer_class = layer.__class__.__name__
if layer_class == 'LinearLayer':
sG = layer.out_features
sA = layer.in_features
elif layer_class == 'Conv2dLayer':
sG = layer.out_channels
sA = layer.in_channels * layer.kernel_size[0] * \
layer.kernel_size[1]
if layer.bias is not None:
sA += 1
self._blocks[layer_id] = (torch.zeros((sA, sA), device=device),
torch.zeros((sG, sG), device=device))
for d in loader:
inputs = d[0]
inputs.requires_grad = True
bs = inputs.size(0)
output = self.function(*d).view(bs, self.n_output) \
.sum(dim=0)
for self.i_output in range(self.n_output):
retain_graph = self.i_output < self.n_output - 1
torch.autograd.grad(output[self.i_output], [inputs],
retain_graph=retain_graph,
only_inputs=True)
for layer_id in self.layer_collection.layers.keys():
self._blocks[layer_id][0].div_(n_examples / self.n_output**.5)
self._blocks[layer_id][1].div_(self.n_output**.5 * n_examples)
blocks = self._blocks
# blocks = {layer_id: (self._blocks[layer_id][0] / n_examples *
# self.n_output**.5,
# self._blocks[layer_id][1] / n_examples /
# self.n_output**.5)
# for layer_id in self.layer_collection.layers.keys()}
# remove hooks
del self._blocks
del self.i_output
self.xs = dict()
for h in self.handles:
h.remove()
return blocks
def get_jacobian(self, examples):
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_flat_grad,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
n_parameters = self.layer_collection.numel()
self.grads = torch.zeros((self.n_output, n_examples, n_parameters),
device=device)
self.start = 0
for d in loader:
inputs = d[0]
inputs.requires_grad = True
bs = inputs.size(0)
output = self.function(*d).view(bs, self.n_output) \
.sum(dim=0)
for self.i_output in range(self.n_output):
retain_graph = self.i_output < self.n_output - 1
torch.autograd.grad(output[self.i_output], [inputs],
retain_graph=retain_graph,
only_inputs=True)
self.start += inputs.size(0)
grads = self.grads
if self.centering:
grads -= grads.mean(dim=1, keepdim=True)
# remove hooks
del self.grads
del self.start
del self.i_output
self.xs = dict()
for h in self.handles:
h.remove()
return grads
def get_gram_matrix(self, examples):
# add hooks
self.handles += self._add_hooks(self._hook_savex_io, self._hook_kxy,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
self.G = torch.zeros((self.n_output, n_examples,
self.n_output, n_examples), device=device)
self.x_outer = dict()
self.x_inner = dict()
self.gy_outer = dict()
self.e_outer = 0
for i_outer, d in enumerate(loader):
# used in hooks to switch between store/compute
inputs_outer = d[0]
inputs_outer.requires_grad = True
bs_outer = inputs_outer.size(0)
self.outerloop_switch = True
output_outer = self.function(*d).view(bs_outer, self.n_output) \
.sum(dim=0)
for self.i_output_outer in range(self.n_output):
self.outerloop_switch = True
torch.autograd.grad(output_outer[self.i_output_outer],
[inputs_outer], retain_graph=True,
only_inputs=True)
self.outerloop_switch = False
self.e_inner = 0
for i_inner, d in enumerate(loader):
if i_inner > i_outer:
break
inputs_inner = d[0]
inputs_inner.requires_grad = True
bs_inner = inputs_inner.size(0)
output_inner = self.function(*d).view(bs_inner,
self.n_output) \
.sum(dim=0)
for self.i_output_inner in range(self.n_output):
torch.autograd.grad(output_inner[self.i_output_inner],
[inputs_inner], retain_graph=True,
only_inputs=True)
# since self.G is a symmetric matrix we only need to
# compute the upper or lower triangle
# => copy block and exclude diagonal
if (i_inner < i_outer and
self.i_output_outer == self.n_output - 1):
self.G[:, self.e_outer:self.e_outer+bs_outer, :,
self.e_inner:self.e_inner+bs_inner] += \
self.G[:, self.e_inner:self.e_inner+bs_inner, :,
self.e_outer:self.e_outer+bs_outer] \
.permute(2, 3, 0, 1)
self.e_inner += inputs_inner.size(0)
self.e_outer += inputs_outer.size(0)
G = self.G
if self.centering:
C = torch.eye(n_examples, device=G.device) - \
torch.ones((n_examples, n_examples), device=G.device) / \
n_examples
sG = G.size()
G = torch.mm(G.view(-1, n_examples), C)
G = torch.mm(C, G.view(sG[0], sG[1], -1).permute(1, 0, 2)
.contiguous().view(n_examples, -1)) \
.view(sG[1], sG[0], -1).permute(1, 0, 2).contiguous().view(*sG)
# remove hooks
del self.e_inner, self.e_outer
del self.G
del self.x_inner
del self.x_outer
del self.gy_outer
for h in self.handles:
h.remove()
return G
def get_kfe_diag(self, kfe, examples):
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_kfe_diag,
self.l_to_m.values())
device = next(self.model.parameters()).device
loader = self._get_dataloader(examples)
n_examples = len(loader.sampler)
self._diags = dict()
self._kfe = kfe
for layer_id, layer in self.layer_collection.layers.items():
layer_class = layer.__class__.__name__
if layer_class == 'LinearLayer':
sG = layer.out_features
sA = layer.in_features
elif layer_class == 'Conv2dLayer':
sG = layer.out_channels
sA = layer.in_channels * layer.kernel_size[0] * \
layer.kernel_size[1]
if layer.bias is not None:
sA += 1
self._diags[layer_id] = torch.zeros((sG * sA), device=device)
for d in loader:
inputs = d[0]
inputs.requires_grad = True
bs = inputs.size(0)
output = self.function(*d).view(bs, self.n_output) \
.sum(dim=0)
for self.i_output in range(self.n_output):
retain_graph = self.i_output < self.n_output - 1
torch.autograd.grad(output[self.i_output], [inputs],
retain_graph=retain_graph,
only_inputs=True)
diags = {l_id: self._diags[l_id] / n_examples
for l_id in self.layer_collection.layers.keys()}
# remove hooks
del self._diags
del self._kfe
self.xs = dict()
for h in self.handles:
h.remove()
return diags
def implicit_mv(self, v, examples):
# add hooks
self.handles += self._add_hooks(self._hook_savex,
self._hook_compute_Jv,
self.l_to_m.values())
self._v = v.get_dict_representation()
parameters = []
output = dict()
for layer_id, layer in self.layer_collection.layers.items():
mod = self.l_to_m[layer_id]
mod_class = mod.__class__.__name__
if mod_class in ['BatchNorm1d', 'BatchNorm2d']:
raise NotImplementedError
parameters.append(mod.weight)
output[mod.weight] = torch.zeros_like(mod.weight)
if layer.bias is not None:
parameters.append(mod.bias)
import datetime
from itertools import product
from tempfile import mkdtemp
import os
import numpy as np
import pycuda.autoinit
from pycuda.compiler import SourceModule
from pycuda.gpuarray import GPUArray, to_gpu
from tf_implementations.forward_pass_implementations\
import build_multi_view_cnn_forward_pass_with_features,\
build_full_multi_view_cnn_forward_pass_with_features
from utils.geometry import project, distance
from .cuda_implementations.sample_points import\
compute_depth_from_distribution
from .cuda_implementations.similarities import\
perform_multi_view_cnn_forward_pass_with_depth_estimation
from .cuda_implementations.mvcnn_with_ray_marching_and_voxels_mapping import\
batch_mvcnn_voxel_traversal_with_ray_marching_with_depth_estimation
from .cuda_implementations.raynet_fp import perform_raynet_fp
class ForwardPass(object):
"""Provide the basic interface for the different forward passes we might
use.
"""
def __init__(
self,
model,
generation_params,
sampling_scheme,
image_shape,
rays_batch=50000,
filter_out_rays=False
):
# The trained model used to export features
self._model = model
# Parameters used for data generation
self._generation_params = generation_params
self._sampling_scheme = sampling_scheme
# The number of rays per mini-batch
self.rays_batch = rays_batch
# Flag used to indicate whether we want to filter out rays
self._filter_out_rays = filter_out_rays
self._fp = None
@staticmethod
def create_depth_map_from_distribution(
scene,
img_idx,
S,
truncate=800,
sampling_scheme="sample_in_bbox"
):
""" Given a set of uniformly sampled points along all rays from the
        reference image identified with img_idx and the corresponding per-ray depth
distributions, we want to convert them to a depth map
Arguments:
----------
scene: Scene object
The Scene to be processed
img_idx, int, Index to refer to the reference image
S: np.array(shape=(N, D), dtype=np.float32)
The per pixel depth distribution
Returns:
--------
depth_map: numpy array, with the same dimensions as the image shape
"""
# Extract the dimensions of the camera center
H, W = scene.image_shape
_, D = S.shape
# Get the camera center of the reference image
camera_center = scene.get_image(img_idx).camera.center
D = compute_depth_from_distribution(
np.arange(H*W, dtype=np.int32),
scene.get_image(img_idx).camera.P_pinv,
camera_center,
H,
W,
scene.bbox.ravel(),
S,
np.arange(H*W, dtype=np.float32),
sampling_scheme,
).reshape(W, H).T
return np.minimum(D, truncate)
@staticmethod
def create_depth_map_from_distribution_with_voting(
scene,
img_idx,
points,
S,
truncate=800
):
"""Given a set of uniformly sampled points along all rays from the
        reference image identified with img_idx and the corresponding per-ray depth
distributions, we want to convert them to a depth map, using the
expectation value along the depth direction.
Arguments:
----------
scene: Scene object
The Scene to be processed
img_idx, int, Index to refer to the reference image
points: np.array(shape=(4, N, D), dtype=np.float32)
The uniformly sampled points across all rays, where N is
the number of rays and D is the number of discretization
steps used.
S: np.array(shape=(N, D), dtype=np.float32)
The per pixel depth distribution
Returns:
--------
depth_map: numpy array, with the same dimensions as the image shape
"""
# Extract the dimensions of the camera center
H, W = scene.image_shape
# Get the camera center of the reference image
camera_center = scene.get_image(img_idx).camera.center
# Compute the distances form the camera center for every depth
# hypotheses
dists = np.sqrt(
((camera_center.reshape(-1, 1, 1) - points)**2).sum(axis=0)
)
assert dists.shape == (H*W, points.shape[-1])
D = (S*dists).sum(axis=-1)
return np.minimum(D.reshape(W, H).T, truncate)
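    # In the voting variant above, the depth of ray n is the expectation of the
    # per-ray distribution over the sampled distance hypotheses:
    #   D[n] = sum_d S[n, d] * dists[n, d]
    # where dists[n, d] is the distance of the d-th sampled point from the camera center.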
@staticmethod
def upsample_features(features, model):
"""Parse the model for strides>1 and determine how much we should
upsample the features.
NOTE: The code assumes that the network is a single stream of conv -
pool layers.
Arguments
---------
features: (N, H, W, F)
Array with features of N images with F dimensions
model: Keras model
The model that created the features
"""
# Collect strides
strides = [
l.strides[0] if hasattr(l, "strides") else 1
for l in model.layers
]
upsample = sum(s for s in strides if s > 1)
if upsample <= 1:
return features
else:
return np.kron(features, np.ones((1, upsample, upsample, 1)))
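    # A small illustrative sketch (hypothetical shapes, not from a real model):
    # with a single stride-2 layer, upsample == 2 and np.kron repeats each
    # spatial cell 2x2, e.g.
    #   np.kron(np.ones((1, 2, 3, 8)), np.ones((1, 2, 2, 1))).shape == (1, 4, 6, 8)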
def get_valid_rays_per_image(self, scene, i):
H, W = scene.image_shape
idxs = np.arange(H*W, dtype=np.int32)
if self._filter_out_rays:
idxs = idxs.reshape(W, H).T
# Get the gt depth map for the current scene
            G = scene.get_depth_map(i)
# Collect the idxs where the ground truth is non-zero
return idxs[G != 0].ravel()
else:
return idxs
def _to_list_with_zeropadded_images(self, images, inputs=None):
# Check if the inputs is None or if it already contains elements
if inputs is None:
inputs = []
# Dimensions of the image
H, W, C = images[0].image.shape
p = self._generation_params.padding
zp_shape = (H+2*p, W+2*p, C)
# Add the image one by one
for im in images:
# Create zeroppaded image that will be used for the forward pass
zeropadded = np.zeros(zp_shape)
# Apply the zeropadding
zeropadded[p:p+H, p:p+W, :] = im.image
inputs.append(zeropadded)
return inputs
def sample_points(self, scene, i):
        # TODO: DELETE THIS FUNCTION
pass
def sample_points_batched(self, scene, i, batch):
        # TODO: DELETE THIS FUNCTION
pass
def forward_pass(self, scene, images_range):
"""Given a scene and an image range that identify the indices of the images, we predict the
corresponding depth maps for them.
Arguments:
---------
scene: Scene object
The Scene to be processed
images_range: tuple, Indices to specify the images to be used for
the reconstruction
Returns:
--------
depth_map: numpy array, with the same dimensions as the image shape
"""
raise NotImplementedError()
class MultiViewCNNForwardPass(ForwardPass):
"""Perform the forward pass only for the MultiViewCNN"""
def __init__(
self,
model,
generation_params,
sampling_scheme,
image_shape,
rays_batch,
filter_out_rays=False
):
super(MultiViewCNNForwardPass, self).__init__(
model,
generation_params,
sampling_scheme,
image_shape,
rays_batch,
filter_out_rays
)
self.ref_idx = -1
# Allocate GPU memory
D = self._generation_params.depth_planes
self.s_gpu = to_gpu(
np.zeros((self.rays_batch, D), dtype=np.float32)
)
self.points_gpu = to_gpu(
np.zeros((self.rays_batch, D, 4), dtype=np.float32)
)
def sim(self, scene, feature_size):
if self._fp is None:
self._fp = perform_multi_view_cnn_forward_pass_with_depth_estimation(
self._generation_params.depth_planes,
self._generation_params.neighbors + 1,
feature_size,
scene.image_shape[0],
scene.image_shape[1],
self._generation_params.padding,
scene.bbox.ravel(),
self._sampling_scheme
)
return self._fp
def forward_pass(self, scene, images_range):
# Make sure that the images_range is a tuple
assert isinstance(images_range, tuple)
# Declare some variables that we will need
(start_img_idx, end_img_idx, skip) = images_range
D = self._generation_params.depth_planes # number of depth planes
batch_size = self.rays_batch # number of rays in each mini-batch
H, W = scene.image_shape
        self.ref_idx = start_img_idx # initialize with the first image
while self.ref_idx < end_img_idx:
# Get the valid rays for the current image
ray_idxs = self.get_valid_rays_per_image(scene, self.ref_idx)
            # Based on the reference index, compute the multi-view Image objects
images = scene.get_image_with_neighbors(self.ref_idx)
# Start adding the features
a = datetime.datetime.now()
features = self._model.predict(
np.stack(self._to_list_with_zeropadded_images(images), axis=0)
)
b = datetime.datetime.now()
c = b - a
print "Features computation - ", c.total_seconds()
# Get the projection matrices of all the neighbor views, the
# projection matrix and the camera center of the reference view
P = [im.camera.P for im in images]
P_inv = images[0].camera.P_pinv
camera_center = images[0].camera.center
a = datetime.datetime.now()
            # Move to GPU to save some time from copying
features_gpu = to_gpu(features.ravel())
ray_idxs_gpu = to_gpu(ray_idxs.astype(np.int32))
P_gpu = to_gpu(np.array(P).ravel())
P_inv_gpu = to_gpu(P_inv.ravel())
camera_center_gpu = to_gpu(camera_center)
_, _, _, F = features.shape
depth_map = to_gpu(
np.zeros((H*W), dtype=np.float32)
)
            # Start iterating over the batches of rays
for i in range(0, len(ray_idxs), batch_size):
self.s_gpu.fill(0)
self.points_gpu.fill(0)
self.sim(scene, F)(
ray_idxs_gpu[i:i+batch_size],
features_gpu,
P_gpu,
P_inv_gpu,
camera_center_gpu,
self.s_gpu,
self.points_gpu,
depth_map[i:i+batch_size]
)
b = datetime.datetime.now()
c = b - a
print "Per-pixel depth estimation - ", c.total_seconds()
# Move to the next image
self.ref_idx += skip
yield depth_map.get().reshape(W, H).T
# TODO: Fix the memory allocation pattern so we don't delete and
# reallocate
del features_gpu
class MultiViewCNNVoxelSpaceForwardPass(ForwardPass):
"""Perform the forward pass only for the MultiViewCNN"""
def __init__(
self,
model,
generation_params,
sampling_scheme,
image_shape,
rays_batch,
filter_out_rays=False
):
super(MultiViewCNNVoxelSpaceForwardPass, self).__init__(
model,
generation_params,
sampling_scheme,
image_shape,
rays_batch,
filter_out_rays
)
self.ref_idx = -1
# Allocate GPU memory
M = self._generation_params.max_number_of_marched_voxels
self.s_gpu = to_gpu(
np.zeros((rays_batch, M), dtype=np.float32)
)
self.ray_voxel_count_gpu = to_gpu(
np.zeros((rays_batch,), dtype=np.int32)
)
self.ray_voxel_indices_gpu = to_gpu(
np.zeros((rays_batch, M, 3), dtype=np.int32)
)
self.voxel_grid_gpu = None
def sim(self, scene, feature_size):
if self._fp is None:
grid_shape = np.array(
scene.voxel_grid(
self._generation_params.grid_shape
).shape[1:]
)
self._fp = batch_mvcnn_voxel_traversal_with_ray_marching_with_depth_estimation(
self._generation_params.max_number_of_marched_voxels,
self._generation_params.depth_planes,
self._generation_params.neighbors + 1,
feature_size,
scene.image_shape[0],
scene.image_shape[1],
self._generation_params.padding,
scene.bbox.ravel(),
grid_shape,
self._sampling_scheme
)
return self._fp
def voxel_grid_to_gpu(self, scene):
if self.voxel_grid_gpu is None:
self.voxel_grid_gpu = to_gpu(scene.voxel_grid(
self._generation_params.grid_shape
).transpose(1, 2, 3, 0).ravel())
return self.voxel_grid_gpu
def forward_pass(self, scene, images_range):
# Make sure that the images_range is a tuple
assert isinstance(images_range, tuple)
# Declare some variables that we will need
(start_img_idx, end_img_idx, skip) = images_range
D = self._generation_params.depth_planes # number of depth planes
batch_size = self.rays_batch # number of rays in each mini-batch
H, W = scene.image_shape
        self.ref_idx = start_img_idx # initialize with the first image
while self.ref_idx < end_img_idx:
# Get the valid rays for the current image
ray_idxs = self.get_valid_rays_per_image(scene, self.ref_idx)
            # Based on the reference index, compute the multi-view Image objects
images = scene.get_image_with_neighbors(self.ref_idx)
| |
Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/countNonPayloadPerNode', methods=['POST', 'GET'])
def NonPayloadQueriesPerNode():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
schema = param['schema']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.countNonPayloadQueriesPerNode(
sprkDF, task_id, DB, schema)
            app.logger.info('countNonPayloadQueriesPerNode data computed')
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/queryPercentagePerSchema', methods=['POST', 'GET'])
def queryPercentagePerSchema():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.queryPercentagePerSchema(sprkDF, DB, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/queryPercentagePerNode', methods=['POST', 'GET'])
def queryPercentagePerNode():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
schema = param['schema']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.queryPercentagePerNode(sprkDF, DB, schema, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/countQueriesPerQueryTime', methods=['POST', 'GET'])
def countQueriesPerQueryTime():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.countQueriesPerQueryTime(sprkDF, DB, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/countQueriesPerDbTime', methods=['POST', 'GET'])
def countQueriesPerDbTime():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.countQueriesPerDbTime(sprkDF, DB, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/TimeDistribution', methods=['POST', 'GET'])
def TimeDistribution():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.TimeDistribution(sprkDF, DB, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/responseSizeDistribution', methods=['POST', 'GET'])
def responseSizeDistribution():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.responseSizeDistribution(sprkDF, DB, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/sizePercentagePerSchema', methods=['POST', 'GET'])
def sizePercentagePerSchema():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.sizePercentagePerSchema(sprkDF, DB, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/sizePercentagePerNode', methods=['POST', 'GET'])
def sizePercentagePerNode():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
schema = param['schema']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.sizePercentagePerNode(sprkDF, DB, schema, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/HighQueryDistPerSchema', methods=['POST', 'GET'])
def HighQueryDistPerSchema():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.HighQueryDistributionPerSchema(sprkDF, DB, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/HighQueryDistPerNode', methods=['POST', 'GET'])
def HighQueryDistPerNode():
if request.method == 'POST':
try:
param = request.form
# get the request parameters
user = param['userID']
task_id = param['task']
DB = param['db']
schema = param['schema']
# get the corresponding spark dataframe
sprkDF = SparkSql.CurrentSparkDataFrames[int(user)]
# filter data for the plot
data = SparkSql.HighQueryDistributionPerNode(
sprkDF, DB, schema, task_id)
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/Plots/deleteSession', methods=['POST', 'GET'])
def deleteSession():
if request.method == 'POST':
try:
param = request.form
SparkSql.CurrentSparkDataFrames.pop(param['userID'])
return jsonify({})
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
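# The plot routes above all follow the same pattern: read the form, look up
# the caller's Spark dataframe, delegate to one SparkSql helper and jsonify
# the result. A hedged sketch of a helper for the (df, db, task_id) variants
# (illustrative only; some helpers take their arguments in a different order
# and would need their own wrapper):
#
#     def run_plot_query(query_fn):
#         try:
#             param = request.form
#             sprk_df = SparkSql.CurrentSparkDataFrames[int(param['userID'])]
#             return jsonify(query_fn(sprk_df, param['db'], param['task']))
#         except Exception as error:
#             return render_template('Errors.html', Httperror='500',
#                                    error=str(error),
#                                    errorType='Internal Server Error')
#
#     # e.g. inside countQueriesPerDbTime():
#     #     return run_plot_query(SparkSql.countQueriesPerDbTime)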
"""
************************************* section 3: Caching Efficiency page methods
*************************************
"""
@app.route('/calculateCachingEfficiency/addCachingEfficiencyFolder', methods=['POST', 'GET'])
def addCachingEfficiencyFolder():
if request.method == 'POST':
try:
param = request.form
folder = (param['db'], param['schema'], param['node'])
foldersListForCachingEfficiency.append(folder)
app.logger.info(foldersListForCachingEfficiency)
return jsonify(foldersListForCachingEfficiency)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/calculateCachingEfficiency/removeCachingEfficiencyFolder', methods=['POST', 'GET'])
def deleteCachingEfficiencyFolder():
if request.method == 'POST':
try:
param = request.form
folder = (param['db'], param['schema'], param['node'])
if folder in foldersListForCachingEfficiency:
foldersListForCachingEfficiency.remove(folder)
return jsonify(foldersListForCachingEfficiency)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/calculateCachingEfficiency/calculate')
def calculate_caching_efficiency():
try:
folders = request.args.get('folders')
parquetFile = request.args.get('parquet_file') + '.parquet'
path = os.path.join(dirpath, parquetFile)
foldersList = extractFolders(folders)
#results = calculateCachingEfficiency(path,foldersList,settings)
task = celery.send_task(
'tasks.caching_efficiency', args=[path, foldersList])
task_id = task.id
data = {'task_id': task_id}
return jsonify(data)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/calculateCachingEfficiency/calculate_status', methods=['POST', 'GET'])
def calculate_caching_efficiency_status():
if request.method == 'POST':
try:
param = request.form
task = celery.AsyncResult(param['task_id'])
results = {}
if task.state == "SUCCESS":
results = task.get()
response = {
'state': task.state,
'results': results,
}
return jsonify(response)
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/stopCalculating', methods=['POST', 'GET'])
def stop_calculating_caching_efficiency():
if request.method == 'POST':
try:
task = request.form['task_id']
celery.send_task('tasks.stop', args=[task])
return jsonify('success')
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error) + \
". Check input parameters or ElasticSearch connection"
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
@app.route('/CachingEfficiency/storeCachingEfficiency', methods=['POST', 'GET'])
def writeElasticSearch():
if request.method == 'POST':
try:
# get file and data
param = request.form
file, data = (param['file_name'],
convertJsonToDictionary(param['data']))
# find the parquet file path
file_path = os.path.join(dirpath, file + ".parquet")
# read the parquet file in a spark dataframe
sparkDF = SparkSql.readParquetFileInSparkDataFrame(file_path)
df = sparkDF.first()
# get info about the file creation : task_id or time_range
if df['data_taskid'] != 'None':
task_id = int(df['data_taskid'])
else:
task_id = df['data_taskid']
data['Task_id'], data['Since'], data['Until'] = task_id, df['data_since'], df['data_to']
# save data into Elasticsearch
write_caching_efficiency_data(sparkDF, data, settings, SparkSql)
return jsonify("success")
except Exception as error:
httpcode, errortype = ('500', 'Internal Server Error')
error = str(error)
return render_template('Errors.html', Httperror=httpcode, error=error, errorType=errortype)
"""
General Functions used for validate and check the input/output
"""
def getTagForFolder(df, folder):
folder_componenets = folder.split(' , ')
db, schema, node = (
folder_componenets[0], folder_componenets[1], folder_componenets[2])
tag = df['tagname'].where(df['nodefullpath'] == node)
return tag[0]
def extractFolders(folders):
"""
    Convert a string of folders to a list of tuples (db, schema, node).
    :param folders: a string of folders separated by '-', where each folder's
                    db, schema and node are separated by ','
    :return: a list of tuples (db, schema, node)
"""
output = []
folderList = folders.split('-')
for folder in folderList:
folderComponents = folder.split(',')
output.append(
(folderComponents[0], folderComponents[1], folderComponents[2]))
return output
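# A quick worked example of the format extractFolders() expects (hypothetical
# values, shown as a comment):
#
#     >>> extractFolders('db1,schemaA,node1-db1,schemaA,node2')
#     [('db1', 'schemaA', 'node1'), ('db1', 'schemaA', 'node2')]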
def validateInput(input):
'''
    Check and validate data received from a form.
:param input: form parameters for extracting ES data
:return: validated data parameters
'''
output = {}
if not input['Task_id']:
output['Task_id'] = None
else:
output['Task_id'] = input['Task_id']
if input['Cached'] == "Cached Queries":
output['Cached'] = True
elif input['Cached'] == "Not Cached Queries":
output['Cached'] = False
else:
output['Cached'] = None
if not | |
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
'''
`craft-py-ext` is an experimental tool for generating the boilerplate to build simple CPython extensions.
It should be considered a work in progress.
To use it, you write a `.pyi` type declaration file, and then generate the boilerplate from that.
The boilerplate comes in two layers.
An outer function provides the C extension interface (e.g. PyObject* types),
and does a certain amount of error checking and unwrapping to native C types.
Then, a corresponding inner function is called, which is where the actual implementation goes.
The idea is to let the implementor fill out the inner function, and keep most of the generated boilerplate separate.
'''
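# A hedged example of the kind of `.pyi` interface this tool is meant to
# consume (hypothetical module and function names; only the conventions that
# parse_pyi_module()/parse_decl() check are illustrated: a module docstring
# and fully annotated parameters):
#
#     '''Docstring of the extension module to be generated.'''
#     from typing import ByteString, Union
#
#     def checksum(data:Union[str,ByteString]) -> bytes:
#       'Compute a checksum over a buffer.'
#       ...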
import re
from argparse import ArgumentParser
from ast import (AST, AnnAssign, Assign, AsyncFunctionDef, ClassDef, Expr as ExprStmt, FunctionDef, Import, ImportFrom, Module,
Name, Str, parse, stmt as Stmt)
from dataclasses import dataclass
from enum import Enum
from functools import singledispatch
from inspect import Parameter, Signature, signature
from typing import Any, ByteString, Callable, Dict, Iterator, List, NoReturn, Optional, TextIO, Tuple, Type, Union
from mypy_extensions import VarArg
from pithy.io import errL, errSL, read_from_path, read_line_from_path
from pithy.path import path_name, path_stem
KEYWORD_ONLY = Parameter.KEYWORD_ONLY
POSITIONAL_ONLY = Parameter.POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = Parameter.POSITIONAL_OR_KEYWORD
VAR_KEYWORD = Parameter.VAR_KEYWORD
empty = Parameter.empty
def main() -> None:
arg_parser = ArgumentParser(description='Generate CPython extension stubs from .pyi files.')
arg_parser.add_argument('paths', nargs='+', default=[])
arg_parser.add_argument('-dbg', action='store_true')
args = arg_parser.parse_args()
if not args.paths: exit('No paths specified.')
for path in args.paths:
if not path.endswith('.pyi'): exit(f'interface path does not end with `.pyi`: {path}')
generate_ext(path=path)
# Python type mappings.
@dataclass
class TypeInfo:
'Maps Python types, as parsed from .pyi files, to C extension types and associated metadata.'
type:Any
c_type:str
c_init:str
c_arg_parser_fmt:str
return_conv:Optional[str]
type_info_any = TypeInfo(Any,
c_type='PyObject *', c_init='NULL', c_arg_parser_fmt='o', return_conv='(PyObject*)')
type_infos = { t.type : t for t in [
type_info_any,
TypeInfo(None,
c_type='void ', c_init='', c_arg_parser_fmt='', return_conv=None),
TypeInfo(bytes,
c_type='PyBytesObject *', c_init='NULL', c_arg_parser_fmt='S', return_conv='(PyObject *)'),
TypeInfo(Union[str,ByteString],
c_type='Py_buffer ', c_init='{.buf=NULL, .obj=NULL, .len=0}', c_arg_parser_fmt='s*', return_conv=None),
]}
TypeAnn = Union[None,str,Type[Any]]
@dataclass
class Par:
'Function parameter info, as parsed from Python annotations.'
name:str
type:TypeAnn
dflt:Any
@property
def c_arg_cleanup(self) -> Optional[str]:
'Optional C argument cleanup code.'
if self.ti.c_type == 'Py_buffer ': return f'if ({self.name}.obj) PyBuffer_Release(&{self.name})'
return None
@property
def ti(self) -> TypeInfo: return type_infos.get(self.type, type_info_any)
class FuncKind(Enum):
Plain = 0
Method = 1
Class = 2
Static = 3
@dataclass
class Func:
'Function info, as parsed from Python annotations.'
name:str
type_name:Optional[str]
sig:Signature
pars:List[Par]
ret:TypeAnn
doc:str
kind:FuncKind
@dataclass
class Var:
name:str
type:Type[Any]
Decl = Union['Class',Func,Var]
class SourceReporter:
'Base class that can report source diagnostics from an AST node.'
path:str
def warn(self, node:AST, msg:str) -> None:
errSL('warning:', node_diagnostic(path=self.path, node=node, msg=msg))
def error(self, node:AST, msg:str) -> NoReturn:
exit('error: ' + node_diagnostic(path=self.path, node=node, msg=msg))
class Scope(SourceReporter):
'Scope base class is either a ExtMod (whole module being generated) or a Class.'
def __init__(self, path:str, name:str, doc:str) -> None:
self.path = path
self.name = name
self.doc:str = doc
self.decls:List[Decl] = []
class ExtMod(Scope):
'The parsed/generated extension module.'
class Class(Scope):
  'Class scope; both a Scope and a Decl, which is what makes the whole thing complicated.'
def generate_ext(path:str) -> None:
'Top level parsing and code generation for a path.'
errL('\n', path)
stem = path_stem(path)
name = path_name(stem)
mod_source = parse_pyi_module(path=path) # Input.
mod = ExtMod(path=path, name=name, doc=mod_source.doc)
for name, syntax, obj in mod_source:
parse_decl(syntax, name=name, obj=obj, scope=mod, global_vals=mod_source.vals)
dst_c = stem + '.gen.cpp'
dst_h = stem + '.gen.h'
with open(dst_c, 'w') as c, open(dst_h, 'w') as h:
write_module(mod, c=c, h=h)
ScopeNode = Union[ClassDef,Module]
@dataclass
class ScopeSource(SourceReporter):
'The source of a module or class scope. Contains both the syntactic and dynamic representations.'
path:str
node:ScopeNode
vals:Dict[str,Any]
@property
def body(self) -> List[Stmt]: return self.node.body
@property
def doc(self) -> str:
body = self.body
if not (body and isinstance(body[0], ExprStmt) and isinstance(body[0].value, Str)):
self.error(self.node, 'missing docstring')
doc_expr = body[0].value
doc = doc_expr.s
assert isinstance(doc, str)
m = invalid_doc_re.search(doc)
if m:
s, e = m.span()
self.error(doc_expr, f'invalid docstring: {m[0]!r}')
return doc
def __iter__(self) -> Iterator[Tuple[str,AST,Any]]:
'Iterate over a source and return (name, AST statement, runtime value) triples.'
for stmt in self.body:
name:str
if isinstance(stmt, AnnAssign) and isinstance(stmt.target, Name):
name = stmt.target.id
elif isinstance(stmt, (AsyncFunctionDef, ClassDef, FunctionDef)):
name = stmt.name
elif isinstance(stmt, (Assign, Import, ImportFrom)):
continue
elif isinstance(stmt, ExprStmt) and isinstance(stmt.value, Str):
continue # Docstring.
else:
type_name = type(stmt).__name__
self.warn(stmt, msg=f'unexpected interface statement: {type_name}')
continue
yield (name, stmt, self.vals[name])
def parse_pyi_module(path:str) -> ScopeSource:
'''
Parse .pyi declarations by both execing the source, and also parsing it into an AST.
The former lets us inspect the dynamic objects;
the latter lets us distinguish between declarations and imports.
'''
src = read_from_path(path)
# Parse src into an AST Module.
module = parse(src, filename=path)
# Compile.
try: code = compile(module, filename=path, mode='exec', optimize=1)
except SyntaxError as e:
line1 = e.lineno or 0 # If lineno is None, then line0 in our diagnostic becomes -1, which will print as '0'.
exit(src_diagnostic(path, line0=line1-1, col0=(e.offset or 0), msg=str(e)))
except ValueError as e: exit(src_diagnostic(path, line0=0, col0=0, msg=str(e)))
# Exec.
globals:Dict[str,Any] = {'__builtins__': __builtins__}
exec(code, globals) # As of python3.7, passing separate locals does not work because type annotation lookup is broken.
return ScopeSource(path=path, node=module, vals=globals)
# Parsing is dispatched over syntax type.
@singledispatch
def parse_decl(syntax:AST, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
'Default implementation raises.'
raise Exception(f'unknown syntax type: {name}; type: {syntax}')
@parse_decl.register
def _(syntax:AnnAssign, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
'Parse an annotated variable declaration.'
scope.warn(syntax, f'assignment not implemented')
@parse_decl.register
def _(syntax:AsyncFunctionDef, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
'Async function.'
scope.warn(syntax, f'async function def is not implemented')
@parse_decl.register
def _(syntax:FunctionDef, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
'Function declaration.'
is_method = isinstance(scope, Class)
if is_method:
if isinstance(obj, classmethod):
kind = FuncKind.Class
elif isinstance(obj, staticmethod):
kind = FuncKind.Static
else: # Instance method.
kind = FuncKind.Method
else: # Plain module function.
kind = FuncKind.Plain
is_class_method = isinstance(obj, (classmethod, staticmethod)) # Not sure if it is correct to handle both kinds the same way.
if is_class_method:
func = obj.__func__
else:
func = obj
doc = func.__doc__ or ''
sig = signature(func)
pars:List[Par] = []
for i, p in enumerate(sig.parameters.values()):
n = p.name
t = p.annotation
d = p.default
#k = p.kind # POSITIONAL_ONLY, POSITIONAL_OR_KEYWORD, KEYWORD_ONLY, VAR_KEYWORD.
if isinstance(t, str):
try: t = global_vals[t]
except KeyError: scope.error(syntax, f'parameter {n!r} has invalid string annotation: {t!r}')
if i == 0 and is_method:
expected_name = 'cls' if is_class_method else 'self'
if n != expected_name: scope.error(syntax, f'parameter {n!r} has unexpected name; expected {expected_name!r}')
elif t == empty: scope.error(syntax, f'parameter {n!r} has no type annotation')
pars.append(Par(name=n, type=t, dflt=d))
ret = sig.return_annotation
if isinstance(ret, str):
try: ret = global_vals[ret]
except KeyError: scope.error(syntax, f'return type has invalid string annotation: {ret!r}')
ret_ti = type_infos.get(ret, type_info_any)
if ret is not None and ret_ti.return_conv is None:
scope.error(syntax, f'return type is mapped to a C type that cannot be converted to a return value: {ret!r}')
type_name = scope.name if is_method else None
scope.decls.append(Func(name=name, type_name=type_name, sig=sig, pars=pars, ret=ret, doc=doc, kind=kind))
@parse_decl.register
def _(syntax:ClassDef, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
'Class declaration.'
class_source = ScopeSource(path=scope.path, node=syntax, vals=vars(obj))
c = Class(path=scope.path, name=name, doc=class_source.doc)
for member_name, decl_syntax, member in class_source:
parse_decl(decl_syntax, name=member_name, obj=member, scope=c, global_vals=global_vals)
# Register this custom type in our global dictionary.
type_infos[obj] = TypeInfo(obj,
c_type=f'{name} *', c_init='NULL', c_arg_parser_fmt='o', return_conv='(PyObject*)')
scope.decls.append(c)
# Code generation.
_Writers = Tuple[Callable[[VarArg(str)],None],...] # Cheap hack to provide convenience writer functions.
def write_module(mod:ExtMod, c:TextIO, h:TextIO) -> None:
'Generate code for a module.'
def bZ(*strings:str) -> None:
'Both.'
for s in strings:
c.write(s)
h.write(s)
def bL(*strings:str) -> None:
bZ(*strings, '\n')
def cZ(*strings:str) -> None:
'C only.'
for s in strings: c.write(s)
def cL(*strings:str) -> None:
cZ(*strings, '\n')
def hZ(*strings:str) -> None:
'Header only.'
for s in strings: h.write(s)
def hL(*strings:str) -> None:
hZ(*strings, '\n')
writers = (bZ, bL, cZ, cL, hZ, hL)
bL('// Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.') # TODO: license config.
bL()
hL('#define PY_SSIZE_T_CLEAN')
hL('#include "Python.h"')
cL(f'#include "{mod.name}.h"')
write_scope(scope=mod, prefix='', writers=writers)
cL()
cL()
cL( 'static struct PyModuleDef module_def = {')
cL( ' PyModuleDef_HEAD_INIT,')
cL(f' .m_name = "{mod.name}",')
cL(f' .m_doc = {mod.name}_doc,')
cL( ' .m_size = 0,')
cL(f' .m_methods = {mod.name}_methods,')
cL( ' .m_slots = NULL, // Single-phase initialization.')
cL( '};')
cL()
cL()
cL('PyMODINIT_FUNC')
  cL(f'PyInit_{mod.name}(void) {{')
cL()
cL(' PyObject *module = PyModule_Create(&module_def);')
cL(' if (!module) return NULL;')
for decl in mod.decls:
if not isinstance(decl, Class):continue
type_obj = decl.name + '_type'
| |
from datetime import datetime
from urllib.parse import urlencode
from operator import itemgetter
import json
import pytz
from django.contrib.gis.geos import Point
from django.urls import reverse
from mock import patch
from rest_framework import status
from rest_framework.test import APITestCase
from robber import expect
from freezegun import freeze_time
from data.cache_managers import allegation_cache_manager
from data.factories import (
OfficerFactory,
AllegationFactory,
AllegationCategoryFactory,
OfficerAllegationFactory,
AttachmentFileFactory,
InvestigatorAllegationFactory,
PoliceWitnessFactory,
)
from pinboard.factories import PinboardFactory, ExamplePinboardFactory
from pinboard.models import Pinboard
from trr.factories import TRRFactory, ActionResponseFactory
@patch('data.constants.MAX_VISUAL_TOKEN_YEAR', 2016)
class PinboardMobileViewSetTestCase(APITestCase):
def test_retrieve_pinboard(self):
example_pinboard_1 = PinboardFactory(
id='eeee1111',
title='Example pinboard 1',
description='Example pinboard 1',
)
example_pinboard_2 = PinboardFactory(
id='eeee2222',
title='Example pinboard 2',
description='Example pinboard 2',
)
ExamplePinboardFactory(pinboard=example_pinboard_1)
ExamplePinboardFactory(pinboard=example_pinboard_2)
officer_1 = OfficerFactory(id=11)
officer_2 = OfficerFactory(id=22)
allegation_1 = AllegationFactory(crid='abc123')
allegation_2 = AllegationFactory(crid='abc456')
trr_1 = TRRFactory(id=33)
trr_2 = TRRFactory(id=44)
pinboard = PinboardFactory(
id='f871a13f',
title='My Pinboard',
description='abc',
officers=[officer_1, officer_2],
allegations=[allegation_1, allegation_2],
trrs=[trr_1, trr_2],
)
# Current client does not own the pinboard, should clone it
response = self.client.get(reverse('api-v2:pinboards-mobile-detail', kwargs={'pk': 'f871a13f'}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
cloned_pinboard_id = response.data['id']
expect(cloned_pinboard_id).to.ne('f871a13f')
expect(response.data['title']).to.eq('My Pinboard')
expect(response.data['description']).to.eq('abc')
expect(set(response.data['officer_ids'])).to.eq({11, 22})
expect(set(response.data['crids'])).to.eq({'abc123', 'abc456'})
expect(set(response.data['trr_ids'])).to.eq({33, 44})
cloned_pinboard = Pinboard.objects.get(id=cloned_pinboard_id)
expect(cloned_pinboard.source_pinboard).to.eq(pinboard)
expect(cloned_pinboard.title).to.eq('My Pinboard')
expect(cloned_pinboard.description).to.eq('abc')
expect(set(cloned_pinboard.officer_ids)).to.eq({11, 22})
expect(set(cloned_pinboard.crids)).to.eq({'abc123', 'abc456'})
expect(set(cloned_pinboard.trr_ids)).to.eq({33, 44})
        # Now the current client owns the pinboard, successive requests should not clone it
# `id` is case-insensitive
response = self.client.get(reverse('api-v2:pinboards-mobile-detail', kwargs={'pk': cloned_pinboard_id}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data['id']).to.eq(cloned_pinboard_id)
expect(response.data['title']).to.eq('My Pinboard')
expect(set(response.data['officer_ids'])).to.eq({11, 22})
expect(set(response.data['crids'])).to.eq({'abc123', 'abc456'})
expect(set(response.data['trr_ids'])).to.eq({33, 44})
expect(response.data['description']).to.eq('abc')
expect(response.data).not_to.contain('example_pinboards')
def test_retrieve_pinboard_not_found(self):
PinboardFactory(
id='d91ba25d',
title='My Pinboard',
description='abc',
)
expect(Pinboard.objects.count()).to.eq(1)
response = self.client.get(reverse('api-v2:pinboards-mobile-detail', kwargs={'pk': 'a4f34019'}))
expect(Pinboard.objects.count()).to.eq(2)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data['id']).to.ne('d91ba25d')
expect(response.data['title']).to.eq('')
expect(response.data['officer_ids']).to.eq([])
expect(response.data['crids']).to.eq([])
expect(response.data['trr_ids']).to.eq([])
expect(response.data['description']).to.eq('')
def test_update_pinboard_in_the_same_session(self):
OfficerFactory(id=1)
OfficerFactory(id=2)
AllegationFactory(crid='123abc')
AllegationFactory(crid='456def')
TRRFactory(id=1, officer=OfficerFactory(id=3))
TRRFactory(id=2, officer=OfficerFactory(id=4))
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
}),
content_type='application/json',
)
pinboard_id = response.data['id']
response = self.client.put(
reverse('api-v2:pinboards-mobile-detail', kwargs={'pk': pinboard_id}),
json.dumps({
'title': 'New Pinboard',
'officer_ids': [1],
'crids': ['456def'],
'trr_ids': [1, 2],
'description': 'def',
}),
content_type='application/json',
)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'id': pinboard_id,
'title': 'New Pinboard',
'officer_ids': [1],
'crids': ['456def'],
'trr_ids': [1, 2],
'description': 'def',
})
pinboard = Pinboard.objects.get(id=pinboard_id)
officer_ids = set([officer.id for officer in pinboard.officers.all()])
crids = set([allegation.crid for allegation in pinboard.allegations.all()])
trr_ids = set([trr.id for trr in pinboard.trrs.all()])
expect(pinboard.title).to.eq('New Pinboard')
expect(pinboard.description).to.eq('def')
expect(officer_ids).to.eq({1})
expect(crids).to.eq({'456def'})
expect(trr_ids).to.eq({1, 2})
def test_update_pinboard_in_the_same_session_with_source_id(self):
officer_1 = OfficerFactory(id=1)
officer_2 = OfficerFactory(id=2)
allegation_1 = AllegationFactory(crid='123abc')
AllegationFactory(crid='456def')
trr_1 = TRRFactory(id=1, officer=OfficerFactory(id=3))
TRRFactory(id=2, officer=OfficerFactory(id=4))
source_pinboard = PinboardFactory(
id='eeee1111',
title='Example pinboard 1',
description='Example pinboard 1',
)
source_pinboard.officers.set([officer_1, officer_2])
source_pinboard.allegations.set([allegation_1])
source_pinboard.trrs.set([trr_1])
response = self.client.post(
reverse('api-v2:pinboards-list'),
json.dumps({
'title': '',
'officer_ids': [],
'crids': [],
'trr_ids': [],
'description': '',
}),
content_type='application/json'
)
pinboard_id = response.data['id']
response = self.client.put(
reverse('api-v2:pinboards-mobile-detail', kwargs={'pk': pinboard_id}),
json.dumps({
'source_pinboard_id': 'eeee1111',
}),
content_type='application/json'
)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'id': pinboard_id,
'title': 'Example pinboard 1',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'Example pinboard 1',
})
pinboard = Pinboard.objects.get(id=pinboard_id)
officer_ids = set([officer.id for officer in pinboard.officers.all()])
crids = set([allegation.crid for allegation in pinboard.allegations.all()])
trr_ids = set([trr.id for trr in pinboard.trrs.all()])
expect(pinboard.title).to.eq('Example pinboard 1')
expect(pinboard.description).to.eq('Example pinboard 1')
expect(officer_ids).to.eq({1, 2})
expect(crids).to.eq({'123abc'})
expect(trr_ids).to.eq({1})
def test_update_when_have_multiple_pinboards_in_session(self):
owned_pinboards = []
OfficerFactory(id=1)
OfficerFactory(id=2)
AllegationFactory(crid='123abc')
AllegationFactory(crid='456def')
TRRFactory(id=1, officer=OfficerFactory(id=3))
TRRFactory(id=2, officer=OfficerFactory(id=4))
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
}),
content_type='application/json',
)
owned_pinboards.append(response.data['id'])
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
}),
content_type='application/json',
)
owned_pinboards.append(response.data['id'])
        # Try updating the old pinboard
response = self.client.put(
reverse('api-v2:pinboards-mobile-detail', kwargs={'pk': owned_pinboards[0]}),
json.dumps({
'title': 'New Pinboard',
'officer_ids': [1],
'crids': ['456def'],
'trr_ids': [1, 2],
'description': 'def',
}),
content_type='application/json',
)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'id': owned_pinboards[0],
'title': 'New Pinboard',
'officer_ids': [1],
'crids': ['456def'],
'trr_ids': [1, 2],
'description': 'def',
})
def test_update_pinboard_out_of_session(self):
OfficerFactory(id=1)
OfficerFactory(id=2)
AllegationFactory(crid='123abc')
AllegationFactory(crid='456def')
TRRFactory(id=1, officer=OfficerFactory(id=3))
TRRFactory(id=2, officer=OfficerFactory(id=4))
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
}),
content_type='application/json',
)
self.client.cookies.clear()
response = self.client.put(
reverse('api-v2:pinboards-mobile-detail', kwargs={'pk': response.data['id']}),
json.dumps({
'title': 'New Pinboard',
'officer_ids': [1],
'crids': ['456def'],
'trr_ids': [1, 2],
'description': 'def',
}),
content_type='application/json',
)
expect(response.status_code).to.eq(status.HTTP_403_FORBIDDEN)
def test_create_pinboard(self):
OfficerFactory(id=1)
OfficerFactory(id=2)
AllegationFactory(crid='123abc')
TRRFactory(id=1, officer=OfficerFactory(id=3))
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
}),
content_type='application/json',
)
expect(response.status_code).to.eq(status.HTTP_201_CREATED)
expect(response.data['id']).to.be.a.string()
expect(response.data['id']).to.have.length(8)
expect(response.data).to.eq({
'id': response.data['id'],
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc'
})
expect(Pinboard.objects.count()).to.eq(1)
pinboard = Pinboard.objects.all()
expect(pinboard[0].title).to.eq('My Pinboard')
expect(pinboard[0].description).to.eq('abc')
expect(set(pinboard.values_list('officers', flat=True))).to.eq({1, 2})
expect(set(pinboard.values_list('allegations', flat=True))).to.eq({'123abc'})
expect(set(pinboard.values_list('trrs', flat=True))).to.eq({1})
def test_create_pinboard_ignore_id(self):
ignored_id = '1234ab'
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'id': ignored_id,
'title': 'My Pinboard',
'officer_ids': [],
'crids': [],
'trr_ids': [],
'description': 'abc',
}),
content_type='application/json',
)
expect(response.status_code).to.eq(status.HTTP_201_CREATED)
expect(response.data['id']).to.be.a.string()
expect(response.data['id']).to.have.length(8)
expect(response.data['id']).to.ne(ignored_id)
expect(response.data).to.eq({
'id': response.data['id'],
'title': 'My Pinboard',
'officer_ids': [],
'crids': [],
'trr_ids': [],
'description': 'abc',
'example_pinboards': []
})
expect(Pinboard.objects.filter(id=response.data['id']).exists()).to.be.true()
def test_create_pinboard_not_found_pinned_item_ids(self):
OfficerFactory(id=1)
OfficerFactory(id=2)
AllegationFactory(crid='123abc')
TRRFactory(id=1, officer=OfficerFactory(id=3))
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'title': 'My Pinboard',
'officer_ids': [1, 2, 3, 4, 5],
'crids': ['789xyz', 'zyx123', '123abc'],
'trr_ids': [0, 1, 3, 4],
'description': 'abc',
}),
content_type='application/json'
)
expect(response.status_code).to.eq(status.HTTP_201_CREATED)
expect(response.data['id']).to.be.a.string()
expect(response.data['id']).to.have.length(8)
expect(response.data).to.eq({
'id': response.data['id'],
'title': 'My Pinboard',
'officer_ids': [1, 2, 3],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
'not_found_items': {
'officer_ids': [4, 5],
'crids': ['789xyz', 'zyx123'],
'trr_ids': [0, 3, 4],
}
})
expect(Pinboard.objects.count()).to.eq(1)
pinboard = Pinboard.objects.all()
expect(pinboard[0].title).to.eq('My Pinboard')
expect(pinboard[0].description).to.eq('abc')
expect(set(pinboard.values_list('officers', flat=True))).to.eq({1, 2, 3})
expect(set(pinboard.values_list('allegations', flat=True))).to.eq({'123abc'})
expect(set(pinboard.values_list('trrs', flat=True))).to.eq({1})
def test_latest_retrieved_pinboard_return_null(self):
# No previous pinboard, data returned should be null
response = self.client.get(reverse('api-v2:pinboards-mobile-latest-retrieved-pinboard'))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({})
def test_latest_retrieved_pinboard_return_null_when_create_is_not_true(self):
response = self.client.get(
reverse('api-v2:pinboards-mobile-latest-retrieved-pinboard'),
{'create': 'not true'}
)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({})
def test_latest_retrieved_pinboard_return_new_empty_pinboard(self):
example_pinboard_1 = PinboardFactory(
id='eeee1111',
title='Example pinboard 1',
description='Example pinboard 1',
)
example_pinboard_2 = PinboardFactory(
id='eeee2222',
title='Example pinboard 2',
description='Example pinboard 2',
)
ExamplePinboardFactory(pinboard=example_pinboard_1)
ExamplePinboardFactory(pinboard=example_pinboard_2)
response = self.client.get(
reverse('api-v2:pinboards-mobile-latest-retrieved-pinboard'),
{'create': 'true'}
)
expect(response.status_code).to.eq(status.HTTP_200_OK)
response.data['example_pinboards'] = sorted(
response.data['example_pinboards'],
key=lambda pinboard: pinboard['id']
)
expect(self.client.session.get('owned_pinboards')).to.eq([response.data['id']])
expect(self.client.session.get('latest_retrieved_pinboard')).to.eq(response.data['id'])
expect(response.data['id']).to.be.a.string()
expect(response.data['id']).to.have.length(8)
expect(response.data).to.eq({
'id': response.data['id'],
'title': '',
'description': '',
'officer_ids': [],
'crids': [],
'trr_ids': [],
'example_pinboards': [{
'id': 'eeee1111',
'title': 'Example pinboard 1',
'description': 'Example pinboard 1',
}, {
'id': 'eeee2222',
'title': 'Example pinboard 2',
'description': 'Example pinboard 2',
}],
})
def test_latest_retrieved_pinboard(self):
# Create a pinboard in current session
OfficerFactory(id=1)
OfficerFactory(id=2)
AllegationFactory(crid='123abc')
TRRFactory(id=1, officer=OfficerFactory(id=3))
response = self.client.post(
reverse('api-v2:pinboards-mobile-list'),
json.dumps({
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
}),
content_type='application/json',
)
pinboard_id = response.data['id']
# Latest retrieved pinboard is now the above one
response = self.client.get(reverse('api-v2:pinboards-mobile-latest-retrieved-pinboard'))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq({
'id': pinboard_id,
'title': 'My Pinboard',
'officer_ids': [1, 2],
'crids': ['123abc'],
'trr_ids': [1],
'description': 'abc',
})
def test_selected_complaints(self):
category1 = AllegationCategoryFactory(
category='Use Of Force',
allegation_name='Miscellaneous',
)
category2 = AllegationCategoryFactory(
category='Verbal Abuse',
allegation_name='Miscellaneous',
)
allegation1 = AllegationFactory(
crid='1000001',
incident_date=datetime(2010, 1, 1, tzinfo=pytz.utc),
point=Point(1.0, 1.0),
)
allegation2 = AllegationFactory(
crid='1000002',
incident_date=datetime(2011, 1, 1, tzinfo=pytz.utc),
point=None,
)
allegation3 = AllegationFactory(
crid='1000003',
incident_date=datetime(2012, 1, 1, tzinfo=pytz.utc),
point=Point(3.0, 3.0),
)
OfficerAllegationFactory(allegation=allegation1, allegation_category=category1)
OfficerAllegationFactory(allegation=allegation2, allegation_category=category2)
OfficerAllegationFactory(allegation=allegation3, allegation_category=category2)
allegation_cache_manager.cache_data()
allegation1.refresh_from_db()
allegation2.refresh_from_db()
allegation3.refresh_from_db()
pinboard = PinboardFactory(allegations=(allegation1, allegation2))
response = self.client.get(reverse('api-v2:pinboards-mobile-complaints', kwargs={'pk': pinboard.id}))
results = sorted(response.data, key=itemgetter('crid'))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(results).to.eq([
{
'crid': '1000001',
'incident_date': '2010-01-01',
'point': {'lon': 1.0, 'lat': 1.0},
'category': 'Use Of Force',
},
{
'crid': '1000002',
'incident_date': '2011-01-01',
'category': 'Verbal Abuse',
}
])
def test_selected_officers(self):
officer1 = OfficerFactory(
id=1, first_name='Daryl', last_name='Mack',
trr_percentile=12.0000,
civilian_allegation_percentile=98.4344,
internal_allegation_percentile=99.7840,
complaint_percentile=99.3450,
rank='Police Officer'
)
officer2 = OfficerFactory(
id=2,
first_name='Ronald', last_name='Watts',
trr_percentile=0.0000,
civilian_allegation_percentile=98.4344,
internal_allegation_percentile=99.7840,
complaint_percentile=99.5000,
rank='Detective'
)
OfficerFactory(id=3)
pinboard = PinboardFactory(officers=(officer1, officer2))
response = self.client.get(reverse('api-v2:pinboards-mobile-officers', kwargs={'pk': pinboard.id}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq([
{
'id': 1,
                'full_name': 'Daryl Mack',
'complaint_count': 0,
'rank': 'Police Officer',
'percentile_trr': '12.0000',
'percentile_allegation': '99.3450',
'percentile_allegation_civilian': '98.4344',
'percentile_allegation_internal': '99.7840',
},
{
'id': 2,
                'full_name': 'Ronald Watts',
'complaint_count': 0,
'rank': 'Detective',
'percentile_trr': '0.0000',
'percentile_allegation': '99.5000',
'percentile_allegation_civilian': '98.4344',
'percentile_allegation_internal': '99.7840',
}
])
def test_selected_trrs(self):
trr1 = TRRFactory(
id=1,
trr_datetime=datetime(2012, 1, 1, tzinfo=pytz.utc),
point=Point(1.0, 1.0),
)
trr2 = TRRFactory(
id=2,
trr_datetime=datetime(2013, 1, 1, tzinfo=pytz.utc),
point=None,
)
TRRFactory(id=3)
ActionResponseFactory(trr=trr1, force_type='Physical Force - Stunning', action_sub_category='1')
ActionResponseFactory(trr=trr1, force_type='Impact Weapon', action_sub_category='2')
pinboard = PinboardFactory(trrs=(trr1, trr2))
response = self.client.get(reverse('api-v2:pinboards-mobile-trrs', kwargs={'pk': pinboard.id}))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq([
{
'id': 1,
'trr_datetime': '2012-01-01',
'category': 'Impact Weapon',
'point': {'lon': 1.0, 'lat': 1.0},
},
{
'id': 2,
'trr_datetime': '2013-01-01',
'category': 'Unknown',
'point': None,
}
])
def test_relevant_documents(self):
pinned_officer_1 = OfficerFactory(
id=1,
rank='Police Officer',
first_name='Jerome',
last_name='Finnigan',
allegation_count=10,
trr_percentile='99.99',
complaint_percentile='88.88',
civilian_allegation_percentile='77.77',
internal_allegation_percentile='66.66'
)
pinned_officer_2 = OfficerFactory(
id=2,
rank='Detective',
first_name='Edward',
last_name='May',
allegation_count=3,
trr_percentile='11.11',
complaint_percentile='22.22',
civilian_allegation_percentile='33.33',
internal_allegation_percentile='44.44'
)
pinned_officer_3 = OfficerFactory(id=3)
officer_4 = OfficerFactory(
id=4,
rank='Senior Police Officer',
first_name='Raymond',
last_name='Piwinicki',
complaint_percentile=None,
allegation_count=20,
)
relevant_allegation_1 = AllegationFactory(
crid='1',
incident_date=datetime(2002, 2, 21, tzinfo=pytz.utc),
most_common_category=AllegationCategoryFactory(category='Operation/Personnel Violations'),
point=Point([0.01, 0.02]),
)
relevant_allegation_2 = AllegationFactory(
crid='2',
incident_date=datetime(2002, 2, 22, tzinfo=pytz.utc),
point=None,
)
not_relevant_allegation = | |
# pypy/module/_ssl/interp_ssl.py
from pypy.rpython.rctypes.tool import ctypes_platform
from pypy.rpython.rctypes.tool.libc import libc
import pypy.rpython.rctypes.implementation # this defines rctypes magic
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import W_Root, ObjSpace, Wrappable
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from ctypes import *
import ctypes.util
import sys
import socket
import select
from ssl import SSL_CTX, SSL, X509, SSL_METHOD, X509_NAME
from bio import BIO
c_void = None
libssl = cdll.LoadLibrary(ctypes.util.find_library("ssl"))
## user defined constants
X509_NAME_MAXLEN = 256
# these mirror ssl.h
PY_SSL_ERROR_NONE, PY_SSL_ERROR_SSL = 0, 1
PY_SSL_ERROR_WANT_READ, PY_SSL_ERROR_WANT_WRITE = 2, 3
PY_SSL_ERROR_WANT_X509_LOOKUP = 4
PY_SSL_ERROR_SYSCALL = 5 # look at error stack/return value/errno
PY_SSL_ERROR_ZERO_RETURN, PY_SSL_ERROR_WANT_CONNECT = 6, 7
# start of non ssl.h errorcodes
PY_SSL_ERROR_EOF = 8 # special case of SSL_ERROR_SYSCALL
PY_SSL_ERROR_INVALID_ERROR_CODE = 9
SOCKET_IS_NONBLOCKING, SOCKET_IS_BLOCKING = 0, 1
SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3
SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5
class CConfig:
_header_ = """
#include <openssl/ssl.h>
#include <openssl/opensslv.h>
#include <openssl/bio.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/poll.h>
"""
OPENSSL_VERSION_NUMBER = ctypes_platform.ConstantInteger(
"OPENSSL_VERSION_NUMBER")
SSL_FILETYPE_PEM = ctypes_platform.ConstantInteger("SSL_FILETYPE_PEM")
SSL_OP_ALL = ctypes_platform.ConstantInteger("SSL_OP_ALL")
SSL_VERIFY_NONE = ctypes_platform.ConstantInteger("SSL_VERIFY_NONE")
SSL_ERROR_WANT_READ = ctypes_platform.ConstantInteger(
"SSL_ERROR_WANT_READ")
SSL_ERROR_WANT_WRITE = ctypes_platform.ConstantInteger(
"SSL_ERROR_WANT_WRITE")
SSL_ERROR_ZERO_RETURN = ctypes_platform.ConstantInteger(
"SSL_ERROR_ZERO_RETURN")
SSL_ERROR_WANT_X509_LOOKUP = ctypes_platform.ConstantInteger(
"SSL_ERROR_WANT_X509_LOOKUP")
SSL_ERROR_WANT_CONNECT = ctypes_platform.ConstantInteger(
"SSL_ERROR_WANT_CONNECT")
SSL_ERROR_SYSCALL = ctypes_platform.ConstantInteger("SSL_ERROR_SYSCALL")
SSL_ERROR_SSL = ctypes_platform.ConstantInteger("SSL_ERROR_SSL")
FD_SETSIZE = ctypes_platform.ConstantInteger("FD_SETSIZE")
SSL_CTRL_OPTIONS = ctypes_platform.ConstantInteger("SSL_CTRL_OPTIONS")
BIO_C_SET_NBIO = ctypes_platform.ConstantInteger("BIO_C_SET_NBIO")
pollfd = ctypes_platform.Struct("struct pollfd",
[("fd", c_int), ("events", c_short), ("revents", c_short)])
nfds_t = ctypes_platform.SimpleType("nfds_t", c_uint)
POLLOUT = ctypes_platform.ConstantInteger("POLLOUT")
POLLIN = ctypes_platform.ConstantInteger("POLLIN")
class cConfig:
pass
cConfig.__dict__.update(ctypes_platform.configure(CConfig))
OPENSSL_VERSION_NUMBER = cConfig.OPENSSL_VERSION_NUMBER
HAVE_OPENSSL_RAND = OPENSSL_VERSION_NUMBER >= 0x0090500fL
SSL_FILETYPE_PEM = cConfig.SSL_FILETYPE_PEM
SSL_OP_ALL = cConfig.SSL_OP_ALL
SSL_VERIFY_NONE = cConfig.SSL_VERIFY_NONE
SSL_ERROR_WANT_READ = cConfig.SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE = cConfig.SSL_ERROR_WANT_WRITE
SSL_ERROR_ZERO_RETURN = cConfig.SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_X509_LOOKUP = cConfig.SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_WANT_CONNECT = cConfig.SSL_ERROR_WANT_CONNECT
SSL_ERROR_SYSCALL = cConfig.SSL_ERROR_SYSCALL
SSL_ERROR_SSL = cConfig.SSL_ERROR_SSL
FD_SETSIZE = cConfig.FD_SETSIZE
SSL_CTRL_OPTIONS = cConfig.SSL_CTRL_OPTIONS
BIO_C_SET_NBIO = cConfig.BIO_C_SET_NBIO
POLLOUT = cConfig.POLLOUT
POLLIN = cConfig.POLLIN
pollfd = cConfig.pollfd
nfds_t = cConfig.nfds_t
arr_x509 = c_char * X509_NAME_MAXLEN
constants = {}
constants["SSL_ERROR_ZERO_RETURN"] = PY_SSL_ERROR_ZERO_RETURN
constants["SSL_ERROR_WANT_READ"] = PY_SSL_ERROR_WANT_READ
constants["SSL_ERROR_WANT_WRITE"] = PY_SSL_ERROR_WANT_WRITE
constants["SSL_ERROR_WANT_X509_LOOKUP"] = PY_SSL_ERROR_WANT_X509_LOOKUP
constants["SSL_ERROR_SYSCALL"] = PY_SSL_ERROR_SYSCALL
constants["SSL_ERROR_SSL"] = PY_SSL_ERROR_SSL
constants["SSL_ERROR_WANT_CONNECT"] = PY_SSL_ERROR_WANT_CONNECT
constants["SSL_ERROR_EOF"] = PY_SSL_ERROR_EOF
constants["SSL_ERROR_INVALID_ERROR_CODE"] = PY_SSL_ERROR_INVALID_ERROR_CODE
libssl.SSL_load_error_strings.restype = c_void
libssl.SSL_library_init.restype = c_int
if HAVE_OPENSSL_RAND:
libssl.RAND_add.argtypes = [c_char_p, c_int, c_double]
libssl.RAND_add.restype = c_void
libssl.RAND_status.restype = c_int
libssl.RAND_egd.argtypes = [c_char_p]
libssl.RAND_egd.restype = c_int
libssl.SSL_CTX_new.argtypes = [POINTER(SSL_METHOD)]
libssl.SSL_CTX_new.restype = POINTER(SSL_CTX)
libssl.SSLv23_method.restype = POINTER(SSL_METHOD)
libssl.SSL_CTX_use_PrivateKey_file.argtypes = [POINTER(SSL_CTX), c_char_p, c_int]
libssl.SSL_CTX_use_PrivateKey_file.restype = c_int
libssl.SSL_CTX_use_certificate_chain_file.argtypes = [POINTER(SSL_CTX), c_char_p]
libssl.SSL_CTX_use_certificate_chain_file.restype = c_int
libssl.SSL_CTX_ctrl.argtypes = [POINTER(SSL_CTX), c_int, c_int, c_void_p]
libssl.SSL_CTX_ctrl.restype = c_int
libssl.SSL_CTX_set_verify.argtypes = [POINTER(SSL_CTX), c_int, c_void_p]
libssl.SSL_CTX_set_verify.restype = c_void
libssl.SSL_new.argtypes = [POINTER(SSL_CTX)]
libssl.SSL_new.restype = POINTER(SSL)
libssl.SSL_set_fd.argtypes = [POINTER(SSL), c_int]
libssl.SSL_set_fd.restype = c_int
libssl.BIO_ctrl.argtypes = [POINTER(BIO), c_int, c_int, c_void_p]
libssl.BIO_ctrl.restype = c_int
libssl.SSL_get_rbio.argtypes = [POINTER(SSL)]
libssl.SSL_get_rbio.restype = POINTER(BIO)
libssl.SSL_get_wbio.argtypes = [POINTER(SSL)]
libssl.SSL_get_wbio.restype = POINTER(BIO)
libssl.SSL_set_connect_state.argtypes = [POINTER(SSL)]
libssl.SSL_set_connect_state.restype = c_void
libssl.SSL_connect.argtypes = [POINTER(SSL)]
libssl.SSL_connect.restype = c_int
libssl.SSL_get_error.argtypes = [POINTER(SSL), c_int]
libssl.SSL_get_error.restype = c_int
have_poll = False
if hasattr(libc, "poll"):
have_poll = True
libc.poll.argtypes = [POINTER(pollfd), nfds_t, c_int]
libc.poll.restype = c_int
libssl.ERR_get_error.restype = c_int
libssl.ERR_error_string.argtypes = [c_int, c_char_p]
libssl.ERR_error_string.restype = c_char_p
libssl.SSL_get_peer_certificate.argtypes = [POINTER(SSL)]
libssl.SSL_get_peer_certificate.restype = POINTER(X509)
libssl.X509_get_subject_name.argtypes = [POINTER(X509)]
libssl.X509_get_subject_name.restype = POINTER(X509_NAME)
libssl.X509_get_issuer_name.argtypes = [POINTER(X509)]
libssl.X509_get_issuer_name.restype = POINTER(X509_NAME)
libssl.X509_NAME_oneline.argtypes = [POINTER(X509_NAME), arr_x509, c_int]
libssl.X509_NAME_oneline.restype = c_char_p
libssl.X509_free.argtypes = [POINTER(X509)]
libssl.X509_free.restype = c_void
libssl.SSL_free.argtypes = [POINTER(SSL)]
libssl.SSL_free.restype = c_void
libssl.SSL_CTX_free.argtypes = [POINTER(SSL_CTX)]
libssl.SSL_CTX_free.restype = c_void
libssl.SSL_write.argtypes = [POINTER(SSL), c_char_p, c_int]
libssl.SSL_write.restype = c_int
libssl.SSL_pending.argtypes = [POINTER(SSL)]
libssl.SSL_pending.restype = c_int
libssl.SSL_read.argtypes = [POINTER(SSL), c_char_p, c_int]
libssl.SSL_read.restype = c_int
def _init_ssl():
libssl.SSL_load_error_strings()
libssl.SSL_library_init()
if HAVE_OPENSSL_RAND:
# helper routines for seeding the SSL PRNG
def RAND_add(space, string, entropy):
"""RAND_add(string, entropy)
Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
bound on the entropy contained in string."""
buf = c_char_p(string)
libssl.RAND_add(buf, len(string), entropy)
RAND_add.unwrap_spec = [ObjSpace, str, float]
def RAND_status(space):
"""RAND_status() -> 0 or 1
Returns 1 if the OpenSSL PRNG has been seeded with enough data and 0 if not.
It is necessary to seed the PRNG with RAND_add() on some platforms before
using the ssl() function."""
res = libssl.RAND_status()
return space.wrap(res)
RAND_status.unwrap_spec = [ObjSpace]
def RAND_egd(space, path):
"""RAND_egd(path) -> bytes
Queries the entropy gather daemon (EGD) on socket path. Returns number
of bytes read. Raises socket.sslerror if connection to EGD fails or
if it does provide enough data to seed PRNG."""
socket_path = c_char_p(path)
bytes = libssl.RAND_egd(socket_path)
if bytes == -1:
msg = "EGD connection failed or EGD did not return"
msg += " enough data to seed the PRNG"
raise OperationError(space.w_Exception, space.wrap(msg))
return space.wrap(bytes)
RAND_egd.unwrap_spec = [ObjSpace, str]
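    # Illustrative app-level usage of the three helpers above (hypothetical
    # snippet; it assumes this interp-level module is exposed to application
    # code under the name `_ssl`, which is not shown in this excerpt):
    #     import _ssl, os
    #     _ssl.RAND_add(os.urandom(32), 32.0)   # mix OS entropy into the PRNG
    #     _ssl.RAND_status()                    # -> 1 once sufficiently seeded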
class SSLObject(Wrappable):
def __init__(self, space):
self.space = space
self.w_socket = None
self.ctx = POINTER(SSL_CTX)()
self.ssl = POINTER(SSL)()
self.server_cert = POINTER(X509)()
self._server = arr_x509()
self._issuer = arr_x509()
def server(self):
return self.space.wrap(self._server.value)
server.unwrap_spec = ['self']
def issuer(self):
return self.space.wrap(self._issuer.value)
issuer.unwrap_spec = ['self']
def __del__(self):
if self.server_cert:
libssl.X509_free(self.server_cert)
if self.ssl:
libssl.SSL_free(self.ssl)
if self.ctx:
libssl.SSL_CTX_free(self.ctx)
def write(self, data):
"""write(s) -> len
Writes the string s into the SSL object. Returns the number
of bytes written."""
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, True)
if sockstate == SOCKET_HAS_TIMED_OUT:
raise OperationError(self.space.w_Exception,
self.space.wrap("The write operation timed out"))
elif sockstate == SOCKET_HAS_BEEN_CLOSED:
raise OperationError(self.space.w_Exception,
self.space.wrap("Underlying socket has been closed."))
elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
raise OperationError(self.space.w_Exception,
self.space.wrap("Underlying socket too large for select()."))
num_bytes = 0
while True:
err = 0
num_bytes = libssl.SSL_write(self.ssl, data, len(data))
err = libssl.SSL_get_error(self.ssl, num_bytes)
if err == SSL_ERROR_WANT_READ:
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, False)
elif err == SSL_ERROR_WANT_WRITE:
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, True)
else:
sockstate = SOCKET_OPERATION_OK
if sockstate == SOCKET_HAS_TIMED_OUT:
raise OperationError(self.space.w_Exception,
self.space.wrap("The connect operation timed out"))
elif sockstate == SOCKET_HAS_BEEN_CLOSED:
raise OperationError(self.space.w_Exception,
self.space.wrap("Underlying socket has been closed."))
elif sockstate == SOCKET_IS_NONBLOCKING:
break
if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
continue
else:
break
if num_bytes > 0:
return self.space.wrap(num_bytes)
else:
errstr, errval = _ssl_seterror(self.space, self, num_bytes)
raise OperationError(self.space.w_Exception,
self.space.wrap("%s: %d" % (errstr, errval)))
write.unwrap_spec = ['self', str]
def read(self, num_bytes=1024):
"""read([len]) -> string
Read up to len bytes from the SSL socket."""
count = libssl.SSL_pending(self.ssl)
if not count:
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, False)
if sockstate == SOCKET_HAS_TIMED_OUT:
raise OperationError(self.space.w_Exception,
self.space.wrap("The read operation timed out"))
elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
raise OperationError(self.space.w_Exception,
self.space.wrap("Underlying socket too large for select()."))
buf = create_string_buffer(num_bytes)
while True:
err = 0
count = libssl.SSL_read(self.ssl, buf, num_bytes)
err = libssl.SSL_get_error(self.ssl, count)
if err == SSL_ERROR_WANT_READ:
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, False)
elif err == SSL_ERROR_WANT_WRITE:
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, True)
else:
sockstate = SOCKET_OPERATION_OK
if sockstate == SOCKET_HAS_TIMED_OUT:
raise OperationError(self.space.w_Exception,
self.space.wrap("The read operation timed out"))
elif sockstate == SOCKET_IS_NONBLOCKING:
break
if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
continue
else:
break
if count <= 0:
errstr, errval = _ssl_seterror(self.space, self, count)
raise OperationError(self.space.w_Exception,
self.space.wrap("%s: %d" % (errstr, errval)))
if count != num_bytes:
# resize
data = buf.raw
assert count >= 0
try:
new_data = data[0:count]
except:
raise OperationError(self.space.w_MemoryException,
self.space.wrap("error in resizing of the buffer."))
buf = create_string_buffer(count)
buf.raw = new_data
return self.space.wrap(buf.value)
read.unwrap_spec = ['self', int]
SSLObject.typedef = TypeDef("SSLObject",
server = interp2app(SSLObject.server,
unwrap_spec=SSLObject.server.unwrap_spec),
issuer = interp2app(SSLObject.issuer,
unwrap_spec=SSLObject.issuer.unwrap_spec),
write = interp2app(SSLObject.write,
unwrap_spec=SSLObject.write.unwrap_spec),
read = interp2app(SSLObject.read, unwrap_spec=SSLObject.read.unwrap_spec)
)
def new_sslobject(space, w_sock, w_key_file, w_cert_file):
ss = SSLObject(space)
sock_fd = space.int_w(space.call_method(w_sock, "fileno"))
w_timeout = space.call_method(w_sock, "gettimeout")
if space.is_w(w_timeout, space.w_None):
has_timeout = False
else:
has_timeout = True
if space.is_w(w_key_file, space.w_None):
key_file = None
else:
key_file = space.str_w(w_key_file)
if space.is_w(w_cert_file, space.w_None):
cert_file = None
else:
cert_file = space.str_w(w_cert_file)
if ((key_file and not cert_file) or (not key_file and cert_file)):
raise OperationError(space.w_Exception,
space.wrap("Both the key & certificate files must be specified"))
ss.ctx = libssl.SSL_CTX_new(libssl.SSLv23_method()) # set up context
if not ss.ctx:
raise OperationError(space.w_Exception, space.wrap("SSL_CTX_new error"))
if key_file:
ret = libssl.SSL_CTX_use_PrivateKey_file(ss.ctx, key_file,
SSL_FILETYPE_PEM)
if ret < 1:
raise OperationError(space.w_Exception,
space.wrap("SSL_CTX_use_PrivateKey_file error"))
ret = libssl.SSL_CTX_use_certificate_chain_file(ss.ctx, cert_file)
libssl.SSL_CTX_ctrl(ss.ctx, SSL_CTRL_OPTIONS, SSL_OP_ALL, c_void_p())
if ret < 1:
raise OperationError(space.w_Exception,
space.wrap("SSL_CTX_use_certificate_chain_file error"))
libssl.SSL_CTX_set_verify(ss.ctx, SSL_VERIFY_NONE, c_void_p()) # set verify level
ss.ssl = libssl.SSL_new(ss.ctx) # new ssl struct
libssl.SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL
# If the socket is in non-blocking mode or timeout mode, set the BIO
# to non-blocking mode (blocking is the default)
if has_timeout:
# Set both the read and write BIO's to non-blocking mode
libssl.BIO_ctrl(libssl.SSL_get_rbio(ss.ssl), BIO_C_SET_NBIO, 1, c_void_p())
libssl.BIO_ctrl(libssl.SSL_get_wbio(ss.ssl), BIO_C_SET_NBIO, 1, c_void_p())
libssl.SSL_set_connect_state(ss.ssl)
# Actually negotiate SSL connection
# XXX If SSL_connect() returns 0, it's also a failure.
sockstate = 0
while True:
ret = libssl.SSL_connect(ss.ssl)
err = libssl.SSL_get_error(ss.ssl, ret)
if err == SSL_ERROR_WANT_READ:
sockstate = check_socket_and_wait_for_timeout(space, w_sock, False)
elif err == SSL_ERROR_WANT_WRITE:
sockstate = check_socket_and_wait_for_timeout(space, w_sock, True)
else:
sockstate = SOCKET_OPERATION_OK
if sockstate == SOCKET_HAS_TIMED_OUT:
raise OperationError(space.w_Exception,
space.wrap("The connect operation timed out"))
elif sockstate == SOCKET_HAS_BEEN_CLOSED:
raise OperationError(space.w_Exception,
space.wrap("Underlying socket has been closed."))
elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
raise OperationError(space.w_Exception,
space.wrap("Underlying socket too large for select()."))
elif sockstate == SOCKET_IS_NONBLOCKING:
break
if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
| |
(isinstance(mean_local, list) == True)): # Checks to see if we are dealing with arrays.
N_times_mean_local = np.multiply(N_local, mean_local)
N_times_var_local = np.multiply(N_local, np.multiply(std_local, std_local))
N_local = np.array(N_local).astype(float)
N_times_mean_local = np.array(N_times_mean_local).astype(np.float32)
if rank == 0: # Only rank 0 holds the final arrays so only it requires proper definitions.
N_times_mean_pool = np.zeros_like(N_times_mean_local)
N_pool_function = np.zeros_like(N_local)
N_times_var_pool = np.zeros_like(N_times_var_local)
N_times_mean_pool = N_times_mean_pool.astype(np.float64) # Recast everything to double precision then use MPI.DOUBLE.
N_pool_function = N_pool_function.astype(np.float64)
N_times_var_pool = N_times_var_pool.astype(np.float64)
else:
N_times_mean_pool = None
N_pool_function = None
N_times_var_pool = None
comm.Barrier()
N_times_mean_local = N_times_mean_local.astype(np.float64)
N_local = N_local.astype(np.float64)
N_times_var_local = N_times_var_local.astype(np.float64)
comm.Reduce([N_times_mean_local, MPI.DOUBLE], [N_times_mean_pool, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the arrays across processors.
comm.Reduce([N_local, MPI.DOUBLE],[N_pool_function, MPI.DOUBLE], op = MPI.SUM, root = 0)
comm.Reduce([N_times_var_local, MPI.DOUBLE], [N_times_var_pool, MPI.DOUBLE], op = MPI.SUM, root = 0)
else:
N_times_mean_local = N_local * mean_local
N_times_var_local = N_local * std_local * std_local
N_times_mean_pool = comm.reduce(N_times_mean_local, op = MPI.SUM, root = 0)
N_pool_function = comm.reduce(N_local, op = MPI.SUM, root = 0)
N_times_var_pool = comm.reduce(N_times_var_local, op = MPI.SUM, root = 0)
if rank == 0:
mean_pool_function = np.zeros((len(N_pool_function)))
std_pool_function = np.zeros((len(N_pool_function)))
for i in range(0, len(N_pool_function)):
if N_pool_function[i] == 0:
mean_pool_function[i] = 0.0
else:
mean_pool_function[i] = np.divide(N_times_mean_pool[i], N_pool_function[i])
if N_pool_function[i] < 3:
std_pool_function[i] = 0.0
else:
std_pool_function[i] = np.sqrt(np.divide(N_times_var_pool[i], N_pool_function[i]))
mean_pool.append(mean_pool_function)
std_pool.append(std_pool_function)
N_pool.append(N_pool_function)
return mean_pool, std_pool, N_pool
else:
return mean_pool, std_pool, N_pool_function # Junk return because non-rank 0 doesn't care.
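# A minimal serial sketch of the pooling rule implemented above (assumption:
# each rank contributes per-bin counts N_i, means mean_i and standard
# deviations std_i; the pooled mean is the N-weighted mean and the pooled
# spread is sqrt(sum_i N_i*std_i**2 / sum_i N_i), matching the Reduce sums).
import numpy as np

def pool_mean_std(N, mean, std):
    """N, mean, std: arrays of shape (n_ranks, n_bins)."""
    N = np.asarray(N, dtype=np.float64)
    mean = np.asarray(mean, dtype=np.float64)
    std = np.asarray(std, dtype=np.float64)
    N_tot = N.sum(axis=0)
    safe_N = np.where(N_tot > 0, N_tot, 1.0)  # avoid divide-by-zero in empty bins
    mean_pool = np.where(N_tot > 0, (N * mean).sum(axis=0) / safe_N, 0.0)
    std_pool = np.where(N_tot >= 3, np.sqrt((N * std ** 2).sum(axis=0) / safe_N), 0.0)
    return mean_pool, std_pool, N_tot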
##
def StellarMassFunction(SnapList, SMF, simulation_norm, FirstFile, LastFile, NumFile, ResolutionLimit_mean, model_tags, observations, paper_plot, output_tag):
'''
    Calculates the stellar mass function for the given galaxies with the option to overplot observations by Song et al. (2016) at z = 6, 7, 8 and/or Baldry et al. (2008) at z = 0.1.
Parallel compatible.
NOTE: The plotting assumes the redshifts we are plotting at are (roughly) the same for each model.
Parameters
---------
    SnapList : Nested array-like, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots that we plot the stellar mass function at for each model.
SMF : Nested 2-dimensional array, SMF[model_number0][snapshot0] = [bin0galaxies, ..., binNgalaxies], with length equal to the number of bins (NB_gal).
The count of galaxies within each stellar mass bin. Bounds are given by 'm_gal_low' and 'm_gal_high' in bins given by 'bin_width'.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
FirstFile, LastFile, NumFile : array of integers with length equal to the number of models.
The file numbers for each model that were read in (defined by the range between [FirstFile, LastFile] inclusive) and the TOTAL number of files for this model (we may only be plotting a subset of the volume).
ResolutionLimit_mean : array of floats with the same shape as SMF.
    This is the mean stellar mass of haloes whose length (number of N-body simulation particles) lies between 'stellar_mass_halolen_lower' and 'stellar_mass_halolen_upper'.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
observations : int
Denotes whether we want to overplot observational results.
0 : Don't plot anything.
1 : Plot Song et al. (2016) at z = 6, 7, 8.
2 : Plot Baldry et al. (2008) at z = 0.1.
3 : Plot both of these.
paper_plot : int
Denotes whether we want to split the plotting over three panels (z = 6, 7, 8) for the paper or keep it all to one figure.
output_tag : string
    Name of the file that will be generated. File will be saved in the current directory with the output format defined by the 'output_format' variable at the beginning of the file.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Stellar Mass is in units of log10(Msun).
'''
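    ## Hypothetical call for reference (argument values are illustrative and
    ## not taken from any run script in this repository):
    ## StellarMassFunction([[30, 50, 64]], SMF, [2], [0], [26], [27],
    ##                     ResolutionLimit_mean, ["Tiamat"],
    ##                     observations=1, paper_plot=0, output_tag="SMF")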
## Empty array initialization ##
title = []
normalization_array = []
redshift_labels = []
counts_array = []
bin_middle_array = []
for model_number in range(0, len(SnapList)):
counts_array.append([])
bin_middle_array.append([])
redshift_labels.append([])
####
for model_number in range(0, len(SnapList)): # Does this for each of the models.
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
box_factor = (LastFile[model_number] - FirstFile[model_number] + 1.0)/(NumFile[model_number]) # This factor allows us to take a sub-volume of the box and scale the results to represent the entire box.
print("We are creating the stellar mass function using {0:.4f} of the box's volume.".format(box_factor))
norm = pow(AllVars.BoxSize,3) / pow(AllVars.Hubble_h, 3) * bin_width * box_factor
normalization_array.append(norm)
####
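        # Illustrative numbers for the normalization above (assumed, not tied
        # to a particular simulation): BoxSize = 100 Mpc/h, Hubble_h = 0.7,
        # bin_width = 0.1 dex and box_factor = 1.0 give
        # norm = (100**3 / 0.7**3) * 0.1 ~ 2.9e5 Mpc^3 dex, so Phi = counts / norm
        # has units of Mpc^-3 dex^-1, matching the y-axis label below.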
for snapshot_idx in range(0, len(SnapList[model_number])): # Loops for each snapshot in each model.
tmp = 'z = %.2f' %(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]) # Assigns a redshift label.
redshift_labels[model_number].append(tmp)
## We perform the plotting on Rank 0 so only this rank requires the final counts array. ##
if rank == 0:
counts_total = np.zeros_like(SMF[model_number][snapshot_idx])
else:
counts_total = None
comm.Reduce([SMF[model_number][snapshot_idx], MPI.FLOAT], [counts_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum all the stellar mass and pass to Rank 0.
if rank == 0:
counts_array[model_number].append(counts_total)
bin_middle_array[model_number].append(np.arange(m_gal_low, m_gal_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
####
## Plotting ##
if rank == 0: # Plot only on rank 0.
if paper_plot == 0:
f = plt.figure()
ax = plt.subplot(111)
for model_number in range(0, len(SnapList)):
for snapshot_idx in range(0, len(SnapList[model_number])):
if model_number == 0: # We assume the redshifts for each model are the same, we only want to put a legend label for each redshift once.
title = redshift_labels[model_number][snapshot_idx]
else:
title = ''
plt.plot(bin_middle_array[model_number][snapshot_idx], counts_array[model_number][snapshot_idx] / normalization_array[model_number], color = PlotScripts.colors[snapshot_idx], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)
#print(np.min(np.log10(ResolutionLimit_mean)))
#ax.axvline(np.max(np.log10(ResolutionLimit_mean)), color = 'k', linewidth = PlotScripts.global_linewidth, linestyle = '--')
#ax.text(np.max(np.log10(ResolutionLimit_mean)) + 0.1, 1e-3, "Resolution Limit", color = 'k')
for model_number in range(0, len(SnapList)): # Place legend labels for each of the models. NOTE: Placed after previous loop for proper formatting of labels.
plt.plot(1e100, 1e100, color = 'k', linestyle = PlotScripts.linestyles[model_number], label = model_tags[model_number], rasterized=True, linewidth = PlotScripts.global_linewidth)
## Adjusting axis labels/limits. ##
plt.yscale('log', nonposy='clip')
plt.axis([6, 11.5, 1e-6, 1e-0])
ax.set_xlabel(r'$\log_{10}\ m_{\mathrm{*}} \:[M_{\odot}]$', fontsize = PlotScripts.global_fontsize)
ax.set_ylabel(r'$\Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]$', fontsize = PlotScripts.global_fontsize)
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.25))
ax.set_xticks(np.arange(6.0, 12.0))
if (observations == 1 or observations == 3): # If we wanted to plot Song.
Obs.Get_Data_SMF()
delta = 0.05
caps = 5
## Song (2016) Plotting ##
plt.errorbar(Obs.Song_SMF_z6[:,0], 10**Obs.Song_SMF_z6[:,1], yerr= (10**Obs.Song_SMF_z6[:,1] - 10**Obs.Song_SMF_z6[:,3], 10**Obs.Song_SMF_z6[:,2] - 10**Obs.Song_SMF_z6[:,1]), xerr = 0.25, capsize = caps, elinewidth = PlotScripts.global_errorwidth, alpha = 1.0, lw=2.0, marker='o', ls='none', label = 'Song 2015, z = 6', color = PlotScripts.colors[0], rasterized=True)
plt.errorbar(Obs.Song_SMF_z7[:,0], 10**Obs.Song_SMF_z7[:,1], yerr= (10**Obs.Song_SMF_z7[:,1] - 10**Obs.Song_SMF_z7[:,3], 10**Obs.Song_SMF_z7[:,2] - 10**Obs.Song_SMF_z7[:,1]), xerr = 0.25, capsize = caps, alpha=0.75, elinewidth = PlotScripts.global_errorwidth, lw=1.0, marker='o', ls='none', label = 'Song 2015, z = 7', color = PlotScripts.colors[1], rasterized=True)
plt.errorbar(Obs.Song_SMF_z8[:,0], 10**Obs.Song_SMF_z8[:,1], yerr= (10**Obs.Song_SMF_z8[:,1] - 10**Obs.Song_SMF_z8[:,3], 10**Obs.Song_SMF_z8[:,2] - 10**Obs.Song_SMF_z8[:,1]), xerr = 0.25, capsize = caps, alpha=0.75, elinewidth = PlotScripts.global_errorwidth, lw=1.0, marker='o', ls='none', label = 'Song 2015, z = 8', color = PlotScripts.colors[2], rasterized=True)
####
if ((observations == 2 or observations == 3) and rank == 0): # If we wanted to plot Baldry.
Baldry_xval = np.log10(10 ** Obs.Baldry_SMF_z0[:, 0] /AllVars.Hubble_h/AllVars.Hubble_h)
Baldry_xval = Baldry_xval - 0.26 # convert back to Chabrier IMF
Baldry_yvalU = (Obs.Baldry_SMF_z0[:, 1]+Obs.Baldry_SMF_z0[:, 2]) * AllVars.Hubble_h*AllVars.Hubble_h*AllVars.Hubble_h
Baldry_yvalL = (Obs.Baldry_SMF_z0[:, 1]-Obs.Baldry_SMF_z0[:, 2]) * AllVars.Hubble_h*AllVars.Hubble_h*AllVars.Hubble_h
plt.fill_between(Baldry_xval, Baldry_yvalU, Baldry_yvalL,
facecolor='purple', alpha=0.25, label='Baldry et al. 2008 (z=0.1)')
####
leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # | |
# Repository: Jamal-dev/asymproj_edge_dnn_tensorFlow2
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Edge Neural Network."""
import numpy
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from tensorflow.python.platform import flags
import tf_slim as slim
flags.DEFINE_float('learn_rate', 0.1, '')
flags.DEFINE_string('optimizer', 'pd',
'Training algorithm for the EdgeNN parameters. Choices are '
'"pd" and "adam", respecitvely, for "PercentDelta" and '
'"AdamOptimizer". Nonetheless, the embeddings are always '
'trained with PercentDelta, as per deep_edge_trainer.py')
FLAGS = flags.FLAGS
class EdgeNN(object):
"""Neural Network for representing an edge using node embeddings.
The network maps two embedding vectors, `left` and `right`, both of size D,
onto a scalar, indicating the edge score between the `left` and `right`
embedding vectors.
The score gets computed as:
output = DNN(left) x L x R x DNN(right),
where `DNN` is a feed-forward neural network (with trainable parameters) and
(L, R) are two "projection" matrices.
This class does *not* keep track of the embedding matrix. It assumes that
embeddings are tracked outside and are provided as input during training or
inference.
"""
def __init__(self):
"""Sets public members to None, which are re-set in build_net()."""
self.embeddings_a = None # Placeholder for left embeddings.
self.embeddings_b = None # Placeholder for right embeddings.
# Placeholder for batch size.
self.batch_size = tf.compat.v1.placeholder(tf.int32, shape=(), name='sizes')
### Placeholders For training.
self.learn_rate = tf.compat.v1.placeholder(tf.float32, ())
# User is expected to feed `labels` as binary array of size `batch_size`.
# If labels[i] == 0.0, then nodes with embeddings at embeddings_a[i] and
# embeddings_b[i] are expected to be "negatives", but if == 1.0, they are
# expected to be positives (i.e. direct neighbors or close and sampled
# via Random Walks).
self.labels = tf.compat.v1.placeholder(tf.float32, shape=(None,), name='labels')
# Tensors of activations of Neural Network layers.
self.layers = []
# Output of the network, in terms of `embeddings_{a,b}`.
self.output = None
# Used for debugging.
self.trainable_values = None
def build_net(self, embedding_dim=None, dnn_dims=None, projection_dim=None,
num_projections=1):
"""Creates the feed-forward DNN, projection matrices L, R, and training ops.
Args:
embedding_dim: Integer for D, indicating the input embedding dimension.
dnn_dims: List of integers. Specifies the latent dimensions of hidden
        layers of the DNN. Activation functions will be tf.nn.relu for all layers
        except the last, which will have no activation. BatchNorm will be used on all
layers. If empty list, then no DNN will be used.
      projection_dim: Inner dimension of the projection matrices "L" and "R".
This is the "bottleneck" (i.e. smallest) dimension. The outer-dimension
of "L" and "R" is inferred as last entry in `[embed_dim] + dnn_dims`.
If set to <= 0, then no "L" nor "R" would be used. Instead, the edge
function becomes: `w^T (DNN(left) * DNN(right))`, where * is hadamard
product and w is a trainable vector.
"""
if dnn_dims is None or dnn_dims == '':
dnn_dims = []
elif isinstance(dnn_dims, str):
dnn_dims = map(int, dnn_dims.split(','))
### Placeholders For training and inference.
# `left` and `right` embedding matrices. First (batch size) is `None` since
# we want to support dynamic batch size. `embeddings_a` and `embeddings_b`
# must be fed arrays of the same size, with first dimension equal to scalar
# fed into `batch_size`.
self.embeddings_a = tf.compat.v1.placeholder(
tf.float32, shape=(None, embedding_dim), name='embeddings_a')
self.embeddings_b = tf.compat.v1.placeholder(
tf.float32, shape=(None, embedding_dim), name='embeddings_b')
### DNN.
# Input is a concatenation of embeddings_a and embeddings_b, since they
# both go through the same DNN transformation.
embeddings_combined = tf.concat([
tf.reshape(self.embeddings_a, (self.batch_size, embedding_dim)),
tf.reshape(self.embeddings_b, (self.batch_size, embedding_dim))
], 0)
self.layers.append(embeddings_combined)
# For-loop creates the Neural Network layers. Last layer has no activation
# but others have relu activation.
# changed
dnn_dims = list(dnn_dims)
net = embeddings_combined
for i, f_d in enumerate(dnn_dims):
if i < len(dnn_dims) - 1:
net = slim.fully_connected(
net, f_d, activation_fn=tf.nn.relu,
weights_regularizer=tf.keras.regularizers.l2(0.5 * (1e-6)),
normalizer_fn=slim.batch_norm,
normalizer_params={'is_training': True})
else:
# Last layer.
net = slim.fully_connected(
          net, f_d, activation_fn=None,  # last layer: no activation (see docstring)
weights_regularizer=tf.keras.regularizers.l2(0.5 * (1e-6)),
normalizer_fn=slim.batch_norm,
normalizer_params={'is_training': True})
self.layers.append(net)
# Undo our concatenation. Set f_a to DNN(embeddings_a) and f_b to
# DNN(embeddings_b)
f_a = net[:self.batch_size]
f_b = net[self.batch_size:]
self.f_a = f_a
self.f_b = f_b
### Projection with matrices "L" and "R" (i.e. g_left and g_right).
if projection_dim > 0:
g_outer_d = embedding_dim
if len(dnn_dims) > 0:
g_outer_d = dnn_dims[-1]
self.g_lefts = []
self.g_rights = []
self.edge_r = []
for i in range(num_projections):
name_suffix = ''
if i > 0:
name_suffix = '_%i' % i
g_left = tf.compat.v1.get_variable(
name="g_left" + name_suffix, shape=(g_outer_d, projection_dim),
regularizer=slim.regularizers.l2_regularizer(1e-6))
g_right = tf.compat.v1.get_variable(
name="g_right" + name_suffix, shape=(projection_dim, g_outer_d),
regularizer=slim.regularizers.l2_regularizer(1e-6))
self.g_left = g_left
self.g_right = g_right
self.g_lefts.append(g_left)
self.g_rights.append(g_right)
g_l_bottleneck = tf.matmul(f_a, g_left)
g_r_bottleneck = tf.matmul(f_b, tf.transpose(a=g_right))
self.g_l_bottleneck = g_l_bottleneck
self.g_r_bottleneck = g_r_bottleneck
self.layers.append(g_l_bottleneck)
self.layers.append(g_r_bottleneck)
output = tf.reduce_sum(
input_tensor=g_l_bottleneck * g_r_bottleneck, axis=[1])
self.edge_r.append(tf.expand_dims(output, 1))
if num_projections > 1:
output = tf.concat(self.edge_r, 1)
output = slim.batch_norm(output)
output = tf.nn.relu(output)
output = slim.fully_connected(output, 1, activation_fn=None)
output = tf.squeeze(output, 1)
else:
output = tf.multiply(f_a, f_b)
output = slim.fully_connected(output, 1, activation_fn=None)
output = tf.reduce_sum(input_tensor=output, axis=[1])
self.output = output
self.build_train_op()
def build_train_op(self):
"""Sets gradient tensors and creates tensor `train_op`."""
self.min_objective = tf.compat.v1.losses.sigmoid_cross_entropy(
multi_class_labels=self.labels[:self.batch_size],
logits=self.output)
variables = [self.embeddings_a, self.embeddings_b] + tf.compat.v1.global_variables()
all_losses = tf.compat.v1.losses.get_regularization_losses() + [self.min_objective]
grads = tf.gradients(ys=all_losses, xs=variables)
self.gradients = []
self.gradients_for = []
for v, g in zip(variables, grads):
if g is None:
continue
self.gradients_for.append(v)
self.gradients.append(g)
if len(self.gradients) > 2:
if FLAGS.optimizer == 'adam':
optimizer = tf.compat.v1.train.AdamOptimizer(self.learn_rate)
elif FLAGS.optimizer == 'pd':
# Percent Delta. Works better than Adam and does not require learning
# rate tuning.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(self.learn_rate)
self.train_op = optimizer.apply_gradients(
zip(self.gradients[2:], self.gradients_for[2:]))
def get_gradients(self, sess, left_embeddings, right_embeddings, labels):
"""Calculates gradients w.r.t. objective.
Matrices `left_embeddings` and `right_embeddings` must be of shape (b, D),
and labels must be of shape (b), where `b` is batch size and `D` is the
dimension of input embeddings.
Args:
sess: TensorFlow Session.
left_embeddings: float32 numpy array of left embeddings, shape=(b, D).
right_embeddings: float32 numpy array of right embeddings, shape=(b, D).
labels: float32 numpy array of a binary vector of shape=(b). Entries must
be 0.0 or 1.0, respectively, for negative and positive pairs at
corresponding position in left_embeddings and right_embeddings.
Returns:
tuple (gradients, objective) where `gradients` contains gradients w.r.t.
(left embeddings, right embeddings, DNN parameters, L, R). The gradients
w.r.t. {left, right} embeddings must be applied on the embedding matrix
by the caller, as the embedding matrix is not maintained by this class.
"""
grads_and_objective = sess.run(
self.gradients + [self.min_objective],
feed_dict={
self.embeddings_a: left_embeddings,
self.embeddings_b: right_embeddings,
self.batch_size: len(left_embeddings),
self.labels: labels,
})
objective = grads_and_objective[-1]
grads = grads_and_objective[:-1]
return grads, objective
def apply_gradients(self, sess, grads, epoch):
"""Applies `grads` to the parameters of the edge neural network.
The optimizer is indicated using flag --optimizer. The size of grads must be
    equal to the number of trainable tensors of the edge neural network, which
    must equal the number of gradients returned by `get_gradients()` minus 2, since the
first two entries returned by `get_gradients()` are the gradients of
embeddings (src, dst embeddings).
Args:
sess: TensorFlow session holding the parameters.
grads: Output of get_gradients, as in `get_gradients()[0][2:]`.
epoch: Current iteration number over train data. Used if --optimizer=pd
Returns:
The deltas in the tensors (i.e. result of the update).
"""
if len(grads) == 0:
return
assert len(grads) == len(self.gradients) - 2
if self.trainable_values is None:
self.trainable_values = sess.run(self.gradients_for[2:])
assert len(grads) == len(self.trainable_values)
deltas = []
if FLAGS.optimizer == 'pd':
for (g, v) in zip(grads, self.trainable_values):
mean_percent_grad = numpy.mean(numpy.abs(g / PlusEpsilon(v)))
deltas.append(PickLearnRate(mean_percent_grad, epoch))
else:
deltas = [1] * len(grads)
feed_dict = {self.gradients[2 + i]: grads[i] * deltas[i]
for i in range(len(grads))}
feed_dict[self.learn_rate] = FLAGS.learn_rate
sess.run(self.train_op, feed_dict=feed_dict)
new_trainable_values = sess.run(self.gradients_for[2:])
deltas = [numpy.mean(abs((v0 - v1) / v1))
for (v0, v1) in zip(self.trainable_values, new_trainable_values)]
self.trainable_values | |
supported
try: audioop.bias(b"", 3, 0) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
raw_data = b"".join(raw_data[i + 1:i + 4] for i in range(0, len(raw_data), 4)) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample
else: # 24-bit audio fully supported, we don't need to shim anything
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
else:
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
# if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again
if convert_width == 1:
raw_data = audioop.bias(raw_data, 1, 128) # add 128 to every sample to make them act like unsigned samples again
return raw_data
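        # Byte-level illustration of the 24-bit shim above (little-endian samples):
        #     one 32-bit frame b"\x01\x02\x03\x04"  ->  24-bit frame b"\x02\x03\x04"
        # i.e. the least-significant byte of every 4-byte sample is discarded.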
def get_wav_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `WAV file <https://en.wikipedia.org/wiki/WAV>`__.
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
# generate the WAV file contents
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
wav_writer.setframerate(sample_rate)
wav_writer.setsampwidth(sample_width)
wav_writer.setnchannels(1)
wav_writer.writeframes(raw_data)
wav_data = wav_file.getvalue()
finally: # make sure resources are cleaned up
wav_writer.close()
return wav_data
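# Illustrative helper (not part of the library itself): persist an AudioData
# instance to disk as a 16 kHz, 16-bit mono WAV file. The `audio` argument is
# assumed to come from Recognizer.record() or Recognizer.listen() below.
def save_audio_to_wav(audio, path="recording.wav"):
    with open(path, "wb") as f:
        f.write(audio.get_wav_data(convert_rate=16000, convert_width=2))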
class Recognizer(AudioSource):
def __init__(self):
"""
Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
"""
self.energy_threshold = 300 # minimum audio energy to consider for recording
self.dynamic_energy_threshold = True
self.dynamic_energy_adjustment_damping = 0.15
self.dynamic_energy_ratio = 1.5
self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete
self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording
def record(self, source, duration = None, offset = None):
"""
Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.
If ``duration`` is not specified, then it will record until there is no more audio input.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before recording, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
frames = io.BytesIO()
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
offset_time = 0
offset_reached = False
while True: # loop for the total number of chunks needed
if offset and not offset_reached:
offset_time += seconds_per_buffer
if offset_time > offset:
offset_reached = True
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
if offset_reached or not offset:
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def adjust_for_ambient_noise(self, source, duration = 1):
"""
Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.
Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.
The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
# adjust energy threshold until a phrase starts
while True:
elapsed_time += seconds_per_buffer
if elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
            # dynamically adjust the energy threshold using asymmetric weighted average
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
def listen(self, source, timeout = None):
"""
Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.
The ``timeout`` parameter is the maximum number of seconds that it will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, it will wait indefinitely.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before listening, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio before the phrase is complete
phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after
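        # Worked example of the buffer arithmetic above (hypothetical figures):
        # with CHUNK = 1024 and SAMPLE_RATE = 16000, seconds_per_buffer = 0.064 s,
        # so pause_threshold = 0.8 s  -> pause_buffer_count  = ceil(0.8 / 0.064) = 13
        # and phrase_threshold = 0.3 s -> phrase_buffer_count = ceil(0.3 / 0.064) = 5.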
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
while True:
frames = collections.deque()
# store audio input until the phrase starts
while True:
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout: # handle timeout if specified
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
frames.popleft()
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold: break
                # dynamically adjust the energy threshold using asymmetric weighted average
if self.dynamic_energy_threshold:
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
while True:
elapsed_time += seconds_per_buffer
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# check how long the detected phrase is, and retry listening if the phrase is too short
phrase_count -= pause_count
if phrase_count >= phrase_buffer_count: break # phrase is long enough, stop listening
# obtain frame data
for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
frame_data = b"".join(list(frames))
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
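    # Typical calling pattern (illustrative; `Microphone` is the AudioSource
    # implementation shipped with this library but not shown in this excerpt):
    #     r = Recognizer()
    #     with Microphone() as source:
    #         r.adjust_for_ambient_noise(source, duration=1)
    #         audio = r.listen(source, timeout=5)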
def listen_in_background(self, source, callback):
"""
Spawns a | |
# Repository: ian-cooke/basilisk_mag
''' '''
'''
ISC License
Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
#
# Basilisk Scenario Script and Integrated Test
#
# Purpose: Integrated test of the spacecraftPlus(), extForceTorque, simpleNav(),
# MRP_Feedback() with attitude navigation modules. This script is a
# spinoff from the attitude guidance tutorial, it implements a hyperbolic
# trajectory and uses the velocityPoint module.
# Author: <NAME>
# Creation Date: Aug. 28th, 2017
#
import inspect
import os
import numpy as np
from datetime import datetime
from Basilisk import __path__
import matplotlib.pyplot as plt
from Basilisk.fswAlgorithms import MRP_Feedback, attTrackingError, fswMessages, velocityPoint, mag_attTrack, \
inertial3D
from Basilisk.simulation import extForceTorque, simple_nav, spacecraftPlus, simMessages, eclipse, \
mag_meter, torqueRodDynamicEffector
from Basilisk.utilities import SimulationBaseClass, macros, orbitalMotion, simIncludeGravBody
# general support file with common unit test functions
from Basilisk.utilities import unitTestSupport
# import message declarations
from Basilisk.fswAlgorithms import fswMessages
bskPath = __path__[0]
from Basilisk import pyswice
def plot_track_error_norm(timeLineSet, dataSigmaBR):
plt.figure(1)
fig = plt.gcf()
ax = fig.gca()
vectorData = unitTestSupport.pullVectorSetFromData(dataSigmaBR)
sNorm = np.array([np.linalg.norm(v) for v in vectorData])
plt.plot(timeLineSet, sNorm,
color=unitTestSupport.getLineColor(1, 3),
)
plt.xlabel('Time [min]')
plt.ylabel('Attitude Error Norm $|\sigma_{B/R}|$')
ax.set_yscale('log')
def plot_control_torque(timeLineSet, dataLr):
plt.figure(2)
for idx in range(1, 4):
plt.plot(timeLineSet, dataLr[:, idx],
color=unitTestSupport.getLineColor(idx, 3),
label='$L_{r,' + str(idx) + '}$')
plt.legend(loc='lower right')
plt.xlabel('Time [min]')
plt.ylabel('Control Torque $L_r$ [Nm]')
def plot_rate_error(timeLineSet, dataOmegaBR):
plt.figure(3)
for idx in range(1, 4):
plt.plot(timeLineSet, macros.R2D * dataOmegaBR[:, idx],
color=unitTestSupport.getLineColor(idx, 3),
label='$\omega_{BR,' + str(idx) + '}$')
p1ds = plt.axhline(y=1, color='r', linestyle='--', label='+1 deg/s')
n1ds = plt.axhline(y=-1, color='r', linestyle='--', label='-1 deg/s')
plt.legend(loc='lower right')
plt.xlabel('Time [min]')
plt.ylabel('Rate Tracking Error [deg/s] ')
def plot_orbit(oe, mu, planet_radius, dataPos, dataVel):
# draw orbit in perifocal frame
p = oe.a * (1 - oe.e * oe.e)
plt.figure(4, figsize=np.array((1.0, 1.)) * 4.75, dpi=100)
# draw the planet
fig = plt.gcf()
ax = fig.gca()
planetColor = '#008800'
# planet = gravFactory.createEarth()
planetRadius = planet_radius / 1000
ax.add_artist(plt.Circle((0, 0), planetRadius, color=planetColor))
# draw the actual orbit
rData = []
fData = []
for idx in range(0, len(dataPos)):
oeData = orbitalMotion.rv2elem(mu, dataPos[idx, 1:4], dataVel[idx, 1:4])
rData.append(oeData.rmag)
fData.append(oeData.f + oeData.omega - oe.omega)
plt.plot(rData * np.cos(fData) / 1000, rData * np.sin(fData) / 1000,
color='#aa0000', linewidth=3.0, label='Simulated Flight')
plt.axis(np.array([-1, 1, -1, 1]) * 1.25 * np.amax(rData) / 1000)
# draw the full osculating orbit from the initial conditions
fData = np.linspace(0, 2 * np.pi, 100)
rData = []
for idx in range(0, len(fData)):
rData.append(p / (1 + oe.e * np.cos(fData[idx])))
plt.plot(rData * np.cos(fData) / 1000, rData * np.sin(fData) / 1000, '--', color='#555555', label='Orbit Track')
plt.xlabel('$i_e$ Cord. [km]')
plt.ylabel('$i_p$ Cord. [km]')
plt.legend(loc='lower left')
plt.grid()
## \defgroup Tutorials_2_1_1
## @{
# How to use guidance modules to align the spacecraft frame to the velocity-pointing frame.
#
# Attitude Alignment for a Spacecraft on a Hyperbolic Trajectory {#scenarioAttGuideHyperbolic}
# ====
#
# Scenario Description
# -----
# This script sets up a 6-DOF spacecraft which is on a hyperbolic trajectory near Earth.
# It aligns the spacecraft to point along the velocity vector throughout the orbit.
# The scenario is setup to be run in two different configurations:
# Setup | useAltBodyFrame
# ----- | -------------------
# 1 | False
# 2 | True
#
# To run the default scenario 1., call the python script through
#
# python scenarioAttGuideHyperbolic.py
#
# The simulation layout is shown in the following illustration. A single simulation process is created
# which contains both the spacecraft simulation modules, as well as the Flight Software (FSW) algorithm
# modules.
# 
#
# When the simulation completes 4 plots are shown. This first three show the MRP attitude history, the rate
# tracking errors, and the control torque vector. The fourth shows the hyperbolic trajectory
# and the segment of that trajectory flown during the simulation.
#
# The basic simulation setup is the same as the one used in
# [scenarioAttitudeGuidance.py](@ref scenarioAttitudeGuidance).
# The dynamics simulation is setup using a SpacecraftPlus() module to which a gravity
# effector is attached. Note that both the rotational and translational degrees of
# freedom of the spacecraft hub are turned on here to get a 6-DOF simulation. For more
# information on how to setup an orbit, see [scenarioBasicOrbit.py](@ref scenarioBasicOrbit)
#
# Where the Attitude Guidance Tutorial pointed the spacecraft relative to the Hill frame, this tutorial
# points it relative to the velocity vector.
# Note that mu must be assigned to attGuidanceConfig.mu when using the velocityPoint() module:
# ~~~~~~~~~~~~~{.py}
# attGuidanceConfig = velocityPoint.velocityPointConfig()
# attGuidanceWrap = scSim.setModelDataWrap(attGuidanceConfig)
# attGuidanceWrap.ModelTag = "velocityPoint"
# attGuidanceConfig.inputNavDataName = sNavObject.outputTransName
# attGuidanceConfig.inputCelMessName = earth.bodyInMsgName
# attGuidanceConfig.outputDataName = "guidanceOut"
# attGuidanceConfig.mu = mu
# scSim.AddModelToTask(simTaskName, attGuidanceWrap, attGuidanceConfig)
# ~~~~~~~~~~~~~
# Note that in contrast to Hill pointing mode used in
# [scenarioAttitudeGuidance.py](@ref scenarioAttitudeGuidance), the orbit velocity frame pointing
# requires the attracting celestial body gravitational constant mu to be set.
#
# Setup 1
# -----
#
# Which scenario is run is controlled at the bottom of the file in the code
# ~~~~~~~~~~~~~{.py}
# if __name__ == "__main__":
# run(
# True, # show_plots
# False # useAltBodyFrame
# )
# ~~~~~~~~~~~~~
# The first 2 arguments can be left as is. The remaining argument controls the
# simulation scenario flags to turn on or off certain simulation conditions. The
# default scenario shown has the `useAltBodyFrame` flag turned off. This means that we seek
# to align the body frame *B* with the velocity vector *V*.
# 
# 
# 
# 
#
#
# Setup 2
# -----
#
# To run the second scenario, change the main routine at the bottom of the file to read:
# ~~~~~~~~~~~~~{.py}
# if __name__ == "__main__":
# run(
# True, # show_plots
# True # useAltBodyFrame
# )
# ~~~~~~~~~~~~~
# Here the control should not align the principal body frame *B* with *V*, but rather an alternate,
# corrected body frame *Bc*. For example, if a thruster is located on the \f$\hat b_1\f$ face, and it
# is desired to point it along the negative V-bar, this is achieved through:
# ~~~~~~~~~~~~~{.py}
# attErrorConfig.sigma_R0R = [0,0,-1]
# ~~~~~~~~~~~~~
# This corrected body frame has an orientation which is rotated 180 degrees about \f$\hat b_3\f$,
# to point the correct face of the spacecraft along the negative V-bar.
#
# The resulting attitude and control torque histories are shown below.
# 
# 
# 
#
## @}
def run(show_plots, useAltBodyFrame):
'''Call this routine directly to run the tutorial scenario.'''
# Create simulation variable names
simTaskName = "simTask"
simProcessName = "simProcess"
# Create a sim module as an empty container
scSim = SimulationBaseClass.SimBaseClass()
scSim.TotalSim.terminateSimulation()
# set the simulation time variable used later on
simulationTime = macros.min2nano(180.0)
#
# create the simulation process
#
dynProcess = scSim.CreateNewProcess(simProcessName)
# create the dynamics task and specify the integration update time
simulationTimeStep = macros.sec2nano(0.05)
dynProcess.addTask(scSim.CreateNewTask(simTaskName, simulationTimeStep))
# if this scenario is to interface with the BSK Viz, uncomment the following lines
# unitTestSupport.enableVisualization(scSim, dynProcess, simProcessName, 'earth')
# The Viz only support 'earth', 'mars', or 'sun'
#
# setup the simulation tasks/objects
#
# initialize spacecraftPlus object and set properties
scObject = spacecraftPlus.SpacecraftPlus()
scObject.ModelTag = "spacecraftBody"
# define the simulation inertia
I = [0.008, 0., 0.,
0., 0.04, 0.,
0., 0., 0.04]
scObject.hub.mHub = 4.8 # kg - spacecraft mass
scObject.hub.r_BcB_B = [[0.0], [0.0], [0.0]] # m - position vector of body-fixed point B relative to CM
scObject.hub.IHubPntBc_B = unitTestSupport.np2EigenMatrix3d(I)
# add spacecraftPlus object to the simulation process
scSim.AddModelToTask(simTaskName, scObject)
# clear prior gravitational body and SPICE setup definitions
gravFactory = simIncludeGravBody.gravBodyFactory()
gravBodies = gravFactory.createBodies(['earth', 'sun', 'moon'])
# setup Earth Gravity Body
earth = gravBodies['earth']
earth.isCentralBody = True # ensure this is the central gravitational body
mu = earth.mu
| |
*
sage: P = back_circulant(2)
sage: P[1,1] = -1
sage: P.random_empty_cell()
[1, 1]
"""
cells = {}
for r in range(self.nrows()):
for c in range(self.ncols()):
if self[r, c] < 0:
cells[ (r,c) ] = True
cells = list(cells)
if not cells:
return None
rc = cells[ ZZ.random_element(len(cells)) ]
return [rc[0], rc[1]]
def is_uniquely_completable(self):
"""
Returns True if the partial latin square self has exactly one
completion to a latin square. This is just a wrapper for the
current best-known algorithm, Dancing Links by Knuth. See
dancing_links.spyx
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: back_circulant(4).gcs().is_uniquely_completable()
True
::
sage: G = elementary_abelian_2group(3).gcs()
sage: G.is_uniquely_completable()
True
::
sage: G[0, 0] = -1
sage: G.is_uniquely_completable()
False
"""
return self.dlxcpp_has_unique_completion()
def is_completable(self):
"""
Returns True if the partial latin square can be completed to a
latin square.
EXAMPLES:
The following partial latin square has no completion because there
is nowhere that we can place the symbol 0 in the third row::
sage: B = LatinSquare(3)
::
sage: B[0, 0] = 0
sage: B[1, 1] = 0
sage: B[2, 2] = 1
::
sage: B
[ 0 -1 -1]
[-1 0 -1]
[-1 -1 1]
::
sage: B.is_completable()
False
::
sage: B[2, 2] = 0
sage: B.is_completable()
True
"""
return len(dlxcpp_find_completions(self, nr_to_find = 1)) > 0
def gcs(self):
"""
A greedy critical set of a latin square self is found by
successively removing elements in a row-wise (bottom-up) manner,
checking for unique completion at each step.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: A = elementary_abelian_2group(3)
sage: G = A.gcs()
sage: A
[0 1 2 3 4 5 6 7]
[1 0 3 2 5 4 7 6]
[2 3 0 1 6 7 4 5]
[3 2 1 0 7 6 5 4]
[4 5 6 7 0 1 2 3]
[5 4 7 6 1 0 3 2]
[6 7 4 5 2 3 0 1]
[7 6 5 4 3 2 1 0]
sage: G
[ 0 1 2 3 4 5 6 -1]
[ 1 0 3 2 5 4 -1 -1]
[ 2 3 0 1 6 -1 4 -1]
[ 3 2 1 0 -1 -1 -1 -1]
[ 4 5 6 -1 0 1 2 -1]
[ 5 4 -1 -1 1 0 -1 -1]
[ 6 -1 4 -1 2 -1 0 -1]
[-1 -1 -1 -1 -1 -1 -1 -1]
"""
n = self.nrows()
from copy import copy
G = copy(self)
for r in range(n-1, -1, -1):
for c in range(n-1, -1, -1):
e = G[r, c]
G[r, c] = -1
if not G.dlxcpp_has_unique_completion():
G[r, c] = e
return G
def dlxcpp_has_unique_completion(self):
"""
Check if the partial latin square self of order n can be embedded
in precisely one latin square of order n.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: back_circulant(2).dlxcpp_has_unique_completion()
True
sage: P = LatinSquare(2)
sage: P.dlxcpp_has_unique_completion()
False
sage: P[0, 0] = 0
sage: P.dlxcpp_has_unique_completion()
True
"""
return len(dlxcpp_find_completions(self, nr_to_find = 2)) == 1
def vals_in_row(self, r):
"""
Returns a dictionary with key e if and only if row r of self has
the symbol e.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: B = back_circulant(3)
sage: B[0, 0] = -1
sage: back_circulant(3).vals_in_row(0)
{0: True, 1: True, 2: True}
"""
n = self.ncols()
vals_in_row = {}
for c in range(n):
e = self[r, c]
if e >= 0: vals_in_row[e] = True
return vals_in_row
def vals_in_col(self, c):
"""
Returns a dictionary with key e if and only if column c of self has
the symbol e.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: B = back_circulant(3)
sage: B[0, 0] = -1
sage: back_circulant(3).vals_in_col(0)
{0: True, 1: True, 2: True}
"""
n = self.nrows()
vals_in_col = {}
for r in range(n):
e = self[r, c]
if e >= 0: vals_in_col[e] = True
return vals_in_col
def latex(self):
r"""
Returns LaTeX code for the latin square.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: print(back_circulant(3).latex())
\begin{array}{|c|c|c|}\hline 0 & 1 & 2\\\hline 1 & 2 & 0\\\hline 2 & 0 & 1\\\hline\end{array}
"""
a = ""
a += r"\begin{array}{" + self.ncols()*"|c" + "|}"
for r in range(self.nrows()):
a += r"\hline "
for c in range(self.ncols()):
s = self[r, c]
if s < 0: a += "~"
else: a += str(s)
if c < self.ncols()-1: a += " & "
else: a += "\\\\"
a += r"\hline"
a += r"\end{array}"
return a
def disjoint_mate_dlxcpp_rows_and_map(self, allow_subtrade):
"""
Internal function for find_disjoint_mates.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: B = back_circulant(4)
sage: B.disjoint_mate_dlxcpp_rows_and_map(allow_subtrade = True)
([[0, 16, 32],
[1, 17, 32],
[2, 18, 32],
[3, 19, 32],
[4, 16, 33],
[5, 17, 33],
[6, 18, 33],
[7, 19, 33],
[8, 16, 34],
[9, 17, 34],
[10, 18, 34],
[11, 19, 34],
[12, 16, 35],
[13, 17, 35],
[14, 18, 35],
[15, 19, 35],
[0, 20, 36],
[1, 21, 36],
[2, 22, 36],
[3, 23, 36],
[4, 20, 37],
[5, 21, 37],
[6, 22, 37],
[7, 23, 37],
[8, 20, 38],
[9, 21, 38],
[10, 22, 38],
[11, 23, 38],
[12, 20, 39],
[13, 21, 39],
[14, 22, 39],
[15, 23, 39],
[0, 24, 40],
[1, 25, 40],
[2, 26, 40],
[3, 27, 40],
[4, 24, 41],
[5, 25, 41],
[6, 26, 41],
[7, 27, 41],
[8, 24, 42],
[9, 25, 42],
[10, 26, 42],
[11, 27, 42],
[12, 24, 43],
[13, 25, 43],
[14, 26, 43],
[15, 27, 43],
[0, 28, 44],
[1, 29, 44],
[2, 30, 44],
[3, 31, 44],
[4, 28, 45],
[5, 29, 45],
[6, 30, 45],
[7, 31, 45],
[8, 28, 46],
[9, 29, 46],
[10, 30, 46],
[11, 31, 46],
[12, 28, 47],
[13, 29, 47],
[14, 30, 47],
[15, 31, 47]],
{(0, 16, 32): (0, 0, 0),
(0, 20, 36): (1, 0, 0),
(0, 24, 40): (2, 0, 0),
(0, 28, 44): (3, 0, 0),
(1, 17, 32): (0, 0, 1),
(1, 21, 36): (1, 0, 1),
(1, 25, 40): (2, 0, 1),
(1, 29, 44): (3, 0, 1),
(2, 18, 32): (0, 0, 2),
(2, 22, 36): (1, 0, 2),
(2, 26, 40): (2, 0, 2),
(2, 30, 44): (3, 0, 2),
(3, 19, 32): (0, 0, 3),
(3, 23, 36): (1, 0, 3),
(3, 27, 40): (2, 0, 3),
(3, 31, 44): (3, 0, 3),
(4, 16, 33): (0, 1, 0),
(4, 20, 37): (1, 1, 0),
(4, 24, 41): (2, 1, 0),
(4, 28, 45): (3, 1, 0),
(5, 17, 33): (0, 1, 1),
(5, 21, 37): (1, 1, 1),
(5, 25, 41): (2, 1, 1),
(5, 29, 45): (3, 1, 1),
(6, 18, 33): (0, 1, 2),
(6, 22, 37): (1, 1, 2),
(6, 26, 41): (2, 1, 2),
(6, 30, 45): (3, 1, 2),
(7, 19, 33): (0, 1, 3),
(7, 23, 37): (1, 1, 3),
(7, 27, 41): (2, 1, 3),
(7, 31, 45): (3, 1, 3),
(8, 16, 34): (0, 2, 0),
(8, 20, 38): (1, 2, 0),
(8, 24, 42): (2, 2, 0),
(8, 28, 46): (3, 2, 0),
(9, 17, 34): (0, 2, 1),
(9, 21, 38): (1, 2, 1),
(9, 25, 42): (2, 2, 1),
(9, 29, 46): (3, 2, 1),
(10, 18, 34): (0, 2, 2),
(10, 22, 38): (1, 2, 2),
(10, 26, 42): (2, 2, 2),
(10, 30, 46): (3, 2, 2),
(11, 19, 34): (0, 2, 3),
(11, 23, 38): (1, 2, 3),
(11, 27, 42): (2, 2, 3),
(11, 31, 46): (3, 2, 3),
(12, 16, 35): (0, 3, 0),
(12, 20, 39): (1, 3, 0),
(12, 24, 43): (2, 3, 0),
(12, 28, 47): (3, 3, 0),
(13, 17, 35): (0, 3, 1),
(13, 21, 39): (1, 3, 1),
(13, 25, 43): (2, 3, 1),
(13, 29, 47): (3, 3, 1),
(14, 18, 35): (0, 3, 2),
(14, 22, 39): (1, | |
networks = []
if all(id in cached_purenets for id in self.networks):
for id in self.networks:
qlknet = cached_purenets[id]['net'].to_QuaLiKizNN(cached_purenets=cached_purenets)
networks.append(qlknet)
else:
nets = {net.id: net for net in Network.select().where(Network.id.in_(network_ids))}
for id in self.networks:
qlknet = nets[id].to_QuaLiKizNN(cached_purenets=cached_purenets)
networks.append(qlknet)
recipe = self.recipe
for ii in range(len(network_ids)):
recipe = re.sub('nn(\d*)', 'args[\\1]', recipe)
        exec('def combo_func(*args): return ' + recipe, globals())
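        # Illustrative example of the recipe rewrite performed above
        # (hypothetical recipe string):
        #     'nn0 + 0.5 * nn1'  ->  'args[0] + 0.5 * args[1]'
        # so combo_func(out0, out1) evaluates the combined network prediction.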
return QuaLiKizComboNN(self.target_names, networks, combo_func, **combo_kwargs)
def to_QuaLiKizNN(self, cached_purenets=None, combo_kwargs=None, **nn_kwargs):
if combo_kwargs is None:
combo_kwargs = {}
if self.networks is None:
net = self.to_QuaLiKizNDNN(cached_purenets=cached_purenets, **nn_kwargs)
else:
net = self.to_QuaLiKizComboNN(cached_purenets=cached_purenets, combo_kwargs=combo_kwargs, **nn_kwargs)
return net
@classmethod
def calc_op(cls, column):
query = (cls.select(
ComboNetwork,
ComboNetwork.id.alias('combo_id'),
fn.ARRAY_AGG(getattr(Hyperparameters, column), coerce=False).alias(column))
.join(Network, on=(Network.id == fn.ANY(ComboNetwork.networks)))
.join(Hyperparameters, on=(Network.id == Hyperparameters.network_id))
.group_by(cls.id)
)
return query
def get_pure_children(self):
if self.networks is None:
return [self]
else:
subq = self.get_recursive_subquery('cost_l2_scale')
subq = subq.having(SQL('root') == self.id)
if len(subq) == 1:
pure_ids = subq.get().pure_children
query = Network.select().where(Network.id.in_(pure_ids))
return [net for net in query]
else:
raise ValueError('Expected exactly one recursive-subquery row for ComboNetwork {!s}'.format(self.id))
class PureNetworkParams(BaseModel):
network = ForeignKeyField(Network, backref='pure_network_params', unique=True)
filter = ForeignKeyField(Filter, backref='pure_network_params')
dataset = TextField()
train_script = ForeignKeyField(TrainScript, backref='pure_network_params')
feature_prescale_bias = HStoreField()
feature_prescale_factor = HStoreField()
target_prescale_bias = HStoreField()
target_prescale_factor = HStoreField()
feature_min = HStoreField()
feature_max = HStoreField()
target_min = HStoreField()
target_max = HStoreField()
timestamp = DateTimeField(constraints=[SQL('DEFAULT now()')])
def download_raw(self):
root_dir = 'Network_' + str(self.network_id)
if os.path.isdir(root_dir):
print('{!s} already exists! Skipping..'.format(root_dir))
return
os.mkdir(root_dir)
network_json = self.network_json.get()
with open(os.path.join(root_dir, 'settings.json'), 'w') as settings_file:
json.dump(network_json.settings_json, settings_file,
sort_keys=True, indent=4)
with open(os.path.join(root_dir, 'nn.json'), 'w') as network_file:
json.dump(network_json.network_json, network_file,
sort_keys=True, indent=4)
with open(os.path.join(root_dir, 'train_NDNN.py'), 'w') as train_file:
train_file.writelines(self.train_script.get().script)
@classmethod
def find_similar_topology_by_settings(cls, settings_path):
with open(settings_path) as file_:
json_dict = json.load(file_)
query = cls.find_similar_topology_by_values(
json_dict['hidden_neurons'],
json_dict['hidden_activation'],
json_dict['output_activation'],
train_dim=json_dict['train_dim'])
return query
@classmethod
def find_similar_topology_by_id(cls, pure_network_params_id, match_train_dim=True):
query = (cls
.select(
Hyperparameters.hidden_neurons,
Hyperparameters.hidden_activation,
Hyperparameters.output_activation)
.where(cls.id == pure_network_params_id)
.join(Hyperparameters)
)
train_dim, = (cls
.select(
Network.target_names)
.where(cls.id == pure_network_params_id)
.join(Network)
).tuples().get()
if match_train_dim is not True:
train_dim = None
query = cls.find_similar_topology_by_values(*query.tuples().get(), train_dim=train_dim)
query = query.where(cls.id != pure_network_params_id)
return query
@classmethod
def find_similar_topology_by_values(cls, hidden_neurons, hidden_activation, output_activation, train_dim=None):
query = (cls.select()
.join(Hyperparameters)
.where(Hyperparameters.hidden_neurons ==
hidden_neurons)
.where(Hyperparameters.hidden_activation ==
hidden_activation)
.where(Hyperparameters.output_activation ==
output_activation))
if train_dim is not None:
query = (query.where(Network.target_names == train_dim)
.switch(cls)
.join(Network))
return query
@classmethod
def find_similar_networkpar_by_settings(cls, settings_path):
with open(settings_path) as file_:
json_dict = json.load(file_)
query = cls.find_similar_networkpar_by_values(
{'goodness': json_dict['goodness'],
'cost_l2_scale': json_dict['cost_l2_scale'],
'cost_l1_scale': json_dict['cost_l1_scale'],
'early_stop_after': json_dict['early_stop_after'],
'early_stop_measure': json_dict['early_stop_measure']},
train_dim=json_dict['train_dim'])
return query
@classmethod
def find_similar_networkpar_by_id(cls, pure_network_params_id, ignore_pars=None, match_train_dim=True):
if ignore_pars is None:
ignore_pars = []
networkpars = ['goodness', 'cost_l2_scale', 'cost_l1_scale', 'early_stop_measure', 'early_stop_after']
select_pars = [getattr(Hyperparameters, par) for par in networkpars if par not in ignore_pars]
query = (cls
.select(*select_pars)
.where(cls.id == pure_network_params_id)
.join(Hyperparameters)
)
filter_id, train_dim = (cls
.select(cls.filter_id,
Network.target_names)
.where(cls.id == pure_network_params_id)
.join(Network)
).tuples().get()
if match_train_dim is not True:
train_dim = None
query = cls.find_similar_networkpar_by_values(query.dicts().get(), filter_id=filter_id, train_dim=train_dim)
query = query.where(cls.id != pure_network_params_id)
return query
@classmethod
def find_similar_networkpar_by_values(cls, networkpar_dict, filter_id=None, train_dim=None):
# TODO: Add new hyperparameters here?
query = (cls.select().join(Hyperparameters))
for parname, val in networkpar_dict.items():
attr = getattr(Hyperparameters, parname)
if isinstance(val, float):
attr = attr.cast('numeric')
query = query.where(attr == val)
if train_dim is not None:
query = (query.where(Network.target_names ==
train_dim)
.switch(cls)
.join(Network)
)
if filter_id is not None:
query = query.where(cls.filter_id ==
filter_id)
else:
print('Warning! Not filtering on filter_id')
return query
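# Hedged usage sketch (parameter values below are hypothetical):
#   pars = {'goodness': 'mse', 'cost_l2_scale': 5e-5, 'cost_l1_scale': 0.0}
#   query = PureNetworkParams.find_similar_networkpar_by_values(
#       pars, filter_id=7, train_dim=['efeITG_GB'])
#   similar_nets = [params.network for params in query]
# Floats are cast to 'numeric' above so that the equality comparison against
# the stored hyperparameter columns is not defeated by float representation.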
#@classmethod
#def find_similar_networkpar_by_settings(cls, settings_path):
# with open(settings_path) as file_:
# json_dict = json.load(file_)
# query = cls.find_similar_networkpar_by_values(json_dict['train_dim'],
# json_dict['goodness'],
# json_dict['cost_l2_scale'],
# json_dict['cost_l1_scale'],
# json_dict['early_stop_measure'])
# return query
@classmethod
def find_similar_trainingpar_by_id(cls, pure_network_params_id):
query = (cls
.select(Network.target_names,
Hyperparameters.minibatches,
Hyperparameters.optimizer,
Hyperparameters.standardization,
Hyperparameters.early_stop_after)
.where(cls.id == pure_network_params_id)
.join(Hyperparameters)
.join(Network)
)
filter_id = (cls
.select(cls.filter_id)
.where(cls.id == pure_network_params_id)
).tuples().get()[0]
query = cls.find_similar_trainingpar_by_values(*query.tuples().get())
query = query.where(cls.id != pure_network_params_id)
return query
@classmethod
def find_similar_trainingpar_by_values(cls, train_dim, minibatches, optimizer, standardization, early_stop_after):
query = (cls.select()
.where(Network.target_names == AsIs(train_dim))
.join(Hyperparameters)
.where(Hyperparameters.minibatches == minibatches)
.where(Hyperparameters.optimizer == optimizer)
.where(Hyperparameters.standardization == standardization)
.where(Hyperparameters.early_stop_after == early_stop_after)
)
return query
@staticmethod
def is_ready_to_be_submitted(pwd):
script_path = os.path.join(pwd, 'train_NDNN.py')
settings_path = os.path.join(pwd, 'settings.json')
for path in [script_path, settings_path]:
if not os.path.isfile(path):
print('{!s} does not exist. Is this even a NN folder?'.format(path))
return False
json_path = os.path.join(pwd, 'nn.json')
if not os.path.isfile(json_path):
print('{!s} does not exist. No checkpoints or final networks found'.format(json_path))
return False
else:
with open(json_path) as file_:
json_dict = json.load(file_)
if '_metadata' not in json_dict:
print('{!s} exists but does not contain metadata. Training not done'.format(json_path))
return False
return True
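# Folder layout expected by is_ready_to_be_submitted/from_folder (a sketch
# inferred from the checks above; only the three file names come from this
# module, the directory name is illustrative):
#   nn_run_001/
#       train_NDNN.py    # training script, stored via TrainScript.from_file
#       settings.json    # training settings, including 'dataset_path'
#       nn.json          # trained network; must contain a '_metadata' section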
@classmethod
def from_folders(cls, pwd, **kwargs):
for path_ in os.listdir(pwd):
path_ = os.path.join(pwd, path_)
if os.path.isdir(path_):
try:
cls.from_folder(path_, **kwargs)
except OSError:
print('Could not parse', path_, 'is training done?')
@classmethod
@db.atomic()
def from_folder(cls, pwd):
if not cls.is_ready_to_be_submitted(pwd):
raise OSError('{!s} is not ready to be submitted!'.format(pwd))
script_path = os.path.join(pwd, 'train_NDNN.py')
#with open(script_file, 'r') as script:
# script = script.read()
train_script = TrainScript.from_file(script_path)
json_path = os.path.join(pwd, 'nn.json')
nn = QuaLiKizNDNN.from_json(json_path)
with open(json_path) as file_:
json_dict = json.load(file_)
dict_ = {}
for name in ['feature_prescale_bias', 'feature_prescale_factor',
'target_prescale_bias', 'target_prescale_factor',
'feature_names', 'feature_min', 'feature_max',
'target_names', 'target_min', 'target_max']:
attr = getattr(nn, '_' + name)
if 'names' in name:
dict_[name] = list(attr)
else:
dict_[name] = {str(key): str(val) for key, val in attr.items()}
dict_['train_script'] = train_script
net_dict = {'feature_names': dict_.pop('feature_names'),
'target_names': dict_.pop('target_names')}
settings_path = os.path.join(pwd, 'settings.json')
with open(settings_path) as file_:
settings = json.load(file_)
unstable, set, gen, dim, dataset, filter_id = parse_dataset_name(settings['dataset_path'])
dict_['filter_id'] = filter_id
dict_['dataset'] = dataset
network = Network.create(**net_dict)
dict_['network'] = network
pure_network_params = PureNetworkParams.create(**dict_)
pure_network_params.save()
hyperpar = Hyperparameters.from_settings(pure_network_params, settings)
hyperpar.save()
if settings['optimizer'] == 'lbfgs':
optimizer = LbfgsOptimizer(
pure_network_params=pure_network_params,
maxfun=settings['lbfgs_maxfun'],
maxiter=settings['lbfgs_maxiter'],
maxls=settings['lbfgs_maxls'])
elif settings['optimizer'] == 'adam':
optimizer = AdamOptimizer(
pure_network_params=pure_network_params,
learning_rate=settings['learning_rate'],
beta1=settings['adam_beta1'],
beta2=settings['adam_beta2'])
elif settings['optimizer'] == 'adadelta':
optimizer = AdadeltaOptimizer(
pure_network_params=pure_network_params,
learning_rate=settings['learning_rate'],
rho=settings['adadelta_rho'])
elif settings['optimizer'] == 'rmsprop':
optimizer = RmspropOptimizer(
pure_network_params=pure_network_params,
learning_rate=settings['learning_rate'],
decay=settings['rmsprop_decay'],
momentum=settings['rmsprop_momentum'])
optimizer.save()
activations = settings['hidden_activation'] + [settings['output_activation']]
for ii, layer in enumerate(nn.layers):
nwlayer = NetworkLayer.create(
pure_network_params=pure_network_params,
weights = np.float32(layer._weights).tolist(),
biases = np.float32(layer._biases).tolist(),
activation = activations[ii])
NetworkMetadata.from_dict(json_dict['_metadata'], pure_network_params)
TrainMetadata.from_folder(pwd, pure_network_params)
network_json = NetworkJSON.create(
pure_network_params=pure_network_params,
network_json=json_dict,
settings_json=settings)
return network
#def to_QuaLiKizNDNN(self, **nn_kwargs):
# json_dict = self.network_json.get().network_json
# nn = QuaLiKizNDNN(json_dict, **nn_kwargs)
# return nn
#to_QuaLiKizNN = to_QuaLiKizNDNN
def to_matlab_dict(self):
js = self.network_json.get().network_json
matdict = nn_dict_to_matlab(js)
return matdict
def to_matlab(self):
import scipy.io as io
io.savemat(str(self.id) + '.mat', self.to_matlab_dict())
def summarize(self):
net = self.select().get()
print({'target_names': net.target_names,
'rms_test': net.network_metadata.get().rms_test,
'rms_train': net.network_metadata.get().rms_train,
'rms_validation': net.network_metadata.get().rms_validation,
'epoch': net.network_metadata.get().epoch,
'train_time': net.train_metadata.get().walltime[-1],
'hidden_neurons': net.hyperparameters.get().hidden_neurons,
'standardization': net.hyperparameters.get().standardization,
'cost_l2_scale': net.hyperparameters.get().cost_l2_scale,
'early_stop_after': net.hyperparameters.get().early_stop_after}
)
class NetworkJSON(BaseModel):
pure_network_params = ForeignKeyField(PureNetworkParams, backref='network_json', unique=True)
network_json = BinaryJSONField()
settings_json = BinaryJSONField()
class NetworkLayer(BaseModel):
pure_network_params = ForeignKeyField(PureNetworkParams, backref='network_layer')
weights = ArrayField(FloatField)
biases = ArrayField(FloatField)
activation = TextField()
class NetworkMetadata(BaseModel):
pure_network_params = ForeignKeyField(PureNetworkParams, backref='network_metadata', unique=True)
epoch = IntegerField()
best_epoch = IntegerField()
rms_test = FloatField(null=True)
rms_train = FloatField(null=True)
rms_validation = FloatField()
rms_validation_descaled = FloatField(null=True)
loss_test = FloatField(null=True)
loss_train = FloatField(null=True)
loss_validation = FloatField()
l2_loss_validation = FloatField(null=True)
walltime = FloatField()
stop_reason = TextField()
stable_positive_loss_validation = FloatField(null=True)
metadata = HStoreField()
@staticmethod
def parse_dict(json_dict):
stringified = {str(key): str(val) for key, val in json_dict.items()}
try:
rms_train = json_dict['rms_train']
loss_train = json_dict['loss_train']
except KeyError:
loss_train = rms_train = None
try:
loss_test = json_dict['loss_test']
rms_test = json_dict['rms_test']
except KeyError:
rms_test = loss_test = None
try:
rms_validation_descaled = json_dict['rms_validation_descaled']
except KeyError:
rms_validation_descaled = None
return dict(epoch=json_dict['epoch'],
best_epoch=json_dict['best_epoch'],
rms_train=rms_train,
rms_validation=json_dict['rms_validation'],
rms_validation_descaled=rms_validation_descaled,
rms_test=rms_test,
loss_train=loss_train,
loss_validation=json_dict['loss_validation'],
loss_test=loss_test,
l2_loss_validation=json_dict['l2_loss_validation'],
walltime=json_dict['walltime [s]'],
stop_reason=json_dict['stop_reason'],
stable_positive_loss_validation=json_dict['stable_positive_loss_validation'],
metadata=stringified)
@classmethod
@db.atomic()
def from_dict(cls, json_dict, pure_network_params):
dict_ = cls.parse_dict(json_dict)
network_metadata = NetworkMetadata(
pure_network_params=pure_network_params,
**dict_
)
network_metadata.save()
return network_metadata
class TrainMetadata(BaseModel):
pure_network_params = ForeignKeyField(PureNetworkParams, backref='train_metadata')
set = TextField(choices=['train', 'test', 'validation'])
step = ArrayField(IntegerField)
epoch = ArrayField(IntegerField)
walltime = ArrayField(FloatField)
loss = ArrayField(FloatField)
mse = ArrayField(FloatField)
mabse = ArrayField(FloatField, null=True)
l1_norm = ArrayField(FloatField, null=True)
l2_norm = ArrayField(FloatField, null=True)
stable_positive_loss = ArrayField(FloatField, null=True)
hostname = TextField()
@classmethod
@db.atomic()
def from_folder(cls, pwd, pure_network_params):
train_metadatas = []
for name in cls.set.choices:
try:
with open(os.path.join(pwd, name + '_log.csv')) as file_:
df = pd.read_csv(file_)
except IOError:
pass
else:
# TODO: Only works on debian-like
df.columns = [col.strip() for col in df.columns]
train_metadata = TrainMetadata(
pure_network_params=pure_network_params,
set=name,
step=[int(x) for x in df.index],
epoch=[int(x) for x in df['epoch']],
walltime=df['walltime'],
loss=df['loss'],
mse=df['mse'],
mabse=df['mabse'],
l1_norm=df['l1_norm'],
l2_norm=df['l2_norm'],
stable_positive_loss=df['stable_positive_loss'],
hostname=socket.gethostname()
)
train_metadata.save()
train_metadatas.append(train_metadata)
return train_metadatas
class Hyperparameters(BaseModel):
pure_network_params = ForeignKeyField(PureNetworkParams, backref='hyperparameters', unique=True)
hidden_neurons = ArrayField(IntegerField)
hidden_activation = ArrayField(TextField)
output_activation = TextField()
standardization = TextField()
goodness = TextField()
drop_chance = FloatField()
optimizer = TextField()
cost_l2_scale = FloatField()
cost_l1_scale | |
"""
---------------------------------------------------------------------
-- Author: <NAME>
---------------------------------------------------------------------
Semisupervised generative model with metric embedding auxiliary task
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from networks.CatVAENetwork import *
from losses.LossFunctions import *
from utils.partition import *
from utils.assignment import *
class SSVAE:
def __init__(self, params):
self.batch_size = params.batch_size
self.batch_size_val = params.batch_size_val
self.initial_temperature = params.temperature
self.decay_temperature = params.decay_temperature
self.num_epochs = params.num_epochs
self.loss_type = params.loss_type
self.num_classes = params.num_classes
self.w_gauss = params.w_gaussian
self.w_categ = params.w_categorical
self.w_recon = params.w_reconstruction
self.decay_temp_rate = params.decay_temp_rate
self.gaussian_size = params.gaussian_size
self.feature_size = params.feature_size
self.min_temperature = params.min_temperature
self.temperature = params.temperature # current temperature
self.verbose = params.verbose
self.sess = tf.Session()
self.network = CatVAENetwork(params)
self.losses = LossFunctions()
self.w_assign = params.w_assign
self.num_labeled = params.num_labeled
self.knn = params.knn
self.metric_loss = params.metric_loss
self.w_metric = tf.placeholder(tf.float32, [])
self.initial_w_metric = params.w_metric
self._w_metric = params.w_metric
self.anneal_metric_loss = params.anneal_w_metric
self.learning_rate = tf.placeholder(tf.float32, [])
self.lr = params.learning_rate
self.decay_epoch = params.decay_epoch
self.lr_decay = params.lr_decay
self.pretrain = params.pretrain
self.num_labeled_batch = params.num_labeled_batch
self.dataset = params.dataset
self.metric_margin = params.metric_margin
def create_dataset(self, is_training, data, labels, batch_size, x_labeled = None, y_labeled = None):
"""Create dataset given input data
Args:
is_training: (bool) whether to use the train or test pipeline.
At training, we shuffle the data and have multiple epochs
data: (array) corresponding array containing the input data
labels: (array) corresponding array containing the labels of the input data
batch_size: (int) size of each batch to consider from the data
x_labeled: (array) corresponding array containing the labeled input data
y_labeled: (array) corresponding array containing the labels of the labeled input data
Returns:
output: (dict) contains what will be the input of the tensorflow graph
"""
num_samples = data.shape[0]
# create dataset object
if labels is None:
dataset = tf.data.Dataset.from_tensor_slices(data)
else:
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
# shuffle data in training phase
if is_training:
dataset = dataset.shuffle(num_samples).repeat()
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(1)
# create reinitializable iterator from dataset
iterator = dataset.make_initializable_iterator()
labeled_data = False
if labels is None:
data = iterator.get_next()
else:
data, labels = iterator.get_next()
# append labeled data to each batch
if x_labeled is not None:
labeled_data = True
_data = tf.concat([data, x_labeled], 0)
_labels = tf.concat([labels, y_labeled], 0)
iterator_init = iterator.initializer
if labeled_data:
output = {'data': _data, 'labels': _labels, 'iterator_init': iterator_init}
output['labels_semisupervised'] = y_labeled
else:
output = {'data': data, 'labels': labels, 'iterator_init': iterator_init}
output['labels_semisupervised'] = None
return output
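# Minimal usage sketch (array names and shapes are assumptions, not taken
# from the original training script):
#   inputs = model.create_dataset(True, x_unlabeled, y_unlabeled,
#                                 model.batch_size - model.num_labeled_batch,
#                                 x_labeled=model.x_labeled,
#                                 y_labeled=model.y_labeled)
#   sess.run(inputs['iterator_init'])
#   batch_x, batch_y = sess.run([inputs['data'], inputs['labels']])
# Because the labeled placeholders are concatenated onto every batch, the
# effective batch size is batch_size + num_labeled_batch.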
def create_model(self, is_training, inputs, output_size):
"""Model function defining the graph operations.
Args:
is_training: (bool) whether we are in training phase or not
inputs: (dict) contains the inputs of the graph (features, labels...)
this can be `tf.placeholder` or outputs of `tf.data`
output_size: (int) size of the output layer
Returns:
model_spec: (dict) contains the graph operations or nodes needed for training / evaluation
"""
data, _labels = inputs['data'], inputs['labels']
# create network and obtain latent vectors that will be used in loss functions
latent_spec = self.network.encoder(data, self.num_classes, is_training)
gaussian, mean, logVar = latent_spec['gaussian'], latent_spec['mean'], latent_spec['logVar']
categorical, prob, log_prob = latent_spec['categorical'], latent_spec['prob_cat'], latent_spec['log_prob_cat']
_logits, features = latent_spec['logits'], latent_spec['features']
output = self.network.decoder(gaussian, categorical, output_size, is_training)
# reconstruction loss
if self.loss_type == 'bce':
loss_rec = self.losses.binary_cross_entropy(data, output)
elif self.loss_type == 'mse':
loss_rec = tf.losses.mean_squared_error(data, output)
else:
raise "invalid loss function... try bce or mse..."
# kl-divergence loss
loss_kl = self.losses.kl_gaussian(mean, logVar)
loss_kl_cat = self.losses.kl_categorical(prob, log_prob, self.num_classes)
# auxiliary task to assign labels and regularize the feature space
if _labels is not None:
labeled_ss = inputs['labels_semisupervised']
if labeled_ss is not None:
# assignment loss only if labeled data is available (training phase)
predicted_labels = assign_labels_semisupervised(features, labeled_ss, self.num_labeled_batch,
self.batch_size, self.num_classes, self.knn)
# use assigned labels and logits to calculate cross entropy loss
loss_assign = tf.losses.sparse_softmax_cross_entropy(labels=predicted_labels, logits=_logits)
else:
# predict labels from logits or softmax(logits) (validation/testing phase)
loss_assign = tf.constant(0.)
predicted_labels = tf.argmax(prob, axis=1)
# calculate accuracy using the predicted and true labels
accuracy = tf.reduce_mean( tf.cast( tf.equal(_labels, predicted_labels), tf.float32 ) )
# metric embedding loss
if self.metric_loss == 'triplet':
loss_metric = tf.contrib.losses.metric_learning.triplet_semihard_loss(predicted_labels, features, margin=self.metric_margin)
elif self.metric_loss == 'lifted':
loss_metric = tf.contrib.losses.metric_learning.lifted_struct_loss(predicted_labels, features, margin=self.metric_margin)
else:
raise "invalid metric loss... currently we support triplet and lifted loss"
else:
accuracy = tf.constant(0.)
loss_assign = tf.constant(0.)
loss_metric = tf.constant(0.)
predicted_labels = tf.constant(0.)
# variational autoencoder loss
loss_vae = self.w_recon * loss_rec
loss_vae += self.w_gauss * loss_kl
loss_vae += self.w_categ * loss_kl_cat
# total loss
loss_total = loss_vae + self.w_assign * loss_assign + self.w_metric * loss_metric
if is_training:
# use adam for optimization
optimizer = tf.train.AdamOptimizer(self.learning_rate)
# needed for batch normalization layer
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op_vae = optimizer.minimize(loss_vae)
train_op_tot = optimizer.minimize(loss_total)
# create model specification
model_spec = inputs
model_spec['variable_init_op'] = tf.global_variables_initializer()
model_spec['output'] = output
model_spec['features'] = features
model_spec['predicted_labels'] = predicted_labels
model_spec['true_labels'] = _labels
model_spec['loss_rec'] = loss_rec
model_spec['loss_kl'] = loss_kl
model_spec['loss_kl_cat'] = loss_kl_cat
model_spec['loss_total'] = loss_total
model_spec['loss_metric'] = loss_metric
model_spec['loss_assign'] = loss_assign
model_spec['accuracy'] = accuracy
# optimizers are only available in training phase
if is_training:
model_spec['train_op'] = train_op_tot
model_spec['train_vae'] = train_op_vae
return model_spec
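# Hedged sketch of how the graph is wired together (mirrors the calls made
# in train() below; exact shapes are assumptions):
#   train_inputs = self.create_dataset(True, train_data, train_labels,
#                                      self.batch_size - self.num_labeled_batch,
#                                      self.x_labeled, self.y_labeled)
#   train_model = self.create_model(True, train_inputs, train_data.shape[1])
#   self.sess.run(train_model['variable_init_op'])
# After initialization, train_model['train_op'] optimizes the full
# semisupervised loss and train_model['train_vae'] only the VAE terms.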
def evaluate_dataset(self, is_training, num_batches, model_spec, labeled_data = None, labeled_labels = None):
"""Evaluate the model
Args:
is_training: (bool) whether we are training or not
num_batches: (int) number of batches to train/test
model_spec: (dict) contains the graph operations or nodes needed for evaluation
labeled_data: (array) corresponding array containing the labeled input data
labeled_labels: (array) corresponding array containing the labels of the labeled input data
Returns:
(dic) average of loss functions and metrics for the given number of batches
"""
avg_accuracy = 0.0
avg_nmi = 0.0
avg_loss_rec = 0.0
avg_loss_kl = 0.0
avg_loss_cat = 0.0
avg_loss_total = 0.0
avg_loss_metric = 0.0
avg_loss_assign = 0.0
# initialize dataset iterator
self.sess.run(model_spec['iterator_init'])
if is_training:
# while pretraining epochs remain, only the variational autoencoder
# losses are optimized; afterwards the full semisupervised loss is used
if self.pretrain < 1:
train_optimizer = model_spec['train_op']
else:
train_optimizer = model_spec['train_vae']
# training phase
for j in range(num_batches):
# randomly select subsets of labeled data according to the batch size
_x_labeled, _y_labeled, _, _ = create_semisupervised_dataset(labeled_data, labeled_labels,
self.num_classes, self.num_labeled_batch)
# run the tensorflow flow graph
_, loss_rec, loss_kl, loss_metric, loss_assign, loss_cat, loss_total, accuracy = self.sess.run([train_optimizer,
model_spec['loss_rec'], model_spec['loss_kl'],
model_spec['loss_metric'], model_spec['loss_assign'],
model_spec['loss_kl_cat'], model_spec['loss_total'],
model_spec['accuracy']],
feed_dict={self.network.temperature: self.temperature
, self.w_metric: self._w_metric
, self.learning_rate: self.lr
, self.x_labeled: _x_labeled
, self.y_labeled: _y_labeled})
# accumulate values
avg_accuracy += accuracy
avg_loss_rec += loss_rec
avg_loss_kl += loss_kl
avg_loss_cat += loss_cat
avg_loss_total += loss_total
avg_loss_metric += loss_metric
avg_loss_assign += loss_assign
else:
# validation phase
for j in range(num_batches):
# run the tensorflow flow graph
loss_rec, loss_kl, loss_metric, loss_assign, loss_cat, loss_total, accuracy = self.sess.run([
model_spec['loss_rec'], model_spec['loss_kl'],
model_spec['loss_metric'], model_spec['loss_assign'],
model_spec['loss_kl_cat'], model_spec['loss_total'],
model_spec['accuracy']],
feed_dict={self.network.temperature: self.temperature
,self.w_metric: self._w_metric
,self.learning_rate: self.lr})
# accumulate values
avg_accuracy += accuracy
avg_loss_rec += loss_rec
avg_loss_kl += loss_kl
avg_loss_cat += loss_cat
avg_loss_total += loss_total
avg_loss_metric += loss_metric
avg_loss_assign += loss_assign
# average values by the given number of batches
avg_loss_rec /= num_batches
avg_loss_kl /= num_batches
avg_accuracy /= num_batches
avg_loss_cat /= num_batches
avg_loss_total /= num_batches
avg_loss_metric /= num_batches
avg_loss_assign /= num_batches
return {'avg_loss_rec': avg_loss_rec, 'avg_loss_kl': avg_loss_kl, 'avg_loss_cat': avg_loss_cat,
'total_loss': avg_loss_total, 'avg_accuracy': avg_accuracy,
'avg_loss_metric': avg_loss_metric, 'avg_loss_assign': avg_loss_assign}
def train(self, train_data, train_labels, val_data, val_labels, labeled_data, labeled_labels):
"""Train the model
Args:
train_data: (array) corresponding array containing the training data
train_labels: (array) corresponding array containing the labels of the training data
val_data: (array) corresponding array containing the validation data
val_labels: (array) corresponding array containing the labels of the validation data
labeled_data: (array) corresponding array containing the labeled input data
labeled_labels: (array) corresponding array containing the labels of the labeled input data
Returns:
output: (dict) contains the history of train/val loss
"""
train_history_loss, val_history_loss = [], []
train_history_acc, val_history_acc = [], []
train_history_nmi, val_history_nmi = [], []
# placeholders for the labeled data
self.x_labeled = tf.placeholder(tf.float32, shape = [self.num_labeled_batch, labeled_data.shape[1]])
self.y_labeled = tf.placeholder(tf.int64, shape = [self.num_labeled_batch])
# create training and validation dataset
train_dataset = self.create_dataset(True, train_data, train_labels,
self.batch_size - self.num_labeled_batch, self.x_labeled, self.y_labeled)
val_dataset = self.create_dataset(True, val_data, val_labels, self.batch_size_val)
| |
from typing import Text, List, Any, Tuple, Callable, Dict, Optional
import dataclasses
import numpy as np
import pytest
from rasa.engine.graph import ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.constants import SPACY_DOCS, TOKENS_NAMES
from rasa.shared.nlu.constants import TEXT, INTENT, RESPONSE
from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
@pytest.fixture()
def resource() -> Resource:
return Resource("regex_featurizer")
@pytest.fixture()
def create_featurizer(
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
resource: Resource,
) -> Callable[..., RegexFeaturizer]:
def inner(
config: Dict[Text, Any] = None,
known_patterns: Optional[List[Dict[Text, Any]]] = None,
) -> RegexFeaturizer:
config = config or {}
return RegexFeaturizer(
{**RegexFeaturizer.get_default_config(), **config},
default_model_storage,
resource,
default_execution_context,
known_patterns,
)
return inner
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"labeled_tokens",
[
(
"hey how are you today",
[
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
[0.0, 1.0, 0.0],
[0],
),
(
"hey 456 how are you",
[
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
[1.0, 1.0, 0.0],
[1, 0],
),
(
"blah balh random eh",
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[0.0, 0.0, 0.0],
[],
),
(
"a 1 digit number",
[[0.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[1.0, 0.0, 1.0],
[1, 1],
),
],
)
def test_regex_featurizer(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[int],
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer(known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence, RESPONSE: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, tokens, expected_sequence_features, expected_sentence_features,"
"labeled_tokens",
[
(
"明天上海的天气怎么样?",
[("明天", 0), ("上海", 2), ("的", 4), ("天气", 5), ("怎么样", 7), ("?", 10)],
[[0.0, 1.0], [1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[1.0, 1.0],
[0.0, 1.0],
),
(
"北京的天气如何?",
[("北京", 0), ("的", 2), ("天气", 3), ("如何", 5), ("?", 7)],
[[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[1.0, 0.0],
[0.0],
),
(
"昨天和今天的天气都不错",
[("昨天", 0), ("和", 2), ("今天", 3), ("的", 5), ("天气", 6), ("都", 8), ("不错", 9)],
[
[0.0, 1.0],
[0.0, 0.0],
[0.0, 1.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
],
[0.0, 1.0],
[0.0, 2.0],
),
(
"后天呢?",
[("后天", 0), ("呢", 2), ("?", 3)],
[[0.0, 1.0], [0.0, 0.0], [0.0, 0.0]],
[0.0, 1.0],
[0.0],
),
],
)
def test_lookup_tables_without_use_word_boundaries(
sentence: Text,
tokens: List[Tuple[Text, float]],
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[float],
create_featurizer: Callable[..., RegexFeaturizer],
):
from rasa.nlu.tokenizers.tokenizer import Token
lookups = [
{"name": "cites", "elements": ["北京", "上海", "广州", "深圳", "杭州"]},
{"name": "dates", "elements": ["昨天", "今天", "明天", "后天"]},
]
ftr = create_featurizer({"use_word_boundaries": False})
training_data = TrainingData()
training_data.lookup_tables = lookups
ftr.train(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(TOKENS_NAMES[TEXT], [Token(word, start) for (word, start) in tokens])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features, "
"labeled_tokens",
[
(
"lemonade and mapo tofu",
[[1.0, 0.0], [0.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[1.0, 1.0],
[0.0, 2.0, 3.0],
),
(
"a cup of tea",
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [1.0, 0.0]],
[1.0, 0.0],
[3.0],
),
(
"Is burrito my favorite food?",
[[0.0, 0.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
[0.0, 1.0],
[1.0],
),
("I want club?mate", [[0.0, 0.0], [0.0, 0.0], [1.0, 0.0]], [1.0, 0.0], [2.0]),
],
)
def test_lookup_tables(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
labeled_tokens: List[float],
spacy_nlp: Any,
spacy_tokenizer: SpacyTokenizer,
create_featurizer: Callable[..., RegexFeaturizer],
):
lookups = [
{
"name": "drinks",
"elements": ["mojito", "lemonade", "sweet berry wine", "tea", "club?mate"],
},
{"name": "plates", "elements": "data/test/lookup_tables/plates.txt"},
]
ftr = create_featurizer()
training_data = TrainingData()
training_data.lookup_tables = lookups
ftr.train(training_data)
ftr.process_training_data(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set("text_spacy_doc", spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray(), expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray(), expected_sentence_features, atol=1e-10
)
# the tokenizer should have added tokens
assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
# the number of regex matches on each token should match
for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
token_matches = token.get("pattern").values()
num_matches = sum(token_matches)
assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features",
[
("hey how are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]),
("hey 456 how are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]),
("blah balh random eh", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]),
("a 1 digit number", [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]),
],
)
def test_regex_featurizer_no_sequence(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer(known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray()[0], expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray()[-1], expected_sentence_features, atol=1e-10
)
def test_regex_featurizer_train(
create_featurizer: Callable[..., RegexFeaturizer],
whitespace_tokenizer: WhitespaceTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
featurizer = create_featurizer()
sentence = "hey how are you today 19.12.2019 ?"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
message.set(INTENT, "intent")
whitespace_tokenizer.process_training_data(TrainingData([message]))
training_data = TrainingData([message], regex_features=patterns)
featurizer.train(training_data)
featurizer.process_training_data(training_data)
expected = np.array([0, 1, 0])
expected_cls = np.array([1, 1, 1])
seq_vecs, sen_vec = message.get_sparse_features(TEXT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (6, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
seq_vecs, sen_vec = message.get_sparse_features(RESPONSE, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert (6, 3) == seq_vecs.shape
assert (1, 3) == sen_vec.shape
assert np.all(seq_vecs.toarray()[0] == expected)
assert np.all(sen_vec.toarray()[-1] == expected_cls)
seq_vecs, sen_vec = message.get_sparse_features(INTENT, [])
if seq_vecs:
seq_vecs = seq_vecs.features
if sen_vec:
sen_vec = sen_vec.features
assert seq_vecs is None
assert sen_vec is None
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"case_sensitive",
[
("Hey How are you today", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], True),
("Hey How are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], False),
("Hey 456 How are you", [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], True),
("Hey 456 How are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], False),
],
)
def test_regex_featurizer_case_sensitive(
sentence: Text,
expected_sequence_features: List[float],
expected_sentence_features: List[float],
case_sensitive: bool,
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
patterns = [
{"pattern": "[0-9]+", "name": "number", "usage": "intent"},
{"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
{"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
]
ftr = create_featurizer({"case_sensitive": case_sensitive}, known_patterns=patterns)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
sequence_features, sentence_features = ftr._features_for_patterns(message, TEXT)
assert np.allclose(
sequence_features.toarray()[0], expected_sequence_features, atol=1e-10
)
assert np.allclose(
sentence_features.toarray()[-1], expected_sentence_features, atol=1e-10
)
@pytest.mark.parametrize(
"sentence, expected_sequence_features, expected_sentence_features,"
"labeled_tokens, use_word_boundaries",
[
("how are you", [[1.0], [0.0], [0.0]], [1.0], [0.0], True),
("how are you", [[1.0], [0.0], [0.0]], [1.0], [0.0], False),
("Take a shower", [[0.0], [0.0], [0.0]], [0.0], [], True),
("Take a shower", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
("What a show", [[0.0], [0.0], [0.0]], [0.0], [], True),
("What a show", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
("The wolf howled", [[0.0], [0.0], [0.0]], [0.0], [], True),
("The wolf howled", [[0.0], [0.0], [1.0]], [1.0], [2.0], False),
],
)
def test_lookup_with_and_without_boundaries(
sentence: Text,
expected_sequence_features: List[List[float]],
expected_sentence_features: List[float],
labeled_tokens: List[float],
use_word_boundaries: bool,
spacy_nlp: Any,
create_featurizer: Callable[..., RegexFeaturizer],
spacy_tokenizer: SpacyTokenizer,
):
ftr = create_featurizer({"use_word_boundaries": use_word_boundaries})
training_data = TrainingData()
# we use lookups because the "use_word_boundaries" flag is only used when
# producing patterns from lookup tables
lookups = [{"name": "how", "elements": ["how"]}]
training_data.lookup_tables = lookups
ftr.train(training_data)
# adds tokens to the message
message = Message(data={TEXT: sentence})
message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
spacy_tokenizer.process([message])
(sequence_features, sentence_features) = ftr._features_for_patterns(message, TEXT)
#
# Tobii controller for PsychoPy
#
# author: <NAME>
# Distributed under the terms of the GNU General Public License v3 (GPLv3).
#
# edited by: <NAME> and <NAME>
#
#
from __future__ import division
from __future__ import absolute_import
import types
import datetime
import numpy as np
import time
import warnings
import math
import tobii_research
try:
import Image
import ImageDraw
except ImportError:
from PIL import Image
from PIL import ImageDraw
import psychopy.visual
import psychopy.event
import psychopy.core
import psychopy.monitors
import psychopy.logging
import psychopy.sound
class tobii_controller:
"""
Default estimates, subject to change
"""
dist_to_screen = 60
screen_width = 1200
screen_height = 800
"""
PsychoPy specifications
"""
psychopy.logging.console.setLevel(psychopy.logging.CRITICAL) # IGNORE UNSAVED MONITOR WARNINGS IN CONSOLE
default_background_color = [-1,-1,-1]
is_mouse_enabled = False
rot_deg_per_frame = 3 # how many degrees of rotation per frame
default_calibration_target_dot_size = {
'pix': 2.0, 'norm':0.004, 'height':0.002, 'cm':0.05,
'deg':0.05, 'degFlat':0.05, 'degFlatPos':0.05
}
default_calibration_target_disc_size = {
'pix': 2.0*20, 'norm':0.004*20, 'height':0.002*20, 'cm':0.05*20,
'deg':0.05*20, 'degFlat':0.05*20, 'degFlatPos':0.05*20
}
default_key_index_dict = {
'1':0, 'num_1':0, '2':1, 'num_2':1, '3':2, 'num_3':2,
'4':3, 'num_4':3, '5':4, 'num_5':4, '6':5, 'num_6':5,
'7':6, 'num_7':6, '8':7, 'num_8':7, '9':8, 'num_9':8
}
"""
Tobii controller for PsychoPy
tobii_research package is required to use this class.
"""
eyetracker = None
calibration = None
win = None
control_window = None
gaze_data = []
event_data = []
retry_points = []
datafile = None
embed_events = False
recording = False
key_index_dict = default_key_index_dict.copy()
# Tobii data collection parameters
subscribe_to_data = False
do_reset_recording = True
current_target = (0.5, 0.5)
global_gaze_data = []
gaze_params = [
'device_time_stamp',
'left_gaze_origin_in_trackbox_coordinate_system',
'left_gaze_origin_in_user_coordinate_system',
'left_gaze_origin_validity',
'left_gaze_point_in_user_coordinate_system',
'left_gaze_point_on_display_area',
'left_gaze_point_validity',
'left_pupil_diameter',
'left_pupil_validity',
'right_gaze_origin_in_trackbox_coordinate_system',
'right_gaze_origin_in_user_coordinate_system',
'right_gaze_origin_validity',
'right_gaze_point_in_user_coordinate_system',
'right_gaze_point_on_display_area',
'right_gaze_point_validity',
'right_pupil_diameter',
'right_pupil_validity',
'system_time_stamp',
'current_target_point_on_display_area'
]
# license_file = "licenses/license_key_00395217_-_DTU_Compute_IS404-100106342114" #lab
license_file = "licenses/license_key_00395217_-_DTU_Compute_IS404-100106241134" #home
def __init__(self, screen_width, screen_height, eyetracker_id=0):
"""
Initialize tobii_controller object.
:param win: PsychoPy Window object.
:param int id: ID of Tobii unit to connect with.
Default value is 0.
"""
self.screen_width = screen_width
self.screen_height = screen_height
self.sound = psychopy.sound.Sound('sounds/baby_einstein.wav')
self.set_up_eyetracker(eyetracker_id)
def set_up_eyetracker(self, eyetracker_id=0):
eyetrackers = tobii_research.find_all_eyetrackers()
if len(eyetrackers)==0:
print('No Tobii eyetrackers')
else:
try:
self.eyetracker = eyetrackers[eyetracker_id]
with open(self.license_file, "rb") as f:
license = f.read()
res = self.eyetracker.apply_licenses(license)
if len(res) == 0:
print("Successfully applied license from single key")
else:
print("Failed to apply license from single key. Validation result: %s." % (res[0].validation_result))
except:
raise ValueError('Invalid eyetracker ID {}\n({} eyetrackers found)'.format(eyetracker_id, len(eyetrackers)))
if self.is_eye_tracker_on():
self.calibration = tobii_research.ScreenBasedCalibration(self.eyetracker)
else:
self.eyetracker = None
def is_eye_tracker_on(self):
self.subscribe_dict()
self.start_recording()
time.sleep(1)
self.stop_recording()
self.unsubscribe_dict()
return len(self.global_gaze_data) > 0
def set_dist_to_screen(self, dist_to_screen):
self.dist_to_screen = dist_to_screen
def play_sound(self):
self.sound.play()
def pause_sound(self):
self.sound.stop()
def cm2deg(self, cm, monitor, correctFlat=False):
"""
Bug-fixed version of psychopy.tools.monitorunittools.cm2deg
(PsychoPy version<=1.85.1).
"""
if not isinstance(monitor, psychopy.monitors.Monitor):
msg = ("cm2deg requires a monitors.Monitor object as the second "
"argument but received %s")
raise ValueError(msg % str(type(monitor)))
dist = monitor.getDistance()
if dist is None:
msg = "Monitor %s has no known distance (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if correctFlat:
return np.degrees(np.arctan(cm / dist))
else:
return cm / (dist * 0.017455)
def pix2deg(self, pixels, monitor, correctFlat=False):
"""
Bug-fixed version of psychopy.tools.monitorunittools.pix2deg
(PsychoPy version<=1.85.1).
"""
scrWidthCm = monitor.getWidth()
scrSizePix = monitor.getSizePix()
if scrSizePix is None:
msg = "Monitor %s has no known size in pixels (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if scrWidthCm is None:
msg = "Monitor %s has no known width in cm (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
cmSize = pixels * float(scrWidthCm) / scrSizePix[0]
return self.cm2deg(cmSize, monitor, correctFlat)
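# Quick numerical check of the conversions above (numbers are illustrative):
# at a viewing distance of 60 cm, 1 cm on screen subtends
#   1 / (60 * 0.017455) ~= 0.955 deg,
# while the correctFlat branch gives degrees(atan(1 / 60)) ~= 0.955 deg, so
# the two branches agree for small angles. pix2deg simply converts pixels to
# cm via cmSize = pixels * scrWidthCm / scrSizePix[0] and reuses cm2deg.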
def make_psycho_window(self, background_color=None, screen=1):
self.bg_color = background_color
# make a new monitor for the window - ignore the warning (we don't store any calibrations for this monitor)
mon = psychopy.monitors.Monitor('MyScreen')
width = self.screen_width if screen == 1 else 700
height = self.screen_height if screen == 1 else 500
mon.setDistance(self.dist_to_screen)
mon.setSizePix((width, height))
bg = self.bg_color if self.bg_color != None else self.default_background_color
if screen == 1:
self.win = psychopy.visual.Window(size=(self.screen_width, self.screen_height), screen=screen, fullscr=True, units='norm', monitor=mon)
self.win.setColor(bg, colorSpace='rgb')
psychopy.event.Mouse(visible=self.is_mouse_enabled, win=self.win)
if screen == 0:
self.control_window = psychopy.visual.Window(size=(width, height), screen=screen, fullscr=False, units='norm', monitor=mon, pos = [1920-width-10,1080/4])
self.control_window.setColor(bg, colorSpace='rgb')
print(self.control_window.pos)
def close_psycho_window(self, screen=1):
self.bg_color = None # reset color scheme
if screen == 1:
self.win.winHandle.set_fullscreen(False) # disable fullscreen
self.win.close()
elif screen == 0:
# self.control_window.winHandle.set_fullscreen(False) # disable fullscreen
self.control_window.close()
def show_status_admin(self, text_color='white', enable_mouse=False, screen=1):
"""
Draw eyetracker status on the screen.
:param text_color: Color of message text. Default value is 'white'
:param bool enable_mouse: If True, mouse operation is enabled.
Default value is False.
"""
self.make_psycho_window(background_color="gray", screen=screen)
window = self.win if screen == 1 else self.control_window
# if enable_mouse == False:
# mouse = psychopy.event.Mouse(visible=False, win=self.win)
self.gaze_data_status = None
msg = psychopy.visual.TextStim(window, color=text_color,
height=0.02, pos=(0,-0.35), units='height', autoLog=False, text="No eye tracker data detected")
bgrect = psychopy.visual.Rect(window,
width=0.6, height=0.6, lineColor='white', fillColor='black',
units='height', autoLog=False)
leye = psychopy.visual.Circle(window,
size=0.05, units='height', lineColor=None, fillColor='green',
autoLog=False)
reye = psychopy.visual.Circle(window, size=0.05, units='height',
lineColor=None, fillColor='red', autoLog=False)
b_show_status = True
while b_show_status:
bgrect.draw()
if self.gaze_data_status is not None:
lp, lv, rp, rv = self.gaze_data_status
msgst = 'Left: {:.3f},{:.3f},{:.3f}\n'.format(*lp)
msgst += 'Right: {:.3f},{:.3f},{:.3f}\n'.format(*rp)
msg.setText(msgst)
if lv:
leye.setPos(((1-lp[0]-0.5)/2,(1-lp[1]-0.5)/2))
leye.setRadius((1-lp[2])/2)
leye.draw()
if rv:
reye.setPos(((1-rp[0]-0.5)/2,(1-rp[1]-0.5)/2))
reye.setRadius((1-rp[2])/2)
reye.draw()
for key in psychopy.event.getKeys():
if key == 'escape' or key == 'space':
b_show_status = False
# if enable_mouse and mouse.getPressed()[0]:
# b_show_status = False
msg.draw()
window.flip()
self.close_psycho_window(screen=screen)
def show_status(self, text_color='white', enable_mouse=False, screen=1):
"""
Draw eyetracker status on the screen.
:param text_color: Color of message text. Default value is 'white'
:param bool enable_mouse: If True, mouse operation is enabled.
Default value is False.
"""
self.make_psycho_window(background_color="gray", screen=screen)
window = self.win if screen == 1 else self.control_window
# if enable_mouse == False:
# mouse = psychopy.event.Mouse(visible=False, win=self.win)
self.gaze_data_status = None
# if self.eyetracker is not None:
# self.eyetracker.subscribe_to(tobii_research.EYETRACKER_GAZE_DATA, self.on_gaze_data_status)
msg = psychopy.visual.TextStim(window, color=text_color,
height=0.02, pos=(0,-0.35), units='height', autoLog=False, text="No eye tracker data detected")
bgrect = psychopy.visual.Rect(window,
width=0.6, height=0.6, lineColor='white', fillColor='black',
units='height', autoLog=False)
leye = psychopy.visual.Circle(window,
size=0.05, units='height', lineColor=None, fillColor='green',
autoLog=False)
reye = psychopy.visual.Circle(window, size=0.05, units='height',
lineColor=None, fillColor='red', autoLog=False)
b_show_status = True
while b_show_status:
bgrect.draw()
if self.gaze_data_status is not None:
lp, lv, rp, rv = self.gaze_data_status
msgst = 'Left: {:.3f},{:.3f},{:.3f}\n'.format(*lp)
msgst += 'Right: {:.3f},{:.3f},{:.3f}\n'.format(*rp)
msg.setText(msgst)
if lv:
leye.setPos(((1-lp[0]-0.5)/2,(1-lp[1]-0.5)/2))
leye.setRadius((1-lp[2])/2)
leye.draw()
if rv:
reye.setPos(((1-rp[0]-0.5)/2,(1-rp[1]-0.5)/2))
reye.setRadius((1-rp[2])/2)
reye.draw()
for key in psychopy.event.getKeys():
if key == 'escape' or key == 'space':
b_show_status = False
# if enable_mouse and mouse.getPressed()[0]:
# b_show_status = False
# msg.draw()
window.flip()
# if self.eyetracker is not None:
# self.eyetracker.unsubscribe_from(tobii_research.EYETRACKER_GAZE_DATA)
self.close_psycho_window(screen=screen)
def on_gaze_data_status(self, gaze_data):
"""
Callback function used by
:func:`~psychopy_tobii_controller.tobii_controller.show_status`
Usually, users don't have to call this method.
"""
lp = gaze_data.left_eye.gaze_origin.position_in_track_box_coordinates
lv = gaze_data.left_eye.gaze_origin.validity
rp = gaze_data.right_eye.gaze_origin.position_in_track_box_coordinates
rv = gaze_data.right_eye.gaze_origin.validity
self.gaze_data_status = (lp, lv, rp, rv)
def start_custom_calibration(self, num_points=2, stim_type="default", stimuli_path="stimuli/smiley_yellow.png"):
# Run calibration.
target_points = [(-0.5, 0.0), (0.5, 0.0)]
if num_points == 5:
target_points = [(-0.4,0.4), (0.4,0.4), (0.0,0.0), (-0.4,-0.4), (0.4,-0.4)]
self.run_calibration(target_points, stim_type=stim_type, stimuli_path="stimuli/smiley_yellow.png")
# THIS CODE MAKES A GAZE TRACE AFTER THE CALIBRATION
# # If calibration is aborted by pressing ESC key, return value of run_calibration()
# # is 'abort'.
# if ret != 'abort':
#
# marker = psychopy.visual.Rect(self.win, width=0.01, height=0.01)
#
# # Start recording.
# self.subscribe()
# waitkey = True
# while waitkey:
# # Get the latest gaze position data.
# currentGazePosition = self.get_current_gaze_position()
#
# # Gaze position is a tuple of four values (lx, ly, rx, ry).
# # The value is numpy.nan if Tobii failed to detect gaze position.
# if not np.nan in currentGazePosition:
# marker.setPos(currentGazePosition[0:2])
# marker.setLineColor('white')
# else:
# marker.setLineColor('red')
# keys = psychopy.event.getKeys ()
# if 'space' in keys:
# waitkey=False
# elif len(keys)>=1:
# # Record the first key name to the data file.
# self.record_event(keys[0])
#
# marker.draw()
# self.win.flip()
# # Stop recording.
# self.unsubscribe()
# # Close the data file.
# self.close_datafile()
# self.close_psycho_window()
def run_calibration(self, calibration_points, move_duration=1.5,
shuffle=True, start_key='space', decision_key='space',
text_color='white', enable_mouse=False, stim_type="default", stimuli_path="stimuli/smiley_yellow.png"):
"""
Run calibration.
:param calibration_points: List of position of calibration points.
:param float move_duration: Duration of animation of calibration target.
Unit is second. Default value is 1.5.
:param bool shuffle: | |
time range to draw.
All child chart instances are updated when time range is updated.
Args:
t_start: Left boundary of drawing in units of cycle time or real time.
t_end: Right boundary of drawing in units of cycle time or real time.
seconds: Set `True` if times are given in SI unit rather than dt.
Raises:
VisualizationError: When times are given in float without specifying dt.
"""
# convert into nearest cycle time
if seconds:
if self.device.dt is not None:
t_start = int(np.round(t_start / self.device.dt))
t_end = int(np.round(t_end / self.device.dt))
else:
raise VisualizationError('Setting time range with SI units requires '
'backend `dt` information.')
self.time_range = (t_start, t_end)
def set_disable_channel(self,
channel: pulse.channels.Channel,
remove: bool = True):
"""Interface method to control visibility of pulse channels.
Specified object in the blocked list will not be shown.
Args:
channel: A pulse channel object to disable.
remove: Set `True` to disable, set `False` to enable.
"""
if remove:
self.disable_chans.add(channel)
else:
self.disable_chans.discard(channel)
def set_disable_type(self,
data_type: types.DataTypes,
remove: bool = True):
"""Interface method to control visibility of data types.
Specified object in the blocked list will not be shown.
Args:
data_type: A drawing data type to disable.
remove: Set `True` to disable, set `False` to enable.
"""
if isinstance(data_type, Enum):
data_type_str = str(data_type.value)
else:
data_type_str = data_type
if remove:
self.disable_types.add(data_type_str)
else:
self.disable_types.discard(data_type_str)
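# Hedged usage sketch (channel index and data type are illustrative):
#   canvas.set_disable_channel(pulse.DriveChannel(2))                 # hide D2
#   canvas.set_disable_type(types.WaveformType.IMAG)                  # hide imaginary parts
#   canvas.set_disable_channel(pulse.DriveChannel(2), remove=False)   # show D2 again
#   canvas.update()
# update() should be called afterwards so the charts regenerate their drawing
# data before the canvas is handed to the plotter.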
def update(self):
"""Update all associated charts and generate actual drawing data from template object.
This method should be called before the canvas is passed to the plotter.
"""
for chart in self.charts:
chart.update()
class Chart:
"""A collection of drawing to be shown on the same line.
Multiple pulse channels can be assigned to a single `Chart`.
The parent `DrawerCanvas` should be specified to refer to the current user preference.
The vertical value of each `Chart` should be in the range [-1, 1].
This truncation should be performed in the plotter interface.
"""
# unique index of chart
chart_index = 0
# list of waveform type names
waveform_types = [str(types.WaveformType.REAL.value),
str(types.WaveformType.IMAG.value),
str(types.WaveformType.OPAQUE.value)]
def __init__(self, parent: DrawerCanvas, name: Optional[str] = None):
"""Create new chart.
Args:
parent: `DrawerCanvas` that this `Chart` instance belongs to.
name: Name of this `Chart` instance.
"""
self.parent = parent
# data stored in this channel
self._collections = dict()
self._output_dataset = dict()
# channel metadata
self.index = self._cls_index()
self.name = name or ''
self._channels = set()
# vertical axis information
self.vmax = 0
self.vmin = 0
self.scale = 1.0
self._increment_cls_index()
def add_data(self, data: drawings.ElementaryData):
"""Add drawing to collections.
If the given object already exists in the collections,
this interface replaces the old object instead of adding new entry.
Args:
data: New drawing to add.
"""
self._collections[data.data_key] = data
def load_program(self,
program: pulse.Schedule,
chan: pulse.channels.Channel):
"""Load pulse schedule.
This method internally generates `ChannelEvents` to parse the program
for the specified pulse channel. This method is called once per channel assigned to this chart.
Args:
program: Pulse schedule to load.
chan: A pulse channels associated with this instance.
"""
chan_events = events.ChannelEvents.load_program(program, chan)
chan_events.set_config(dt=self.parent.device.dt,
init_frequency=self.parent.device.get_channel_frequency(chan),
init_phase=0)
# create objects associated with waveform
for gen in self.parent.generator['waveform']:
waveforms = chan_events.get_waveforms()
obj_generator = partial(gen,
formatter=self.parent.formatter,
device=self.parent.device)
drawing_items = [obj_generator(waveform) for waveform in waveforms]
for drawing_item in list(chain.from_iterable(drawing_items)):
self.add_data(drawing_item)
# create objects associated with frame change
for gen in self.parent.generator['frame']:
frames = chan_events.get_frame_changes()
obj_generator = partial(gen,
formatter=self.parent.formatter,
device=self.parent.device)
drawing_items = [obj_generator(frame) for frame in frames]
for drawing_item in list(chain.from_iterable(drawing_items)):
self.add_data(drawing_item)
self._channels.add(chan)
def update(self):
"""Update vertical data range and scaling factor of this chart.
These parameters are updated based on the current time range in the parent canvas.
"""
self._output_dataset.clear()
self.vmax = 0
self.vmin = 0
# waveform
for key, data in self._collections.items():
if data.data_type not in Chart.waveform_types:
continue
# truncate, assume no abstract coordinate in waveform sample
trunc_x, trunc_y = self._truncate_data(data)
# no available data points
if trunc_x.size == 0 or trunc_y.size == 0:
continue
# update y range
scale = min(self.parent.chan_scales.get(chan, 1.0) for chan in data.channels)
self.vmax = max(scale * np.max(trunc_y), self.vmax)
self.vmin = min(scale * np.min(trunc_y), self.vmin)
# generate new data
new_data = deepcopy(data)
new_data.xvals = trunc_x
new_data.yvals = trunc_y
self._output_dataset[key] = new_data
# calculate chart level scaling factor
if self.parent.formatter['control.auto_chart_scaling']:
max_val = max(abs(self.vmax),
abs(self.vmin),
self.parent.formatter['general.vertical_resolution'])
self.scale = min(1.0 / max_val, self.parent.formatter['general.max_scale'])
else:
self.scale = 1.0
# update vertical range with scaling and limitation
self.vmax = max(self.scale * self.vmax,
self.parent.formatter['channel_scaling.pos_spacing'])
self.vmin = min(self.scale * self.vmin,
self.parent.formatter['channel_scaling.neg_spacing'])
# other data
for key, data in self._collections.items():
if data.data_type in Chart.waveform_types:
continue
# truncate
trunc_x, trunc_y = self._truncate_data(data)
# no available data points
if trunc_x.size == 0 or trunc_y.size == 0:
continue
# generate new data
new_data = deepcopy(data)
new_data.xvals = trunc_x
new_data.yvals = trunc_y
self._output_dataset[key] = new_data
@property
def is_active(self) -> bool:
"""Check if there is any active waveform data in this entry.
Returns:
Return `True` if there is any visible waveform in this chart.
"""
for data in self._output_dataset.values():
if data.data_type in Chart.waveform_types and self._check_visible(data):
return True
return False
@property
def collections(self) -> Iterator[Tuple[str, drawings.ElementaryData]]:
"""Return currently active entries from drawing data collection.
Each object is yielded with a unique name that can be used as a key by an object handler.
When the horizontal coordinate contains an `AbstractCoordinate`,
the value is substituted by the current time range preference.
"""
for name, data in self._output_dataset.items():
# prepare unique name
unique_id = 'chart{ind:d}_{key}'.format(ind=self.index, key=name)
if self._check_visible(data):
yield unique_id, data
@property
def channels(self) -> List[pulse.channels.Channel]:
"""Return a list of channels associated with this chart.
Returns:
List of channels associated with this chart.
"""
return list(self._channels)
def _truncate_data(self,
data: drawings.ElementaryData) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to truncate drawings according to time breaks.
# TODO: move this function to common module to support axis break for timeline.
Args:
data: Drawing object to truncate.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
xvals = self._bind_coordinate(data.xvals)
yvals = self._bind_coordinate(data.yvals)
if isinstance(data, drawings.BoxData):
# truncate box data. these objects don't require interpolation at axis breaks.
return self._truncate_boxes(xvals, yvals)
elif data.data_type in [types.LabelType.PULSE_NAME, types.LabelType.OPAQUE_BOXTEXT]:
# truncate pulse labels. these objects are not removed by truncation.
return self._truncate_pulse_labels(xvals, yvals)
else:
# other objects
return self._truncate_vectors(xvals, yvals)
def _truncate_pulse_labels(self,
xvals: np.ndarray,
yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to remove text according to time breaks.
Args:
xvals: Time points.
yvals: Data points.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
xpos = xvals[0]
t0, t1 = self.parent.time_range
if xpos < t0 or xpos > t1:
return np.array([]), np.array([])
offset_accumulation = 0
for tl, tr in self.parent.time_breaks:
if xpos < tl:
return np.array([xpos - offset_accumulation]), yvals
if tl < xpos < tr:
return np.array([tl - offset_accumulation]), yvals
else:
offset_accumulation += tr - tl
return np.array([xpos - offset_accumulation]), yvals
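    # Worked example for the loop above (illustrative numbers, not from the
    # original source): with time_breaks = [(10, 30)] a label at xpos = 45 is
    # not left of the break start and not inside the break, so the 20 removed
    # samples accumulate and the label is drawn at 45 - 20 = 25 on the
    # truncated time axis.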
def _truncate_boxes(self,
xvals: np.ndarray,
yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to clip box object according to time breaks.
Args:
xvals: Time points.
yvals: Data points.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
x0, x1 = xvals
t0, t1 = self.parent.time_range
if x1 < t0 or x0 > t1:
# out of drawing range
return np.array([]), np.array([])
# clip outside
x0 = max(t0, x0)
x1 = min(t1, x1)
offset_accumulate = 0
for tl, tr in self.parent.time_breaks:
tl -= offset_accumulate
tr -= offset_accumulate
#
# truncate, there are 5 patterns wrt the relative position of truncation and xvals
#
if x1 < tl:
break
if tl < x0 and tr > x1:
# case 1: all data points are truncated
# : +-----+ :
# : |/////| :
# -----:---+-----+---:-----
# l 0 1 r
return np.array([]), np.array([])
elif tl < x1 < tr:
# case 2: x0 < tl, right side is truncated
# +---:-----+ :
# | ://///| :
# -----+---:-----+---:-----
# 0 l 1 r
x1 = tl
elif tl < x0 < tr:
# case 3: x1 > tr, left side is truncated
# : +-----:---+
# : |/////: |
# -----:---+-----:---+-----
# l 0 r 1
<filename>tests/tools/copy_table_to_blackhole_table_test.py
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
import mock
import pytest
from data_pipeline.schematizer_clientlib.models.refresh import RefreshStatus
from data_pipeline.tools.copy_table_to_blackhole_table import FullRefreshRunner
from data_pipeline.tools.copy_table_to_blackhole_table import TopologyFile
# TODO(justinc|DATAPIPE-710): These tests are a little overly complicated and
# should be refactored. In particular - it would be nice to simplify this so
# that only public methods are tested. Mocking just enough to see what SQL
# queries are executed with fixed data should be enough.
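# Hedged sketch of the simpler style suggested in the TODO above: exercise a
# public method and assert only on the SQL it produces. The flags and the
# expected query mirror the fixtures and test_build_select below; the helper
# itself is illustrative and not part of the original suite.
def _example_public_method_sql_check():
    with mock.patch(
        "data_pipeline.tools.copy_table_to_blackhole_table.load_default_config"
    ):
        batch = FullRefreshRunner()
        batch.process_commandline_options([
            '--dry-run',
            '--table-name=test_db',
            '--primary-key=id',
            '--database=yelp',
        ])
        batch._init_global_state()
    # Assert on the generated SQL instead of on private helpers.
    assert batch.build_select('COUNT(*)') == 'SELECT COUNT(*) FROM test_db'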
class TestFullRefreshRunner(object):
@pytest.fixture
def base_path(self):
return "data_pipeline.tools.copy_table_to_blackhole_table"
@pytest.fixture
def topology_path(self):
return "/nail/srv/configs/topology.yaml"
@pytest.fixture
def cluster(self):
return "test_cluster"
@pytest.fixture
def database_name(self):
return "yelp"
@pytest.fixture
def table_name(self):
return "test_db"
@pytest.fixture
def temp_name(self, table_name):
return "{name}_data_pipeline_refresh".format(name=table_name)
@pytest.fixture
def show_table_query(self, table_name):
return 'SHOW CREATE TABLE {0}'.format(table_name)
@pytest.fixture
def fake_query(self):
return 'SELECT * FROM faketable'
@pytest.fixture
def fake_original_table(self):
return (
'CREATE TABLE test_db('
'PersonID int,'
'LastName varchar(255),'
'FirstName varchar(255),'
'Address varchar(255),'
'City varchar(255))'
'ENGINE=InnoDB'
)
@pytest.fixture
def fake_new_table(self):
return (
'CREATE TABLE IF NOT EXISTS test_db_data_pipeline_refresh('
'PersonID int,'
'LastName varchar(255),'
'FirstName varchar(255),'
'Address varchar(255),'
'City varchar(255))'
'ENGINE=BLACKHOLE'
)
@pytest.fixture
def refresh_params(self, cluster, table_name, database_name):
return {
'refresh_id': 1,
'cluster': cluster,
'database': database_name,
'config_path': 'test_config.yaml',
'table_name': table_name,
'offset': 0,
'batch_size': 200,
'primary': 'id',
'where_clause': None,
'dry_run': True,
}
@pytest.yield_fixture
def mock_get_schematizer(self, base_path):
with mock.patch(base_path + '.get_schematizer'):
yield
@pytest.yield_fixture
def mock_load_config(self, base_path):
with mock.patch(base_path + '.load_default_config'):
yield
@pytest.yield_fixture
def refresh_batch(
self,
cluster,
table_name,
topology_path,
mock_load_config,
database_name
):
batch = FullRefreshRunner()
batch.process_commandline_options([
'--dry-run',
'--table-name={0}'.format(table_name),
'--primary-key=id',
'--cluster={0}'.format(cluster),
'--topology-path={0}'.format(topology_path),
'--database={0}'.format(database_name)
])
batch._init_global_state()
yield batch
@pytest.yield_fixture
def refresh_batch_custom_where(self, table_name, mock_load_config, database_name):
batch = FullRefreshRunner()
batch.process_commandline_options([
'--dry-run',
'--table-name={0}'.format(table_name),
'--primary-key=id',
'--where={0}'.format("country='CA'"),
'--database={0}'.format(database_name)
])
batch._init_global_state()
yield batch
@pytest.yield_fixture
def managed_refresh_batch(
self,
table_name,
cluster,
mock_load_config,
database_name,
mock_get_schematizer
):
batch = FullRefreshRunner()
batch.process_commandline_options([
'--dry-run',
'--table-name={}'.format(table_name),
'--database={}'.format(database_name),
'--cluster={}'.format(cluster),
'--offset=0',
'--batch-size=200',
'--primary-key=id',
'--refresh-id=1'
])
batch._init_global_state()
yield batch
@pytest.yield_fixture
def _read(self, refresh_batch):
with mock.patch.object(
refresh_batch,
'read_session',
autospec=True
) as mock_read_session:
yield mock_read_session
@pytest.yield_fixture
def _write(self, refresh_batch):
with mock.patch.object(
refresh_batch,
'write_session',
autospec=True
) as mock_write_session:
yield mock_write_session
@pytest.yield_fixture
def read_session(self, refresh_batch, _read):
with refresh_batch.read_session() as read_session:
yield read_session
@pytest.yield_fixture
def write_session(self, refresh_batch, _write):
with refresh_batch.write_session() as write_session:
yield write_session
@pytest.yield_fixture
def _managed_write(self, managed_refresh_batch):
with mock.patch.object(
managed_refresh_batch,
'write_session',
autospec=True
) as mock_write_session:
yield mock_write_session
@pytest.yield_fixture
def managed_write_session(self, managed_refresh_batch, _managed_write):
with managed_refresh_batch.write_session() as write_session:
yield write_session
@pytest.yield_fixture
def _rw_conn(self, refresh_batch):
with mock.patch.object(
refresh_batch,
'rw_conn'
) as mock_rw_conn:
yield mock_rw_conn
@pytest.yield_fixture
def rw_conn(self, _rw_conn):
with _rw_conn() as conn:
yield conn
@pytest.yield_fixture
def _ro_conn(self, refresh_batch):
with mock.patch.object(
refresh_batch,
'ro_conn'
) as mock_ro_conn:
yield mock_ro_conn
@pytest.yield_fixture
def ro_conn(self, _ro_conn):
with _ro_conn() as conn:
yield conn
@pytest.yield_fixture
def sessions(
self,
refresh_batch,
_read,
_write,
read_session,
write_session
):
yield
@pytest.yield_fixture
def mock_unlock_tables(self):
with mock.patch.object(
FullRefreshRunner,
'unlock_tables'
) as mock_unlock_tables:
yield mock_unlock_tables
@pytest.yield_fixture
def mock_throttle_throughput(self):
with mock.patch.object(
FullRefreshRunner,
'throttle_throughput'
) as mock_throttle_throughput:
yield mock_throttle_throughput
@pytest.yield_fixture
def mock_row_count(self):
with mock.patch.object(
FullRefreshRunner,
'total_row_count',
new_callable=mock.PropertyMock
) as mock_row_count:
yield mock_row_count
@pytest.yield_fixture
def mock_execute(self):
with mock.patch.object(
FullRefreshRunner,
'_execute_query'
) as mock_execute:
yield mock_execute
@pytest.yield_fixture
def mock_create_table_src(self):
with mock.patch.object(
FullRefreshRunner,
'create_table_from_src_table'
) as mock_create:
yield mock_create
def test_setup_connections(
self,
base_path,
refresh_batch,
cluster
):
with mock.patch(
base_path + '.TransactionManager'
) as mock_manager, mock.patch.object(
refresh_batch,
'get_connection_set_from_cluster'
) as mock_get_conn:
refresh_batch.setup_connections()
mock_manager.assert_called_once_with(
cluster_name=cluster,
ro_replica_name=cluster,
rw_replica_name=cluster,
connection_set_getter=mock_get_conn
)
def test_initial_action_with_db(
self,
database_name,
refresh_batch,
mock_execute,
mock_unlock_tables,
mock_throttle_throughput,
mock_create_table_src,
sessions,
write_session
):
with mock.patch.object(
refresh_batch,
'_wait_for_replication'
) as wait_for_replication_mock:
refresh_batch.initial_action()
assert wait_for_replication_mock.call_count == 1
mock_execute.assert_called_once_with(
write_session,
"USE {0}".format(database_name),
)
mock_create_table_src.assert_called_once_with(write_session)
assert write_session.rollback.call_count == 1
def test_initial_action_managed_refresh(
self,
database_name,
managed_refresh_batch,
mock_execute,
mock_unlock_tables,
mock_throttle_throughput,
mock_create_table_src,
sessions,
managed_write_session
):
with mock.patch.object(
managed_refresh_batch,
'_wait_for_replication'
) as wait_for_replication_mock:
managed_refresh_batch.initial_action()
assert wait_for_replication_mock.call_count == 1
update_refresh = managed_refresh_batch.schematizer.update_refresh
update_refresh.assert_called_once_with(
1,
RefreshStatus.IN_PROGRESS,
0
)
mock_execute.assert_called_once_with(
managed_write_session,
"USE {0}".format(database_name),
)
mock_create_table_src.assert_called_once_with(managed_write_session)
assert managed_write_session.rollback.call_count == 1
def test_final_action(
self,
refresh_batch,
temp_name,
write_session,
mock_execute,
database_name
):
refresh_batch.final_action()
calls = [
mock.call(write_session, 'USE {0}'.format(database_name)),
mock.call(
write_session,
'DROP TABLE IF EXISTS {0}'.format(temp_name),
),
]
mock_execute.assert_has_calls(calls)
def test_after_row_processing(self, refresh_batch, write_session, rw_conn):
with mock.patch.object(
refresh_batch,
'throttle_to_replication'
) as throttle_mock, mock.patch.object(
refresh_batch,
'_wait_for_throughput',
return_value=None
) as mock_wait:
# count can be anything since self.avg_throughput_cap is set to None
refresh_batch.unlock_tables(write_session)
refresh_batch.throttle_throughput(count=0)
assert write_session.rollback.call_count == 1
write_session.execute.assert_called_once_with('UNLOCK TABLES')
assert write_session.commit.call_count == 1
throttle_mock.assert_called_once_with(rw_conn)
assert mock_wait.call_count == 1
assert refresh_batch.avg_rows_per_second_cap == refresh_batch.DEFAULT_AVG_ROWS_PER_SECOND_CAP
def test_build_select(
self,
refresh_batch,
refresh_batch_custom_where,
table_name
):
offset = 0
batch_size = refresh_batch_custom_where.options.batch_size
expected_where_query = (
'SELECT * FROM {origin} WHERE {clause} ORDER BY id '
'LIMIT {offset}, {batch_size}'
).format(
origin=table_name,
clause="country='CA'",
offset=offset,
batch_size=batch_size
)
where_query = refresh_batch_custom_where.build_select(
'*',
'id',
offset,
batch_size
)
expected_count_query = 'SELECT COUNT(*) FROM {origin}'.format(
origin=table_name
)
count_query = refresh_batch.build_select('COUNT(*)')
assert expected_where_query == where_query
assert expected_count_query == count_query
def test_create_table_from_src_table(
self,
refresh_batch,
fake_original_table,
fake_new_table,
show_table_query,
write_session
):
with mock.patch.object(
refresh_batch,
'_execute_query',
autospec=True
) as mock_execute:
mock_execute.return_value.fetchone.return_value = [
'test_db',
fake_original_table
]
refresh_batch.create_table_from_src_table(write_session)
calls = [
mock.call(write_session, show_table_query),
mock.call(write_session, fake_new_table)
]
mock_execute.assert_has_calls(calls, any_order=True)
def test_execute_query(self, refresh_batch, write_session, fake_query):
refresh_batch._execute_query(write_session, fake_query)
write_session.execute.assert_called_once_with(fake_query)
def insert_batch_test_helper(
self,
batch,
session,
temp_name,
table_name,
mock_execute,
clause
):
min_pk = 1
max_pk = 2
batch.insert_batch(session, min_pk, max_pk)
# The queries below are formatted this way to match the whitespace of
# the original query that was being called for the purposes of
# assertion
if clause is not None:
query = """INSERT INTO {0}
SELECT * FROM {1}
WHERE id>{2} AND id<={3}
AND {4}""".format(
temp_name,
table_name,
min_pk,
max_pk,
clause,
batch.options.batch_size
)
else:
query = """INSERT INTO {0}
SELECT * FROM {1}
WHERE id>{2} AND id<={3}
""".format(
temp_name,
table_name,
min_pk,
max_pk,
)
mock_execute.assert_called_once_with(session, query)
def test_insert_batch_default_where(
self,
refresh_batch,
mock_execute,
table_name,
temp_name,
write_session
):
clause = None
self.insert_batch_test_helper(
refresh_batch,
write_session,
temp_name,
table_name,
mock_execute,
clause
)
def test_insert_batch_custom_where(
self,
refresh_batch_custom_where,
temp_name,
table_name,
mock_execute,
write_session,
):
clause = "country='CA'"
self.insert_batch_test_helper(
refresh_batch_custom_where,
write_session,
temp_name,
table_name,
mock_execute,
clause
)
@pytest.fixture(params=[
{
'min_ret_val': 1,
'max_ret_val': 31,
'row_side_eff': [10, 10, 10, 1],
'row_count': 31,
'calls': [(0, 10), (10, 20), (20, 30), (30, 40)]
},
{
'min_ret_val': 1,
'max_ret_val': 30,
'row_side_eff': [10, 10, 10],
'row_count': 30,
'calls': [(0, 10), (10, 20), (20, 30)]
},
{
'min_ret_val': 1,
'max_ret_val': 29,
'row_side_eff': [10, 10, 9],
'row_count': 29,
'calls': [(0, 10), (10, 20), (20, 30)]
},
{
'min_ret_val': 1,
'max_ret_val': 5,
'row_side_eff': [5],
'row_count': 5,
'calls': [(0, 10)]
}
]
)
def inputs(self, request):
return request.param
def test_process_table(
self,
refresh_batch,
mock_row_count,
mock_unlock_tables,
mock_throttle_throughput,
sessions,
write_session,
read_session,
inputs
):
with mock.patch.object(
refresh_batch,
'insert_batch'
) as mock_insert, mock.patch.object(
refresh_batch,
'count_inserted'
) as mock_rows, mock.patch.object(
refresh_batch,
'_get_min_primary_key'
) as mock_min_pk, mock.patch.object(
refresh_batch,
'_get_max_primary_key'
) as mock_max_pk, mock.patch.object(
refresh_batch,
'batch_size',
10
):
mock_min_pk.return_value = inputs['min_ret_val']
mock_max_pk.return_value = inputs['max_ret_val']
mock_rows.side_effect = inputs['row_side_eff']
mock_row_count.return_value = inputs['row_count']
refresh_batch.process_table()
call_inputs = inputs['calls']
calls = []
for x, y in call_inputs:
calls.append(
mock.call(write_session, x, y)
)
mock_insert.assert_has_calls(calls)
def test_process_table_managed_refresh(
self,
managed_refresh_batch,
mock_row_count,
mock_unlock_tables,
mock_throttle_throughput,
sessions,
managed_write_session
):
with mock.patch.object(
managed_refresh_batch,
'insert_batch'
) as mock_insert, mock.patch.object(
managed_refresh_batch,
'count_inserted'
) as mock_rows, mock.patch.object(
managed_refresh_batch,
'_get_min_primary_key'
) as mock_min_pk, mock.patch.object(
managed_refresh_batch,
'_get_max_primary_key'
) as mock_max_pk, mock.patch.object(
managed_refresh_batch,
'batch_size',
10
):
mock_min_pk.return_value = 1
mock_max_pk.return_value = 25
mock_rows.side_effect = [10, 10, 5]
mock_row_count.return_value = 25
managed_refresh_batch.process_table()
calls = [
mock.call(managed_write_session, 0, 10),
mock.call(managed_write_session, 10, 20),
mock.call(managed_write_session, 20, 30)
]
mock_insert.assert_has_calls(calls)
managed_refresh_batch.schematizer.update_refresh.assert_called_once_with(
refresh_id=1,
status=RefreshStatus.SUCCESS,
offset=0
)
def test_get_connection_set_from_cluster(
self,
refresh_batch,
base_path,
database_name,
topology_path
):
mock_topology = mock.Mock()
mock_conn_defs = mock.Mock()
mock_conn_config = mock.Mock()
with mock.patch.object(
TopologyFile,
'new_from_file',
return_value=mock_topology
) as mock_tf, mock.patch.object(
refresh_batch,
'_get_conn_defs',
return_value=mock_conn_defs
) as mock_get_defs, mock.patch(
base_path + '.ConnectionSetConfig',
return_value=mock_conn_config
) as mock_init_config, mock.patch(
base_path + '.ConnectionSet'
) as mock_conn:
refresh_batch.get_connection_set_from_cluster(database_name)
mock_tf.assert_called_once_with(topology_path)
mock_get_defs.assert_called_once_with(
mock_topology,
database_name
)
mock_init_config.assert_called_once_with(
database_name,
mock_conn_defs,
read_only=False
)
mock_conn.from_config.assert_called_once_with(mock_conn_config)
def test_throughput_wait(
self,
refresh_batch
):
with mock.patch.object(
refresh_batch,
'avg_rows_per_second_cap',
1000
), mock.patch.object(
time,
'time',
return_value=0.1 # Simulating that it took 100 milliseconds to
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import yaml
from collections import defaultdict
from google.api import metric_pb2 as ga_metric
from google.api_core import protobuf_helpers
from google.cloud import monitoring_v3, asset_v1
from google.protobuf import field_mask_pb2
from googleapiclient import discovery
# Organization ID containing the projects to be monitored
ORGANIZATION_ID = os.environ.get("ORGANIZATION_ID")
# List of projects from which the function will get quota information
MONITORED_PROJECTS_LIST = os.environ.get("MONITORED_PROJECTS_LIST").split(",")
# Project where the metrics and dashboards will be created
MONITORING_PROJECT_ID = os.environ.get("MONITORING_PROJECT_ID")
MONITORING_PROJECT_LINK = f"projects/{MONITORING_PROJECT_ID}"
service = discovery.build('compute', 'v1')
# Existing GCP metrics per network
GCE_INSTANCES_LIMIT_METRIC = "compute.googleapis.com/quota/instances_per_vpc_network/limit"
L4_FORWARDING_RULES_LIMIT_METRIC = "compute.googleapis.com/quota/internal_lb_forwarding_rules_per_vpc_network/limit"
L7_FORWARDING_RULES_LIMIT_METRIC = "compute.googleapis.com/quota/internal_managed_forwarding_rules_per_vpc_network/limit"
SUBNET_RANGES_LIMIT_METRIC = "compute.googleapis.com/quota/subnet_ranges_per_vpc_network/limit"
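# Illustrative environment configuration (example values only, not taken from
# any real deployment); these are the three variables read above:
#   ORGANIZATION_ID="123456789012"
#   MONITORED_PROJECTS_LIST="project-a,project-b"
#   MONITORING_PROJECT_ID="monitoring-project"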
def main(event, context):
'''
Cloud Function Entry point, called by the scheduler.
Parameters:
event: Not used for now (Pubsub trigger)
context: Not used for now (Pubsub trigger)
Returns:
'Function executed successfully'
'''
metrics_dict, limits_dict = create_metrics()
# Asset inventory queries
gce_instance_dict = get_gce_instance_dict()
l4_forwarding_rules_dict = get_l4_forwarding_rules_dict()
l7_forwarding_rules_dict = get_l7_forwarding_rules_dict()
subnet_range_dict = get_subnet_ranges_dict()
# Per Network metrics
get_gce_instances_data(metrics_dict, gce_instance_dict,
limits_dict['number_of_instances_limit'])
get_l4_forwarding_rules_data(
metrics_dict, l4_forwarding_rules_dict,
limits_dict['internal_forwarding_rules_l4_limit'])
get_vpc_peering_data(metrics_dict,
limits_dict['number_of_vpc_peerings_limit'])
get_dynamic_routes(metrics_dict,
limits_dict['dynamic_routes_per_network_limit'])
# Per VPC peering group metrics
get_pgg_data(
metrics_dict["metrics_per_peering_group"]["instance_per_peering_group"],
gce_instance_dict, GCE_INSTANCES_LIMIT_METRIC,
limits_dict['number_of_instances_ppg_limit'])
get_pgg_data(
metrics_dict["metrics_per_peering_group"]
["l4_forwarding_rules_per_peering_group"], l4_forwarding_rules_dict,
L4_FORWARDING_RULES_LIMIT_METRIC,
limits_dict['internal_forwarding_rules_l4_ppg_limit'])
get_pgg_data(
metrics_dict["metrics_per_peering_group"]
["l7_forwarding_rules_per_peering_group"], l7_forwarding_rules_dict,
L7_FORWARDING_RULES_LIMIT_METRIC,
limits_dict['internal_forwarding_rules_l7_ppg_limit'])
get_pgg_data(
metrics_dict["metrics_per_peering_group"]
["subnet_ranges_per_peering_group"], subnet_range_dict,
SUBNET_RANGES_LIMIT_METRIC,
limits_dict['number_of_subnet_IP_ranges_ppg_limit'])
return 'Function executed successfully'
def get_l4_forwarding_rules_dict():
'''
Calls the Asset Inventory API to get all L4 Forwarding Rules under the GCP organization.
Parameters:
None
Returns:
forwarding_rules_dict (dictionary of string: int): Keys are the network links and values are the number of Forwarding Rules per network.
'''
client = asset_v1.AssetServiceClient()
read_mask = field_mask_pb2.FieldMask()
read_mask.FromJsonString('name,versionedResources')
forwarding_rules_dict = defaultdict(int)
response = client.search_all_resources(
request={
"scope": f"organizations/{ORGANIZATION_ID}",
"asset_types": ["compute.googleapis.com/ForwardingRule"],
"read_mask": read_mask,
})
for resource in response:
internal = False
network_link = ""
for versioned in resource.versioned_resources:
for field_name, field_value in versioned.resource.items():
if field_name == "loadBalancingScheme":
internal = (field_value == "INTERNAL")
if field_name == "network":
network_link = field_value
if internal:
forwarding_rules_dict[network_link] += 1
return forwarding_rules_dict
def get_l7_forwarding_rules_dict():
'''
Calls the Asset Inventory API to get all L7 Forwarding Rules under the GCP organization.
Parameters:
None
Returns:
forwarding_rules_dict (dictionary of string: int): Keys are the network links and values are the number of Forwarding Rules per network.
'''
client = asset_v1.AssetServiceClient()
read_mask = field_mask_pb2.FieldMask()
read_mask.FromJsonString('name,versionedResources')
forwarding_rules_dict = defaultdict(int)
response = client.search_all_resources(
request={
"scope": f"organizations/{ORGANIZATION_ID}",
"asset_types": ["compute.googleapis.com/ForwardingRule"],
"read_mask": read_mask,
})
for resource in response:
internal = False
network_link = ""
for versioned in resource.versioned_resources:
for field_name, field_value in versioned.resource.items():
if field_name == "loadBalancingScheme":
internal = (field_value == "INTERNAL_MANAGED")
if field_name == "network":
network_link = field_value
if internal:
forwarding_rules_dict[network_link] += 1
return forwarding_rules_dict
def get_gce_instance_dict():
'''
Calls the Asset Inventory API to get all GCE instances under the GCP organization.
Parameters:
None
Returns:
gce_instance_dict (dictionary of string: int): Keys are the network links and values are the number of GCE Instances per network.
'''
client = asset_v1.AssetServiceClient()
gce_instance_dict = defaultdict(int)
response = client.search_all_resources(
request={
"scope": f"organizations/{ORGANIZATION_ID}",
"asset_types": ["compute.googleapis.com/Instance"],
})
for resource in response:
for field_name, field_value in resource.additional_attributes.items():
if field_name == "networkInterfaceNetworks":
for network in field_value:
gce_instance_dict[network] += 1
return gce_instance_dict
def get_subnet_ranges_dict():
'''
Calls the Asset Inventory API to get all Subnet ranges under the GCP organization.
Parameters:
None
Returns:
subnet_range_dict (dictionary of string: int): Keys are the network links and values are the number of subnet ranges per network.
'''
client = asset_v1.AssetServiceClient()
subnet_range_dict = defaultdict(int)
read_mask = field_mask_pb2.FieldMask()
read_mask.FromJsonString('name,versionedResources')
response = client.search_all_resources(
request={
"scope": f"organizations/{ORGANIZATION_ID}",
"asset_types": ["compute.googleapis.com/Subnetwork"],
"read_mask": read_mask,
})
for resource in response:
ranges = 0
network_link = None
for versioned in resource.versioned_resources:
for field_name, field_value in versioned.resource.items():
if field_name == "network":
network_link = field_value
ranges += 1
if field_name == "secondaryIpRanges":
for range in field_value:
ranges += 1
subnet_range_dict[network_link] += ranges
return subnet_range_dict
def create_client():
'''
Creates the monitoring API client that will be used to create, read and update custom metrics.
Parameters:
None
Returns:
client (monitoring_v3.MetricServiceClient): Monitoring API client
interval (monitoring_v3.TimeInterval): Interval for the metric data points (24 hours)
'''
try:
client = monitoring_v3.MetricServiceClient()
now = time.time()
seconds = int(now)
nanos = int((now - seconds) * 10**9)
interval = monitoring_v3.TimeInterval({
"end_time": {
"seconds": seconds,
"nanos": nanos
},
"start_time": {
"seconds": (seconds - 86400),
"nanos": nanos
},
})
return (client, interval)
except Exception as e:
raise Exception("Error occurred creating the client: {}".format(e))
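# Hedged sketch (not part of the original function set): how the pair returned
# by create_client() is typically consumed. list_time_series is the standard
# Cloud Monitoring API call; the metric_type filter value is whatever quota
# metric the caller wants to read.
def _example_read_quota_timeseries(metric_type):
    client, interval = create_client()
    results = client.list_time_series(
        request={
            "name": MONITORING_PROJECT_LINK,
            "filter": f'metric.type = "{metric_type}"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
        })
    return list(results)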
def create_metrics():
'''
Creates all Cloud Monitoring custom metrics based on the metrics.yaml file
Parameters:
None
Returns:
metrics_dict (dictionary of dictionary of string: string): metrics names and descriptions
limits_dict (dictionary of dictionary of string: int): limits_dict[metric_name]: dict[network_name] = limit_value
'''
client = monitoring_v3.MetricServiceClient()
existing_metrics = []
for desc in client.list_metric_descriptors(name=MONITORING_PROJECT_LINK):
existing_metrics.append(desc.type)
limits_dict = {}
with open("metrics.yaml", 'r') as stream:
try:
metrics_dict = yaml.safe_load(stream)
for metric_list in metrics_dict.values():
for metric in metric_list.values():
for sub_metric_key, sub_metric in metric.items():
metric_link = f"custom.googleapis.com/{sub_metric['name']}"
# If the metric doesn't exist yet, then we create it
if metric_link not in existing_metrics:
create_metric(sub_metric["name"], sub_metric["description"])
# Parse limits (both default values and network specific ones)
if sub_metric_key == "limit":
limits_dict_for_metric = {}
for network_link, limit_value in sub_metric["values"].items():
limits_dict_for_metric[network_link] = limit_value
limits_dict[sub_metric["name"]] = limits_dict_for_metric
return metrics_dict, limits_dict
except yaml.YAMLError as exc:
print(exc)
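# Illustrative shape of metrics.yaml as parsed by create_metrics() above. The
# nesting and the 'number_of_instances_limit' name are inferred from the
# lookups in this module; the other names and the default limit are example
# values only.
#
# metrics_per_network:
#   instance_per_network:
#     usage:
#       name: number_of_instances_usage
#       description: Number of instances per VPC network - usage
#     limit:
#       name: number_of_instances_limit
#       description: Number of instances per VPC network - limit
#       values:
#         default_value: 15000
#     utilization:
#       name: number_of_instances_utilization
#       description: Number of instances per VPC network - utilization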
def create_metric(metric_name, description):
'''
Creates a Cloud Monitoring metric based on the given parameters if the metric does not already exist
Parameters:
metric_name (string): Name of the metric to be created
description (string): Description of the metric to be created
Returns:
None
'''
client = monitoring_v3.MetricServiceClient()
descriptor = ga_metric.MetricDescriptor()
descriptor.type = f"custom.googleapis.com/{metric_name}"
descriptor.metric_kind = ga_metric.MetricDescriptor.MetricKind.GAUGE
descriptor.value_type = ga_metric.MetricDescriptor.ValueType.DOUBLE
descriptor.description = description
descriptor = client.create_metric_descriptor(name=MONITORING_PROJECT_LINK,
metric_descriptor=descriptor)
print("Created {}.".format(descriptor.name))
def get_gce_instances_data(metrics_dict, gce_instance_dict, limit_dict):
'''
Gets the data for GCE instances per VPC Network and writes it to the metric defined in instance_metric.
Parameters:
metrics_dict (dictionary of dictionary of string: string): metrics names and descriptions
gce_instance_dict (dictionary of string: int): Keys are the network links and values are the number of GCE Instances per network.
limit_dict (dictionary of string:int): Dictionary with the network link as key and the limit as value
Returns:
gce_instance_dict
'''
# Existing GCP Monitoring metrics for GCE instances
metric_instances_limit = "compute.googleapis.com/quota/instances_per_vpc_network/limit"
for project in MONITORED_PROJECTS_LIST:
network_dict = get_networks(project)
current_quota_limit = get_quota_current_limit(f"projects/{project}",
metric_instances_limit)
current_quota_limit_view = customize_quota_view(current_quota_limit)
for net in network_dict:
set_limits(net, current_quota_limit_view, limit_dict)
usage = 0
if net['self_link'] in gce_instance_dict:
usage = gce_instance_dict[net['self_link']]
write_data_to_metric(
project, usage, metrics_dict["metrics_per_network"]
["instance_per_network"]["usage"]["name"], net['network_name'])
write_data_to_metric(
project, net['limit'], metrics_dict["metrics_per_network"]
["instance_per_network"]["limit"]["name"], net['network_name'])
write_data_to_metric(
project, usage / net['limit'], metrics_dict["metrics_per_network"]
["instance_per_network"]["utilization"]["name"], net['network_name'])
print(f"Wrote number of instances to metric for projects/{project}")
def get_vpc_peering_data(metrics_dict, limit_dict):
'''
Gets the data for VPC peerings (active or not) and writes it to the metric defined (vpc_peering_active_metric and vpc_peering_metric).
Parameters:
metrics_dict (dictionary of dictionary of string: string): metrics names and descriptions
limit_dict (dictionary of string:int): Dictionary with the network link as key and the limit as value
Returns:
None
'''
for project in MONITORED_PROJECTS_LIST:
active_vpc_peerings, vpc_peerings = gather_vpc_peerings_data(
project, limit_dict)
for peering in active_vpc_peerings:
write_data_to_metric(
project, peering['active_peerings'],
metrics_dict["metrics_per_network"]["vpc_peering_active_per_network"]
["usage"]["name"], peering['network_name'])
write_data_to_metric(
project, peering['network_limit'], metrics_dict["metrics_per_network"]
["vpc_peering_active_per_network"]["limit"]["name"],
peering['network_name'])
write_data_to_metric(
project, peering['active_peerings'] / peering['network_limit'],
metrics_dict["metrics_per_network"]["vpc_peering_active_per_network"]
["utilization"]["name"], peering['network_name'])
print("Wrote number of active VPC peerings to custom metric for project:",
project)
for peering in vpc_peerings:
write_data_to_metric(
project, peering['peerings'], metrics_dict["metrics_per_network"]
["vpc_peering_per_network"]["usage"]["name"], peering['network_name'])
write_data_to_metric(
project, peering['network_limit'], metrics_dict["metrics_per_network"]
["vpc_peering_per_network"]["limit"]["name"], peering['network_name'])
write_data_to_metric(
project, peering['peerings'] / peering['network_limit'],
metrics_dict["metrics_per_network"]["vpc_peering_per_network"]
["utilization"]["name"], peering['network_name'])
print("Wrote number of VPC peerings to custom metric for project:", project)
def gather_vpc_peerings_data(project_id, limit_dict):
'''
Gets the data for all VPC peerings (active or not) in project_id and writes it to the metric defined in vpc_peering_active_metric and vpc_peering_metric.
Parameters:
project_id (string): We will take all VPCs in that project_id
<filename>L10_packet_dir/For_Job_01_L10_Game_fool_card_desc.py
#!python3.7
#coding: utf-8
# It is recommended to configure the font (File-Settings-Editor-Font) - Consolas, size:13, Line spacing:0.8
# <NAME>, for the homework assignment of lesson No. 9 (Python)
class Game_fool_card_desc:
#------------------------------------------------------------------------------------------------------------------
Label='\033[47m -=СТОЛ КАРТОЧНОЙ ИГРЫ "В ДУРАКА"=- \033[0m'
for i in range(70): print('\033[31m*', end='')
print(Label, end='')
for i in range(70): print('\033[31m*', end='')
print('*\033[0m')
#------------------------------------------------------------------------------------------------------------------
def __init__(self, name1, gmr1c, gmr1s, name2, gmr2c, gmr2s, dsc_c, dsc_s, cld_n):
self.card=[' ','§',' 6',' 7',' 8',' 9','10',' V',' D',' K',' T']
self.suit=[' ','§','♣','\033[31m♦\033[0m','\033[31m♥\033[0m','♠']
#self.suit=[' ','§','♣','♦','♥','♠']
self.cld = cld_n
self.g1c = gmr1c
self.g1s = gmr1s
self.dc = dsc_c
self.ds = dsc_s
self.g2c = gmr2c
self.g2s = gmr2s
self.fg1=0
self.fg2=0
self.gmr_name1=name1
self.gmr_name2=name2
def blk_chk(self):
if len(self.g1c)==len(self.g1s) and len(self.g2c)==len(self.g2s) and len(self.dc)==len(self.ds):
self.chk_error = 0
else: self.chk_error = 1
self.action1=' '
self.action2=' '
if self.dc[5]!=0:
self.action1='ходит!'
self.action2='отбивает!'
if self.ds[5]!=0:
self.action1='отбивает!'
self.action2='ходит!'
if len(self.gmr_name1)>9:self.gmr_name1=self.gmr_name1[0:9]
if len(self.gmr_name2)>9:self.gmr_name2=self.gmr_name2[0:9]
def Dsc_img(self):
# 3 rows of cards on the table: 1 - top (face-down cards), 2 - middle (deck, trump, battle, discard pile), 3 - bottom (face-up cards)
for d in range(3):
if d==0:
self.nc=len(self.g1c) # number of player 1 cards to display len(tcard)=len(tsuit)
self.tc=self.g1c # num_card:[0,1,2,,,10] list of hierarchical card names of player 1
self.ts=self.g1s # num_suit:[0,1,2,,,10] list of card suits of player 1
if d==1:
# enabling the card-overlap mode - the "card battle"
if (self.dc[5]>=1 and self.ds[6]>=1) or (self.ds[5]>=1 and self.dc[6]>=1):
del self.ds[0]
self.ds.insert(0,1) # flag that enables the mode -- correct!
#self.ds.insert(0,0) # flag that enables the mode --------------- not correct!
self.nc=len(self.dc) # number of computer cards to display len(tcard)=len(tsuit)
self.tc=self.dc # num_card:[0,1,2,,,10] list of hierarchical names of the trump, deck image, battle and discard pile
self.ts=self.ds # num_suit:[0,1,2,,,5] list of suits of the trump, deck image, battle and discard pile
if self.cld==0:
del self.dc[1]
del self.ds[1]
self.dc.insert(1,0)
self.ds.insert(1,0)
if d==2:
self.nc=len(self.g2c) # number of player 2 cards to display len(tcard)=len(tsuit)
self.tc=self.g2c # num_card:[0,1,2,,,10] list of hierarchical card names of player 2
self.ts=self.g2s # num_suit:[0,1,2,,,5] list of card suits of player 2
self.img=[]
self.line_img=[]
self.line_img_buffer=''
l=0
# here n is the ordinal number, including empty card-image positions, within one row
for n in range(0, self.nc):
if d==1:
self.fg1=0
self.fg2=0
self.cn =0
self.sn =0
if n>4 and n<(len(self.dc)-2):
if self.dc[n]!=0:
self.fg1=1
self.nc=len(self.g1c) # number of player 1 cards to display len(tcard)=len(tsuit)
self.tc=self.g1c # num_card:[0,1,2,,,10]lst of hierarchical card names of player 1
self.ts=self.g1s # num_suit:[0,1,2,,,10]lst of card suits of player 1
self.cn = self.tc[self.dc[n]] # extract the pointer to the ordinal number in the list of names
self.sn = self.ts[self.dc[n]] # extract the pointer to the ordinal number in the list of suits
c = self.card[self.cn] # get the card name symbol from the text list of names
s = self.suit[self.sn] # get the card suit symbol from the text list of suits
if self.ds[n]!=0:
self.fg2=1
self.nc=len(self.g2c) # number of player 2 cards to display len(tcard)=len(tsuit)
self.tc=self.g2c # num_card:[0,1,2,,,10]lst of hierarchical card names of player 2
self.ts=self.g2s # num_suit:[0,1,2,,,5]lst of card suits of player 2
self.cn = self.tc[self.ds[n]] # extract the pointer to the ordinal number in the list of names
self.sn = self.ts[self.ds[n]] # extract the pointer to the ordinal number in the list of suits
c = self.card[self.cn] # get the card name symbol from the text list of names
s = self.suit[self.sn] # get the card suit symbol from the text list of suits
if (n<=4 or n>=(len(self.dc)-2)):
self.nc=len(self.dc) # number of computer cards to display len(tcard)=len(tsuit)
self.tc=self.dc # num_card:[0,1,,,10]lst of hierarchical names of the trump, deck image, battle and discard pile
self.ts=self.ds # num_suit:[0,1,,,5]lst of suits of the trump, deck image, battle and discard pile
self.cn = self.tc[n] # extract the pointer to the ordinal number in the list of names
self.sn = self.ts[n] # extract the pointer to the ordinal number in the list of suits
if self.cn==1: self.sn=1
c = self.card[self.cn] # get the card name symbol from the text list of names
s = self.suit[self.sn] # get the card suit symbol from the text list of suits
if d!=1:
self.cn = self.tc[n] # extract the pointer to the ordinal number in the list of names
self.sn = self.ts[n] # extract the pointer to the ordinal number in the list of suits
if self.cn==1: self.sn=1
c = self.card[self.cn] # get the card name symbol from the text list of names
s = self.suit[self.sn] # get the card suit symbol from the text list of suits
# all free card positions on the table
if self.cn==0:
for i in range(9):
if n==0 and (i==3 or i==4 or i==5):
if d==0:
if i==3: self.img.insert(l+3,'\033[46m1 ИГРОК: \033[0m')
if i==4: self.img.insert(l+4,f'\033[46m{self.gmr_name1:<9}\033[0m')
if i==5: self.img.insert(l+5,f'\033[46m{self.action1:<9}\033[0m')
elif d==2:
if i==3: self.img.insert(l+3,'\033[42m2 ИГРОК: \033[0m')
if i==4: self.img.insert(l+4,f'\033[42m{self.gmr_name2:<9}\033[0m')
if i==5: self.img.insert(l+5,f'\033[42m{self.action2:<9}\033[0m')
else: self.img.insert(l+i,' ')
elif d==1 and n==1 and i==8:
self.img.insert(l+8,'\033[47m"Колода" \033[0m')
elif d==1 and n==(len(self.ds)-1) and i==8:
self.img.insert(l+8,'\033[47m "Бито" \033[0m')
else:
self.img.insert(l+8,' ')
l+=9
# all face-down cards
if self.cn==1:
if n==1:
self.img.insert(l+0,f'\033[47m {(self.cld):>3} \033[0m')
else:
self.img.insert(l+0,' ')
self.img.insert(l+1,'┌───────┐')
if n==3 and d==1: s = (self.suit[self.ds[3]])
for i in range(2,7): self.img.insert(l+i,f'|{s+s+s+s+s+s+s}|')
self.img.insert(l+7,'└───────┘')
if n==1:
self.img.insert(l+8,'\033[47m"Колода" \033[0m')
elif n==(len(self.ds)-1):
self.img.insert(l+8,'\033[47m "Бито" \033[0m')
else:
self.img.insert(l+8,' ')
l+=9
# all face-up cards
#if self.cn in {2,3,4,5,6,7,8,9,10}:
# all separately shown face-up cards
if self.cn in {2,3,4,5,6,7,8,9,10} and (d!=1 \
or (d==1 and (n in {5,7,9,11,13,15}) and ((self.fg1==1 and self.ds[n+1]==0) or (self.fg2==1 and self.dc[n+1]==0))) \
or (d==1 and (n<=4 or n>=(len(self.dc)-2)))):
sh=0
if d==0:
self.fight=self.dc[5:len(self.dc)-2]
if d==2:
self.fight=self.ds[5:len(self.ds)-2]
if (d==0 or d==2) and n in self.fight[0:]:
sh=1
if sh==0:
if d==1 and n!=3:
if self.fg1==1:
self.img.insert(l+0,f' \033[46m({(c + s)}\033[46m)\033[0m ')
else:
self.img.insert(l+0,' ')
else:
self.img.insert(l+0,' ')
self.img.insert(l+1,'┌───────┐')
self.img.insert(l+2,f'| {s} |')
self.img.insert(l+3,f'|{c} |')
self.img.insert(l+4,'| |')
self.img.insert(l+5,f'| {c}|')
self.img.insert(l+6,f'| {s}|')
self.img.insert(l+7,'└───────┘')
if d==1 and n==3: self.img.insert(l+8,f'\033[47m"Козырь{s}"\033[0m')
else:
if d==1 and n>4 and n<(len(self.dc)-2):
if self.fg2==1: self.img.insert(l+8,f' \033[42m({(c + s)}\033[42m)\033[0m ')
if self.fg2==0: self.img.insert(l+8,' ')
elif d==0: self.img.insert(l+8,f' \033[46m({(c + s)}\033[46m)\033[0m ')
else:
self.img.insert(l+8,f' \033[42m({(c + s)}\033[42m)\033[0m ')
if sh==1:
if d==1 and n!=3:
if self.fg1==1:
self.img.insert(l+0,f' \033[46m({(c + s)}\033[46m)\033[0m ')
else:
self.img.insert(l+0,' ')
else:
self.img.insert(l+0,' ')
self.img.insert(l+1,'\033[36m┌───────┐\033[0m')
self.img.insert(l+2,f'\033[36m| {s} \033[36m|\033[0m')
self.img.insert(l+3,f'\033[36m|{c} \033[36m|\033[0m')
self.img.insert(l+4,'\033[36m| |\033[0m')
self.img.insert(l+5,f'\033[36m| {c}\033[36m|\033[0m')
self.img.insert(l+6,f'\033[36m| {s}\033[36m|\033[0m')
self.img.insert(l+7,'\033[36m└───────┘\033[0m')
if d==1 and n==3: self.img.insert(l+8,f'\033[47m"Козырь!{s}"\033[0m')
else:
if d==1 and n>4 and n<(len(self.dc)-2):
if self.fg2==1: self.img.insert(l+8,f' \033[42m({(c + s)}\033[42m)\033[0m ')
if self.fg2==0: self.img.insert(l+8,' ')
elif d==0: self.img.insert(l+8,f' \033[46m({(c + s)}\033[46m)\033[0m ')
else:
self.img.insert(l+8,f' \033[42m({(c + s)}\033[42m)\033[0m ')
l+=9
# overlapping of face-up cards - the "card battle"
if d==1 and self.ds[0]==1 and n>4 and n<(len(self.dc)-2) and self.cn in {2,3,4,5,6,7,8,9,10}:
if n in {5,7,9,11,13,15}:
if (self.fg1==1 and self.ds[n+1]!=0) or (self.fg2==1 and self.dc[n+1]!=0):
self.img.insert(l+0,' ┌───')
self.img.insert(l+1,f' | {s} ')
self.img.insert(l+2,f' |{c}┌')
self.img.insert(l+3,' | |')
self.img.insert(l+4,' | |')
self.img.insert(l+5,' | |')
self.img.insert(l+6,' └──|')
self.img.insert(l+7,' |')
self.img.insert(l+8,' └')
if n in {6,8,10,12,14,16}:
if (self.fg1==1 and self.ds[n-1]!=0) or (self.fg2==1 and self.dc[n-1]!=0):
self.img.insert(l+0,'────┐ ')
self.img.insert(l+1,' | ')
self.img.insert(l+2,'───────┐ ')
self.img.insert(l+3,f'{c} | ')
self.img.insert(l+4,f' {s} | ')
self.img.insert(l+5,' | ')
self.img.insert(l+6,f' {s}| ')
self.img.insert(l+7,f' {c}| ')
self.img.insert(l+8,'───────┘ ')
l+=9
#---------------------------------------
# Horizontal screen-output block
for i in range(9):
self.line_img_buffer=''
for n in range(0, len(self.img), 9):
self.line_img_buffer+= self.img[i+n]
self.line_img.insert(i, self.line_img_buffer)
if self.chk_error == 0:
for i in range(9):
print(self.line_img[i])
#--------------------------------------------------------------------------------------------------------------
for i in range(179): print('\033[31m*', end='')
print('*\033[0m')
#--------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------------
# Example of calling the "Game_fool_card_desc" class
'''
name1='' #──── name of player 1
name2='' #──── name of player 2
# id num_card: [' ','§','6','7','8','9','10','V','D','K','T'] (''-'Т')
# id num_card: [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10] (0 - 10)
# id num_suit: [' ','§','♣','♦','♥','♠'] (''-'♠')
# id num_suit: [ 0 , 1 , 2 , 3 , 4 , 5 ] (0 - 5 )
gmr1c = [0,0,0,0,0,0,0,0,0 the number of occupied ] #──── card rank, player 1
gmr1s = [0,0,0,0,0,0,0,0,0 card slots may increase] #──── card suit, player 1
# | | |
# | └─└── free card slot (1 through 3, the list of player 1 card slots)
# |
# └─ 'the "1 ИГРОК:" caption'
# └─ ' "name"'
# └─ ' "action"'
# [0,,,5] and [len-2,len-1] id num_card: [' ','§','6','7','8','9','10','V','D','K','T'] (''-'Т')
# [0,,,5] and [len-2,len-1] id num_card: [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10] (0 - 10)
# [0,,,5] and [len-2,len-1] id num_suit: [' ','§','♣','♦','♥','♠'] (''-'♠')
# [0,,,5] and [len-2,len-1] id num_suit: [ 0 , 1 , 2 , 3 , 4 , 5 ] (0 - 5 )
# [5,,,len-3] flag_gamer_1 and address id num_card in gmr1c: ['0','1',,,,len(gmr1c)]
# [5,,,len-3] flag_gamer_2 and address id num_card in gmr2c: ['0','1',,,,len(gmr2c)]
# ┌─ "Deck" position
# | ┌─ "Trump" position (5 through 15, all odd numbers, when attacking; dsc_c addresses of player 1's card list)
# | | (6 through 16, all even numbers, when defending; dsc_c addresses of player 1's card list)
# | | presence of moves by the players
# | | ┌───────────────────────┐ ┌─ "Discard pile" position
dsc_c = [0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] #──── card rank, for positions 1 through 4
"""
This module provides the DataCoordinator class for reading data from
atomistic codes and organizing data into DataFrames using Pandas.
"""
import os
import re
import io as pio
import fnmatch
from typing import List, Dict, Tuple
import numpy as np
import pandas as pd
import tables
import ase
from ase import io as ase_io
from ase import db as ase_db
from ase.db import core as db_core
from ase.io import lammpsrun as ase_lammpsrun
from ase.calculators import singlepoint
from ase.calculators import calculator as ase_calc
from uf3.util import subsample
class DataCoordinator:
"""
Handler class for reading data from atomistic codes and organizing data
into DataFrames using Pandas.
"""
def __init__(self,
atoms_key='geometry',
energy_key='energy',
force_key='force',
size_key='size',
overwrite=False
):
"""
Args:
atoms_key (str): column name for geometries, default "geometry".
Modify when parsed geometries are part of a larger pipeline.
energy_key (str): column name for energies, default "energy".
force_key (str): identifier for forces, default "force".
size_key (str): column name for number of atoms per geometry,
default "size".
overwrite (bool): Allow overwriting of existing DataFrame
with matching key when loading.
"""
self.atoms_key = atoms_key
self.energy_key = energy_key
self.force_key = force_key
self.size_key = size_key
self.overwrite = overwrite
self.data = {}
self.keys = []
@staticmethod
def from_config(config):
"""Instantiate from configuration dictionary"""
keys = ['atoms_key',
'energy_key',
'force_key',
'size_key',
'overwrite']
config = {k: v for k, v in config.items() if k in keys}
return DataCoordinator(**config)
def __repr__(self):
summary = ["DataCoordinator:",
]
if len(self.keys) == 0:
summary.append(f" Datasets: None")
else:
summary.append(f" Datasets: {len(self.keys)} ({self.keys})")
return "\n".join(summary)
def __str__(self):
return self.__repr__()
def consolidate(self, remove_duplicates=True, keep='first'):
"""Wrapper for io.concat_dataframes()"""
dataframes = [self.data[k] for k in self.keys]
df = concat_dataframes(dataframes,
remove_duplicates=remove_duplicates,
keep=keep)
return df
def load_dataframe(self, dataframe, prefix=None):
"""Load existing pd.DataFrame"""
for key in [self.atoms_key, self.energy_key, self.size_key]:
if key not in dataframe.columns:
raise RuntimeError("Missing \"{}\" column.".format(key))
name_0 = dataframe.index[0] # existing prefix takes priority
if isinstance(name_0, str):
if '_' in name_0:
prefix = '_'.join(name_0.split('_')[:-1])
if prefix is None: # no prefix provided
prefix = len(self.data)
pattern = '{}_{{}}'.format(prefix)
dataframe = dataframe.rename(pattern.format)
if prefix in self.data:
print('Data already exists with prefix "{}".'.format(prefix),
end=' ')
if self.overwrite is True:
print('Overwriting...')
self.data[prefix] = dataframe
else:
print('Skipping...')
return
else:
self.data[prefix] = dataframe
self.keys.append(prefix)
def dataframe_from_lists(self,
geometries,
prefix=None,
energies=None,
forces=None,
load=True,
**kwargs):
"""Wrapper for io.prepare_dataframe_from_lists()"""
if prefix is None:
prefix = len(self.data)
df = prepare_dataframe_from_lists(geometries,
prefix,
energies=energies,
forces=forces,
atoms_key=self.atoms_key,
energy_key=self.energy_key,
force_key=self.force_key,
size_key=self.size_key,
**kwargs)
if load:
self.load_dataframe(df, prefix=prefix)
else:
return df
def dataframe_from_trajectory(self,
filename,
prefix=None,
load=True,
energy_key=None,
force_key=None,
**kwargs):
"""Wrapper for io.parse_trajectory()"""
if prefix is None:
prefix = len(self.data)
if energy_key is None:
energy_key = self.energy_key
if force_key is None:
force_key = self.force_key
df = parse_trajectory(filename,
prefix=prefix,
atoms_key=self.atoms_key,
energy_key=energy_key,
force_key=force_key,
size_key=self.size_key,
**kwargs)
if energy_key != self.energy_key:
df.rename(columns={energy_key: self.energy_key},
inplace=True)
if load:
self.load_dataframe(df, prefix=prefix)
else:
return df
dataframe_from_xyz = dataframe_from_trajectory
dataframe_from_vasprun = dataframe_from_trajectory
def dataframe_from_lammps_run(self,
path,
lammps_aliases,
prefix=None,
column_subs={"PotEng": "energy"},
log_fname="log.lammps",
dump_fname="dump.lammpstrj",
load=True,
**kwargs):
"""Wrapper for io.parse_lammps_outputs()"""
if prefix is None:
prefix = len(self.data)
df = parse_lammps_outputs(path,
lammps_aliases,
prefix=prefix,
column_subs=column_subs,
log_fname=log_fname,
dump_fname=dump_fname,
atoms_key=self.atoms_key,
size_key=self.size_key,
**kwargs)
if load:
self.load_dataframe(df, prefix=prefix)
else:
return df
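# Hedged usage sketch (not part of the original module). The file name is an
# assumption for illustration; any trajectory format readable by ase.io.read
# (e.g. extended xyz or vasprun.xml) works the same way.
def _example_coordinator_roundtrip():
    coordinator = DataCoordinator()
    coordinator.dataframe_from_trajectory("training.xyz", prefix="bulk")
    # Additional files can be loaded under their own prefixes before merging.
    return coordinator.consolidate(remove_duplicates=True, keep="first")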
def concat_dataframes(dataframes: List[pd.DataFrame],
remove_duplicates: bool = True,
keep: str = 'first'
) -> pd.DataFrame:
"""
Concatenate list of dataframes with optional removal of duplicate keys.
Args:
dataframes (list): list of DataFrames to merge
remove_duplicates (bool): remove duplicates.
keep (str, bool): 'first', 'last', or False.
Returns:
df (pandas.DataFrame)
"""
df = pd.concat(dataframes)
duplicate_array = df.index.duplicated(keep=keep)
if np.any(duplicate_array):
print('Duplicates keys found:', np.sum(duplicate_array))
if remove_duplicates:
print('Removing with keep=', keep)
df = df[~duplicate_array]
print('Unique keys:', len(df))
return df
def prepare_dataframe_from_lists(geometries: List[ase.Atoms],
prefix: str = None,
energies: List[float] = None,
forces: List[np.ndarray] = None,
atoms_key: str = 'geometry',
energy_key: str = 'energy',
force_key: str = 'force',
size_key: str = 'size',
copy: bool = True
) -> pd.DataFrame:
"""
Convenience function for arranging data into pandas DataFrame
with expected column names. Extracts energies and forces from
provided ase.Atoms objects if unspecified. If specified,
adds/overwrites energies and/or forces in ase.Atoms objects
via info and arrays attributes. Length of geometries, energies,
and forces must match.
Args:
geometries (list): list of ase.Atoms configurations.
prefix (str): prefix for DataFrame index.
e.g. "bulk" -> [bulk_0, bulk_1, bulk_2, ...]
energies (list or np.ndarray): vector of energy for each geometry.
forces (list): list of n x 3 arrays of forces for each geometry.
atoms_key (str): column name for geometries, default "geometry".
Modify when parsed geometries are part of a larger pipeline.
energy_key (str): column name for energies, default "energy".
force_key (str): identifier for forces, default "force".
size_key (str): column name for number of atoms per geometry,
default "size".
copy (bool): copy geometries, energies and forces before modification.
Returns:
df (pandas.DataFrame): standard dataframe with columns
[atoms_key, energy_key, fx, fy, fz]
"""
if copy:
geometries = [geom.copy() for geom in geometries]
geometries = update_geometries_from_calc(geometries,
energy_key=energy_key,
force_key=force_key)
# generate dataframe
default_columns = [atoms_key, energy_key, 'fx', 'fy', 'fz']
df = pd.DataFrame(columns=default_columns)
df[atoms_key] = geometries
scalar_keys = ()
array_keys = ()
if energies is not None:
if copy:
energies = np.array(energies)
df[energy_key] = energies
scalar_keys = ('energy',) # add energies to ase.Atoms objects
if forces is not None:
if copy:
forces = [array.copy() for array in forces]
df['fx'] = [np.array(array)[:, 0] for array in forces]
df['fy'] = [np.array(array)[:, 1] for array in forces]
df['fz'] = [np.array(array)[:, 2] for array in forces]
array_keys = ('fx', 'fy', 'fz') # add forces to ase.Atoms objects
# If values are provided, overwrite attributes for consistency.
update_geometries_from_dataframe(df,
scalar_keys=scalar_keys,
array_keys=array_keys)
# Otherwise, pull energies and forces from objects.
scalar_keys = ()
array_keys = ()
if energies is None:
scalar_keys = ('energy',) # get energies from ase.Atoms objects
if forces is None:
array_keys = ('fx', 'fy', 'fz') # get forces from ase.Atoms objects
df = update_dataframe_from_geometries(df,
atoms_key=atoms_key,
size_key=size_key,
scalar_keys=scalar_keys,
array_keys=array_keys,
inplace=True)
if prefix is not None:
pattern = '{}_{{}}'.format(prefix)
df = df.rename(pattern.format)
return df
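# Hedged sketch of prepare_dataframe_from_lists() with hand-built inputs. The
# one-geometry system, its energy, and the zero forces are made-up example
# values used only to show the expected shapes.
def _example_dataframe_from_lists():
    geometries = [ase.Atoms("H2", positions=[[0, 0, 0], [0, 0, 0.74]])]
    energies = [-1.17]              # one energy per geometry
    forces = [np.zeros((2, 3))]     # one (n_atoms, 3) array per geometry
    return prepare_dataframe_from_lists(geometries,
                                        prefix="example",
                                        energies=energies,
                                        forces=forces)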
def parse_trajectory(fname: str,
scalar_keys: List[str] = (),
array_keys: List[str] = (),
prefix: str = None,
atoms_key: str = "geometry",
energy_key: str = "energy",
force_key: str = 'force',
size_key: str = 'size'):
"""
Wrapper for ase.io.read, which is compatible with
many file formats (notably VASP's vasprun.xml and extended xyz).
If available, force information is written to each ase.Atoms object's
arrays attribute as separate "fx", "fy", and "fz" entries.
Args:
fname (str): filename.
scalar_keys (list): list of ase.Atoms.info keys to query and
include as a DataFrame column. e.g. ["config_type"].
array_keys (list): list of ase.Atoms.arrays keys to query and
include as a DataFrame column. e.g. ["charge"].
prefix (str): prefix for DataFrame index.
e.g. "bulk" -> [bulk_0, bulk_1, bulk_2, ...]
atoms_key (str): column name for geometries, default "geometry".
Modify when parsed geometries are part of a larger pipeline.
energy_key (str): column name for energies, default "energy".
force_key (str): identifier for forces, default "force".
size_key (str): column name for number of atoms per geometry,
default "size".
Returns:
df (pandas.DataFrame): standard dataframe with columns
[atoms_key, energy_key, fx, fy, fz]
"""
extension = os.path.splitext(fname)[-1]
kws = ['mysql', 'postgres', 'mariadb']
if extension in ['.db', '.json'] or any([kw in fname for kw in kws]):
# handle differently to retrieve attached names instead of reindexing
geometries = read_database(fname, index=slice(None, None))
new_index = [geom.info.get('row_name', None) for geom in geometries]
index_errors = new_index.count(None)
if index_errors > 1:
new_index = None
else: # flexible read function for a variety of filetypes
geometries = ase_io.read(fname, index=slice(None, None))
new_index = None
if not isinstance(geometries, list):
geometries = [geometries]
geometries = update_geometries_from_calc(geometries,
energy_key=energy_key,
force_key=force_key)
# create DataFrame
default_columns = [atoms_key, energy_key, 'fx', 'fy', 'fz']
scalar_keys = [p for p in scalar_keys
if p not in default_columns]
array_keys = [p for p in array_keys
if p not in default_columns]
columns = default_columns + scalar_keys + array_keys
df = pd.DataFrame(columns=columns)
df[atoms_key] = geometries
# object-dataframe consistency
scalar_keys = scalar_keys + [energy_key]
array_keys = array_keys + ['fx', 'fy', 'fz']
df = update_dataframe_from_geometries(df,
atoms_key=atoms_key,
size_key=size_key,
scalar_keys=scalar_keys,
array_keys=array_keys,
inplace=True)
if new_index is not None:
df.index = new_index
print('Loaded index from file:', fname)
elif prefix is not None:
pattern = '{}_{{}}'.format(prefix)
df = df.rename(pattern.format)
return df
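# Illustrative usage sketch (added for clarity; not part of the original module).
# The filename "training.xyz" and the prefix "bulk" are hypothetical; "config_type"
# is the example info key from the docstring above, and the remaining column names
# follow the documented defaults.
def _example_parse_trajectory():
    df = parse_trajectory('training.xyz',
                          scalar_keys=['config_type'],
                          prefix='bulk')
    # One row per ase.Atoms object, indexed bulk_0, bulk_1, ..., with per-structure
    # energies and per-atom force components in the fx, fy, fz columns.
    return df[['energy', 'size']].describe()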
def read_database(filename: str, index: bool = None, **kwargs):
"""
Read ase.db-type database file.
emoji_name[1:]
if emoji_name.endswith(":"):
emoji_name = emoji_name[:-1]
if nextcord.utils.get(client.emojis, name=emoji_name) != None:
emoji_list = [names.name for names in client.emojis if names.name == emoji_name]
le = len(emoji_list)
if le >= 2:
if number > le - 1:
number = le - 1
user = getattr(ctx, 'author', getattr(ctx, 'user', None))
emoji = [names for names in client.emojis if names.name == emoji_name][number]
webhook = await ctx.channel.create_webhook(name=user.name)
await webhook.send(emoji, username=user.name, avatar_url=safe_pfp(user))
await webhook.delete()
else:
await ctx.send(
embed=nextcord.Embed(
description="The emoji is not available",
color=nextcord.Color(value=re[8]),
)
)
@client.slash_command(name="svg2png", description="Convert SVG image to png format")
async def svg2png_slash(ctx, url):
req()
await ctx.response.defer()
img = svg2png(url)
await ctx.send(file=nextcord.File(BytesIO(img), "svg.png"))
@client.command(aliases=["cw"])
@commands.check(check_command)
async def clear_webhooks(ctx):
webhooks = await ctx.channel.webhooks()
print(webhooks)
for webhook in webhooks:
try:
if webhook.user is client.user:
await webhook.delete()
except Exception as e:
print(e)
await ctx.send(
embed=cembed(
title="Done",
description="Deleted all the webhooks by alfred",
color=re[8],
thumbnail=client.user.avatar.url
)
)
@client.slash_command(name="color",description="Change color theme", guild_ids= [822445271019421746])
async def color_slash(ctx, rgb_color=defa(default="")):
rgb_color = rgb_color.replace("(","").replace(")","").split(",")
if str(ctx.user.id) not in dev_users:
await ctx.send(
embed=cembed(
title="Woopsies",
description="This is a `developer-only` function",
color=discord.Color.red(),
thumbnail=client.user.avatar.url
)
)
return
if len(rgb_color)!=3:
await ctx.send(
embed=cembed(
title="Error",
description="You need RGB values, 3 values seperated with commas\nExample: `(128,128,128)`",
color=re[8],
footer="Give it another try",
thumbnail=client.user.avatar.url
)
)
return
re[8] = discord.Color.from_rgb(*[int(i) for i in rgb_color]).value
if re[8]>16777215: re[8] = 16777215
embed=cembed(
title="Done",
description=f"Color set as {nextcord.Color(re[8]).to_rgb()}\n`{re[8]}`",
color=re[8],
thumbnail = client.user.avatar.url,
footer=f"Executed by {ctx.user.name} in {ctx.channel.name}"
)
await ctx.send(embed=embed)
await client.get_channel(dev_channel).send(embed=embed)
@client.command()
@commands.check(check_command)
async def load(ctx):
print("Load", str(getattr(ctx, 'author', getattr(ctx, 'user', None))))
req()
try:
cpu_per = str(int(psutil.cpu_percent()))
cpu_freq = str(int(psutil.cpu_freq().current))
ram = str(psutil.virtual_memory().percent)
swap = str(psutil.swap_memory().percent)
usage = f"""
CPU Percentage: {cpu_per}%
CPU Frequency : {cpu_freq}Mhz
RAM usage: {ram}%
Swap usage: {swap}%
Nextcord: {nextcord.__version__}
"""
embed = nextcord.Embed(
title="Current load",
description='\n'.join([i.strip() for i in usage.split('\n')]),
color=nextcord.Color(value=re[8]),
)
embed.set_thumbnail(url=client.user.avatar.url)
except Exception as e:
channel = client.get_channel(dev_channel)
embed = nextcord.Embed(
title="Load failed",
description=str(e),
color=nextcord.Color(value=re[8]),
)
embed.set_thumbnail(url=client.user.avatar.url)
await ctx.channel.send(embed=embed)
@client.slash_command(name="pr", description="Prints what you ask it to print")
async def pr_slash(ctx, text):
req()
await ctx.send(text)
@client.command(aliases=["c"])
@commands.check(check_command)
async def cover_up(ctx):
await ctx.message.delete()
await asyncio.sleep(0.5)
mess = await ctx.send(nextcord.utils.get(client.emojis, name="enrique"))
await mess.delete()
@client.command()
@commands.check(check_command)
async def remove_dev(ctx, member: nextcord.Member):
print(member)
global dev_users
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in ["432801163126243328","803855283821871154","723539849969270894"]:
dev_users.remove(str(member.id))
await ctx.send(member.mention + " is no longer a dev")
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission Denied",
description="Dude! You are not Alvin",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
@commands.check(check_command)
async def add_dev(ctx, member: nextcord.Member):
print(member)
print("Add dev", str(getattr(ctx, 'author', getattr(ctx, 'user', None))))
global dev_users
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in dev_users:
dev_users.add(str(member.id))
await ctx.send(member.mention + " is a dev now")
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission Denied",
description="Dude! you are not a dev",
color=nextcord.Color(value=re[8]),
)
)
@client.command()
@commands.check(check_command)
async def dev_op(ctx):
if str(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) in list(dev_users):
print("devop", str(getattr(ctx, 'author', getattr(ctx, 'user', None))))
channel = client.get_channel(dev_channel)
await devop_mtext(client, channel, re[8])
else:
await ctx.send(embed=cembed(title="Permission Denied",description="You cannot use the devop function, only a developer can",color=re[8]))
@client.command()
@commands.check(check_command)
async def docs(ctx, name):
try:
if name.find("(") == -1:
await ctx.send(
embed=nextcord.Embed(
title="Docs",
description=str(eval(name + ".__doc__")),
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permissions Denied",
description="Functions are not allowed. Try without the brackets to get the information",
color=nextcord.Color(value=re[8]),
)
)
except Exception as e:
await ctx.send(
embed=nextcord.Embed(
title="Error", description=str(e), color=nextcord.Color(value=re[8])
)
)
@client.slash_command(name="snipe", description="Get the last few deleted messages")
async def snipe_slash(inter, number = 50):
req()
await snipe(inter, number)
@client.command()
@commands.check(check_command)
async def snipe(ctx, number=50):
number = int(number)
if (
getattr(ctx, 'author', getattr(ctx, 'user', None)).guild_permissions.administrator
or ctx.guild.id not in config['snipe']
):
message = deleted_message.get(ctx.channel.id,[("Empty","Nothing to snipe here")])[::-1]
count=0
embeds = []
s = ""
for i in message[:number]:
count+=1
if len(i) < 3:
s+="**" + i[0] + ":**\n" + i[1]+"\n\n"
if count%5==0 or count == len(message) or count == number:
embed=cembed(
title="Snipe",
description=s,
color=re[8],
thumbnail=safe_pfp(ctx.guild)
)
embeds.append(embed)
s=""
else:
await ctx.send("**" + i[0] + ":**",embed=i[1])
if len(embeds)>0:
await assets.pa(ctx, embeds, start_from=0, restricted=True)
else:
await ctx.send(
embed=cembed(
title="Permissions Denied",
description="Sorry guys, only admins can snipe now",
color=re[8],
thumbnail=getattr(client.user.avatar,'url'),
)
)
@client.event
async def on_bulk_message_delete(messages):
for i in messages:
await on_message_delete(i)
@client.event
async def on_message_delete(message):
if not message.channel.id in list(deleted_message.keys()):
deleted_message[message.channel.id] = []
if len(message.embeds) <= 0:
if not message.author.bot:
deleted_message[message.channel.id].append(
(str(message.author), message.content)
)
else:
if not message.author.bot:
deleted_message[message.channel.id].append(
(str(message.author), message.embeds[0], True)
)
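# Note on the structure kept above (added for clarity): deleted_message maps a
# channel id to a list of snipe entries, newest last, e.g.
#   deleted_message[1234] == [("user#0001", "hello"), ("user#0002", <Embed>, True)]
# Two-tuples hold plain-text messages, three-tuples (with a trailing True) hold
# embeds, which is why the snipe command above branches on len(i) < 3.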
@client.event
async def on_member_join(member):
print(member.guild)
print("Join")
if member.guild.id in config['welcome']:
channel = client.get_channel(config['welcome'][member.guild.id])
else: return
await channel.send(member.mention + " is here")
embed = nextcord.Embed(
title="Welcome!!!",
description="Welcome to the server, " + member.name,
color=nextcord.Color(value=re[8]),
)
embed.set_thumbnail(
url="https://image.shutterstock.com/image-vector/welcome-poster-spectrum-brush-strokes-260nw-1146069941.jpg"
)
await channel.send(embed=embed)
if member.guild.id in config['security']:
audit_log = await member.guild.audit_logs(limit=10).flatten()
latest=audit_log[0]
if member.bot:
channel = client.get_channel(config['security'][member.guild.id])
if channel:
await channel.send(
embed=cembed(
title="Bot added",
description=f"{latest.target.mention} was added by {latest.user.mention}, please be careful while handling bots and try not to provide it with all the permissions as it can be dangerous",
color=re[8],
footer="Security alert by Alfred"
)
)
@client.event
async def on_member_remove(member):
print(member.guild)
if member.guild.id in config.get('welcome',[]):
channel = client.get_channel(config['welcome'][member.guild.id])
else: return
embed = cembed(
title="Bye!!!",
description="Hope you enjoyed your stay " + member.name,
color=nextcord.Color(value=re[8]),
thumbnail="https://thumbs.dreamstime.com/b/bye-bye-man-says-45256525.jpg"
)
await channel.send(member.mention + " is no longer here", embed=embed)
if member.guild.id in config['security']:
a = client.get_guild(member.guild.id)
audit_log = await a.audit_logs(limit=10).flatten()
latest = audit_log[0]
if latest.target == member:
channel = client.get_channel(config['security'][member.guild.id])
if latest.action == nextcord.AuditLogAction.ban:
await channel.send(
embed=cembed(
title=f"Banned",
description=f"{latest.user.mention} banned {latest.target.name}",
color=re[8],
footer="Security alert by Alfred",
thumbnail=member.guild.icon.url
)
)
elif latest.action == nextcord.AuditLogAction.kick:
await channel.send(
embed=cembed(
title=f"Kicked",
description=f"{latest.user.mention} kicked {latest.target.name}",
color=re[8],
footer="Security alert by Alfred",
thumbnail=member.guild.icon.url
)
)
@client.command()
@commands.check(check_command)
async def remove(ctx, n):
req()
mem = [names.id for names in ctx.guild.voice_client.channel.members] if ctx.guild.voice_client else []
if mem.count(getattr(ctx, 'author', getattr(ctx, 'user', None)).id) > 0:
if int(n) < len(queue_song[str(ctx.guild.id)]):
await ctx.send(
embed=nextcord.Embed(
title="Removed",
description=da1[queue_song[str(ctx.guild.id)][int(n)]],
color=nextcord.Color(value=re[8]),
)
)
if re[3][str(ctx.guild.id)]>int(n):re[3][str(ctx.guild.id)]-=1
del da1[queue_song[str(ctx.guild.id)][int(n)]]
queue_song[str(ctx.guild.id)].pop(int(n))
else:
await ctx.send(
embed=nextcord.Embed(
title="Not removed",
description="Only "
+ len(queue_song[str(ctx.guild.id)])
+ " song(s) in your queue",
color=nextcord.Color(value=re[8]),
)
)
else:
await ctx.send(
embed=nextcord.Embed(
title="Permission denied",
description="Join the voice channel to modify queue",
color=nextcord.Color(value=re[8]),
)
)
def repeat(ctx, voice):
req()
songs = queue_song.get(str(ctx.guild.id),[])
if len(songs) == 0: return
index = re[3].get(str(ctx.guild.id),0)
if len(songs)<index:
index = 0
re[3][str(ctx.guild.id)]=index
song = songs[index]
if not song in da1.keys():
aa = str(urllib.request.urlopen(song).read().decode())
starting = aa.find("<title>") + len("<title>")
ending = aa.find("</title>")
da1[song] = (
aa[starting:ending]
.replace("'", "'")
.replace(" - YouTube", "")
.replace("&", "&")
)
time.sleep(1)
if re[7].get(ctx.guild.id,-1) == 1 and not voice.is_playing():
re[3][str(ctx.guild.id)] += 1
if re[3][str(ctx.guild.id)] >= len(queue_song[str(ctx.guild.id)]):
re[3][str(ctx.guild.id)] = 0
if re[2].get(ctx.guild.id,-1) == 1 or re[7].get(ctx.guild.id,-1) == 1:
if not voice.is_playing():
URL = youtube_download(ctx, song)
voice.play(
nextcord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS),
after=lambda e: repeat(ctx, voice),
)
@client.command(aliases=["q"])
@commands.check(check_command)
@commands.cooldown(1,5,commands.BucketType.guild)
async def queue(ctx, *, name=""):
req()
st = ""
num = 0
try:
mem = [str(names) for names in ctx.guild.voice_client.channel.members]
except:
mem = []
if mem.count(str(getattr(ctx, 'author', getattr(ctx, 'user', None)))) > 0 and name != "":
if 'spotify' in name:
if 'playlist' in name:
await ctx.send('Enqueued the given Spotify playlist.')
try:
songs = await fetch_spotify_playlist(name, 500)
for song in songs:
try:
name = convert_to_url(song)
sear = "https://www.youtube.com/results?search_query=" + name
htm = await get_async(sear)
video = regex.findall(r"watch\?v=(\S{11})", htm)
url = "https://www.youtube.com/watch?v=" + video[0]
st = ""
num = 0
name_of_the_song = await get_name(url)
da1[url] = name_of_the_song
queue_song[str(ctx.guild.id)].append(url)
except Exception as e:
print(e)
break
except Exception as e:
print(e)
elif 'track' in name:
name = convert_to_url(name)
sear = "https://www.youtube.com/results?search_query=" + name
htm = await get_async(sear)
video = regex.findall(r"watch\?v=(\S{11})", htm)
url = "https://www.youtube.com/watch?v=" + video[0]
st = ""
num = 0
name_of_the_song = await get_name(url)
print(name_of_the_song, ":", url)
da1[url] = name_of_the_song
queue_song[str(ctx.guild.id)].append(url)
else:
name = convert_to_url(name)
sear = "https://www.youtube.com/results?search_query=" + name
htm = await get_async(sear)
video = regex.findall(r"watch\?v=(\S{11})", htm)
url = "https://www.youtube.com/watch?v=" + video[0]
st = ""
await ctx.send("Added to queue")
num = 0
name_of_the_song = await get_name(url)
print(name_of_the_song, ":", url)
da1[url] = name_of_the_song
queue_song[str(ctx.guild.id)].append(url)
for i in queue_song[str(ctx.guild.id)]:
if num >= len(queue_song[str(ctx.guild.id)]) - 10:
if not i in da1.keys():
da1[i] = await get_name(i)
st = st + str(num) + ". " + da1[i].replace(""", "'") + "\n"
num += 1
# st=st+str(num)+". "+da1[i]+"\n"
if st == "":
st = "_Empty_"
em = nextcord.Embed(
title="Queue", description=st, color=nextcord.Color(value=re[8])
)
mess = await ctx.send(embed=em)
if type(ctx) == nextcord.Interaction:
mess = await ctx.original_message()
await player_pages(mess)
elif name == "":
num = 0
st = ""
if len(queue_song[str(ctx.guild.id)]) < 30:
for i
To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_convert_to_still_frames(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input file to perform the operation on.
:param str file_url: Optional; URL of a video file being used for conversion. Use this option for files larger than 2GB.
:param int max_width: Optional; Maximum width of the output video, up to the original video width. Defaults to original video width.
:param int max_height: Optional; Maximum height of the output video, up to the original video height. Defaults to original video height.
:param float frames_per_second: Optional; How many video frames per second to be returned as PNG images. Minimum value is 0.1, maximum is 60. Default is 1 frame per second. Maximum of 2000 total frames.
:return: StillFramesResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.video_convert_to_still_frames_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.video_convert_to_still_frames_with_http_info(**kwargs) # noqa: E501
return data
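# Usage sketch (added for clarity, all in comments): the docstring's async pattern
# spelled out. It assumes the swagger-codegen companions generated alongside this
# module -- a Configuration whose api_key dict holds the 'Apikey' credential (see
# auth_settings below) and an ApiClient wrapping it -- plus a hypothetical local
# file "input.mp4". The class name VideoApi is a placeholder for this module's
# API class.
#
#   configuration = Configuration()
#   configuration.api_key['Apikey'] = 'YOUR-API-KEY'
#   api = VideoApi(ApiClient(configuration))
#   thread = api.video_convert_to_still_frames(input_file='input.mp4',
#                                               frames_per_second=0.5,
#                                               async_req=True)
#   result = thread.get()   # a StillFramesResult, as documented above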
def video_convert_to_still_frames_with_http_info(self, **kwargs): # noqa: E501
"""Convert Video to PNG Still Frames. # noqa: E501
Automatically detect video file format and convert it to an array of still frame PNG images. Supports many input video formats, including AVI, ASF, FLV, MP4, MPEG/MPG, Matroska/WEBM, 3G2, OGV, MKV, M4V and MOV. Uses 1 API call per 10 MB of file size. Also uses 1 API call per additional minute of processing time over 5 minutes, up to a maximum of 25 minutes total processing time. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_convert_to_still_frames_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input file to perform the operation on.
:param str file_url: Optional; URL of a video file being used for conversion. Use this option for files larger than 2GB.
:param int max_width: Optional; Maximum width of the output video, up to the original video width. Defaults to original video width.
:param int max_height: Optional; Maximum height of the output video, up to the original video height. Defaults to original video height.
:param float frames_per_second: Optional; How many video frames per second to be returned as PNG images. Minimum value is 0.1, maximum is 60. Default is 1 frame per second. Maximum of 2000 total frames.
:return: StillFramesResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file', 'file_url', 'max_width', 'max_height', 'frames_per_second'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method video_convert_to_still_frames" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'file_url' in params:
header_params['fileUrl'] = params['file_url'] # noqa: E501
if 'max_width' in params:
header_params['maxWidth'] = params['max_width'] # noqa: E501
if 'max_height' in params:
header_params['maxHeight'] = params['max_height'] # noqa: E501
if 'frames_per_second' in params:
header_params['framesPerSecond'] = params['frames_per_second'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/video/convert/to/still-frames', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StillFramesResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def video_convert_to_webm(self, **kwargs): # noqa: E501
"""Convert Video to WEBM format. # noqa: E501
Automatically detect video file format and convert it to WEBM format. Supports many input video formats, including AVI, ASF, FLV, MP4, MPEG/MPG, Matroska/WEBM, 3G2, OGV, MKV, M4V and MOV. Uses 1 API call per 10 MB of file size. Also uses 1 API call per additional minute of processing time over 5 minutes, up to a maximum of 25 minutes total processing time. Maximum output file size is 50GB. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_convert_to_webm(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input file to perform the operation on.
:param str file_url: Optional; URL of a video file being used for conversion. Use this option for files larger than 2GB.
:param int max_width: Optional; Maximum width of the output video, up to the original video width. Defaults to original video width.
:param int max_height: Optional; Maximum height of the output video, up to the original video height. Defaults to original video height.
:param bool preserve_aspect_ratio: Optional; If false, the original video's aspect ratio will not be preserved, allowing customization of the aspect ratio using maxWidth and maxHeight, potentially skewing the video. Default is true.
:param int frame_rate: Optional; Specify the frame rate of the output video. Defaults to original video frame rate.
:param int quality: Optional; Specify the quality of the output video, where 100 is lossless and 1 is the lowest possible quality with highest compression. Default is 50.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.video_convert_to_webm_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.video_convert_to_webm_with_http_info(**kwargs) # noqa: E501
return data
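# Note (added for clarity): video_convert_to_webm takes the same input options as
# the still-frame call sketched earlier plus encoder tuning. A hedged example of
# the extra keyword arguments only:
#   api.video_convert_to_webm(input_file='input.mp4', frame_rate=24, quality=35,
#                             preserve_aspect_ratio=True, async_req=False)
# quality runs from 1 (smallest file) to 100 (lossless); these keywords map onto
# the 'frameRate' and 'quality' request headers assembled in the method below.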
def video_convert_to_webm_with_http_info(self, **kwargs): # noqa: E501
"""Convert Video to WEBM format. # noqa: E501
Automatically detect video file format and convert it to WEBM format. Supports many input video formats, including AVI, ASF, FLV, MP4, MPEG/MPG, Matroska/WEBM, 3G2, OGV, MKV, M4V and MOV. Uses 1 API call per 10 MB of file size. Also uses 1 API call per additional minute of processing time over 5 minutes, up to a maximum of 25 minutes total processing time. Maximum output file size is 50GB. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_convert_to_webm_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param file input_file: Input file to perform the operation on.
:param str file_url: Optional; URL of a video file being used for conversion. Use this option for files larger than 2GB.
:param int max_width: Optional; Maximum width of the output video, up to the original video width. Defaults to original video width.
:param int max_height: Optional; Maximum height of the output video, up to the original video height. Defaults to original video height.
:param bool preserve_aspect_ratio: Optional; If false, the original video's aspect ratio will not be preserved, allowing customization of the aspect ratio using maxWidth and maxHeight, potentially skewing the video. Default is true.
:param int frame_rate: Optional; Specify the frame rate of the output video. Defaults to original video frame rate.
:param int quality: Optional; Specify the quality of the output video, where 100 is lossless and 1 is the lowest possible quality with highest compression. Default is 50.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input_file', 'file_url', 'max_width', 'max_height', 'preserve_aspect_ratio', 'frame_rate', 'quality'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method video_convert_to_webm" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'file_url' in params:
header_params['fileUrl'] = params['file_url'] # noqa: E501
if 'max_width' in params:
header_params['maxWidth'] = params['max_width'] # noqa: E501
if 'max_height' in params:
header_params['maxHeight'] = params['max_height'] # noqa: E501
if 'preserve_aspect_ratio' in params:
header_params['preserveAspectRatio'] = params['preserve_aspect_ratio'] # noqa: E501
if 'frame_rate' in params:
header_params['frameRate'] = params['frame_rate'] # noqa: E501
if 'quality' in params:
header_params['quality'] = params['quality'] # noqa: E501
form_params = []
local_var_files = {}
if 'input_file' in params:
local_var_files['inputFile'] = params['input_file']  # noqa: E501
# -*- coding: utf-8 -*-
if __name__ == "__main__":
import os
import sys
# If you run tests in-place (instead of using py.test), ensure local version is tested!
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from greenery.fsm import *
def test_fsm():
# Buggggs.
abstar = fsm(
alphabet = set(['a', None, 'b']),
states = set([0, 1]),
initial = 0,
finals = set([0]),
map = {
0: {'a': 0, None: 1, 'b': 0},
1: {'a': 1, None: 1, 'b': 1}
}
)
assert str(abstar.lego()) == "[ab]*"
adotb = fsm(
alphabet = set(['a', None, 'b']),
states = set([0, 1, 2, 3, 4]),
initial = 0,
finals = set([4]),
map = {
0: {'a': 2, None: 1, 'b': 1},
1: {'a': 1, None: 1, 'b': 1},
2: {'a': 3, None: 3, 'b': 3},
3: {'a': 1, None: 1, 'b': 4},
4: {'a': 1, None: 1, 'b': 1}
}
)
assert str(adotb.lego()) == "a.b"
from greenery.lego import otherchars
# Odd bug with fsm.__add__(), exposed by "[bc]*c"
int5A = fsm(
alphabet = set(["a", "b", "c", otherchars]),
states = set([0, 1]),
initial = 1,
finals = set([1]),
map = {
0: {otherchars: 0, "a": 0, "b": 0, "c": 0},
1: {otherchars: 0, "a": 0, "b": 1, "c": 1},
}
)
assert int5A.accepts("")
int5B = fsm(
alphabet = set(["a", "b", "c", otherchars]),
states = set([0, 1, 2]),
initial = 1,
finals = set([0]),
map = {
0: {otherchars: 2, "a": 2, "b": 2, "c": 2},
1: {otherchars: 2, "a": 2, "b": 2, "c": 0},
2: {otherchars: 2, "a": 2, "b": 2, "c": 2},
}
)
assert int5B.accepts("c")
int5C = int5A + int5B
assert int5C.accepts("c")
# assert int5C.initial == 0
# fsm.lego()
# Catch a recursion error
assert str(fsm(
alphabet = set(["0", "1"]),
states = set([0, 1, 2, 3]),
initial = 3,
finals = set([1]),
map = {
0: {"0": 1, "1": 1},
1: {"0": 2, "1": 2},
2: {"0": 2, "1": 2},
3: {"0": 0, "1": 2},
}
).lego()) == "0[01]"
# built-ins testing
assert not null("a").accepts("a")
assert epsilon("a").accepts("")
assert not epsilon("a").accepts("a")
a = fsm(
alphabet = set(["a", "b"]),
states = set([0, 1, "ob"]),
initial = 0,
finals = set([1]),
map = {
0 : {"a" : 1 , "b" : "ob"},
1 : {"a" : "ob", "b" : "ob"},
"ob" : {"a" : "ob", "b" : "ob"},
},
)
assert not a.accepts("")
assert a.accepts("a")
assert not a.accepts("b")
b = fsm(
alphabet = set(["a", "b"]),
states = set([0, 1, "ob"]),
initial = 0,
finals = set([1]),
map = {
0 : {"a" : "ob", "b" : 1 },
1 : {"a" : "ob", "b" : "ob"},
"ob" : {"a" : "ob", "b" : "ob"},
},
)
assert not b.accepts("")
assert not b.accepts("a")
assert b.accepts("b")
# concatenation simple test
concAA = a + a
assert not concAA.accepts("")
assert not concAA.accepts("a")
assert concAA.accepts("aa")
assert not concAA.accepts("aaa")
concAA = epsilon(set(["a", "b"])) + a + a
assert not concAA.accepts("")
assert not concAA.accepts("a")
assert concAA.accepts("aa")
assert not concAA.accepts("aaa")
concAB = a + b
assert not concAB.accepts("")
assert not concAB.accepts("a")
assert not concAB.accepts("b")
assert not concAB.accepts("aa")
assert concAB.accepts("ab")
assert not concAB.accepts("ba")
assert not concAB.accepts("bb")
# alternation simple test
altA = a | null(set(["a", "b"]))
assert not altA.accepts("")
assert altA.accepts("a")
altAB = a | b
assert not altAB.accepts("")
assert altAB.accepts("a")
assert altAB.accepts("b")
assert not altAB.accepts("aa")
assert not altAB.accepts("ab")
assert not altAB.accepts("ba")
assert not altAB.accepts("bb")
# fsmstar simple test
starA = a.star()
assert starA.accepts("")
assert starA.accepts("a")
assert not starA.accepts("b")
assert starA.accepts("aaaaaaaaa")
# multiplication simple test
twoA = a * 2
assert not twoA.accepts("")
assert not twoA.accepts("a")
assert twoA.accepts("aa")
assert not twoA.accepts("aaa")
zeroA = a * 0
assert zeroA.accepts("")
assert not zeroA.accepts("a")
# intersection simple test
intAB = a & b
assert not intAB.accepts("")
assert not intAB.accepts("a")
assert not intAB.accepts("b")
everythingbutA = a.everythingbut()
assert everythingbutA.accepts("")
assert not everythingbutA.accepts("a")
assert everythingbutA.accepts("b")
assert everythingbutA.accepts("aa")
assert everythingbutA.accepts("ab")
# this is "0*1" in heavy disguise. crawl should resolve this duplication
# Notice how states 2 and 3 behave identically. When resolved together,
# states 1 and 2&3 also behave identically, so they, too should be resolved
# (this is impossible to spot before 2 and 3 have been combined).
merged = fsm(
alphabet = set(["0", "1"]),
states = set([1, 2, 3, 4, "oblivion"]),
initial = 1,
finals = set([4]),
map = {
1 : {"0" : 2 , "1" : 4 },
2 : {"0" : 3 , "1" : 4 },
3 : {"0" : 3 , "1" : 4 },
4 : {"0" : "oblivion", "1" : "oblivion"},
"oblivion" : {"0" : "oblivion", "1" : "oblivion"},
}
).reduce()
assert len(merged.states) == 3
# this is (a*ba)*
starred = fsm(
alphabet = set(["a", "b"]),
states = set([0, 1, 2, "oblivion"]),
initial = 0,
finals = set([2]),
map = {
0 : {"a" : 0 , "b" : 1 },
1 : {"a" : 2 , "b" : "oblivion"},
2 : {"a" : "oblivion", "b" : "oblivion"},
"oblivion" : {"a" : "oblivion", "b" : "oblivion"},
}
).star()
assert starred.alphabet == frozenset(["a", "b"])
assert starred.accepts("")
assert not starred.accepts("a")
assert not starred.accepts("b")
assert not starred.accepts("aa")
assert starred.accepts("ba")
assert starred.accepts("aba")
assert starred.accepts("aaba")
assert not starred.accepts("aabb")
assert starred.accepts("abababa")
# reduce() behaviour test
# FSM accepts no strings but has 3 states, needs only 1
asdf = fsm(
alphabet = set([None]),
states = set([0, 1, 2]),
initial = 0,
finals = set([1]),
map = {
0 : {None : 2},
1 : {None : 2},
2 : {None : 2},
},
)
asdf = asdf.reduce()
assert len(asdf.states) == 1
# FSM reversal
abc = fsm(
alphabet = set(["a", "b", "c"]),
states = set([0, 1, 2, 3, None]),
initial = 0,
finals = set([3]),
map = {
0 : {"a" : 1 , "b" : None, "c" : None},
1 : {"a" : None, "b" : 2 , "c" : None},
2 : {"a" : None, "b" : None, "c" : 3 },
3 : {"a" : None, "b" : None, "c" : None},
None : {"a" : None, "b" : None, "c" : None},
},
)
cba = reversed(abc)
assert cba.accepts("cba")
# This is (a|b)*a(a|b)
brzozowski = fsm(
alphabet = set(["a", "b"]),
states = set(["A", "B", "C", "D", "E"]),
initial = "A",
finals = set(["C", "E"]),
map = {
"A" : {"a" : "B", "b" : "D"},
"B" : {"a" : "C", "b" : "E"},
"C" : {"a" : "C", "b" : "E"},
"D" : {"a" : "B", "b" : "D"},
"E" : {"a" : "B", "b" : "D"},
},
)
assert brzozowski.accepts("aa")
assert brzozowski.accepts("ab")
assert brzozowski.accepts("aab")
assert brzozowski.accepts("bab")
assert brzozowski.accepts("abbbbbbbab")
assert not brzozowski.accepts("")
assert not brzozowski.accepts("a")
assert not brzozowski.accepts("b")
assert not brzozowski.accepts("ba")
assert not brzozowski.accepts("bb")
assert not brzozowski.accepts("bbbbbbbbbbbb")
# So this is (a|b)a(a|b)*
b2 = reversed(brzozowski)
assert b2.accepts("aa")
assert b2.accepts("ba")
assert b2.accepts("baa")
assert b2.accepts("bab")
assert b2.accepts("babbbbbbba")
assert not b2.accepts("")
assert not b2.accepts("a")
assert not b2.accepts("b")
assert not b2.accepts("ab")
assert not b2.accepts("bb")
assert not b2.accepts("bbbbbbbbbbbb")
# Test string generator functionality.
gen = b2.strings()
assert next(gen) == ["a", "a"]
assert next(gen) == ["b", "a"]
assert next(gen) == ["a", "a", "a"]
assert next(gen) == ["a", "a", "b"]
assert next(gen) == ["b", "a", "a"]
assert next(gen) == ["b", "a", "b"]
assert next(gen) == ["a", "a", "a", "a"]
# epsilon reversed is epsilon
assert reversed(epsilon("a")).accepts("")
# Bug fix. This is a(a{2})* (i.e. accepts an odd number of "a" chars in a
# row), but when .lego() is called, the result is "a+". Turned out to be
# a fault in the lego.multiplier.__mul__() routine
elesscomplex = fsm(
alphabet = set(["a"]),
states = set([0, 1]),
initial = 0,
finals = set([1]),
map = {
0 : {"a" : 1},
1 : {"a" : 0},
},
)
assert not elesscomplex.accepts("")
assert elesscomplex.accepts("a")
assert not elesscomplex.accepts("aa")
assert elesscomplex.accepts("aaa")
elesscomplex = elesscomplex.lego()
assert str(elesscomplex) in set(["a(aa)*", "(aa)*a"])
elesscomplex = elesscomplex.fsm()
assert not elesscomplex.accepts("")
assert elesscomplex.accepts("a")
assert not elesscomplex.accepts("aa")
assert elesscomplex.accepts("aaa")
gen = elesscomplex.strings()
assert next(gen) == ["a"]
assert next(gen) == ["a", "a", "a"]
assert next(gen) == ["a", "a", "a", "a", "a"]
assert next(gen) == ["a", "a", "a", "a", "a", "a", "a"]
# Binary numbers divisible by 3.
# Disallows the empty string
# Allows "0" on its own, but not leading zeroes.
div3 = fsm(
alphabet = set(["0", "1"]),
states = set(["initial", "zero", 0, 1, 2, None]),
initial = "initial",
finals = set(["zero", 0]),
map = {
"initial" : {"0" : "zero", "1" : 1 },
"zero" : {"0" : None , "1" : None},
0 : {"0" : 0 , "1" : 1 },
1 : {"0" : 2 , "1" : 0 },
2 : {"0" : 1 , "1" : 2 },
None : {"0" : None , "1" : None},
},
)
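# Worked trace (added for clarity): each numbered state is the value of the bits
# read so far, modulo 3. For "1001" (decimal 9) the machine moves
#   "initial" --1--> 1, --0--> (1*2+0)%3 = 2, --0--> (2*2+0)%3 = 1, --1--> (1*2+1)%3 = 0
# and state 0 is final, so "1001" is accepted -- matching the assertion below.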
assert not div3.accepts("")
assert div3.accepts("0")
assert not div3.accepts("1")
assert not div3.accepts("00")
assert not div3.accepts("01")
assert not div3.accepts("10")
assert div3.accepts("11")
assert not div3.accepts("000")
assert not div3.accepts("001")
assert not div3.accepts("010")
assert not div3.accepts("011")
assert not div3.accepts("100")
assert not div3.accepts("101")
assert div3.accepts("110")
assert not div3.accepts("111")
assert not div3.accepts("0000")
assert not div3.accepts("0001")
assert not div3.accepts("0010")
assert not div3.accepts("0011")
assert not div3.accepts("0100")
assert not div3.accepts("0101")
assert not div3.accepts("0110")
assert not div3.accepts("0111")
assert not div3.accepts("1000")
assert div3.accepts("1001")
div3 = div3.lego()
assert str(div3) == "0|1(01*0|10*1)*10*"
gen = div3.strings()
assert next(gen) == "0"
assert next(gen) == "11"
assert next(gen) == "110"
assert next(gen) == "1001"
assert next(gen) == "1100"
# Machine accepts only numbers in selected base (e.g. 2, 10) that are
# divisible by N (e.g. 3, 7).
# "0" alone is acceptable, but leading zeroes (e.g. "00", "07") are not
base = 2
N = 3
assert base <= 10
divN = fsm(
alphabet = set(str(i) for i in range(base)),
states = set(range(N)) | set(["initial", "zero", None]),
initial = "initial",
finals = set(["zero", 0]),
map = dict(
[
("initial", dict([(str(j), j % N) for j in range(1, base)] + [("0", "zero")])),
("zero" , dict([(str(j), None ) for j in range( base)] )),
(None , dict([(str(j), None ) for j in range( base)] )),
] + [
(i , dict([(str(j), (i * base + j) % N) for j in range( base)] ))
for i in range(N)
]
),
)
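# Added note: the dict comprehension above encodes the same rule generically.
# From state i (current value mod N), reading digit j moves to (i*base + j) % N;
# "initial" routes a first nonzero digit j to j % N and "0" to the dedicated
# "zero" state; "zero" and None are dead states, which is what rejects leading
# zeroes.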
gen = divN.lego().strings()
a = next(gen)
assert a == "0"
for i in range(7):
b = next(gen)
assert int(a, base) + N == int(b, base)
import azure.mgmt.batchai as batchai
from azure.storage.file import FileService
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from datetime import datetime
import os
def setup_bai(
aad_client_id: str = None,
aad_secret: str = None,
aad_tenant: str = None,
subscription_id: str = None,
rg: str = None,
location: str = None,
) -> 'batchai.BatchAIManagementClient':
'''
Setup credentials, batch AI client, and the resource
group that the resources will be created in
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
aad_client_id (str, optional): The client id you get
from creating your Service Principal.
aad_secret (str, optional): The secret key you get
from creating your Service Principal.
aad_tenant (str, optional): The tenant id that your
Service Principal is created in.
subscription_id (str, optional): The subscription id
you wish for your Batch AI resources to be created
in.
rg (str, optional): The Resource Group you will
create your work in.
location (str, optional): The location/region in
which your Azure resources will be created.
Returns:
BatchAIManagementClient: An instance of the Batch AI
management client that can be used to manage Batch
AI resources.
'''
aad_client_id = aad_client_id or os.getenv('AAD_CLIENT_ID')
aad_tenant = aad_tenant or os.getenv('AAD_TENANT')
aad_secret = aad_secret or os.getenv('AAD_SECRET')
subscription_id = subscription_id or os.getenv('SUBSCRIPTION_ID')
rg = rg or os.getenv('RESOURCE_GROUP')
location = location or os.getenv('REGION')
assert aad_client_id
assert aad_tenant
assert aad_secret
assert subscription_id
assert rg
assert location
creds = ServicePrincipalCredentials(
client_id=aad_client_id,
secret=aad_secret,
tenant=aad_tenant
)
resource_management_client = ResourceManagementClient(
credentials=creds,
subscription_id=subscription_id
)
resource = resource_management_client \
.resource_groups.create_or_update(rg, {
'location': location
})
batchai_client = batchai.BatchAIManagementClient(
credentials=creds,
subscription_id=subscription_id
)
return batchai_client
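# Usage sketch (illustrative; every credential below is a placeholder, not a real
# value): either export AAD_CLIENT_ID, AAD_SECRET, AAD_TENANT, SUBSCRIPTION_ID,
# RESOURCE_GROUP and REGION and call setup_bai() with no arguments, or pass the
# values explicitly:
#
#   batchai_client = setup_bai(
#       aad_client_id='00000000-0000-0000-0000-000000000000',
#       aad_secret='<service-principal-secret>',
#       aad_tenant='11111111-1111-1111-1111-111111111111',
#       subscription_id='22222222-2222-2222-2222-222222222222',
#       rg='my-batchai-rg',
#       location='eastus',
#   )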
def get_cluster(
batchai_client: 'BatchAIManagementClient',
name: str,
rg: str = None,
ws: str = None
) -> 'batchai.models.Cluster':
'''
Get a BatchAI cluster by cluster name
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
name (str): The name of the cluster to get
rg (str, optional): The resource group to look for
the cluster under.
ws (str, optional): The Batch AI Workspace to look
for the cluster under.
Returns:
batchai.models.Cluster: The cluster object that
is provided by the BatchAI management sdk.
'''
rg = rg or os.getenv('RESOURCE_GROUP')
ws = ws or os.getenv('WORKSPACE')
assert rg
assert ws
return batchai_client.clusters.get(
resource_group_name=rg,
workspace_name=ws,
cluster_name=name
)
def create_experiment(
batchai_client: 'BatchAIManagementClient',
name: str,
rg: str = os.getenv('RESOURCE_GROUP'),
ws: str = os.getenv('WORKSPACE'),
) -> 'batchai.models.Experiment':
'''
Create a BatchAI Experiment (which is the logical
container for a job)
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
name (str): The name of the Experiment
rg (str, optional): The resource group to create
the experiment in.
ws (str, optional): The Batch AI Workspace to
create the experiment in.
Returns:
batchai.models.Experiment: The experiment object
that is provided by the BatchAI management sdk.
'''
return batchai_client.experiments.create(
resource_group_name=rg,
workspace_name=ws,
experiment_name=name
)
def create_job_params(
cluster: 'batchai.models.Cluster',
input_dirs: ['batchai.models.InputDirectory'],
output_dirs: ['batchai.models.OutputDirectory'],
container_image: str,
command_line: str,
job_prep_command_line: str = '',
node_count: int = 1,
cluster_mnt_path: str = None
):
'''
Create the parameter object for the Batch AI job.
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
cluster (batchai.models.Cluster): The cluster to
create the parameters for.
input_dirs (List(batchai.models.InputDirectory)):
A list of the input directories to set up.
output_dirs (List(batchai.models.OutputDirectory)):
A list of the output directories to setup.
container_image (str): The container image to use
when running the job.
command_line (str): The command line to execute.
job_prep_command_line (str, optional): Optional
command line to execute during job_preparation.
node_count (int, optional): The number of nodes
to use for the job.
cluster_mnt_path (str, optional): The mnt path
of the file share on the cluster.
Returns:
batchai.models.JobCreateParameters: The Parameter
object to pass into the job during creation.
'''
cluster_mnt_path = cluster_mnt_path or \
os.getenv('CLUSTER_CONTAINER_MNT_PATH')
assert cluster_mnt_path
return batchai.models.JobCreateParameters(
cluster=batchai.models.ResourceId(id=cluster.id),
node_count=node_count,
input_directories=input_dirs,
output_directories=output_dirs,
std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'\
.format(cluster_mnt_path),
job_preparation=batchai.models.JobPreparation(
command_line=job_prep_command_line
),
container_settings=batchai.models.ContainerSettings(
image_source_registry=batchai.models.ImageSourceRegistry(
image=container_image
)
),
custom_toolkit_settings=batchai.models.CustomToolkitSettings(
command_line=command_line
)
)
def create_job(
batchai_client: 'BatchAIManagementClient',
job_name: str,
job_params: 'batchai.models.JobCreateParameters',
experiment_name: str,
rg: str = os.getenv('RESOURCE_GROUP'),
ws: str = os.getenv('WORKSPACE'),
async_job: bool = True
) -> 'batchai.models.Job':
'''
Create and submit a BatchAI Job under an existing
Experiment.
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
job_name (str): The name of the job.
job_params (JobCreateParameters): The parameters
to pass to the job.
experiment_name (str): The name of the
experiment to create the job under.
rg (str, optional): The resource group to create
the job in.
ws (str, optional): The Batch AI Workspace to
create the job in.
async_job (bool, optional): If True (default), return
immediately with the result of jobs.create; if False,
wait for creation to finish and return the Job.
Returns:
batchai.models.Job: The Job object
that is provided by the BatchAI management sdk.
'''
job = batchai_client.jobs.create(
resource_group_name=rg,
workspace_name=ws,
experiment_name=experiment_name,
job_name=job_name,
parameters=job_params
)
if not async_job:
return job.result()
else:
return job
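# End-to-end sketch (illustrative, added for clarity): wiring the helpers above
# together to submit a single-node job. Cluster, experiment, share and image names
# are made up; RESOURCE_GROUP, WORKSPACE and CLUSTER_CONTAINER_MNT_PATH are assumed
# to be exported; the InputDirectory/OutputDirectory fields and the
# $AZ_BATCHAI_INPUT_SCRIPTS variable follow the usual Batch AI conventions, so
# treat this as a hedged outline rather than a verified recipe.
def _example_submit_job(batchai_client):
    cluster = get_cluster(batchai_client, 'nc6-cluster')
    create_experiment(batchai_client, 'training-experiment')
    params = create_job_params(
        cluster=cluster,
        input_dirs=[batchai.models.InputDirectory(
            id='SCRIPTS', path='$AZ_BATCHAI_MOUNT_ROOT/myshare/scripts')],
        output_dirs=[batchai.models.OutputDirectory(
            id='MODEL', path_prefix='$AZ_BATCHAI_MOUNT_ROOT/myshare')],
        container_image='pytorch/pytorch:latest',
        command_line='python $AZ_BATCHAI_INPUT_SCRIPTS/train.py',
        node_count=1,
    )
    return create_job(batchai_client, 'train-job-01', params,
                      experiment_name='training-experiment', async_job=False)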
def create_workspace(
batchai_client: 'BatchAIManagementClient',
rg: str = None,
ws: str = None,
location: str = None
) -> 'batchai.models.WorkSpace':
'''
Create a BatchAI Workspace
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
rg (str, optional): The resource group to create
the workspace in.
ws (str, optional): The name of the Batch AI
Workspace to create.
location (str, optional): The location/region in
which your Workspace will be created.
Returns:
batchai.models.Workspace: The Workspace object
that is provided by the BatchAI management sdk.
'''
rg = rg or os.getenv('RESOURCE_GROUP')
ws = ws or os.getenv('WORKSPACE')
location = location or os.getenv('REGION')
assert rg
assert ws
assert location
return batchai_client \
.workspaces \
.create(rg, ws, location) \
.result()
def create_autoscale_cluster(
batchai_client: 'BatchAIManagementClient',
cluster_name: str,
vm_size: str = None,
vm_priority: str = None,
min_nodes: int = None,
max_nodes: int = None,
initial_nodes: int = None,
ws: str = None,
rg: str = None,
storage_account_name: str = None,
storage_account_key: str = None,
container_name: str = None,
cluster_mnt_path: str = None,
admin_user_name: str = None,
admin_user_password: str = None
) -> None:
'''
Create an autoscale Batch AI cluster
All optional arguments will default to using the
associated environment variable if the parameter
is not provided.
Args:
batchai_client (BatchAIManagementClient): The
management client to manage Batch AI resources
cluster_name (str): The name of the cluster you
wish to create.
vm_size (str, optional): The VM size of the
cluster you wish to create.
vm_priority (str, optional): Choose between low
priority or dedicated.
min_nodes (int, optional): Minimum number of
nodes in the autoscale cluster.
max_nodes (int, optional): Maximum number of
nodes in the autoscale cluster.
initial_nodes (int, optional): Initial number
of nodes in the autoscale cluster.
ws (str, optional): The workspace to create the
cluster in.
rg (str, optional): The resource group to
create the cluster in.
storage_account_name (str, optional): The
storage account to use when mounting the
blob container.
storage_account_key (str, optional): The
key to use when mounting the blob container.
container_name (str, optional): The name of
the container to use in storage.
cluster_mnt_path (str, optional): The mnt path
of the file share on the cluster.
admin_user_name (str, optional): The username
of the user to create for accessing the
cluster.
admin_user_password (str, optional): The
password of the user to create for accessing
the cluster.
Returns:
None
'''
vm_size = vm_size or \
os.getenv('CLUSTER_VM_SIZE')
vm_priority = vm_priority or \
os.getenv('CLUSTER_VM_PRIORITY')
min_nodes = min_nodes if type(min_nodes) is int else \
os.getenv('CLUSTER_MINIMUM_NODE_COUNT')
max_nodes = max_nodes if type(max_nodes) is int else \
os.getenv('CLUSTER_MAXIMUM_NODE_COUNT')
initial_nodes = initial_nodes if type(initial_nodes) is int else \
os.getenv('CLUSTER_INITIAL_NODE_COUNT')
ws = ws or os.getenv('WORKSPACE')
rg = rg or os.getenv('RESOURCE_GROUP')
storage_account_name = storage_account_name or
#!/usr/bin/env python3
import argparse
import carla # pylint: disable=import-error
import math
import numpy as np
import time
import threading
from cereal import log
from multiprocessing import Process, Queue
from typing import Any
import cereal.messaging as messaging
from common.params import Params
from common.numpy_fast import clip
from common.realtime import Ratekeeper, DT_DMON
from lib.can import can_function
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
import sys,os,signal
# from sys import argv
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--low_quality', action='store_true')
parser.add_argument('--town', type=str, default='Town04_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point',
type=int, default=16)
parser.add_argument('--cruise_lead', type=int, default=80) #(1 + 80%)V0 = 1.8V0
parser.add_argument('--cruise_lead2', type=int, default=80) #(1 + 80%)V0 = 1.8V0 #change speed in the middle
parser.add_argument('--init_dist', type=int, default=100) #meters; initial relative distance between vehicle and vehicle2
# parser.add_argument('--faultinfo', type=str, default='')
# parser.add_argument('--scenarioNum', type=int, default=1)
# parser.add_argument('--faultNum', type=int, default=1)
args = parser.parse_args()
W, H = 1164, 874
REPEAT_COUNTER = 5
PRINT_DECIMATION = 100
STEER_RATIO = 15.
vEgo = 60 #mph #set in selfdrive/controlsd
FI_Enable = True #False: run the code in fault-free mode; True: enable the fault injection engine
reInitialize_bridge = False
Mode_FI_duration = 0 # 0: FI lasts 2.5s after t_f; 1: FI whenever context is True between [t_f,t_f+2.5s]
Driver_react_Enable = False
Other_vehicles_Enable = False
pm = messaging.PubMaster(['roadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
sm = messaging.SubMaster(['carControl','controlsState','radarState','modelV2'])
class VehicleState:
def __init__(self):
self.speed = 0
self.angle = 0
self.bearing_deg = 0.0
self.vel = carla.Vector3D()
self.cruise_button= 0
self.is_engaged=False
def steer_rate_limit(old, new):
# Rate limiting to 0.5 degrees per step
limit = 0.5
if new > old + limit:
return old + limit
elif new < old - limit:
return old - limit
else:
return new
frame_id = 0
def cam_callback(image):
global frame_id
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
dat = messaging.new_message('roadCameraState')
dat.roadCameraState = {
"frameId": image.frame,
"image": img.tobytes(),
"transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
}
pm.send('roadCameraState', dat)
frame_id += 1
def imu_callback(imu, vehicle_state):
vehicle_state.bearing_deg = math.degrees(imu.compass)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def panda_state_function(exit_event: threading.Event):
pm = messaging.PubMaster(['pandaState'])
while not exit_event.is_set():
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': True,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaState', dat)
time.sleep(0.5)
def gps_callback(gps, vehicle_state):
dat = messaging.new_message('gpsLocationExternal')
# transform vel from carla to NED
# north is -Y in CARLA
velNED = [
-vehicle_state.vel.y, # north/south component of NED is negative when moving south
vehicle_state.vel.x, # positive when moving east, which is x in carla
vehicle_state.vel.z,
]
dat.gpsLocationExternal = {
"timestamp": int(time.time() * 1000),
"flags": 1, # valid fix
"accuracy": 1.0,
"verticalAccuracy": 1.0,
"speedAccuracy": 0.1,
"bearingAccuracyDeg": 0.1,
"vNED": velNED,
"bearingDeg": vehicle_state.bearing_deg,
"latitude": gps.latitude,
"longitude": gps.longitude,
"altitude": gps.altitude,
"speed": vehicle_state.speed,
"source": log.GpsLocationData.SensorSource.ublox,
}
pm.send('gpsLocationExternal', dat)
# Create a radar's callback that just prints the data
# def radar_callback(weak_radar, sensor_data):
def radar_callback( sensor_data):
# # self = weak_radar()
# # if not self:
# # return
# print("==============",len(sensor_data),'==============')
# for detection in sensor_data:
# print(detection)
# # print('depth: ' + str(detection.depth)) # meters
# # print('azimuth: ' + str(detection.azimuth)) # rad
# # print('altitude: ' + str(detection.altitude)) # rad
# # print('velocity: ' + str(detection.velocity)) # m/s
ret = 0#sensor_data[0]
collision_hist = []
def collision_callback(col_event):
collision_hist.append(col_event)
# print(col_event)
laneInvasion_hist = []
def laneInvasion_callback(LaneInvasionEvent):
laneInvasion_hist.append(LaneInvasionEvent)
def fake_driver_monitoring(exit_event: threading.Event):
pm = messaging.PubMaster(['driverState','driverMonitoringState'])
while not exit_event.is_set():
# dmonitoringmodeld output
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
# dmonitoringd output
dat = messaging.new_message('driverMonitoringState')
dat.driverMonitoringState = {
"faceDetected": True,
"isDistracted": False,
"awarenessStatus": 1.,
}
pm.send('driverMonitoringState', dat)
time.sleep(DT_DMON)
def can_function_runner(vs: VehicleState, exit_event: threading.Event):
i = 0
while not exit_event.is_set():
can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged)
time.sleep(0.01)
i+=1
def bridge(q):
# setup CARLA
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(10.0)
world = client.load_world(args.town)
print("test======================================================================")
print(args.town)
if args.low_quality:
world.unload_map_layer(carla.MapLayer.Foliage)
world.unload_map_layer(carla.MapLayer.Buildings)
world.unload_map_layer(carla.MapLayer.ParkedVehicles)
world.unload_map_layer(carla.MapLayer.Particles)
world.unload_map_layer(carla.MapLayer.Props)
world.unload_map_layer(carla.MapLayer.StreetLights)
blueprint_library = world.get_blueprint_library()
world_map = world.get_map()
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > args.num_selected_spawn_point, \
f'''No spawn point {args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
spawn_point = spawn_points[args.num_selected_spawn_point] # y -= 100+
spawn_point.location.y -= 80
#=====add 1st vehicle=====
spawn_point1 = carla.Transform(spawn_point.location,spawn_point.rotation)
# spawn_point1.location.y += 20
vehicle = world.spawn_actor(vehicle_bp, spawn_point1)
#=====add second vehicle=====
spawn_point2 = carla.Transform(spawn_point.location,spawn_point.rotation)
spawn_point2.location.y += args.init_dist#20
vehicle2 = world.spawn_actor(vehicle_bp, spawn_point2)
# vehicle2.set_autopilot(True)
#==========3rd vehilce===========
if Other_vehicles_Enable:
spawn_point3 = carla.Transform(spawn_point.location,spawn_point.rotation)
spawn_point3.location.y -= 35
spawn_point3.location.x += 7
spawn_point3.rotation.yaw += 25
vehicle3 = world.spawn_actor(vehicle_bp, spawn_point3) #following vehicle
spawn_point4 = carla.Transform(spawn_point1.location,spawn_point1.rotation)
spawn_point4.location.x += 4
spawn_point4.location.y += 15
vehicle4 = world.spawn_actor(vehicle_bp, spawn_point4)
spawn_point5 = carla.Transform(spawn_point1.location,spawn_point1.rotation)
spawn_point5.location.x += 5
spawn_point5.location.y -= 15
spawn_point5.rotation.yaw += 13
vehicle5 = world.spawn_actor(vehicle_bp, spawn_point5)
spectator = world.get_spectator()
transform = vehicle.get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=150), carla.Rotation(pitch=-90)))
#======end line===============
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
print('max_steer_angle',max_steer_angle) #70 degree
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.13))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
vehicle_state = VehicleState()
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(lambda imu: imu_callback(imu, vehicle_state))
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)
gps.listen(lambda gps: gps_callback(gps, vehicle_state))
# # Get radar blueprint
# radar_bp = blueprint_library.filter('sensor.other.radar')[0]
# # Set Radar attributes, by default are:
# radar_bp.set_attribute('horizontal_fov', '30') # degrees
# radar_bp.set_attribute('vertical_fov', '30') # degrees
# # radar_bp.set_attribute('points_per_second', '1500')
# radar_bp.set_attribute('range', '100') # meters
# # Spawn the radar
# radar = world.spawn_actor(radar_bp, transform, attach_to=vehicle, attachment_type=carla.AttachmentType.Rigid)
# # weak_radar = weakref.ref(radar)
# # radar.listen(lambda sensor_data: radar_callback(weak_radar, sensor_data))
# radar.listen(lambda sensor_data: radar_callback(sensor_data))
# # radar.listen(radar_callback)
#collision sensor detector
colsensor_bp = blueprint_library.find("sensor.other.collision")
colsensor = world.spawn_actor(colsensor_bp, transform, attach_to=vehicle)
colsensor.listen(lambda colevent: collision_callback(colevent))
#lane invasion
laneInvasion_bp = blueprint_library.find("sensor.other.lane_invasion")
laneInvasion = world.spawn_actor(laneInvasion_bp, transform, attach_to=vehicle)
laneInvasion.listen(lambda LaneInvasionEvent: laneInvasion_callback(LaneInvasionEvent))
# launch fake car threads
threads = []
exit_event = threading.Event()
threads.append(threading.Thread(target=panda_state_function, args=(exit_event,)))
threads.append(threading.Thread(target=fake_driver_monitoring, args=(exit_event,)))
threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, exit_event,)))
for t in threads:
t.start()
time.sleep(1)
# can loop
rk = Ratekeeper(100, print_delay_threshold=0.05) #rate =100, T=1/100s=10ms
# init
throttle_ease_out_counter = REPEAT_COUNTER
brake_ease_out_counter = REPEAT_COUNTER
steer_ease_out_counter = REPEAT_COUNTER
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
is_openpilot_engaged = False
throttle_out = steer_out = brake_out = 0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0
old_steer = old_brake = old_throttle = 0
throttle_manual_multiplier = 0.7 #keyboard signal is always 1
brake_manual_multiplier = 0.7 #keyboard signal is always 1
steer_manual_multiplier = 45 * STEER_RATIO #keyboard signal is always 1
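# Sketch of the manual-control scaling above (hedged interpretation of the
# comments that the keyboard signal is always 1): a single key press maps to
# 0.7 of full throttle or brake and to a steering command of 45 * STEER_RATIO
# degrees, which the loop below presumably eases back out via the
# *_ease_out_counter variables initialised above rather than dropping it to
# zero instantly.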
tm = client.get_trafficmanager(8008)
# vehicle2.set_autopilot(True,8008)
tm.vehicle_percentage_speed_difference(vehicle2,-args.cruise_lead) # Sets the difference between the vehicle's intended speed and its current speed limit.
# tm.distance_to_leading_vehicle(vehicle2,5)
if Other_vehicles_Enable:
tm.vehicle_percentage_speed_difference(vehicle3,-200)
is_autopilot_engaged =False #vehicle2
fp_res = open('results/data_ADS1_{}mph_{}m_{}V0_{}V0.csv'.format(vEgo,args.init_dist,args.cruise_lead,args.cruise_lead2),'w')
fp_res.write("frameIdx,distance(m),speed(m/s),acceleration(m/s2),angle_steer,gas,brake,steer_torque,actuators_steeringAngleDeg,actuators_steer,actuators_accel,d_rel(m),v_rel(m/s),c_path(m),faultinjection,faultType,alert,hazard,hazardType,alertMsg,hazardMsg,laneInvasion,yPos,Laneline1,Laneline2,Laneline3,Laneline4,leftPath,rightPath,leftEdge,rightEdge,vel_pos.x,vel_pos.y,vel2_pos.x,vel2_pos.y,vel4_pos.x,vel4_pos.y\n")
speed = 0
throttle_out_hist = 0
FI_duration = 1000  # set to be a large value like 10 seconds so it won't be reached in the normal case with human driver engagement  # 250*10ms = 2.5s
Num_laneInvasion = 0
t_laneInvasion = 0
pathleft = pathright = 0
roadEdgeLeft = roadEdgeRight = 0
laneLineleft=-1.85
laneLineright = 1.85
Lead_vehicle_in_vision = False #lead vehicle is captured in the camera
faulttime = -1
alerttime = -1
hazardtime = -1
fault_duration = 0
driver_alerted_time = -1
H2_count = 0
hazMsg = ""
hazard = False
hazType =0x0
alertType_list =[]
alertText1_list = []
alertText2_list = []
FI_flag = 0
FI_Type = 0
frameIdx = 0
FI_H3_combine_enable = 0
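# Note on the loop bound below (a rough estimate, not from the original
# comments): frameIdx only advances while openpilot is engaged, and the
# Ratekeeper above runs at 100 Hz (10 ms per tick), so frameIdx < 5000 caps
# the engaged portion of a run at roughly 5000 * 10 ms = 50 s.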
while frameIdx<5000:
altMsg = ""
alert = False
if is_openpilot_engaged:
frameIdx += 1
#simulate button Enable event
if rk.frame == 800:
q.put("cruise_up")
if frameIdx == 1000:
if args.cruise_lead != args.cruise_lead2: #change the speed of vehicle2
print("===========change Lead vehicle cruise speed from {}mph to {}mph".format(args.cruise_lead,args.cruise_lead2))
tm.vehicle_percentage_speed_difference(vehicle2,-args.cruise_lead2)
# if frameIdx >2000:
# q.put("quit")
# 1. Read the throttle, steer and brake from op or manual controls
# 2. Set instructions in Carla
# 3. Send current carstate to op via can
cruise_button = 0
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0.0
actuators_steeringAngleDeg = actuators_steer = actuators_accel = 0
dRel = 0
yRel = 2.5
vRel = 0
vLead = 0
yPos = 0
ylaneLines = []
yroadEdges = []
offsety = row_keys.unique_count+1
if Accum2.DebugMode: print("**stack len", len(arr), offsety, " showfilterbase:", showfilter_base)
# add showfilter column first if we have to
if showfilter:
newds[FILTERED_LONG_NAME] = arr[0: offsety]
# skip showfilter row for loop below (already added or not added above)
offset = offsety
# fix bug for enums, need to reattach code mapping for correct string
xmode = self._cat_cols.category_mode
if xmode in TypeRegister.Categories.dict_modes:
xcategories = TypeRegister.Categorical(xcategories, _from_categorical=self._cat_cols._categories_wrap)
# cut main array into multiple columns
for i in range(col_keys.unique_count):
new_colname = xcategories[i]
if isinstance(new_colname, bytes):
new_colname = new_colname.decode()
if isinstance(new_colname, str):
if len(new_colname) == 0:
#make up a column name
new_colname=INVALID_SHORT_NAME+str(i)
else:
new_colname = str(new_colname)
start = showfilter_base + offset
stop = offset + offsety
offset += offsety
# possibly skip over filter
arridx = slice(start,stop)
newds[new_colname] = arr[arridx]
return {'ds':newds, 'col_keys':col_keys, 'row_keys':row_keys, 'gbkeys' : gbkeys}
#---------------------------------------------------------------
@classmethod
def _apply_2d_operation(cls, func, imatrix, showfilter=True,
filter_rows=None, filter_cols=None):
"""
Called from routines like sum or min where we can make one pass
If there are badrows, then filter_rows is set to the row indexes that are bad
If there are badcols, then filter_cols is set to the col indexes that are bad
filter_rows is a fancy index or none
"""
if callable(func):
row_count, col_count = imatrix.shape
# horizontal add
#print("im0", imatrix.nansum())
totalsY = func(imatrix, axis=1) #[showfilter_base:]
# vertical func operation
totalsX = empty(col_count, dtype=totalsY.dtype)
# possibly remove filtered top row
if not showfilter:
totalsY = totalsY[1:]
# consider #imatrix.nansum(axis=0, out=totalsX)
for i in range(col_count):
arrslice = imatrix[:,i]
# possibly skip over first value
if not showfilter:
arrslice =arrslice[1:]
totalsX[i] = func(arrslice)
return totalsX, totalsY
# function was not callable
return None, None
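#   Illustrative sketch of the two reductions above (not part of the class,
#   assumes numpy as np and func=np.nansum):
#     >>> im = np.array([[1, 2], [3, 4], [5, 6]])
#     >>> np.nansum(im, axis=1)                      # totalsY, one value per row
#     array([ 3,  7, 11])
#     >>> [np.nansum(im[:, i]) for i in range(2)]    # totalsX, one value per column
#     [9, 12]
#   With showfilter=False the first (filter) row is excluded from both results.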
#---------------------------------------------------------------
@classmethod
def _accum1_pass(cls, cat, origarr, funcNum, showfilter=False, filter=None, func_param=0, **kwargs):
"""
internal call to calculate the Y or X summary axis
the filter must be passed correctly
returns array with result of operation, size of array is number of uniques
"""
basebin =1
if showfilter:
basebin =0
if callable(funcNum):
# from apply_reduce
#funcList = [GB_FUNCTIONS.GB_SUM]
#accum_tuple = _groupbycalculateall([origarr], ikey, numkeys, funcList, binLowList, binHighList, func_param)
# need a new option here, which is that we want to allocate for a filter
# but we might not use it
# ALSO dont want back a dataaset
accum_tuple = cat.apply_reduce(funcNum, origarr, showfilter=showfilter, filter=filter, nokeys=True, **kwargs)
# the showfilter is handled automatically
return accum_tuple[0]
else:
ikey = cat.grouping.ikey
# if zero base, we need 1 base for these calculations
if cat.grouping.base_index == 0:
ikey = ikey + 1
# Optimization: combine_filter was previously called
if filter is not None:
# N.B. We are going to change ikey, make a copy instead of changing the input. The input
# data will be used again when user calls method on the Accum2 object again.
# zero out anything not in the filter
ikey = where(filter, ikey, 0)
numkeys = cat.unique_count
funcList = [funcNum]
binLowList = [basebin]
binHighList = [numkeys + 1]
if funcNum >= GB_FUNCTIONS.GB_SUM and funcNum < GB_FUNCTIONS.GB_FIRST:
accum_tuple = _groupbycalculateall([origarr], ikey, numkeys, funcList, binLowList, binHighList, func_param)
elif funcNum >= GB_FUNCTIONS.GB_FIRST and funcNum < GB_FUNCTIONS.GB_CUMSUM:
# TODO break out as function
packing= groupbypack(ikey, None, numkeys + 1)
iGroup = packing['iGroup']
iFirstGroup = packing['iFirstGroup']
nCountGroup = packing['nCountGroup']
accum_tuple = _groupbycalculateallpack([origarr], ikey, iGroup, iFirstGroup, nCountGroup, numkeys, funcList, binLowList, binHighList, func_param)
# whether or not they want to see the filter
if basebin != 0:
return accum_tuple[0][basebin:]
else:
return accum_tuple[0]
#---------------------------------------------------------------
@classmethod
def _add_totals(cls, cat_rows, newds, name, totalsX, totalsY, totalOfTotals):
"""
Adds a summary column on the right (totalsY)
Adds a footer on the bottom (totalsX)
"""
if totalsY is not None:
if newds.shape[0] != len(totalsY):
# this path is from custom apply_reduce
emptyarr = empty((newds.shape[0],), dtype=totalsY.dtype)
emptyarr[0:len(totalsY)] = totalsY
emptyarr[-1] = totalOfTotals
newds[name]=emptyarr
else:
# add the Total column to the dataset
newds[name]=totalsY
# add to the right summary
newds.summary_set_names([name])
# tell display that this dataset has a footer
# have to skip over the colkeys
keycount=len(cat_rows.gb_keychain.gbkeys)
# totalsX runs in the horizontal direction on the bottom
# for each column name in the dictionary, give a value
footerdict= dict(zip( [*newds][keycount:], totalsX))
# lower right corner sometimes passed alone
if totalOfTotals is not None:
footerdict[name]=totalOfTotals
newds.footer_set_values( name, footerdict)
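# Shape sketch (inferred from the code above): totalsY needs one entry per row
# of newds (one per row key, plus the filter row when shown) and becomes the
# right-hand summary column; totalsX has one entry per data column (everything
# after the group-by key columns) and is attached as the footer row; and
# totalOfTotals fills the lower-right corner where the two meet.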
#---------------------------------------------------------------
@classmethod
def _calc_badslots(cls, cat, badslots, filter, wantfancy):
"""
internal routine
will combine (row or col filter) badslots with common filter
if there are no badslots, the common filter is returned
otherwise a new filter is returned
the filter is negative (badslots locations are false)
if wantfancy is true, returns fancy index to cols or rows
otherwise full boolean mask combined with existing filter (if exists)
"""
if badslots is None:
if wantfancy:
return None
# user did not pass in any, stick with current filter
return filter
badslots = np.atleast_1d(badslots)
dtypenum = badslots.dtype.num
if wantfancy:
# find out which columns are to be filtered out
# are cols an integer or a string?
if dtypenum > 10:
_, newfilter = cat.grouping.ismember(badslots, reverse=True)
else:
# assume user passed in row or col numbers that are bad
# such as badrows=[3,4]
newfilter = badslots
return newfilter
# are they passing in a boolean filter?
if dtypenum ==0:
# convert bool mask to row numbers and use that mask
badslots = bool_to_fancy(badslots)
if dtypenum <=10:
# assumes there is no Cat of integers... otherwise ambiguous
# add 1 because of base_index
# should we check showfilter?
badslots = badslots + 1
if len(badslots) ==1:
newfilter = cat._fa != badslots[0]
else:
newfilter, _ = ismember(cat._fa, badslots)
# inplace logical not (this is a negative filter)
np.logical_not(newfilter, out=newfilter)
else:
# create filter
newfilter = cat.isin(badslots)
# inplace logical not (this is a negative filter)
np.logical_not(newfilter, out=newfilter)
if filter is not None:
# combine both filters using inplace and of filter
np.logical_and(newfilter, filter, out= newfilter)
#print('newfilter', len(newfilter), newfilter.sum(), newfilter)
# return a new filter
return newfilter
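# Usage sketch (illustrative, names hypothetical): for a Categorical of ticker
# symbols, badslots=['AAPL', 'GOOG'] with wantfancy=False returns a boolean
# mask that is False wherever the category is AAPL or GOOG (and also False
# wherever the incoming filter was already False); with wantfancy=True it
# instead returns a fancy index of the bad bins so the caller can drop whole
# rows or columns.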
#---------------------------------------------------------------
@classmethod
def _calc_multipass(cls, cat_cols, cat_rows, newds, origarr, funcNum, func, imatrix,
name=None, showfilter=False, filter=None, badrows=None, badcols=None, badcalc=True, **kwargs):
"""
For functions that require multiple passes to get the proper result,
such as mean or median.
If the grid is 7 x 11: there will be 77 + 11 + 7 + 1 => 96 passes
Other Parameters
----------------
func: user function to call to calculate the result
name: optional column name (otherwise function name used)
badrows: optional list of bad row keys, will be combined with filter
badcols: optional list of bad col keys, will be combined with filter
badrows/cols is just the keys that are bad (not a boolean filter)
for example badrows=['AAPL','GOOG']
Need new algo to take:
bad bins + ikey + existing boolean filter ==> create a new boolean filter
walk ikey, see if bin is bad in lookup table, if so set filter to False
else copy from existing filter value
"""
if name is None:
name = str.capitalize(func.__name__)
# get a negative boolean filter
newfilterX = cls._calc_badslots(cat_cols, badcols, filter, False)
newfilterY = cls._calc_badslots(cat_rows, badrows, filter, False)
newfilter = None
# first check for any row and col filters
if badrows is not None or badcols is not None:
# the common filter is already merged into the row or col filter
if badrows is not None and badcols is not None:
# both col and row filter are in use so combine the filters
newfilter = newfilterX & newfilterY
else:
if badrows is not None:
newfilter = newfilterY
else:
newfilter = newfilterX
else:
newfilter = filter
# if there is no filter, the value will be None
if Accum2.DebugMode:
print("filterrows", newfilterY)
print("filtercols", newfilterX)
print("filter ", newfilter)
# set to False so that totalsX has invalid where the badcols are
# set to False so that totalsY has invalid where the badrows are
#badcalc =True
if badcalc:
# pass in original filter
totalsX = cls._accum1_pass(cat_cols, origarr, funcNum, showfilter=showfilter, filter=newfilterY, **kwargs)
totalsY = cls._accum1_pass(cat_rows, origarr, funcNum, showfilter=showfilter, filter=newfilterX, **kwargs)
else:
| |
#!/usr/bin/env python
"""
Calculate several measures of segregation in a given data set.
The assumption is that the dataset will be a dictionary-like object
that we can index for one of several parameters to get the required
data.
"""
import sys
import operator
from nces_parser import NCESParser
# ==============================================================================
# Constants
# ==============================================================================
# ==============================================================================
# Utility Functions
# ==============================================================================
# ==============================================================================
class SegCalc(object):
"""
A segregation calculating object.
"""
def __init__(self, school_list, index_dict, only_hs=False, only_el=False, grade=False):
"""
Set a dataset iterator object that we can step through
and a dictionary of indexes for extracting the information
from the data objects returned by the dataset iterator
"""
self.debug = 0
self.schools = school_list
self.only_high_school = only_hs
self.only_elementary = only_el
self.grade = grade
self.minority_idx = index_dict['MINORITY'] # Minority Group Student Count
self.majority_idx = index_dict['MAJORITY'] # Majority Group Student Count
self.total_idx = index_dict['TOTAL'] # Total Student Count
self.cat_idx = index_dict['CATEGORY'] # Index to Categorize along (state, district, etc)
# Search for Some optional arguments
try:
self.sec_minority_idx = index_dict['SEC_MINORITY'] # Minority Group Student Count
except KeyError:
self.sec_minority_idx = None
# Skip items that don't match item[idx] == val
try:
self.match = True
self.match_idx = index_dict['MATCH_IDX']
self.match_val = index_dict['MATCH_VAL']
except KeyError:
self.match = False
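# Example index_dict (illustrative only; the actual field names depend on the
# NCES file in use, and LEAID/FIPST here are hypothetical keys):
#   idx = {'MINORITY': 'BLACK', 'MAJORITY': 'WHITE', 'TOTAL': 'MEMBER',
#          'CATEGORY': 'LEAID',                      # group results by district
#          'SEC_MINORITY': 'HISP',                   # optional second minority group
#          'MATCH_IDX': 'FIPST', 'MATCH_VAL': '06'}  # optional row filter
#   calc = SegCalc(school_list, idx)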
# ======================================
# Basic Accessors Functions
# ======================================
def get_minority(self, school):
"""
Return the minority student count for a given school
Handle a secondary minority group, if requested
"""
try:
count = int(school[self.minority_idx])
if self.sec_minority_idx:
count += int(school[self.sec_minority_idx])
except KeyError:
# raise Exception("Problem School:",school.__repr__())
return 0
return count
# ======================================
def get_majority(self, school):
"""
Return the majority student count for a given school
"""
# Free Lunch Majority is the non-Free Lunch people
if self.minority_idx == 'FRELCH':
return self.get_members(school) - self.get_minority(school)
else:
try:
count = int(school[self.majority_idx])
except KeyError:
raise Exception("Problem School:",school.__repr__())
return count
# ======================================
def get_members(self, school):
"""
Return the total student count for a given school
"""
try:
count = int(school[self.total_idx])
except KeyError:
raise Exception("Problem School:",school.__repr__())
return count
# ======================================
@property
def filtered_schools(self):
"""
Filter the schools per the requested matching
school index and value. Cache the results for
later use
"""
try:
return self._filtered_schools
except AttributeError:
if (
self.match == False and
self.only_high_school == False and
self.only_elementary == False and
self.grade == False
):
self._filtered_schools = self.schools
else:
self._filtered_schools = []
for school in self.schools:
append_data = False
if self.match:
# Try to pull out the filter data entry
# it may not be present in which case: No Match
try:
data_match_val = school[self.match_idx]
except KeyError:
continue
if self.match_val.isdigit():
match_int_val = int(self.match_val)
if (
data_match_val == self.match_val or
data_match_val == match_int_val
):
append_data = True
if self.only_high_school and self.is_high_school(school):
append_data = True
if self.only_elementary and self.is_elementary(school):
append_data = True
if self.grade and self.has_grade(school, self.grade):
append_data = True
if append_data:
self._filtered_schools.append(school)
print "Schools Found: %d" % (len(self._filtered_schools))
return self._filtered_schools
# ======================================
def get_idxed_val(self, idx_x, idx_y):
"""
Get a dictionary mapping one index to another
"""
Mapping = {}
for school in self.filtered_schools:
try:
x = school[idx_x]
y = school[idx_y]
except KeyError:
raise Exception("Problem School:",school.__repr__())
Mapping[x] = y
return Mapping
# ======================================
def get_grade(self, school, high=True):
"""
Get the high or low grade
"""
if high:
grade_idx = 'GSHI'
else:
grade_idx = 'GSLO'
try:
grade = int(school[grade_idx])
except KeyError:
raise Exception("Problem School:",school.__repr__())
except ValueError:
if (
school[grade_idx] == 'PK' or
school[grade_idx] == 'KG'
):
grade = 1
elif (
school[grade_idx] == 'UG' or
school[grade_idx] == 'N' or
school[grade_idx][0] == '.'
):
grade = 0
else:
raise Exception("Unknown Grade: %s" % (school[grade_idx]))
return grade
# ======================================
def is_elementary(self, school):
"""
Is this school an elementary school?
"""
high_grade = self.get_grade(school, high=True)
if high_grade <= 6 and high_grade > 0:
return True
else:
return False
# ======================================
def is_k8(self, school):
"""
Is this a K-8 school?
"""
high_grade = self.get_grade(school, high=True)
low_grade = self.get_grade(school, high=False)
if high_grade >= 7 and high_grade < 10 and low_grade < 3:
return True
else:
return False
# ======================================
def is_middle(self, school):
"""
Is this a middle school?
"""
high_grade = self.get_grade(school, high=True)
low_grade = self.get_grade(school, high=False)
if high_grade <= 9 and low_grade >= 3:
return True
else:
return False
# ======================================
def is_mh(self, school):
"""
Is this a combined middle/high school?
"""
high_grade = self.get_grade(school, high=True)
low_grade = self.get_grade(school, high=False)
if high_grade >= 10 and low_grade < 9 and low_grade >= 5:
return True
else:
return False
# ======================================
def is_high_school(self, school):
"""
Is this a high school?
"""
low_grade = self.get_grade(school, high=False)
if low_grade >= 8:
return True
else:
return False
# ======================================
def is_k12(self, school):
"""
Is this a K-12 school?
"""
high_grade = self.get_grade(school, high=True)
low_grade = self.get_grade(school, high=False)
if high_grade >= 11 and low_grade <= 3:
return True
else:
return False
# ======================================
def has_grade(self, school, grade):
"""
Does this school teach this grade?
"""
high_grade = self.get_grade(school, high=True)
low_grade = self.get_grade(school, high=False)
if grade <= high_grade and grade >= low_grade:
return True
else:
return False
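# Worked example (hedged): a school record with GSLO='KG' and GSHI='06' gives
# low_grade=1 (PK/KG map to 1 in get_grade) and high_grade=6, so
# is_elementary() is True, has_grade(school, 3) is True, and is_high_school()
# is False because its low grade is below 8.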
# ======================================
# Calculation Methods
# ======================================
# ======================================
def calc_sum(self, x_dict, y_dict):
"""
Given two dictionaries of grouped data entries, calculate
a new dictionary that is the per-key sum of the two.
"""
sum_dict = {}
for key in x_dict.keys():
try:
sum_dict[key] = x_dict[key] + y_dict[key]
except KeyError:
raise Exception("Input Dicts didn't have the same keys")
# Missing Key in Y_dict, just use X_dict value only
# sum_dict[key] = x_dict[key]
return sum_dict
# ======================================
def calc_prop(self, num_dict, den_dict):
"""
Given two dictionaries that are grouped data entries, calculate
a new dictionary that is the grouped proportion.
"""
prop_dict = {}
for key in num_dict.keys():
try:
prop_dict[key] = float(num_dict[key]) / float(den_dict[key])
except ZeroDivisionError:
prop_dict[key] = 0.0
except KeyError:
prop_dict[key] = 0.0
# raise Exception("Numerator and Denominator Dicts didn't have the same keys")
return prop_dict
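# Example (illustrative values): with num_dict={'d1': 30, 'd2': 0} and
# den_dict={'d1': 120}, calc_prop returns {'d1': 0.25, 'd2': 0.0}; the missing
# 'd2' denominator hits the KeyError branch and is reported as 0.0 instead of
# raising.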
# ======================================
def calc_totals(self, idx=None):
"""
Sum the requested student count (total, minority, majority, or a raw field) per category, skipping negative (missing) values.
"""
Total = {}
for school in self.filtered_schools:
if idx == 'MINORITY':
ti = self.get_minority(school)
elif idx == 'MAJORITY':
ti = self.get_majority(school)
elif not idx: # Default to Totals Student Count
ti = self.get_members(school)
else:
ti = school[idx]
# Make sure the datastructure exists
# Negative numbers mean missing data.
if ti >= 0:
try:
Total[school[self.cat_idx]] += ti
except KeyError:
Total[school[self.cat_idx]] = ti
return Total
# ======================================
def calc_dependant_totals(self, sum_idx, dep_idx, sec_dep_idx=None):
"""
Sum school[sum_idx] per category, but only for schools where the dependent field (or the secondary dependent field) is flagged ('1', 1, or 'Y').
"""
Total = {}
for school in self.filtered_schools:
try:
test = Total[school[self.cat_idx]]
except KeyError:
Total[school[self.cat_idx]] = 0
try:
dependant_field = school[dep_idx]
except KeyError:
dependant_field = 0
try:
sec_dependant_field = school[sec_dep_idx]
except KeyError:
sec_dependant_field = 0
if (dependant_field == '1' or
dependant_field == 1 or
dependant_field == 'Y' or
sec_dependant_field == '1' or
sec_dependant_field == 1 or
sec_dependant_field == 'Y'):
ti = school[sum_idx]
# Make sure the datastructure exists
# Negative numbers mean missing data.
if ti >= 0:
Total[school[self.cat_idx]] += ti
return Total
# ======================================
def calc_proportion(self, idx='MINORITY'):
"""
Calculate, per category, the proportion of the requested group out of the total student count.
"""
Proportion = self.calc_totals(idx)
Total = self.calc_totals()
# Convert the counts to a proportion
for cat_idx in Proportion.keys():
try:
Proportion[cat_idx] = float(Proportion[cat_idx]) / Total[cat_idx]
except ZeroDivisionError:
Proportion[cat_idx] = 0.0
except KeyError:
Proportion[cat_idx] = 0.0
return Proportion
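# In other words (sketch): calc_proportion('MINORITY') divides the per-category
# minority counts from calc_totals('MINORITY') by the per-category member
# counts from calc_totals(), falling back to 0.0 for empty or missing
# categories.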
# ======================================
def calc_percentages(self):
"""
Calculate, per category, the percentage of each ethnicity out of total membership.
"""
Percentages = {}
for school in self.filtered_schools:
try:
perc = dict(
WHITE=school['WHITE'],
BLACK=school['BLACK'],
HISP=school['HISP'],
ASIAN=school['ASIAN'],
AM=school['AM'],
MEMBER=school['MEMBER']
)
except KeyError:
raise Exception("Problem School:",school.__repr__())
# Make sure the datastructure exists
try:
test = Percentages[school[self.cat_idx]]
except KeyError:
Percentages[school[self.cat_idx]] = dict(WHITE=0, BLACK=0, HISP=0, ASIAN=0, AM=0, MEMBER=0)
# Negative numbers mean missing data.
for ethn in perc.keys():
if perc[ethn] >= 0:
Percentages[school[self.cat_idx]][ethn] += perc[ethn]
for cat_idx in Percentages.keys():
try:
ti = Percentages[cat_idx]['MEMBER']
Percentages[cat_idx]['WHITE'] = float(Percentages[cat_idx]['WHITE'])/ti
Percentages[cat_idx]['BLACK'] = float(Percentages[cat_idx]['BLACK'])/ti
Percentages[cat_idx]['HISP'] = float(Percentages[cat_idx]['HISP'])/ti
Percentages[cat_idx]['ASIAN'] = float(Percentages[cat_idx]['ASIAN'])/ti
Percentages[cat_idx]['AM'] = float(Percentages[cat_idx]['AM'])/ti
except ZeroDivisionError:
Percentages[cat_idx]['WHITE'] = 0.0
Percentages[cat_idx]['BLACK'] = 0.0
Percentages[cat_idx]['HISP'] = 0.0
Percentages[cat_idx]['ASIAN'] = 0.0
Percentages[cat_idx]['AM'] = 0.0
# import | |
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
...                    'B': [1, 2, 3],
...                    'C': [4, 6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 | |
sec_channel_offset=567 ht vht' ]:
if "FAIL" not in dev[0].request("TDLS_CHAN_SWITCH " + args):
raise Exception("Unexpected success on invalid TDLS_CHAN_SWITCH: " + args)
def test_wpas_ctrl_addr(dev):
"""wpa_supplicant ctrl_iface invalid address"""
if "FAIL" not in dev[0].request("TDLS_SETUP "):
raise Exception("Unexpected success on invalid TDLS_SETUP")
if "FAIL" not in dev[0].request("TDLS_TEARDOWN "):
raise Exception("Unexpected success on invalid TDLS_TEARDOWN")
if "FAIL" not in dev[0].request("FT_DS "):
raise Exception("Unexpected success on invalid FT_DS")
if "FAIL" not in dev[0].request("WPS_PBC 00:11:22:33:44"):
raise Exception("Unexpected success on invalid WPS_PBC")
if "FAIL" not in dev[0].request("WPS_PIN 00:11:22:33:44"):
raise Exception("Unexpected success on invalid WPS_PIN")
if "FAIL" not in dev[0].request("WPS_NFC 00:11:22:33:44"):
raise Exception("Unexpected success on invalid WPS_NFC")
if "FAIL" not in dev[0].request("WPS_REG 00:11:22:33:44 12345670"):
raise Exception("Unexpected success on invalid WPS_REG")
if "FAIL" not in dev[0].request("IBSS_RSN 00:11:22:33:44"):
raise Exception("Unexpected success on invalid IBSS_RSN")
if "FAIL" not in dev[0].request("BLACKLIST 00:11:22:33:44"):
raise Exception("Unexpected success on invalid BLACKLIST")
def test_wpas_ctrl_wps_errors(dev):
"""wpa_supplicant ctrl_iface WPS error cases"""
if "FAIL" not in dev[0].request("WPS_REG 00:11:22:33:44:55"):
raise Exception("Unexpected success on invalid WPS_REG")
if "FAIL" not in dev[0].request("WPS_REG 00:11:22:33:44:55 12345670 2233"):
raise Exception("Unexpected success on invalid WPS_REG")
if "FAIL" not in dev[0].request("WPS_REG 00:11:22:33:44:55 12345670 2233 OPEN"):
raise Exception("Unexpected success on invalid WPS_REG")
if "FAIL" not in dev[0].request("WPS_REG 00:11:22:33:44:55 12345670 2233 OPEN NONE"):
raise Exception("Unexpected success on invalid WPS_REG")
if "FAIL" not in dev[0].request("WPS_AP_PIN random"):
raise Exception("Unexpected success on WPS_AP_PIN in non-AP mode")
if "FAIL" not in dev[0].request("WPS_ER_PIN any"):
raise Exception("Unexpected success on invalid WPS_ER_PIN")
if "FAIL" not in dev[0].request("WPS_ER_LEARN 00:11:22:33:44:55"):
raise Exception("Unexpected success on invalid WPS_ER_LEARN")
if "FAIL" not in dev[0].request("WPS_ER_SET_CONFIG 00:11:22:33:44:55"):
raise Exception("Unexpected success on invalid WPS_ER_SET_CONFIG")
if "FAIL" not in dev[0].request("WPS_ER_CONFIG 00:11:22:33:44:55"):
raise Exception("Unexpected success on invalid WPS_ER_CONFIG")
if "FAIL" not in dev[0].request("WPS_ER_CONFIG 00:11:22:33:44:55 12345670"):
raise Exception("Unexpected success on invalid WPS_ER_CONFIG")
if "FAIL" not in dev[0].request("WPS_ER_CONFIG 00:11:22:33:44:55 12345670 2233"):
raise Exception("Unexpected success on invalid WPS_ER_CONFIG")
if "FAIL" not in dev[0].request("WPS_ER_CONFIG 00:11:22:33:44:55 12345670 2233 OPEN"):
raise Exception("Unexpected success on invalid WPS_ER_CONFIG")
if "FAIL" not in dev[0].request("WPS_ER_CONFIG 00:11:22:33:44:55 12345670 2233 OPEN NONE"):
raise Exception("Unexpected success on invalid WPS_ER_CONFIG")
if "FAIL" not in dev[0].request("WPS_ER_NFC_CONFIG_TOKEN WPS"):
raise Exception("Unexpected success on invalid WPS_ER_NFC_CONFIG_TOKEN")
if "FAIL" not in dev[0].request("WPS_ER_NFC_CONFIG_TOKEN FOO 00:11:22:33:44:55"):
raise Exception("Unexpected success on invalid WPS_ER_NFC_CONFIG_TOKEN")
if "FAIL" not in dev[0].request("WPS_ER_NFC_CONFIG_TOKEN NDEF 00:11:22:33:44:55"):
raise Exception("Unexpected success on invalid WPS_ER_NFC_CONFIG_TOKEN")
if "FAIL" not in dev[0].request("WPS_NFC_CONFIG_TOKEN FOO"):
raise Exception("Unexpected success on invalid WPS_NFC_CONFIG_TOKEN")
if "FAIL" not in dev[0].request("WPS_NFC_CONFIG_TOKEN WPS FOO"):
raise Exception("Unexpected success on invalid WPS_NFC_CONFIG_TOKEN")
if "FAIL" not in dev[0].request("WPS_NFC_TOKEN FOO"):
raise Exception("Unexpected success on invalid WPS_NFC_TOKEN")
def test_wpas_ctrl_config_parser(dev):
"""wpa_supplicant ctrl_iface SET config parser"""
if "FAIL" not in dev[0].request("SET pbc_in_m1 qwerty"):
raise Exception("Non-number accepted as integer")
if "FAIL" not in dev[0].request("SET eapol_version 0"):
raise Exception("Out-of-range value accepted")
if "FAIL" not in dev[0].request("SET eapol_version 10"):
raise Exception("Out-of-range value accepted")
if "FAIL" not in dev[0].request("SET serial_number 0123456789abcdef0123456789abcdef0"):
raise Exception("Too long string accepted")
def test_wpas_ctrl_mib(dev):
"""wpa_supplicant ctrl_iface MIB"""
mib = dev[0].get_mib()
if "dot11RSNAOptionImplemented" not in mib:
raise Exception("Missing MIB entry")
if mib["dot11RSNAOptionImplemented"] != "TRUE":
raise Exception("Unexpected dot11RSNAOptionImplemented value")
def test_wpas_ctrl_set_wps_params(dev):
"""wpa_supplicant ctrl_iface SET config_methods"""
ts = [ "config_methods label virtual_display virtual_push_button keypad",
"device_type 1-0050F204-1",
"os_version 01020300",
"uuid 12345678-9abc-def0-1234-56789abcdef0" ]
for t in ts:
if "OK" not in dev[2].request("SET " + t):
raise Exception("SET failed for: " + t)
def test_wpas_ctrl_level(dev):
"""wpa_supplicant ctrl_iface LEVEL"""
try:
if "FAIL" not in dev[2].request("LEVEL 3"):
raise Exception("Unexpected LEVEL success")
if "OK" not in dev[2].mon.request("LEVEL 2"):
raise Exception("Unexpected LEVEL failure")
dev[2].request("SCAN freq=2412")
ev = dev[2].wait_event(["State:"], timeout=5)
if ev is None:
raise Exception("No debug message received")
dev[2].wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=5)
finally:
dev[2].mon.request("LEVEL 3")
def test_wpas_ctrl_bssid_filter(dev, apdev):
"""wpa_supplicant bssid_filter"""
try:
if "OK" not in dev[2].request("SET bssid_filter " + apdev[0]['bssid']):
raise Exception("Failed to set bssid_filter")
params = { "ssid": "test" }
hostapd.add_ap(apdev[0]['ifname'], params)
hostapd.add_ap(apdev[1]['ifname'], params)
dev[2].scan_for_bss(apdev[0]['bssid'], freq="2412")
dev[2].scan(freq="2412")
bss = dev[2].get_bss(apdev[0]['bssid'])
if bss is None or len(bss) == 0:
raise Exception("Missing BSS data")
bss = dev[2].get_bss(apdev[1]['bssid'])
if bss and len(bss) != 0:
raise Exception("Unexpected BSS data")
dev[2].request("SET bssid_filter ")
dev[2].scan(freq="2412")
bss = dev[2].get_bss(apdev[0]['bssid'])
if bss is None or len(bss) == 0:
raise Exception("Missing BSS data")
bss = dev[2].get_bss(apdev[1]['bssid'])
if bss is None or len(bss) == 0:
raise Exception("Missing BSS data(2)")
res = dev[2].request("SCAN_RESULTS").splitlines()
if "test" not in res[1] or "test" not in res[2]:
raise Exception("SSID missing from SCAN_RESULTS")
if apdev[0]['bssid'] not in res[1] and apdev[1]['bssid'] not in res[1]:
raise Exception("BSS1 missing from SCAN_RESULTS")
if apdev[0]['bssid'] not in res[2] and apdev[1]['bssid'] not in res[2]:
raise Exception("BSS1 missing from SCAN_RESULTS")
if "FAIL" not in dev[2].request("SET bssid_filter 00:11:22:33:44:55 00:11:22:33:44"):
raise Exception("Unexpected success for invalid SET bssid_filter")
finally:
dev[2].request("SET bssid_filter ")
def test_wpas_ctrl_disallow_aps(dev, apdev):
"""wpa_supplicant ctrl_iface disallow_aps"""
params = { "ssid": "test" }
hostapd.add_ap(apdev[0]['ifname'], params)
if "FAIL" not in dev[0].request("SET disallow_aps bssid "):
raise Exception("Unexpected success on invalid disallow_aps")
if "FAIL" not in dev[0].request("SET disallow_aps bssid 00:11:22:33:44"):
raise Exception("Unexpected success on invalid disallow_aps")
if "FAIL" not in dev[0].request("SET disallow_aps ssid 0"):
raise Exception("Unexpected success on invalid disallow_aps")
if "FAIL" not in dev[0].request("SET disallow_aps ssid 4q"):
raise Exception("Unexpected success on invalid disallow_aps")
if "FAIL" not in dev[0].request("SET disallow_aps bssid 00:11:22:33:44:55 ssid 112233 ssid 123"):
raise Exception("Unexpected success on invalid disallow_aps")
if "FAIL" not in dev[0].request("SET disallow_aps ssid 000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f00"):
raise Exception("Unexpected success on invalid disallow_aps")
if "FAIL" not in dev[0].request("SET disallow_aps foo 112233445566"):
raise Exception("Unexpected success on invalid disallow_aps")
dev[0].connect("test", key_mgmt="NONE", scan_freq="2412")
hostapd.add_ap(apdev[1]['ifname'], params)
dev[0].scan_for_bss(apdev[1]['bssid'], freq="2412")
dev[0].dump_monitor()
if "OK" not in dev[0].request("SET disallow_aps bssid 00:11:22:33:44:55 bssid 00:22:33:44:55:66"):
raise Exception("Failed to set disallow_aps")
if "OK" not in dev[0].request("SET disallow_aps bssid " + apdev[0]['bssid']):
raise Exception("Failed to set disallow_aps")
ev = dev[0].wait_connected(timeout=30, error="Reassociation timed out")
if apdev[1]['bssid'] not in ev:
raise Exception("Unexpected BSSID")
dev[0].dump_monitor()
if "OK" not in dev[0].request("SET disallow_aps ssid " + "test".encode("hex")):
raise Exception("Failed to set disallow_aps")
dev[0].wait_disconnected(timeout=5, error="Disconnection not seen")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected reassociation")
dev[0].request("DISCONNECT")
dev[0].p2p_start_go(freq=2412)
if "OK" not in dev[0].request("SET disallow_aps "):
raise Exception("Failed to set disallow_aps")
def test_wpas_ctrl_blob(dev):
"""wpa_supplicant ctrl_iface SET blob"""
if "FAIL" not in dev[0].request("SET blob foo"):
raise Exception("Unexpected SET success")
if "FAIL" not in dev[0].request("SET blob foo 0"):
raise Exception("Unexpected SET success")
if "FAIL" not in dev[0].request("SET blob foo 0q"):
raise Exception("Unexpected SET success")
if "OK" not in dev[0].request("SET blob foo 00"):
raise Exception("Unexpected SET failure")
if "OK" not in dev[0].request("SET blob foo 0011"):
raise Exception("Unexpected SET failure")
def test_wpas_ctrl_set_uapsd(dev):
"""wpa_supplicant ctrl_iface SET uapsd"""
if "FAIL" not in dev[0].request("SET uapsd foo"):
raise Exception("Unexpected SET success")
if "FAIL" not in dev[0].request("SET uapsd 0,0,0"):
raise Exception("Unexpected SET success")
if "FAIL" not in dev[0].request("SET uapsd 0,0"):
raise Exception("Unexpected SET success")
if "FAIL" not in dev[0].request("SET uapsd 0"):
raise Exception("Unexpected SET success")
if "OK" not in dev[0].request("SET uapsd 1,1,1,1;1"):
raise Exception("Unexpected SET failure")
if "OK" not in dev[0].request("SET uapsd 0,0,0,0;0"):
raise Exception("Unexpected SET failure")
if "OK" not in dev[0].request("SET uapsd disable"):
raise Exception("Unexpected SET failure")
def test_wpas_ctrl_set(dev):
"""wpa_supplicant ctrl_iface SET"""
vals = [ "foo",
"ampdu 0",
"radio_disable 0",
"ps 10",
"ps 1",
"dot11RSNAConfigPMKLifetime 0",
"dot11RSNAConfigPMKReauthThreshold 101",
"dot11RSNAConfigSATimeout 0",
"wps_version_number -1",
"wps_version_number 256" ]
for val in vals:
if "FAIL" not in dev[0].request("SET " + val):
raise Exception("Unexpected SET success for " + val)
vals = [ "EAPOL::heldPeriod 60",
"EAPOL::authPeriod 30",
"EAPOL::startPeriod 30",
"EAPOL::maxStart 3",
"dot11RSNAConfigSATimeout 60",
"ps -1",
"ps 0",
"no_keep_alive 0",
"tdls_disabled 1",
"tdls_disabled 0" ]
for val in vals:
if "OK" not in dev[0].request("SET " + val):
raise Exception("Unexpected SET failure for " + val)
def test_wpas_ctrl_get_capability(dev):
"""wpa_supplicant ctrl_iface GET_CAPABILITY"""
if "FAIL" not in dev[0].request("GET_CAPABILITY 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"):
raise Exception("Unexpected success on invalid GET_CAPABILITY")
if "FAIL" not in dev[0].request("GET_CAPABILITY eap foo"):
raise Exception("Unexpected success on invalid GET_CAPABILITY")
if "AP" not in dev[0].request("GET_CAPABILITY modes strict"):
raise Exception("Unexpected GET_CAPABILITY response")
res = dev[0].get_capability("eap")
if "TTLS" not in res:
raise Exception("Unexpected GET_CAPABILITY eap response: " + str(res))
res = dev[0].get_capability("pairwise")
if "CCMP" not in res:
raise Exception("Unexpected GET_CAPABILITY pairwise response: " + str(res))
res = dev[0].get_capability("group")
if "CCMP" not in res:
raise Exception("Unexpected GET_CAPABILITY group response: " + str(res))
res = dev[0].get_capability("key_mgmt")
if "WPA-PSK" not in res or "WPA-EAP" not in res:
raise Exception("Unexpected GET_CAPABILITY key_mgmt response: " + str(res))
res = dev[0].get_capability("proto")
if "WPA" not in | |
"""
Character object
--
Author : Drlarck
Last update : 1/11/20 by DrLarck
"""
import asyncio
# util
from utility.graphic.embed import CustomEmbed
from utility.graphic.icon import GameIcon
from utility.graphic.color import GameColor
from utility.entity.ability import Ability
class Character:
def __init__(self, client):
# Public
self.client = client
self.name = ""
self.id = 0
self.unique_id = ""
self.level = 1
self.npc = False # Tells if it's a non playable character
self.posture = 0
self.image = CharacterImage()
self.type = CharacterType()
self.rarity = CharacterRarity()
self.health = CharacterHealth()
self.ki = CharacterKi()
self.damage = CharacterDamage()
self.critical = CharacterCritical()
self.armor = CharacterDefense()
self.spirit = CharacterDefense()
# Items
self.training_item = CharacterTrainingItem(self)
# Abilities
self.ability = []
# Private
self.__embed = CustomEmbed()
# Public method
async def generate(self, name="", char_id=0, level=1,
card="", thumbnail="",
type_value=0, rarity_value=0, health=0,
ki=100, physical=0, ki_power=0,
crit_chance=0, crit_bonus=0, armor_fixed=0,
armor_floating=0, spirit_fixed=0, spirit_floating=0,
ability=[]):
"""
Generate a character instance.
:param name: (`str`)
:param char_id: (`int`)
:param level: (`int`)
:param card: (`url`)
:param thumbnail: (`url`)
:param type_value: (`int`)
:param rarity_value: (`int`)
:param health: (`int`)
:param ki: (`int`)
:param physical: (`int`)
:param ki_power: (`int`)
:param crit_chance: (`int`)
:param crit_bonus: (`int`)
:param armor_fixed: (`int`)
:param armor_floating: (`int`)
:param spirit_fixed: (`int`)
:param spirit_floating: (`int`)
:param ability: (`list`)
--
:return: `Character`
"""
# New character instance
new_char = Character(self.client)
# Init all the attributes
new_char.name = name
new_char.id = char_id
new_char.level = level
# Set bonus per lvl
level_bonus = pow(1.02, new_char.level-1)  # +2 % compounding stat bonus per level
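# Example of the compounding above (arithmetic sketch): a level 11 character
# gets pow(1.02, 10) ~= 1.219, i.e. roughly +22 % on health, physical/ki
# damage and fixed armor/spirit compared to level 1.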
new_char.image.card = card
new_char.image.thumbnail = thumbnail
new_char.type.value = type_value
new_char.rarity.value = rarity_value
new_char.health.maximum = int(health * level_bonus)
new_char.ki.maximum = ki
new_char.damage.physical = int(physical * level_bonus)
new_char.damage.ki = int(ki_power * level_bonus)
new_char.critical.chance = crit_chance
new_char.critical.bonus = crit_bonus
new_char.armor.fixed = int(armor_fixed * level_bonus)
new_char.armor.floating = armor_floating
new_char.spirit.fixed = int(spirit_fixed * level_bonus)
new_char.spirit.floating = spirit_floating
# Get the character's abilities
ability_ref = Ability(self.client)
for ability_id in ability:
await asyncio.sleep(0)
# If the ability id is not an actual ability
if not isinstance(ability_id, Ability):
# Get the id as int
ability_id = int(ability_id)
# Get the ability instance
ability = await ability_ref.get_ability_data(ability_id)
# If the ability has been found, add it to the character
if ability is not None:
new_char.ability.append(ability)
# If the char has no abilities, add passed abilities as parameter
if len(new_char.ability) == 0:
new_char.ability = ability
# Get the icons
new_char.rarity.icon = await GameIcon().get_rarity_icon(new_char.rarity.value)
new_char.type.icon = await GameIcon().get_type_icon(new_char.type.value)
# Return the character
return new_char
async def get_display_card(self, client):
"""
Generate a display card of this character
:param client: (`discord.ext.commands.Bot`)
--
:return: `discord.Embed`
"""
# Init
color = await GameColor().get_rarity_color(self.rarity.value)
embed = await self.__embed.setup(client, color=color)
# Info
info = f"""
__Name__ : **{self.name}**{self.type.icon}
__Reference__ : `#{self.id}`
__Rarity__ : {self.rarity.icon}
"""
embed.add_field(name="Info :", value=info, inline=False)
embed.set_image(url=self.image.card)
return embed
async def get_combat_card(self, client, team_index):
"""
Return the combat format display card
:param client: (`discord.ext.commands.Bot`)
:param team_index: (`int`)
--
:return: `Embed`
"""
# Init
color = GameColor()
if team_index == 0:
color = color.player_a
else:
color = color.player_b
# Thumbnail
# If the thumbnail is not defined, use the card image
if self.image.thumbnail == "":
thumb = self.image.card
# Use the defined thumbnail image
else:
thumb = self.image.thumbnail
embed = await self.__embed.setup(client, color=color, thumbnail_url=thumb)
# Setting up the character display
display_info = f"""
__Name__ : {self.image.icon}**{self.name}**{self.type.icon}
__Level__ : {self.level:,}
__Health__ : **{self.health.current:,}**/{self.health.maximum:,} :hearts:
__Ki__ : **{self.ki.current}**/{self.ki.maximum} :fire:
"""
# Damage
phy_min = await self.damage.get_physical_min()
ki_min = await self.damage.get_ki_min()
display_damage = f"""
__Physical__ : **{phy_min:,}** - **{self.damage.physical:,}** :punch:
__Ki power__ : **{ki_min:,}** - **{self.damage.ki:,}** ☄️
"""
# Defense
display_defense = f"""
__Armor__ : **{self.armor.fixed:,}** | **{self.armor.floating:,} %** :shield:
__Spirit__ : **{self.spirit.fixed:,}** | **{self.spirit.floating:,} %** 🏵️
"""
# Fields
embed.add_field(name=f"**{self.name}** info",
value=display_info,
inline=False)
embed.add_field(name="Damage",
value=display_damage,
inline=False)
embed.add_field(name="Defense",
value=display_defense,
inline=False)
return embed
async def init(self):
"""
Init the character for combat purpose.
--
:return: `None`
"""
# Init health
await self.health.init()
# Init abilities
for ability in self.ability:
await asyncio.sleep(0)
await ability.init(self)
return
async def is_playable(self):
"""
Tells if the character is playable or not
--
:return: `bool`
"""
# Init
playable = True
# If the character is stunned
if self.posture == 3:
playable = False
# If the character is dead
elif self.health.current <= 0:
playable = False
# If the character has a normal posture
else:
playable = True
return playable
class CharacterImage:
def __init__(self):
# Public
self.card = ""
self.thumbnail = ""
self.icon = ""
class CharacterType:
def __init__(self):
# Public
self.value = 0
self.icon = ""
class CharacterRarity:
def __init__(self):
# Public
self.value = 0
self.icon = ""
class CharacterHealth:
def __init__(self):
# Public
self.maximum = 0
self.current = 0
# Public method
async def init(self):
"""
Init the current health
--
:return: `None`
"""
self.current = self.maximum
return
async def limit(self):
"""
Keep the current health from going below 0 or above the maximum health
--
:return: `None`
"""
if self.current < 0:
self.current = 0
if self.current > self.maximum:
self.current = self.maximum
return
class CharacterKi:
def __init__(self):
# Public
self.maximum = 0
self.current = 0
# Public method
async def limit(self):
"""
Keep the current ki from going below 0 or above the maximum
--
:return: `None`
"""
if self.current < 0:
self.current = 0
if self.current > self.maximum:
self.current = self.maximum
return
class CharacterDamage:
def __init__(self):
# Public
self.physical = 0
self.ki = 0
# Private
# This represents the difference in % between the max value and the min value
# For example, if the range is set to 10 and the max value is 100
# The min value would be 90 and max 100
self.__physical_range = 10
self.__ki_range = 10
# Public method
async def get_physical_min(self):
"""
Return the minimal value of the physical damage range
--
:return: `int`
"""
minimal = self.physical * (1 - (self.__physical_range / 100))
return int(minimal)
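# Worked example (hedged): with physical=100 and the default 10 % range,
# get_physical_min() returns int(100 * (1 - 10/100)) = 90, so physical attacks
# roll between 90 and 100; get_ki_min() below applies the same formula to ki.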
async def get_ki_min(self):
"""
Return the minimal value of the ki damage range
--
:return: `int`
"""
minimal = self.ki * (1 - (self.__ki_range / 100))
return int(minimal)
class CharacterCritical:
def __init__(self):
# Public
self.chance = 0
self.bonus = 0
class CharacterDefense:
def __init__(self):
# Public
self.fixed = 0
self.floating = 0
class CharacterTrainingItem:
def __init__(self, character):
"""
:param character: (`Character`)
"""
# Public
self.character = character
self.equipped = []
# Private
self.__database = self.character.client.database
# Private
async def __get_equipped(self):
"""
Get the equipped training items
--
:return: `None`
"""
# Get the equipped items' unique id
unique_items = await self.__database.fetch_value("""
SELECT training_item
FROM character_unique
WHERE character_unique_id = $1;
""", [self.character.unique_id])
# Get the list of items
unique_items = unique_items.split()
# Set the equipped list
self.equipped = unique_items
return
# Public
async def apply_effect(self):
"""
Apply the equipped items effects on the character
--
:return: `None`
"""
# Apply the effect of each equipped item
for item in self.equipped:
await asyncio.sleep(0)
await item.apply_effect(self)
return
class CharacterGetter:
# Private
__cache = []
__cache_ok = False # Indicates if the cache has already been filled
# Public
async def get_cache_size(self):
"""Return the cache size
--
@return int"""
return len(self.__cache)
async def set_cache(self, client):
"""
Set the character cache
:param client: (`discord.ext.commands.Bot`)
--
:return: `None`
"""
if self.__cache_ok is False:
data = await client.database.fetch_row("""
SELECT *
FROM character_reference
ORDER BY reference;
""")
if len(data) > 0:
# Storing each character in the cache as Character objects
for character in data:
await asyncio.sleep(0)
# Get the set of character's abilities
ability_set = character[15]
ability_set = ability_set.split()
character = await Character(client).generate(
char_id=character[0], name=character[1], type_value=character[2],
rarity_value=character[3], card=character[4], thumbnail=character[4],
health=character[5], ki=character[6], physical=character[7],
ki_power=character[8], armor_fixed=character[9], armor_floating=character[10],
spirit_fixed=character[11], spirit_floating=character[12],
ability=ability_set
)
self.__cache.append(character)
# Cache has been filled
self.__cache_ok = True
print("Character Cache : DONE")
else: # The cache has already been filled
print("Character Cache : The cache has already been filled.")
return
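# Usage sketch (illustrative; assumes a bot client exposing a `database`
# attribute, and the returned character is whatever reference 1 maps to):
#   getter = CharacterGetter()
#   await getter.set_cache(client)                       # fill the cache once
#   char = await getter.get_reference_character(1, client, level=10)
# Later calls reuse the class-level cache instead of hitting the database again.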
async def get_reference_character(self, reference, client, level=1):
"""
Get a base character
:param reference: (`int`)
:param client: (`discord.ext.commands.Bot`)
:param level: (`int`)
--
:return: `Character` or `None`
"""
# Get the character from the cache
if reference > 0 and reference - 1 < len(self.__cache):
char = self.__cache[reference - 1]
Return the TFIDF matrix, a sparse matrix of shape (docs size, voc size)
"""
return self.tfidfMatrix
def getTFIDFVectors(self, ngrams=1):
"""
Return docs with TFIDF values instead of tokens
"""
if ngrams != 1:
raise Exception("ngrams > 1 not yet implemented")
if self.tfidfVectors is None:
tfidfScores = []
pbar = ProgressBar(len(self.docs), verbose=self.verbose and (len(self.docs) > 1000), logger=self.logger, message="Building TFIDF tokens")
for docId in range(len(self.docs)):
# https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen
feature_index = self.tfidfMatrix[docId,:].nonzero()[1]
currentScores = np.array([self.tfidfMatrix[docId, x] for x in feature_index])
aaa = dict()
for i in range(len(feature_index)):
aaa[feature_index[i]] = currentScores[i]
tokensTFIDF = []
for word in self.docs[docId]:
if self.doLower:
word = word.lower()
tokensTFIDF.append(aaa[self.vocIndexes[word]])
tfidfScores.append(tokensTFIDF)
pbar.tic()
self.tfidfVectors = tfidfScores
return self.tfidfVectors
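# Sketch of the expected output (assuming a small two-document corpus already fitted):
# getTFIDFVectors() returns one list per document, aligned with its tokens, e.g.
#
#     [[0.12, 0.40, 0.12], [0.33, 0.07]]
#
# where each value is the TFIDF score of the token at the same position in the doc.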
def getVoc(self):
"""
Return the list of ngrams
"""
if self.voc is None:
self.voc = [None] * len(self.vocIndexes)
for word, index in self.vocIndexes.items():
self.voc[index] = word
return self.voc
def getVocIndexes(self):
"""
Return a mapping voc -> index
"""
return self.vocIndexes
def getTFIDFMap(self):
"""
Return a list docId -> (dict of ngram -> tfidf value)
"""
if self.tfidfMap is None:
self.tfidfMap = []
for i in range(self.tfidfMatrix.shape[0]):
self.tfidfMap.append(dict())
cx = scipy.sparse.coo_matrix(self.tfidfMatrix)
pbar = ProgressBar(self.tfidfMatrix.shape[0], logger=self.logger, verbose=self.verbose, message="Collecting TFIDF values")
alreadySeenDocs = set()
for docId, vocId, tfidfValue in zip(cx.row, cx.col, cx.data):
ngram = self.voc[vocId]
ngrams = ngram.count(" ") + 1
self.tfidfMap[docId][ngram] = tfidfValue
if docId not in alreadySeenDocs:
pbar.tic()
alreadySeenDocs.add(docId)
return self.tfidfMap
def getTFIDFValue(self, docId, ngram):
"""
Return the TFIDF value of a ngram in a specific doc
"""
valuesDict = self.getTFIDFMap()[docId]
if ngram not in valuesDict:
logError('"' + ngram + '"' + " not in doc " + str(docId), self)
return 0.0
else:
return valuesDict[ngram]
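# Minimal usage sketch (`vectorizer` is a hypothetical instance; the doc id and ngram
# are illustrative):
#
#     value = vectorizer.getTFIDFValue(0, "machine learning")
#     # returns 0.0 (and logs an error) if the bigram does not occur in doc 0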
def getTops(self):
"""
This method takes tfidfMatrix, a sparse matrix (from `generateTFIDF`),
and voc, a list of ngrams corresponding to tfidfMatrix columns.
It returns the top ngrams (according to their tfidf values) for each doc, looking like:
[
{
1: [ sunye, bosu, ..., jan., ryan ],
2: [ sarah bean, master jay, ..., and former, added . ],
<ngrams>: [ <word>, <word>, ..., <word>, <word> ]
},
<doc>,
...,
{
1: [ hu, candid, ..., of, is ],
2: [ private talks, with hu, ..., to a, in a ],
3: [ worshipped at a, with some olympic, ..., , he said, as well as ]
}
]
"""
if self.tops is None:
self.getVoc()
self.tops = []
for i in range(self.tfidfMatrix.shape[0]):
grams = {1: [], 2: [], 3: []}
self.tops.append(grams)
cx = scipy.sparse.coo_matrix(self.tfidfMatrix)
pbar = ProgressBar(self.tfidfMatrix.shape[0], logger=self.logger, verbose=self.verbose, message="Collecting TFIDF values")
alreadySeenDocs = set()
for docId, vocId, tfidfValue in zip(cx.row, cx.col, cx.data):
ngram = self.voc[vocId]
ngrams = ngram.count(" ") + 1
self.tops[docId][ngrams].append((ngram, tfidfValue))
if docId not in alreadySeenDocs:
pbar.tic()
alreadySeenDocs.add(docId)
for i in pb(list(range(len(self.tops))), logger=self.logger, verbose=self.verbose, message="Sorting ngrams by TFIDF values"):
for u in self.tops[i].keys():
self.tops[i][u] = [e[0] for e in sorted(self.tops[i][u], key=lambda x: x[1], reverse=True)]
return self.tops
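# Usage sketch (hypothetical instance): the three highest-TFIDF bigrams of doc 0
# can be read directly from the returned structure:
#
#     top_bigrams = vectorizer.getTops()[0][2][:3]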
def getMaxTFIDFsPerSentence(self):
"""
To use this function, you must give a corpus of docs composed of sentences at the init step.
This function returns a structure looking like:
[
<doc 0>,
{
<ngrams>: [<max tfidf value of sentence 0>, <max tfidf value of sentence 1>, <...>],
2: [0.2, 0.1],
<...>,
},
<...>,
]
"""
assert self.sentencesCorpus
if self.maxTFIDFs is None:
self.getTFIDFMap()
self.maxTFIDFs = []
maxNgrams = self.ngramRange[1]
docId = 0
for doc in pb(self.docsSentences, logger=self.logger, verbose=self.verbose, message="Collecting max TFIDF value per sentence"):
perNgrams = dict()
for ngrams in range(1, maxNgrams + 1):
(sentenceIndexes, flattenedSentences) = flattenedIndexes(doc, doLower=self.doLower, ngrams=ngrams, returnFlattenedDoc=True)
allMax = [-1] * len(doc)
for i in range(len(flattenedSentences)):
sentenceHit = sentenceIndexes[i]
ngram = flattenedSentences[i]
for hit in sentenceHit:
value = self.getTFIDFValue(docId, ngram)
if value > allMax[hit]:
allMax[hit] = value
perNgrams[ngrams] = allMax
self.maxTFIDFs.append(perNgrams)
docId += 1
return self.maxTFIDFs
def getCumhistoIntervals(self):
if self.cumhistoIntervals is None:
self.getCumhisto()
return self.cumhistoIntervals
def getCumhisto(self):
"""
This method returns the cumulative histogram of tfidf values.
Example of structure:
{
<ngrams>: [<count of sentences whose max TFIDF is higher than the corresponding value in self.cumhistoIntervals>, <...>]
'2': [ 39600, 39600, 35000, ..., 84, 2, 2, 0, 0, 0, 0, 0, 0, 0 ],
}
"""
if self.cumhisto is None:
tt = TicToc(logger=self.logger, verbose=self.verbose)
tt.tic()
maxTFIDFs = self.getMaxTFIDFsPerSentence()
maxNgrams = len(maxTFIDFs[0])
intervalsSize = self.cumhistoIntervalsSize
# We calculate intervals:
minis, maxis = dict(), dict()
for ngrams in range(1, maxNgrams + 1):
if ngrams not in minis:
minis[ngrams] = None
if ngrams not in maxis:
maxis[ngrams] = None
for doc in maxTFIDFs:
currentMin = min(doc[ngrams])
if minis[ngrams] is None or currentMin < minis[ngrams]:
minis[ngrams] = currentMin
currentMax = max(doc[ngrams])
if maxis[ngrams] is None or currentMax > maxis[ngrams]:
maxis[ngrams] = currentMax
tt.tic("We got min and max TFIDF values")
intervals = dict()
for ngrams in range(1, maxNgrams + 1):
mini = minis[ngrams]
maxi = maxis[ngrams]
epsilon = 0.01 * (maxi - mini)
mini = mini - epsilon
maxi = maxi + epsilon
jump = (maxi - mini) / intervalsSize
intervals[ngrams] = list(np.arange(mini, maxi, jump))
# We make cumulative histograms:
cumhisto = dict()
for ngrams in range(1, maxNgrams + 1):
currentIntervals = intervals[ngrams]
if ngrams not in cumhisto:
cumhisto[ngrams] = [0] * len(currentIntervals)
for currentMaxTFIDFs in maxTFIDFs:
currentMaxTFIDFs = currentMaxTFIDFs[ngrams]
for value in currentMaxTFIDFs:
for i in range(len(currentIntervals)):
if value > currentIntervals[i]:
cumhisto[ngrams][i] += 1
tt.tic("We calculated the cumulative histogram of tfidf values")
self.cumhisto = cumhisto
self.cumhistoIntervals = intervals
return self.cumhisto
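# Reading the cumulative histogram (toy numbers, for illustration only):
# if cumhisto[1] == [100, 80, 30, 5] and cumhistoIntervals[1] == [0.0, 0.1, 0.2, 0.3],
# then 30 sentences have a max unigram TFIDF above 0.2. Picking the first interval whose
# count drops below deletionRatio * cumhisto[1][0] yields the TFIDF threshold used below.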
def getBlackNgrams(self, deletionRatio, *args, **kwargs):
"""
Return a black list of ngrams for each document
The black list is calculated according to a target deletion ratio over all sentences in the corpus.
Each ngram in the black list is an indicator used when choosing whether or not to delete a sentence in the corpus.
The structure looks like:
[
<list of ngrams for doc 1>,
[<ngram 1>, <ngram 2>, ...],
...
]
"""
maxTFIDFs = self.getMaxTFIDFsPerSentence()
cumhisto = self.getCumhisto()
tfidfThresholds = getOptimalTFIDFThresholds\
(
maxTFIDFs, cumhisto, deletionRatio, self.getCumhistoIntervals(),
*args, logger=self.logger, verbose=self.verbose, **kwargs
)
blackNgrams = []
maxNgrams = len(maxTFIDFs[0])
for docId in pb(list(range(len(maxTFIDFs))),
logger=self.logger, verbose=self.verbose,
message="Collecting ngrams TFIDF black list for threshold " + str(tfidfThresholds)):
blackNgrams.append(set())
voc = self.getTFIDFMap()[docId]
for ngram in voc:
ngrams = ngram.count(" ") + 1
threshold = tfidfThresholds[ngrams]
currentTFIDF = self.getTFIDFValue(docId, ngram)
if currentTFIDF >= threshold:
blackNgrams[docId].add(ngram)
return blackNgrams
def removeSentences(self, deletionRatio, *args, **kwargs):
assert self.sentencesCorpus
maxTFIDFs = self.getMaxTFIDFsPerSentence()
cumhisto = self.getCumhisto()
intervals = self.getCumhistoIntervals()
tfidfThresholds = getOptimalTFIDFThresholds(maxTFIDFs, cumhisto, deletionRatio, intervals,
*args, logger=self.logger, verbose=self.verbose, **kwargs)
newDocs = []
maxNgrams = len(maxTFIDFs[0])
for docId in range(len(maxTFIDFs)):
newSentences = []
for sentenceId in range(len(maxTFIDFs[docId][list(maxTFIDFs[docId].keys())[0]])):
foundHigher = False
for ngrams in range(1, maxNgrams + 1):
if maxTFIDFs[docId][ngrams][sentenceId] > tfidfThresholds[ngrams]:
foundHigher = True
break
if not foundHigher:
newSentences.append(self.docsSentences[docId][sentenceId])
newDocs.append(newSentences)
return newDocs
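# Usage sketch (hypothetical instance): drop roughly 20% of sentences while keeping the
# corpus structure (one list of sentences per document):
#
#     filteredDocs = vectorizer.removeSentences(0.2)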
def estimateDeletion(maxTFIDFs, cumhisto, deletionRatio, intervals, logger=None, verbose=True):
"""
This function calculates the effective (final) deletion ratio, which will be higher than the per-ngram target in case we handle multiple ngrams...
"""
tfidfThresholds = dict()
maxNgrams = len(maxTFIDFs[0])
for ngrams in range(1, maxNgrams + 1):
countThreshold = deletionRatio * cumhisto[ngrams][0]
for i in range(len(cumhisto[ngrams])):
if cumhisto[ngrams][i] < countThreshold:
break
tfidfThresholds[ngrams] = intervals[ngrams][i]
sentencesToRemove = []
for docId in range(len(maxTFIDFs)):
currentSentencesToRemove = set()
currentMaxTFIDFs = maxTFIDFs[docId]
for ngrams in range(1, maxNgrams + 1):
for sentenceId in range(len(currentMaxTFIDFs[ngrams])):
if currentMaxTFIDFs[ngrams][sentenceId] >= tfidfThresholds[ngrams]:
currentSentencesToRemove.add(sentenceId)
sentencesToRemove.append(currentSentencesToRemove)
deletedCount = 0
totalCount = 0
for docId in range(len(maxTFIDFs)):
currentSentencesToRemove = sentencesToRemove[docId]
newDoc = []
for sentenceId in range(len(maxTFIDFs[docId][list(maxTFIDFs[docId].keys())[0]])):
if sentenceId in currentSentencesToRemove:
deletedCount += 1
totalCount += 1
# log("We delete " + str(int(deletedCount / totalCount * 100)) + "% of sentences", logger=logger, verbose=verbose)
# print(tfidfThresholds)
return deletedCount / totalCount
def estimateOptimalDeletionRatio(maxTFIDFs, cumhisto, targetDeletionRatio, intervals, *args,
minimumDichotomicMove=0.000001,
logger=None, verbose=True, **kwargs):
deletionRatio = targetDeletionRatio
move = targetDeletionRatio / 2
while move > minimumDichotomicMove:
computedDeletionRatio = estimateDeletion(maxTFIDFs, cumhisto, deletionRatio, intervals,
*args, logger=logger, verbose=verbose, **kwargs)
if computedDeletionRatio < targetDeletionRatio:
deletionRatio = deletionRatio + move
else:
deletionRatio = deletionRatio - move
move = move / 2
return deletionRatio
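# Illustrative sketch of the dichotomic (bisection) search above, on a toy monotone
# deletion function (not part of the original pipeline; names are hypothetical):
#
#     def toyDeletion(ratio):
#         return min(1.0, ratio * 1.5)   # always deletes 1.5x more than requested
#
#     target, ratio, move = 0.30, 0.30, 0.15
#     while move > 1e-6:
#         ratio = ratio + move if toyDeletion(ratio) < target else ratio - move
#         move /= 2
#     # ratio converges to ~0.20, so toyDeletion(ratio) lands on the 0.30 target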
def getOptimalTFIDFThresholds(maxTFIDFs, cumhisto, targetDeletionRatio, intervals,
*args, logger=None, verbose=True, **kwargs):
optimalDeletionRatio = estimateOptimalDeletionRatio(maxTFIDFs, cumhisto, targetDeletionRatio, intervals, *args, logger=logger, verbose=verbose, **kwargs)
tfidfThresholds = dict()
maxNgrams = len(maxTFIDFs[0])
for ngrams in range(1, maxNgrams + 1):
countThreshold = optimalDeletionRatio * cumhisto[ngrams][0]
for i | |
attribute_names = self.library.execute_javascript(
"(element) => element.getAttributeNames()", selector
)
expected = list(assertion_expected)
return list_verify_assertion(
attribute_names, assertion_operator, expected, "Attribute names", message
)
@keyword(tags=("Getter", "Assertion", "PageContent"))
@with_assertion_polling
def get_classes(
self,
selector: str,
assertion_operator: Optional[AssertionOperator] = None,
*assertion_expected,
message: Optional[str] = None,
) -> Any:
"""Returns all classes of an element as a list.
``selector`` Selector from which the info is to be retrieved.
See the `Finding elements` section for details about the selectors.
``assertion_operator`` See `Assertions` for further details. Defaults to None.
``expected_value`` Expected value for the state
``message`` overrides the default error message for assertion.
Optionally asserts that the value matches the specified assertion. See
`Assertions` for further details for the assertion arguments. By default assertion
is not done.
Available assertions:
- ``==`` and ``!=`` can work with multiple values
- ``contains`` / ``*=`` only accepts one single expected value
Other operators are not allowed.
Example:
| `Get Classes` id=draggable == react-draggable box # Element contains exactly this class name.
| `Get Classes` id=draggable validate "react-draggable-dragged" not in value # Element does not contain react-draggable-dragged class.
"""
class_dict = self.get_property(selector, "classList")
expected = list(assertion_expected)
return list_verify_assertion(
list(class_dict.values()),
assertion_operator,
expected,
f"Classes of {selector}",
message,
)
@keyword(tags=("Getter", "Assertion", "PageContent"))
@with_assertion_polling
def get_select_options(
self,
selector: str,
assertion_operator: Optional[AssertionOperator] = None,
assertion_expected: Any = None,
message: Optional[str] = None,
) -> Any:
"""Returns attributes of options of a ``select`` element as a list of dictionaries.
Returned dictionaries have the following keys and their values
"index", "value", "label" and "selected".
``selector`` Selector from which the info is to be retrieved.
See the `Finding elements` section for details about the selectors.
``assertion_operator`` See `Assertions` for further details. Defaults to None.
``expected_value`` Expected value for the state
``message`` overrides the default error message for assertion.
Optionally asserts that these match the specified assertion. See
`Assertions` for further details for the assertion arguments. By default assertion
is not done.
Example:
| `Get Select Options` //select[2] validate [v["label"] for v in value] == ["Email", "Mobile"]
| `Get Select Options` select#names validate any(v["label"] == "Mikko" for v in value)
"""
with self.playwright.grpc_channel() as stub:
response = stub.GetSelectContent(
Request().ElementSelector(selector=selector)
)
logger.info(response)
result = [
{
"index": index,
"value": sel.value,
"label": sel.label,
"selected": bool(sel.selected),
}
for index, sel in enumerate(response.entry)
]
return verify_assertion(
result,
assertion_operator,
assertion_expected,
"Select Options:",
message,
)
@keyword(tags=("Getter", "Assertion", "PageContent"))
@with_assertion_polling
def get_selected_options(
self,
selector: str,
option_attribute: SelectAttribute = SelectAttribute.label,
assertion_operator: Optional[AssertionOperator] = None,
*assertion_expected,
) -> Any:
"""Returns the specified attribute of selected options of the ``select`` element.
``selector`` Selector from which the info is to be retrieved.
See the `Finding elements` section for details about the selectors.
``option_attribute`` Which attribute shall be returned/verified.
Defaults to label.
``assertion_operator`` See `Assertions` for further details. Defaults to None.
``expected_value`` Expected value for the state
Optionally asserts that these match the specified assertion. See
`Assertions` for further details for the assertion arguments. By default assertion
is not done.
- ``==`` and ``!=`` can work with multiple values
- ``contains`` / ``*=`` only accepts one single expected value
Other operators are not allowed.
Example:
| `Select Options By` label //select[2] Email Mobile
| ${selected_list} `Get Selected Options` //select[2] # getter
| `Get Selected Options` //select[2] label `==` Mobile Mail #assertion content
| `Select Options By` label select#names 2 4
| `Get Selected Options` select#names index `==` 2 4 #assertion index
| `Get Selected Options` select#names label *= Mikko #assertion contain
| `Get Selected Options` select#names label validate len(value) == 3 #assertion length
"""
with self.playwright.grpc_channel() as stub:
response = stub.GetSelectContent(
Request().ElementSelector(selector=selector)
)
logger.info(response)
expected = list(assertion_expected)
selected: Union[List[int], List[str]]
if option_attribute is SelectAttribute.value:
selected = [sel.value for sel in response.entry if sel.selected]
elif option_attribute is SelectAttribute.label:
selected = [sel.label for sel in response.entry if sel.selected]
elif option_attribute is SelectAttribute.index:
selected = [
index for index, sel in enumerate(response.entry) if sel.selected
]
expected = [int(exp) for exp in expected]
return list_verify_assertion(
selected,
assertion_operator,
expected,
"Selected Options:",
)
@keyword(tags=("Getter", "Assertion", "PageContent"))
@with_assertion_polling
def get_checkbox_state(
self,
selector: str,
assertion_operator: Optional[AssertionOperator] = None,
expected_state: Union[bool, str] = "Unchecked",
message: Optional[str] = None,
) -> bool:
"""Returns the state of the checkbox found by ``selector``.
``selector`` Selector which shall be examined.
See the `Finding elements` section for details about the selectors.
``assertion_operator`` See `Assertions` for further details. Defaults to None.
``expected_value`` Expected value for the state
``message`` overrides the default error message for assertion.
Optionally asserts that the state matches the specified assertion. See
`Assertions` for further details for the assertion arguments. By default assertion
is not done.
- ``==`` and ``!=`` and equivalent are allowed on boolean values
- other operators are not accepted.
``expected_state`` boolean value of expected state.
Strings are interpreted as booleans.
All strings are ``${True}`` except for the
following: ``FALSE, NO, OFF, 0, UNCHECKED, NONE, ${EMPTY}``.
(case-insensitive). Defaults to unchecked
- ``checked`` => ``True``
- ``unchecked`` => ``False``
``message`` overrides the default error message.
Example:
| `Get Checkbox State` [name=can_send_email] == checked
"""
with self.playwright.grpc_channel() as stub:
response = stub.GetBoolProperty(
Request().ElementProperty(selector=selector, property="checked")
)
logger.info(response.log)
value: bool = response.body
logger.info(f"Checkbox is {'checked' if value else 'unchecked'}")
return bool_verify_assertion(
value,
assertion_operator,
expected_state,
f"Checkbox {selector} is",
message,
)
@keyword(tags=("Getter", "Assertion", "PageContent"))
@with_assertion_polling
def get_element_count(
self,
selector: str,
assertion_operator: Optional[AssertionOperator] = None,
expected_value: Union[int, str] = 0,
message: Optional[str] = None,
) -> Any:
"""Returns the count of elements found with ``selector``.
``selector`` Selector which shall be counted.
See the `Finding elements` section for details about the selectors.
``assertion_operator`` See `Assertions` for further details. Defaults to None.
``expected_value`` Expected value for the counting
``message`` overrides the default error message for assertion.
Optionally asserts that the state matches the specified assertion. See
`Assertions` for further details for the assertion arguments. By default assertion
is not done.
Example:
| `Get Element Count` label > 1
"""
with self.playwright.grpc_channel() as stub:
response = stub.GetElementCount(
Request().ElementSelector(selector=selector)
)
count = response.body
return float_str_verify_assertion(
int(count),
assertion_operator,
expected_value,
f"Element count for selector `{selector}` is",
message,
)
@keyword(tags=("Getter", "Assertion", "BrowserControl"))
@with_assertion_polling
def get_viewport_size(
self,
key: SizeFields = SizeFields.ALL,
assertion_operator: Optional[AssertionOperator] = None,
assertion_expected: Any = None,
message: Optional[str] = None,
) -> Any:
"""Returns the current viewport dimensions.
``key`` Optionally filters the returned values.
If keys is set to ``ALL`` (default) it will return the viewport size as dictionary,
otherwise it will just return the single value selected by the key.
Note: If a single value is retrieved, an assertion does *not* need a ``validate``
combined with a cast of ``value``.
``assertion_operator`` See `Assertions` for further details. Defaults to None.
``expected_value`` Expected value for the counting
``message`` overrides the default error message for assertion.
Optionally asserts that the state matches the specified assertion. See
`Assertions` for further details for the assertion arguments. By default assertion
is not done.
Example:
| `Get Viewport Size` ALL == {'width':1280, 'height':720}
| `Get Viewport Size` width >= 1200
"""
with self.playwright.grpc_channel() as stub:
response = stub.GetViewportSize(Request().Empty())
logger.info(response.log)
parsed = json.loads(response.json)
logger.debug(parsed)
if key == SizeFields.ALL:
return int_dict_verify_assertion(
parsed,
assertion_operator,
assertion_expected,
"Viewport size is",
message,
)
else:
logger.info(f"Value of '{key}'': {parsed[key.name]}")
return float_str_verify_assertion(
parsed[key.name],
assertion_operator,
assertion_expected,
f"{key} is",
message,
)
@keyword(tags=("Getter", "PageContent"))
def get_element(self, selector: str) -> str:
"""Returns a reference to a Playwright element handle.
The reference can be used in subsequent selectors.
``selector`` Selector from which the element shall be retrieved.
See the `Finding elements` section for details about the selectors.
Example:
| ${element} = `Get Element` \\#username_field
| ${option_value} = `Get Property` ${element} >> option value
"""
with self.playwright.grpc_channel() as stub:
response = stub.GetElement(Request().ElementSelector(selector=selector))
return response.body
@keyword(tags=("Getter", "PageContent"))
def get_elements(self, selector: str) -> List[str]:
"""Returns a reference to playwright element handle for all matched elements by ``selector``.
``selector`` Selector from which the elements shall be retrieved.
See the `Finding elements` section for details about the selectors.
Example:
| ${elements} = `Get Elements`
| ${elem} = Get From List ${elements} 0
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import unittest
import datetime
import ast
import sys
from pathlib import Path
from lingua_franca import get_default_lang, set_default_lang, \
load_language, unload_language
from lingua_franca.format import date_time_format
from lingua_franca.format import join_list
from lingua_franca.format import nice_date
from lingua_franca.format import nice_date_time
from lingua_franca.format import nice_duration
from lingua_franca.format import nice_number
from lingua_franca.format import nice_time
from lingua_franca.format import nice_year
from lingua_franca.format import pronounce_number
from lingua_franca.time import default_timezone
def setUpModule():
load_language("ru-ru")
set_default_lang("ru")
def tearDownModule():
unload_language("ru")
NUMBERS_FIXTURE_RU = {
1.435634: '1.436',
2: '2',
5.0: '5',
0.027: '0.027',
0.5: 'половина',
1.333: '1 и 1 треть',
2.666: '2 и 2 трети',
0.25: 'четверть',
1.25: '1 и 1 четверть',
0.75: '3 четверти',
1.75: '1 и 3 четверти',
3.4: '3 и 2 пятые',
16.8333: '16 и 5 шестых',
12.5714: '12 и 4 седьмые',
9.625: '9 и 5 восьмых',
6.777: '6 и 7 девятых',
3.1: '3 и 1 десятая',
2.272: '2 и 3 одиннадцатые',
5.583: '5 и 7 двенадцатых',
8.384: '8 и 5 тринадцатых',
0.071: '1 четырнадцатая',
6.466: '6 и 7 пятнадцатых',
8.312: '8 и 5 шестнадцатых',
2.176: '2 и 3 семнадцатые',
200.722: '200 и 13 восемнадцатых',
7.421: '7 и 8 девятнадцатых',
0.05: '1 двадцатая'
}
class TestNiceNumberFormat(unittest.TestCase):
def test_convert_float_to_nice_number(self):
for number, number_str in NUMBERS_FIXTURE_RU.items():
self.assertEqual(nice_number(number, speech=True), number_str,
'должен отформатировать {} как {}, а не {}'.format(
number, number_str, nice_number(number, speech=True)))
def test_specify_denominator(self):
self.assertEqual(nice_number(5.5, speech=True, denominators=[1, 2, 3]),
'5 с половиной',
'должен отформатировать 5.5 как 5 с половиной, а не {}'.format(
nice_number(5.5, speech=True, denominators=[1, 2, 3])))
self.assertEqual(nice_number(2.333, speech=True, denominators=[1, 2]),
'2.333',
'должен отформатировать 2.333 как 2.333, а не {}'.format(
nice_number(2.333, speech=True, denominators=[1, 2])))
def test_no_speech(self):
self.assertEqual(nice_number(6.777, speech=False),
'6 7/9',
'должен отформатировать 6.777 как 6 7/9, а не {}'.format(
nice_number(6.777, speech=False)))
self.assertEqual(nice_number(6.0, speech=False),
'6',
'должен отформатировать 6.0 как 6, а не {}'.format(
nice_number(6.0, speech=False)))
class TestPronounceNumber(unittest.TestCase):
def test_convert_int(self):
self.assertEqual(pronounce_number(0), "ноль")
self.assertEqual(pronounce_number(1), "один")
self.assertEqual(pronounce_number(10), "десять")
self.assertEqual(pronounce_number(15), "пятнадцать")
self.assertEqual(pronounce_number(20), "двадцать")
self.assertEqual(pronounce_number(27), "двадцать семь")
self.assertEqual(pronounce_number(30), "тридцать")
self.assertEqual(pronounce_number(33), "тридцать три")
def test_convert_negative_int(self):
self.assertEqual(pronounce_number(-1), "минус один")
self.assertEqual(pronounce_number(-10), "минус десять")
self.assertEqual(pronounce_number(-15), "минус пятнадцать")
self.assertEqual(pronounce_number(-20), "минус двадцать")
self.assertEqual(pronounce_number(-27), "минус двадцать семь")
self.assertEqual(pronounce_number(-30), "минус тридцать")
self.assertEqual(pronounce_number(-33), "минус тридцать три")
def test_convert_decimals(self):
self.assertEqual(pronounce_number(0.05), "ноль точка ноль пять")
self.assertEqual(pronounce_number(-0.05), "минус ноль точка ноль пять")
self.assertEqual(pronounce_number(1.234),
"один точка два три")
self.assertEqual(pronounce_number(21.234),
"двадцать один точка два три")
self.assertEqual(pronounce_number(21.234, places=1),
"двадцать один точка два")
self.assertEqual(pronounce_number(21.234, places=0),
"двадцать один")
self.assertEqual(pronounce_number(21.234, places=3),
"двадцать один точка два три четыре")
self.assertEqual(pronounce_number(21.234, places=4),
"двадцать один точка два три четыре")
self.assertEqual(pronounce_number(21.234, places=5),
"двадцать один точка два три четыре")
self.assertEqual(pronounce_number(-1.234),
"минус один точка два три")
self.assertEqual(pronounce_number(-21.234),
"минус двадцать один точка два три")
self.assertEqual(pronounce_number(-21.234, places=1),
"минус двадцать один точка два")
self.assertEqual(pronounce_number(-21.234, places=0),
"минус двадцать один")
self.assertEqual(pronounce_number(-21.234, places=3),
"минус двадцать один точка два три четыре")
self.assertEqual(pronounce_number(-21.234, places=4),
"минус двадцать один точка два три четыре")
self.assertEqual(pronounce_number(-21.234, places=5),
"минус двадцать один точка два три четыре")
def test_convert_stos(self):
self.assertEqual(pronounce_number(100), "сто")
self.assertEqual(pronounce_number(666), "шестьсот шестьдесят шесть")
self.assertEqual(pronounce_number(1456), "тысяча четыреста пятьдесят шесть")
self.assertEqual(pronounce_number(103254654), "сто три миллиона "
"двести пятьдесят "
"четыре тысячи "
"шестьсот "
"пятьдесят четыре")
self.assertEqual(pronounce_number(1512457), "миллион пятьсот"
" двенадцать тысяч "
"четыреста пятьдесят "
"семь")
self.assertEqual(pronounce_number(209996), "двести девять "
"тысяч девятьсот "
"девяносто шесть")
def test_convert_scientific_notation(self):
self.assertEqual(pronounce_number(0, scientific=True), "ноль")
self.assertEqual(pronounce_number(33, scientific=True),
"три точка три на десять в степени один")
self.assertEqual(pronounce_number(299792458, scientific=True),
"два точка девять девять на десять в степени восемь")
self.assertEqual(pronounce_number(299792458, places=6,
scientific=True),
"два точка девять девять семь девять два пять "
"на десять в степени восемь")
self.assertEqual(pronounce_number(1.672e-27, places=3,
scientific=True),
"один точка шесть семь два на десять в степени "
"минус двадцать семь")
def test_auto_scientific_notation(self):
self.assertEqual(
pronounce_number(1.1e-150), "один точка один на десять в степени "
"минус сто пятьдесят")
def test_large_numbers(self):
self.maxDiff = None
self.assertEqual(
pronounce_number(299792458, short_scale=True),
"двести девяносто девять миллионов семьсот "
"девяносто две тысячи четыреста пятьдесят восемь")
self.assertEqual(
pronounce_number(299792458, short_scale=False),
"двести девяносто девять миллионов семьсот "
"девяносто две тысячи четыреста пятьдесят восемь")
self.assertEqual(
pronounce_number(100034000000299792458, short_scale=True),
"сто квинтиллионов тридцать четыре квадриллиона "
"двести девяносто девять миллионов семьсот "
"девяносто две тысячи четыреста пятьдесят восемь")
self.assertEqual(
pronounce_number(100034000000299792458, short_scale=False),
"сто биллионов тридцать четыре тысячи миллиардов "
"двести девяносто девять миллионов семьсот "
"девяносто две тысячи четыреста пятьдесят восемь")
self.assertEqual(
pronounce_number(1e10, short_scale=True),
"десять миллиардов")
self.assertEqual(
pronounce_number(1e12, short_scale=True),
"триллион")
# TODO maybe beautify this
self.assertEqual(
pronounce_number(1000001, short_scale=True),
"миллион один")
self.assertEqual(pronounce_number(95505896639631893, short_scale=True),
"девяносто пять квадриллионов "
"пятьсот пять триллионов "
"восемьсот девяносто шесть миллиардов "
"шестьсот тридцать девять миллионов "
"шестьсот тридцать одна тысяча "
"восемьсот девяносто три")
self.assertEqual(pronounce_number(95505896639631893,
short_scale=False),
"девяносто пять тысяч пятьсот пять миллиардов "
"восемьсот девяносто шесть тысяч "
"шестьсот тридцать девять миллионов "
"шестьсот тридцать одна тысяча "
"восемьсот девяносто три")
self.assertEqual(pronounce_number(10e80, places=1),
"секснвигинтиллион")
# TODO floating point rounding issues might happen
self.assertEqual(pronounce_number(1.9874522571e80, places=9),
"сто девяносто восемь квинвигинтиллионов "
"семьсот сорок пять кватторвигинтиллионов "
"двести двадцать пять тревигинтиллионов "
"семьсот девять дуовигинтиллионов "
"девятьсот девяносто девять унвигинтиллионов "
"девятьсот восемьдесят девять вигинтиллионов "
"семьсот тридцать новемдециллионов "
"девятьсот девятнадцать октодециллионов "
"девятьсот девяносто девять септендециллионов "
"девятьсот пятьдесят пять сексдециллионов "
"четыреста девяносто восемь квиндециллионов "
"двести четырнадцать кваттордециллионов "
"восемьсот сорок пять тредециллионов "
"четыреста двадцать девять дуодециллионов "
"четыреста сорок четыре ундециллиона "
"триста тридцать шесть дециллионов "
"семьсот двадцать четыре нониллиона "
"пятьсот шестьдесят девять октиллионов "
"триста семьдесят пять септиллионов "
"двести тридцать девять секстиллионов "
"шестьсот семьдесят квинтиллионов "
"пятьсот семьдесят четыре квадриллиона "
"семьсот тридцать девять триллионов "
"семьсот сорок восемь миллиардов "
"четыреста семьдесят миллионов "
"девятьсот пятнадцать тысяч "
"семьдесят два")
# infinity
self.assertEqual(
pronounce_number(sys.float_info.max * 2), "бесконечность")
self.assertEqual(
pronounce_number(float("inf")),
"бесконечность")
self.assertEqual(
pronounce_number(float("-inf")),
"минус бесконечность")
def test_ordinals(self):
self.assertEqual(pronounce_number(1, ordinals=True), "первый")
self.assertEqual(pronounce_number(10, ordinals=True), "десятый")
self.assertEqual(pronounce_number(15, ordinals=True), "пятнадцатый")
self.assertEqual(pronounce_number(20, ordinals=True), "двадцатый")
self.assertEqual(pronounce_number(27, ordinals=True), "двадцать седьмой")
self.assertEqual(pronounce_number(30, ordinals=True), "тридцатый")
self.assertEqual(pronounce_number(33, ordinals=True), "тридцать третий")
self.assertEqual(pronounce_number(100, ordinals=True), "сотый")
self.assertEqual(pronounce_number(1000, ordinals=True), "тысячный")
self.assertEqual(pronounce_number(10000, ordinals=True),
"десятитысячный")
self.assertEqual(pronounce_number(18691, ordinals=True),
"восемнадцать тысяч шестьсот девяносто первый")
self.assertEqual(pronounce_number(1567, ordinals=True),
"тысяча пятьсот шестьдесят седьмой")
self.assertEqual(pronounce_number(1.672e-27, places=3,
scientific=True, ordinals=True),
"один точка шесть семь два на десять в минус "
"двадцать седьмой степени")
self.assertEqual(pronounce_number(1e6, ordinals=True),
"миллионный")
self.assertEqual(pronounce_number(2e6, ordinals=True),
"двухмиллионный")
self.assertEqual(pronounce_number(2e6, ordinals=True, short_scale=False),
"двухмиллионный")
self.assertEqual(pronounce_number(3e6, ordinals=True),
"трёхмиллионный")
self.assertEqual(pronounce_number(4e6, ordinals=True),
"четырёхмиллионный")
self.assertEqual(pronounce_number(18e6, ordinals=True),
"восемнадцатимиллионный")
self.assertEqual(pronounce_number(18e12, ordinals=True,
short_scale=False),
"восемнадцатибиллионный")
self.assertEqual(pronounce_number(18e12, ordinals=True),
"восемнадцатитриллионный")
self.assertEqual(pronounce_number(18e18, ordinals=True,
short_scale=False), "восемнадцатитриллионный")
class TestNiceDateFormat(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Read date_time_test.json files for test data
cls.test_config = {}
p = Path(date_time_format.config_path)
for sub_dir in [x for x in p.iterdir() if x.is_dir()]:
if (sub_dir / 'date_time_test.json').exists():
print("Loading test for " +
str(sub_dir / 'date_time_test.json'))
with (sub_dir / 'date_time_test.json').open() as f:
cls.test_config[sub_dir.parts[-1]] = json.loads(f.read())
def test_convert_times(self):
dt = datetime.datetime(2017, 1, 31,
13, 22, 3, tzinfo=default_timezone())
# Verify defaults haven't changed
self.assertEqual(nice_time(dt),
nice_time(dt, speech=True, use_24hour=True, use_ampm=False))
self.assertEqual(nice_time(dt, use_24hour=False),
"час двадцать два")
self.assertEqual(nice_time(dt, use_24hour=False, use_ampm=True),
"час двадцать два дня")
self.assertEqual(nice_time(dt, speech=False, use_24hour=False),
"1:22")
self.assertEqual(nice_time(dt, speech=False, use_24hour=False, use_ampm=True),
"1:22 дня")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True),
"13:22")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True,
use_ampm=True),
"13:22")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True),
"тринадцать двадцать два")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False),
"тринадцать двадцать два")
dt = datetime.datetime(2017, 1, 31,
13, 0, 3, tzinfo=default_timezone())
self.assertEqual(nice_time(dt, use_24hour=False),
"час")
self.assertEqual(nice_time(dt, use_24hour=False, use_ampm=True),
"час дня")
self.assertEqual(nice_time(dt, use_24hour=False, speech=False),
"1:00")
self.assertEqual(nice_time(dt, speech=False, use_24hour=False, use_ampm=True),
"1:00 дня")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True),
"13:00")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True,
use_ampm=True),
"13:00")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True),
"тринадцать ровно")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False),
"тринадцать ровно")
dt = datetime.datetime(2017, 1, 31,
13, 2, 3, tzinfo=default_timezone())
self.assertEqual(nice_time(dt, use_24hour=False),
"час ноль два")
self.assertEqual(nice_time(dt, use_24hour=False, use_ampm=True),
"час ноль два дня")
self.assertEqual(nice_time(dt, use_24hour=False, speech=False),
"1:02")
self.assertEqual(nice_time(dt, use_24hour=False, speech=False, use_ampm=True),
"1:02 дня")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True),
"13:02")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True,
use_ampm=True),
"13:02")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True),
"тринадцать ноль два")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False),
"тринадцать ноль два")
dt = datetime.datetime(2017, 1, 31,
0, 2, 3, tzinfo=default_timezone())
self.assertEqual(nice_time(dt, use_24hour=False),
"двенадцать ноль два")
self.assertEqual(nice_time(dt, use_24hour=False, use_ampm=True),
"двенадцать ноль два ночи")
self.assertEqual(nice_time(dt, speech=False, use_24hour=False),
"12:02")
self.assertEqual(nice_time(dt, speech=False, use_24hour=False, use_ampm=True),
"12:02 ночи")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True),
"00:02")
self.assertEqual(nice_time(dt, speech=False, use_24hour=True,
use_ampm=True),
"00:02")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=True),
"ноль ноль ноль два")
self.assertEqual(nice_time(dt, use_24hour=True, use_ampm=False),
"ноль ноль | |
# coding: utf-8
"""
finAPI RESTful Services
finAPI RESTful Services # noqa: E501
OpenAPI spec version: v1.42.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class LabelsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_label(self, body, **kwargs): # noqa: E501
"""Create a new label # noqa: E501
Create a new label for a specific user. Must pass the new label's name and the user's access_token.<br/><br/>Users can create labels to flag transactions (see method PATCH /transactions), with the goal of collecting and getting an overview of all transactions of a certain 'type'. In this sense, labels are similar to transaction categories. However, labels are supposed to depict more of an implicit meaning of a transaction. For instance, a user might want to assign a flag to a transaction that reminds him that he can offset it against tax. At the same time, the category of the transactions might be something like 'insurance', which is a more 'fact-based', or 'objective' way of typing the transaction. Despite this semantic difference between categories and labels, there is also the difference that a transaction can be assigned multiple labels at the same time (while in contrast it can have just a single category). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_label(body, async=True)
>>> result = thread.get()
:param async bool
:param LabelParams body: Label's name (required)
:return: Label
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_label_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_label_with_http_info(body, **kwargs) # noqa: E501
return data
def create_label_with_http_info(self, body, **kwargs): # noqa: E501
"""Create a new label # noqa: E501
Create a new label for a specific user. Must pass the new label's name and the user's access_token.<br/><br/>Users can create labels to flag transactions (see method PATCH /transactions), with the goal of collecting and getting an overview of all transactions of a certain 'type'. In this sense, labels are similar to transaction categories. However, labels are supposed to depict more of an implicit meaning of a transaction. For instance, a user might want to assign a flag to a transaction that reminds him that he can offset it against tax. At the same time, the category of the transactions might be something like 'insurance', which is a more 'fact-based', or 'objective' way of typing the transaction. Despite this semantic difference between categories and labels, there is also the difference that a transaction can be assigned multiple labels at the same time (while in contrast it can have just a single category). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_label_with_http_info(body, async=True)
>>> result = thread.get()
:param async bool
:param LabelParams body: Label's name (required)
:return: Label
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_label" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_label`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = ['finapi_auth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/labels', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Label', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
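# Illustrative client usage (hypothetical; assumes LabelParams accepts a `name` keyword
# and that authentication is already configured on the ApiClient). Note this codegen
# version passes an `async` keyword argument, so it targets Python < 3.7, where
# `async` is not yet a reserved word:
#
#     api = LabelsApi()
#     label = api.create_label(LabelParams(name="tax-deductible"))
#     thread = api.create_label(LabelParams(name="travel"), **{"async": True})
#     result = thread.get()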
def delete_all_labels(self, **kwargs): # noqa: E501
"""Delete all labels # noqa: E501
Delete all labels of the user that is authorized by the access_token. Must pass the user's access_token. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_all_labels(async=True)
>>> result = thread.get()
:param async bool
:return: IdentifierList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_all_labels_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_all_labels_with_http_info(**kwargs) # noqa: E501
return data
def delete_all_labels_with_http_info(self, **kwargs): # noqa: E501
"""Delete all labels # noqa: E501
Delete all labels of the user that is authorized by the access_token. Must pass the user's access_token. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_all_labels_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: IdentifierList
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_all_labels" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['finapi_auth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/labels', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IdentifierList', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_label(self, id, **kwargs): # noqa: E501
"""Delete a label # noqa: E501
Delete a single label of the user that is authorized by the access_token. Must pass the label's identifier and the user's access_token. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_label(id, async=True)
>>> result = thread.get()
:param async bool
:param int id: Identifier of the label to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_label_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_label_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_label_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete a label # noqa: E501
Delete a single label of the user that is authorized by the access_token. Must pass the label's identifier and the user's access_token. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_label_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param int id: Identifier of the label to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_label" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_label`") # noqa: E501
if 'id' in params and not re.search('[\\d]+', params['id']): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `delete_label`, must conform to the pattern `/[\\d]+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['finapi_auth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/labels/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
| |
#error_var = max_error(post_var - prior_var) # Max distance
### MC error
var_est = np.sum((bnn_grid_post - post_mean.reshape(1,-1))**2, 0) / (n_samp - 1) # unbiased estimate of variance
mc_error_mean = np.max(np.sqrt(var_est / n_samp)) # max over inputs
##
return error_mean, None, mc_error_mean
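# The MC error above is the standard error of the per-input posterior mean estimate,
# sqrt(Var_hat / n_samp), maximized over inputs. A quick sanity check (illustrative only):
#
#     samples = np.random.randn(1000, 25)            # n_samp x n_inputs
#     se = np.sqrt(samples.var(0, ddof=1) / 1000)    # ~0.03 per input
#     mc_error = se.max()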
def distance_to_prior_couple(bnn, x, n_samp=1000, seed=0):
#error = lambda z: np.sqrt(np.sum(z**2))
max_error = lambda z: np.max(np.abs(z))
torch.manual_seed(seed)
bnn_grid_prior = forward_no_bias_prior(bnn, x, n_samp=n_samp)
torch.manual_seed(seed)
bnn_grid_post = forward_no_bias(bnn, x, n_samp=n_samp)
prior_mean = np.mean(bnn_grid_prior, 0)
prior_var = np.var(bnn_grid_prior, 0)
post_mean = np.mean(bnn_grid_post, 0)
post_var = np.var(bnn_grid_post, 0)
error_mean = max_error(post_mean - prior_mean) # Max distance
error_var = max_error(post_var - prior_var) # Max distance
### MC error
var_est = np.sum((bnn_grid_post - post_mean.reshape(1,-1))**2, 0) / (n_samp - 1) # unbiased estimate of variance
mc_error_mean = np.max(np.sqrt(var_est / n_samp)) # max over inputs
##
return error_mean, error_var, mc_error_mean
def distance_to_prior_rmse(bnn, x, n_samp=1000):
#error = lambda z: np.sqrt(np.sum(z**2))
rmse = lambda z1, z2: np.sqrt(np.mean((z1-z2)**2))
bnn_grid_prior = forward_no_bias_prior(bnn, x, n_samp=n_samp)
bnn_grid_post = forward_no_bias(bnn, x, n_samp=n_samp)
prior_mean = 0
prior_var = np.var(bnn_grid_prior, 0)
post_mean = np.mean(bnn_grid_post, 0)
post_var = np.var(bnn_grid_post, 0)
error_mean = rmse(post_mean, prior_mean) # RMSE
error_var = rmse(post_var, prior_var) # RMSE
return error_mean, error_var
def figure1(args, fontsize_titles=8, fontsize_xlabels=6, fontsize_ylabels=8, fontsize_ticks=6, n_samp=1000, n_samp_show=3):
torch.random.manual_seed(0)
np.random.seed(0)
fig, ax = plt.subplots(2,2, sharex=True, sharey=True, figsize=(4,4), tight_layout=True)
DIR_SAVE = ['../../experiment_1/results/pytorch_1/0/', '../../experiment_1/results/pytorch_1/4/']
DIM_HIDDEN = [125, 2000]
DIR_OUT = '../results/figure1/'
### for testing
if args.test_mode:
DIR_SAVE = ['../../experiment_1/results/pytorch_1/0/', '../../experiment_1/results/pytorch_1/1/'] # test
DIM_HIDDEN = [125, 250] # test
###
ax[0,0].set_title('Prior', fontsize=fontsize_titles)
ax[0,1].set_title('Posterior', fontsize=fontsize_titles)
ax[0,0].set_ylabel('BNN', fontsize=fontsize_ylabels)
ax[1,0].set_ylabel('NNGP', fontsize=fontsize_ylabels)
ax[1,0].set_xlabel(r'$x$', fontsize=fontsize_xlabels)
ax[1,1].set_xlabel(r'$x$', fontsize=fontsize_xlabels)
## 0,0 ##
ax_ = ax[0,0]
bnn, data = load_pytorch(args, DIM_HIDDEN[0], DIR_SAVE[0])
data['x_grid'] = np.linspace(data['x_train'].min()-.5, data['x_train'].max()+.5, 50).reshape(-1,1).astype(args.dtype)
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=True, n_samp=n_samp, n_samp_show=n_samp_show, label=r'$M=$%d' % DIM_HIDDEN[0], color='tab:blue')
bnn, _ = load_pytorch(args, DIM_HIDDEN[1], DIR_SAVE[1])
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=True, n_samp=n_samp, n_samp_show=n_samp_show, label=r'$M=$%d' % DIM_HIDDEN[1], color='tab:orange')
## 0,1 ##
ax_ = ax[0,1]
bnn, _ = load_pytorch(args, DIM_HIDDEN[0], DIR_SAVE[0])
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=False, n_samp=n_samp, n_samp_show=n_samp_show, color='tab:blue')
bnn, _ = load_pytorch(args, DIM_HIDDEN[1], DIR_SAVE[1])
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=False, n_samp=n_samp, n_samp_show=n_samp_show, color='tab:orange')
## 1,0 ##
ax_ = ax[1,0]
kernel_fn, predict_fn = fit_nngp(args, data['x_train'], data['y_train'], noise_sig2=bnn.noise_scale**2)
#plot_nngp(kernel_fn, predict_fn, x_grid=data['x_grid'], prior=True, n_samp=n_samp, n_samp_show=n_samp_show, ax=ax_, label=r'$M\to\infty$', color='tab:green')
plot_nngp_alt(args, data['x_train'], data['y_train'], noise_sig2=bnn.noise_scale**2, x_test=data['x_grid'], prior=True, n_samp=n_samp, n_samp_show=n_samp_show, ax=ax_, label=r'$M\to\infty$', color='tab:green')
## 1,1 ##
ax_ = ax[1,1]
#plot_nngp(kernel_fn, predict_fn, x_grid=data['x_grid'], prior=False, n_samp=n_samp, n_samp_show=n_samp_show, ax=ax_, label=None, color='tab:green')
plot_nngp_alt(args, data['x_train'], data['y_train'], noise_sig2=bnn.noise_scale**2, x_test=data['x_grid'], prior=False, n_samp=n_samp, n_samp_show=n_samp_show, ax=ax_, color='tab:green')
# adjust tick font sizes
for i, ax_ in enumerate(ax.flat):
ax_.tick_params(axis='both', labelsize=fontsize_ticks)
# plot data over posterior plots
for ax_ in [ax[0,1],ax[1,1]]:
ax_.scatter(data['x_train'], data['y_train'], s=10, marker='+', linewidths=1, color='tab:red')
# adjust xlim to remove whitespace
ax[1,0].set_xlim(data['x_grid'].min(), data['x_grid'].max())
legend = fig.legend(bbox_to_anchor=(.55,-.025), loc="lower center", bbox_transform=fig.transFigure, ncol=3, fontsize=6, frameon=False) # title='width'
plt.setp(legend.get_title(),fontsize=8)
ax[0,0].set_ylim(-2.03,2.03)
ax[1,0].set_ylim(-2.03,2.03)
if not os.path.exists(DIR_OUT):
os.makedirs(DIR_OUT)
fig.savefig(os.path.join(DIR_OUT, 'figure1.png'), dpi=300, bbox_inches='tight')
fig.savefig(os.path.join(DIR_OUT, 'figure1.pdf'), dpi=300, bbox_inches='tight')
def prepare_data_mean_convergence(args, n_samp=10000, dir_out='.', fix_seed=False):
# fix_seed: whether to use the same seed for each dim_hidden (so same seed across each seed_init)
DIRS_DATA = ['../../experiment_3/results/pytorch_1/'] # one directory for each set of seeds
DIM_HIDDEN_GRID = np.linspace(1e2, 1e7, 100) # for computing the bound
MAX_DIM_HIDDEN_TEST = 1000
rows_out = []
for i, dir_data in enumerate(DIRS_DATA):
arg_perms = pd.read_csv(os.path.join(dir_data, 'argument_permutations.csv'))
for j, arg_perm in arg_perms.iterrows():
if args.test_mode and arg_perm['--dim_hidden'] > MAX_DIM_HIDDEN_TEST:
continue
dir_model = os.path.join(dir_data, os.path.basename(arg_perm['--dir_out']))
res = np.load(os.path.join(dir_model, 'results.npy'), allow_pickle=True).item()
# grid of points
x_grid = np.linspace(-1, 1, 25).reshape(-1,1).astype(args.dtype)
# compute distance to the prior
bnn, _ = load_pytorch(args, arg_perm['--dim_hidden'], dir_model, act_name=arg_perm['--activation'])
#if fix_seed:
# torch.manual_seed(arg_perm['--dim_hidden']) # same seed for each dim_hidden
error_mean, error_var, mc_error_mean = distance_to_prior_couple(bnn, x_grid, n_samp=n_samp, seed=arg_perm['--dim_hidden'])
# compute bound based on first network (assumed to be the smallest but it doesn't matter)
# also compute NNGP error
            if i == 0 and j == 0:
dataset = load_dataset(arg_perm['--dataset'], dim_in=bnn.dim_in, noise_sig2=bnn.noise_scale**2, n_train=arg_perm['--n_train'], n_test=100, seed=args.seed_data, dtype=args.dtype)
data = {'x_train':dataset.x_train, 'y_train':dataset.y_train, 'noise_sig2':dataset.noise_sig2} # convert to dictionary
bound = compute_bound(DIM_HIDDEN_GRID, bnn, data)
error_mean_nngp = distance_to_prior_nngp(args, data['x_train'], data['y_train'], x_grid, noise_sig2=bnn.noise_scale**2)
rows_out.append({
'dim_hidden': arg_perm['--dim_hidden'],
'seed': arg_perm['--seed_init'],
'error_mean': error_mean,
'error_var': error_var,
'mc_error_mean': mc_error_mean,
'bound': bound
})
if not os.path.exists(dir_out):
os.makedirs(dir_out)
df = pd.DataFrame(rows_out)
df.to_csv(os.path.join(dir_out, 'data_mean_convergence.csv'))
df_bound = pd.DataFrame({'dim_hidden': DIM_HIDDEN_GRID, 'bound': bound, 'error_mean_nngp': error_mean_nngp})
df_bound.to_csv(os.path.join(dir_out, 'bound_mean_convergence.csv'))
return df, df_bound
def figure_mean_convergence(args, fontsize_titles=8, fontsize_xlabels=8, fontsize_ylabels=8, fontsize_ticks=6, n_samp=10000, fix_seed=False, fig_name='figure_mean_convergence'):
DIR_OUT = '../results/mean_convergence'
# obtain prepared data
try:
df = pd.read_csv(os.path.join(DIR_OUT, 'data_mean_convergence.csv'))
df_bound = pd.read_csv(os.path.join(DIR_OUT, 'bound_mean_convergence.csv'))
    except FileNotFoundError:
df, df_bound = prepare_data_mean_convergence(args, n_samp=n_samp, dir_out=DIR_OUT, fix_seed=fix_seed)
df_agg = df.groupby('dim_hidden').agg({'error_mean':['mean','min','max']})['error_mean']
# make plot
fig, ax = plt.subplots(1,1, figsize=(4,2.5), tight_layout=True)
ax.set_xscale('log', base=10)
ax.set_yscale('log', base=10)
# BNN
ax.plot(df_agg.index.to_numpy(), df_agg['mean'].to_numpy(), '-o', markersize=2.5, label='Observed (BNN)') #log
ax.fill_between(df_agg.index.to_numpy(), df_agg['min'].to_numpy(), df_agg['max'].to_numpy(), alpha=.3)
# NNGP
ax.axhline(df_bound['error_mean_nngp'][0], color='tab:green', linestyle='dashed', label='Observed (NNGP)')
# Bound
ax.plot(df_bound['dim_hidden'], df_bound['bound'], label='Theoretical Bound (BNN)', color='tab:orange')
# Plot stuff
ax.set_xlim(1e2,1e7)
    ax.set_ylabel(r'$|\mathbb{E}_{Q^*}[f(x)] - \mathbb{E}_{P}[f(x)]|$', fontsize=fontsize_ylabels)
ax.set_xlabel(r'$M$', fontsize=fontsize_xlabels)
legend = fig.legend(bbox_to_anchor=(.55,1.025), loc="upper center", bbox_transform=fig.transFigure, ncol=3, fontsize=6, frameon=False)
ax.tick_params(axis='both', labelsize=fontsize_ticks)
ax.tick_params(axis='x', labelsize=fontsize_ticks)
if not os.path.exists(DIR_OUT):
os.makedirs(DIR_OUT)
fig.savefig(os.path.join(DIR_OUT, '%s.png' % fig_name), dpi=300, bbox_inches='tight')
fig.savefig(os.path.join(DIR_OUT, '%s.pdf' % fig_name), dpi=300, bbox_inches='tight')
def figure_counterexample(args, fontsize_titles=8, fontsize_xlabels=8, fontsize_ylabels=8, fontsize_ticks=6, n_samp=1000, n_samp_show=3):
torch.random.manual_seed(2)
np.random.seed(2)
DIR_SAVE_RELU = ['../../experiment_2/results/pytorch_2/15/', '../../experiment_2/results/pytorch_1/15/'] # counterexample, 2pts
DIR_SAVE_ODD = ['../../experiment_1/results/pytorch_2/15/', '../../experiment_1/results/pytorch_1/15/'] # counterexample, 2pts
DIM_HIDDEN = 4096000
ACT_NAME = ['relu', 'erf']
PLOT_ODD = True # whether to plot the odd activation
DIR_OUT = '../results/counterexample'
# For testing
if args.test_mode:
DIR_SAVE_RELU = ['../../experiment_2/results/pytorch_2/0/', '../../experiment_2/results/pytorch_1/0/'] # counterexample, 2pts
DIR_SAVE_ODD = ['../../experiment_1/results/pytorch_2/0/', '../../experiment_1/results/pytorch_1/0/'] # counterexample, 2pts
DIM_HIDDEN = 125
fig, ax = plt.subplots(1,2, sharex=False, sharey=False, figsize=(4,2), tight_layout=True)
ax[0].set_title('Counterexample Dataset', fontsize=fontsize_titles)
ax[1].set_title('Non-counterexample Dataset', fontsize=fontsize_titles)
ax[0].set_xlabel(r'$x$', fontsize=fontsize_xlabels)
ax[1].set_xlabel(r'$x$', fontsize=fontsize_xlabels)
## RELU NETWORK ##
ax_ = ax[0]
bnn, data = load_pytorch(args, DIM_HIDDEN, DIR_SAVE_RELU[0], act_name=ACT_NAME[0])
data['x_grid'] = np.linspace(data['x_train'].min()-.5, data['x_train'].max()+.5, 50).reshape(-1,1).astype(args.dtype)
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=False, n_samp=n_samp, n_samp_show=n_samp_show, label='ReLU', color='tab:blue')
data_0 = data
ax_ = ax[1]
bnn, data = load_pytorch(args, DIM_HIDDEN, DIR_SAVE_RELU[1], act_name=ACT_NAME[0])
data['x_grid'] = np.linspace(data['x_train'].min()-.5, data['x_train'].max()+.5, 50).reshape(-1,1).astype(args.dtype)
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=False, n_samp=n_samp, n_samp_show=n_samp_show, color='tab:blue')
data_1 = data
## ODD NETWORK ##
if PLOT_ODD:
ax_ = ax[0]
bnn, data = load_pytorch(args, DIM_HIDDEN, DIR_SAVE_ODD[0], act_name=ACT_NAME[1])
data['x_grid'] = np.linspace(data['x_train'].min()-.5, data['x_train'].max()+.5, 50).reshape(-1,1).astype(args.dtype)
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=False, n_samp=n_samp, n_samp_show=n_samp_show, label='erf', color='tab:orange')
ax_ = ax[1]
bnn, data = load_pytorch(args, DIM_HIDDEN, DIR_SAVE_ODD[1], act_name=ACT_NAME[1])
data['x_grid'] = np.linspace(data['x_train'].min()-.5, data['x_train'].max()+.5, 50).reshape(-1,1).astype(args.dtype)
plot_pytorch(bnn, x_grid = data['x_grid'], ax = ax_, prior=False, n_samp=n_samp, n_samp_show=n_samp_show, color='tab:orange')
ax[0].set_xlim(data_0['x_grid'].min(), data_0['x_grid'].max())
ax[1].set_xlim(data_1['x_grid'].min(), data_1['x_grid'].max())
ax[0].scatter(data_0['x_train'], data_0['y_train'], s=10, marker='+', linewidths=1, color='tab:red')
ax[1].scatter(data_1['x_train'], data_1['y_train'], s=10, marker='+', linewidths=1, color='tab:red')
# adjust tick font sizes
for i, ax_ in enumerate(ax.flat):
ax_.tick_params(axis='both', which='minor', labelsize=fontsize_ticks)
ax_.tick_params(axis='both', which='major', labelsize=fontsize_ticks)
if PLOT_ODD:
fig.legend(bbox_to_anchor=(.53,0), loc="lower center", bbox_transform=fig.transFigure, ncol=3, fontsize=6, frameon=False)
if not os.path.exists(DIR_OUT):
os.makedirs(DIR_OUT)
fig.savefig(os.path.join(DIR_OUT, 'figure_counterexample.png'), dpi=300, bbox_inches='tight')
fig.savefig(os.path.join(DIR_OUT, 'figure_counterexample.pdf'), dpi=300, bbox_inches='tight')
def prepare_data_many_datasets(args, datasets, dir_data, dir_out):
NUM_SEED = 5
MAX_DIM_HIDDEN_TEST = 500
if not os.path.exists(dir_out):
os.makedirs(dir_out)
if args.test_mode:
NUM_SEED = 2
rows_out_all = []
for dataset in datasets:
rows_out = []
for seed in range(NUM_SEED):
# load permutations of arguments (each row is a different experiment)
arg_perms_file = os.path.join(dir_data, '%s_%d/argument_permutations.csv' % (dataset, seed))
try:
arg_perms = pd.read_csv(arg_perms_file)
            except FileNotFoundError:
print('Unable to open %s, skipping to next set of experiments' % arg_perms_file)
continue
for _, arg_perm in arg_perms.iterrows():
# filepath for this experiment (row of argument permutations)
dir_data_exp = os.path.join(dir_data, '/'.join(arg_perm['--dir_out'].split('/')[-2:]))
if args.test_mode and arg_perm['--dim_hidden'] > MAX_DIM_HIDDEN_TEST:
continue
try:
res = np.load(os.path.join(dir_data_exp, 'results.npy'), allow_pickle=True).item()
                except FileNotFoundError:
                    print('Unable to open %s, skipping to next experiment' % dir_data_exp)
continue
# compute distance to the prior
bnn, data = load_pytorch(args, arg_perm['--dim_hidden'], dir_data_exp, act_name=arg_perm['--activation'], dim_in=arg_perm['--dim_in'])
data['x_samp'] = np.random.uniform(-1,1,(100,arg_perm['--dim_in']))
error_mean, _, _ = distance_to_prior(bnn, data['x_samp'], n_samp=1000)
error_mean_rmse, error_var_rmse = distance_to_prior_rmse(bnn, data['x_samp'], n_samp=1000)
# save
rows_out.append({
'dataset': dataset,
'act': arg_perm['--activation'],
'seed': seed,
'dim_hidden': arg_perm['--dim_hidden'],
'max_dist_prior': error_mean,
'rmse_prior_mean': error_mean_rmse,
'rmse_prior_var': error_var_rmse,
'rmse_test': res['post_rmse_test']
})
rows_out_all.append(rows_out[-1])
df = pd.DataFrame(rows_out)
df.to_csv(os.path.join(dir_out, 'results_%s.csv' % dataset))
return pd.DataFrame(rows_out_all)
def figure_many_datasets(args, fontsize_titles=7, fontsize_xlabels=7, fontsize_ylabels=7, fontsize_ticks=6, n_samp=1000):
torch.random.manual_seed(2)
DIR_DATA = '../../experiment_4/results' # trained BNNs
DIR_OUT = '../results/many_datasets/' # where to store the figure
DATASETS = ['sin2', 'sin100', 'two_dim_toy100', 'concrete_slump', 'concrete_relu', 'concrete_tanh']
if args.test_mode:
DATASETS = ['sin100']
try:
df = pd.concat([pd.read_csv(os.path.join(DIR_OUT, 'results_%s.csv' % dataset)) for dataset in DATASETS])
print('reading in existing prepared data')
    except FileNotFoundError:
df = prepare_data_many_datasets(args, datasets=DATASETS, dir_data = DIR_DATA, dir_out | |
# toytree/TreeParser.py
#!/usr/bin/env python
"""
A newick/nexus file/string parser based on the ete3.parser.newick. Takes as
input a string or file that contains one or multiple lines that contain
newick strings. Lines that do not contain newick strings are ignored, unless
the #NEXUS is in the header in which case the 'translate' and 'trees' blocks
are parsed.
"""
import os
import re
import requests
from .TreeNode import TreeNode
from .utils import NW_FORMAT
# Regular expressions used for reading newick format
FLOAT_RE = r"\s*[+-]?\d+\.?\d*(?:[eE][-+]\d+)?\s*"
NAME_RE = r"[^():,;]+?"
NHX_RE = r"\[&&NHX:[^\]]*\]"
MB_BRLEN_RE = r"\[&B (\w+) [0-9.e-]+\]"
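# Added examples of the comment syntax these patterns target (the concrete
# values are illustrative):
#   NHX_RE matches NHX annotations such as "[&&NHX:dist=1.0:name=A]"
#   MB_BRLEN_RE matches mrbayes branch-length comments such as
#   "[&B TK02Brlens 8.123e-3]"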
class NewickError(Exception):
"""Exception class designed for NewickIO errors."""
def __init__(self, value):
Exception.__init__(self, value)
class NexusError(Exception):
"""Exception class designed for NewickIO errors."""
def __init__(self, value):
Exception.__init__(self, value)
class FastTreeParser():
"""
A less flexible but faster newick parser for performance sensitive apps.
Only supports newick string input in format 0.
"""
def __init__(self, newick, tree_format):
self.data = newick
extractor = FastNewick2TreeNode(self.data, tree_format)
self.treenode = extractor.newick_from_string()
class TreeParser(object):
def __init__(self, intree, tree_format=0, multitree=False, debug=False):
"""
Reads input as a string or file, figures out format and parses it.
Formats 0-10 are newick formats supported by ete3.
Format 11 is nexus format from mrbayes.
Returns either a Toytree or MultiTree object, depending if input has
one or more trees.
"""
# the input file/stream and the loaded data
self.intree = intree
self.data = None
self.debug = debug
# the tree_format and parsed tree string from data
self.fmt = tree_format
self.multitree = multitree
self.newick = ""
# returned result: 1 tree for Toytree multiple trees for MultiTrees
self.treenodes = []
# newick translation dictionary
self.tdict = {}
# compiled re matchers for this tree format type
self.matcher = MATCHER[self.fmt]
# parse intree
if not self.debug:
self._run()
def _run(self):
# get newick from data and test newick structure
if self.intree:
# read data input by lines to .data
self.get_data_from_intree()
# check for NEXUS wrappings and update .data for newick strings
self.parse_nexus()
# raise warnings if tree_format doesn't seem right for data
self.warn_about_format()
# parse newick strings to treenodes list
self.get_treenodes()
# apply names from tdict
self.apply_name_translation()
# no input data
else:
self.treenodes = [TreeNode()]
def warn_about_format(self):
# warning about formats
if "[&&NHX" not in self.data[0]:
if ("[&" in self.data[0]) & (self.fmt != 10):
print("Warning: data looks like tree_format=10 (mrbayes-like)")
def get_data_from_intree(self):
"""
Load *data* from a file or string and return as a list of strings.
The data contents could be one newick string; a multiline NEXUS format
for one tree; multiple newick strings on multiple lines; or multiple
newick strings in a multiline NEXUS format. In any case, we will read
        in the data as a list of lines.
"""
# load string: filename or data stream
if isinstance(self.intree, (str, bytes)):
# strip it
self.intree = self.intree.strip()
# is a URL: make a list by splitting a string
if any([i in self.intree for i in ("http://", "https://")]):
response = requests.get(self.intree)
response.raise_for_status()
self.data = response.text.strip().split("\n")
# is a file: read by lines to a list
elif os.path.exists(self.intree):
                with open(self.intree, 'r') as indata:
self.data = indata.readlines()
# is a string: make into a list by splitting
else:
self.data = self.intree.split("\n")
# load iterable: iterable of newick strings
elif isinstance(self.intree, (list, set, tuple)):
self.data = list(self.intree)
def parse_nexus(self):
"get newick data from NEXUS"
if self.data[0].strip().upper() == "#NEXUS":
nex = NexusParser(self.data)
self.data = nex.newicks
self.tdict = nex.tdict
def get_treenodes(self):
"test format of intree nex/nwk, extra features"
if not self.multitree:
# get TreeNodes from Newick
extractor = Newick2TreeNode(self.data[0].strip(), fmt=self.fmt)
# extract one tree
self.treenodes.append(extractor.newick_from_string())
else:
for tre in self.data:
# get TreeNodes from Newick
extractor = Newick2TreeNode(tre.strip(), fmt=self.fmt)
# extract one tree
self.treenodes.append(extractor.newick_from_string())
def apply_name_translation(self):
if self.tdict:
for tree in self.treenodes:
for node in tree.traverse():
if node.name in self.tdict:
node.name = self.tdict[node.name]
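# Illustrative usage of TreeParser (an added sketch; the newick strings below
# are hypothetical):
#
#   parser = TreeParser("((a:1,b:2)x:1,c:3);", tree_format=0)
#   root = parser.treenodes[0]                 # TreeNode of the single tree
#   multi = TreeParser("(a,b);\n(c,d);", multitree=True)
#   trees = multi.treenodes                    # one TreeNode per newick line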
class Newick2TreeNode:
"Parse newick str to a TreeNode object"
def __init__(self, data, fmt=0):
self.data = data
self.root = TreeNode()
self.current_node = self.root
self.current_parent = None
self.fmt = fmt
self.cleanup_data()
def cleanup_data(self):
# check parentheses
if self.data.count('(') != self.data.count(')'):
raise NewickError('Parentheses do not match. Broken tree data.')
# remove white spaces and separators
self.data = re.sub(r"[\n\r\t ]+", "", self.data)
# mrbayes format terrible formatting hacks--------
if self.fmt == 10:
# convert bracket markers to NHX format
self.data = self.data.replace("[&", "[&&NHX:")
# replace commas inside feature strings with dashes
ns = ""
for chunk in self.data.split("{"):
if "}" in chunk:
pre, post = chunk.split("}", 1)
pre = pre.replace(",", "-")
ns += "{" + pre + "}" + post
else:
ns += chunk
self.data = ns
# replace parentheses inside brackets with curly braces
ns = ""
for chunk in self.data.split("["):
if "]" in chunk:
pre, post = chunk.split("]", 1)
pre = pre.replace("(", "{")
pre = pre.replace(")", "}")
pre = pre.replace(",", ":")
pre = pre.replace('"', "")
pre = pre.replace("'", "")
ns += "[" + pre + "]" + post
else:
ns += chunk
self.data = ns
def newick_from_string(self):
"Reads a newick string in the New Hampshire format."
# split on parentheses to traverse hierarchical tree structure
for chunk in self.data.split("(")[1:]:
# add child to make this node a parent.
self.current_parent = (
self.root if self.current_parent is None else
self.current_parent.add_child()
)
# get all parenth endings from this parenth start
subchunks = [ch.strip() for ch in chunk.split(",")]
if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
raise NewickError(
'Broken newick structure at: {}'.format(chunk))
# Every closing parenthesis will close a node and go up one level.
for idx, leaf in enumerate(subchunks):
if leaf.strip() == '' and idx == len(subchunks) - 1:
continue
closing_nodes = leaf.split(")")
# parse features and apply to the node object
self.apply_node_data(closing_nodes[0], "leaf")
# next contain closing nodes and data about the internal nodes.
if len(closing_nodes) > 1:
for closing_internal in closing_nodes[1:]:
closing_internal = closing_internal.rstrip(";")
# read internal node data and go up one level
self.apply_node_data(closing_internal, "internal")
self.current_parent = self.current_parent.up
return self.root
def apply_node_data(self, subnw, node_type):
if node_type in ("leaf", "single"):
self.current_node = self.current_parent.add_child()
else:
self.current_node = self.current_parent
# if no feature data
subnw = subnw.strip()
if not subnw:
return
# load matcher junk
c1, c2, cv1, cv2, match = MATCHER[self.fmt].type[node_type]
# if beast or mb then combine brackets
if self.fmt == 10:
if "]:" not in subnw:
node, edge = subnw.split("]", 1)
subnw = node + "]:0.0" + edge
node, edge = subnw.split("]:")
npre, npost = node.split("[")
# mrbayes mode: (a[&a:1,b:2]:0.1[&c:10])
try:
epre, epost = edge.split("[")
subnw = "{}:{}[&&NHX:{}".format(
npre, epre, ":".join([npost[6:], epost[6:]]))
# BEAST mode: (a[&a:1,b:2,c:10]:0.1)
except ValueError:
subnw = "{}:{}[&&NHX:{}]".format(npre, edge, npost[6:])
# look for node features
data = re.match(match, subnw)
# if there are node features then add them to this node
if data:
data = data.groups()
# data should not be empty
if all([i is None for i in data]):
raise NewickError(
"Unexpected newick format {}".format(subnw))
# node has a name
if (data[0] is not None) and (data[0] != ''):
self.current_node.add_feature(c1, cv1(data[0]))
if (data[1] is not None) and (data[1] != ''):
self.current_node.add_feature(c2, cv2(data[1][1:]))
if (data[2] is not None) and data[2].startswith("[&&NHX"):
fdict = parse_nhx(data[2])
for fname, fvalue in fdict.items():
self.current_node.add_feature(fname, fvalue)
else:
raise NewickError("Unexpected newick format {}".format(subnw))
class NexusParser:
"""
Parse nexus file/str formatted data to extract tree data and features.
Expects '#NEXUS', 'begin trees', 'tree', and 'end;'.
"""
def __init__(self, data, debug=False):
self.data = data
self.newicks = []
self.tdict = {}
self.matcher = re.compile(MB_BRLEN_RE)
if not debug:
self.extract_tree_block()
def extract_tree_block(self):
"iterate through data file to extract trees"
# data SHOULD be a list of strings at this point
lines = iter(self.data)
while 1:
try:
line = next(lines).strip()
except StopIteration:
break
# oh mrbayes, you seriously allow spaces within newick format!?
# find "[&B TK02Brlens 8.123e-3]" and change to [&Brlen=8.123e-3]
# this is a tmp hack fix, to be replaced with a regex
line = line.replace(" TK02Brlens ", "=")
# enter trees block
if line.lower() == "begin trees;":
while 1:
# iter through trees block
nextline = next(lines).strip()
# remove horrible brlen string with spaces from | |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 05:47:03 2018
@author: zg
"""
import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing
def RWR(A, nSteps, laziness, p0 = None):
'''
% the random walk algorithm.
% A is the input net matrix, with the diag to be 0.
% nSteps: how many steps to walk
    % laziness: the probability to go back.
    % p0: the initial probability. Usually it is the identity matrix (a zero
    % matrix with ones on the diagonal).
%
% for example, A could be:
% A = [0,2,2,0,0,0,0;...
% 2,0,1,1,0,0,0;...
% 2,1,0,0,1,0,0;...
% 0,1,0,0,0,1,1;...
% 0,0,1,0,0,0,0;...
% 0,0,0,1,0,0,1;...
% 0,0,0,1,0,1,0]
%
% if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
% [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
% 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
% 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
% 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
% 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
%
    % Each column represents the probability for each node. Each element in the
    % column means the probability of going to that node.
% This algorithm will converge. For example, for the above matrix, nSteps =
% 100, 1000 or 10000, will give the same result.
'''
n = len(A)
    if p0 is None:
p0 = np.eye(n)
'''
% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
% 0.2500 0 0 0 0 0 0
% 0 0.2500 0 0 0 0 0
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0
% 0 0 0 0 1.0000 0 0
% 0 0 0 0 0 0.5000 0
% 0 0 0 0 0 0 0.5000
% W will be:
% 0 0.5000 0.5000 0 0 0 0
% 0.5000 0 0.2500 0.3333 0 0 0
% 0.5000 0.2500 0 0 1.0000 0 0
% 0 0.2500 0 0 0 0.5000 0.5000
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0.5000
% 0 0 0 0.3333 0 0.5000 0
'''
#W = A * spdiags(sum(A)'.^(-1), 0, n, n);
#W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
0, n, n).toarray() )
p = p0
pl2norm = np.inf
unchanged = 0
for i in range(1, nSteps+1):
if i % 100 == 0:
print(' done rwr ' + str(i-1) )
pnew = (1-laziness) * W.dot(p) + laziness * p0
l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
p = pnew
if l2norm < np.finfo(float).eps:
break
else:
if l2norm == pl2norm:
unchanged = unchanged +1
if unchanged > 10:
break
else:
unchanged = 0
pl2norm = l2norm
return p
# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
[2,0,1,1,0,0,0],\
[2,1,0,0,1,0,0],\
[0,1,0,0,0,1,1],\
[0,0,1,0,0,0,0],\
[0,0,0,1,0,0,1],\
[0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
# test
#dst = distance.euclidean(A)
# corrent, the same as in Matlab
def f_sim_2_aRankNet(sim, k=3):
'''
% Convert the similarity matrix to a network graph where each node
% has k edges to other nodes (aRank).
'''
    # delete the diagonal values.
# sim = sim-diag(diag(sim) );
np.fill_diagonal(sim, 0)
# [~, I] = sort(sim-diag(diag(sim) ) );
I = np.argsort(sim, kind='mergesort') + 1
# [~, I2] = sort(I);
I2 = (np.argsort(I, kind='mergesort').T + 1).T
# for every column, just keep the top k edges.
#aRankNet = (I2 >length(sim)-k);
aRankNet = I2 > (len(sim) - k)
    # make it a symmetric matrix
# aRankNet = max(aRankNet, aRankNet');
aRankNet = np.logical_or(aRankNet, aRankNet.T)
# remove the diagonal 1s.
# aRankNet = aRankNet-diag(diag(aRankNet) );
np.fill_diagonal(aRankNet, False)
return aRankNet
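# Added note on the double argsort above: the first argsort orders, for each
# node, the entries of its row by similarity; the second converts that ordering
# into ranks, so I2 > len(sim)-k keeps the k highest-similarity entries per row
# (including the zeroed diagonal, which is removed again at the end), and the
# logical_or symmetrises the resulting adjacency matrix.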
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
# [0.5566, 0, -0.0842, -0.0170], \
# [0.6448, -0.0842, 0, 0.8405], \
# [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False, True, True, False],
# [ True, False, False, False],
# [ True, False, False, True],
# [False, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, False],
# [ True, False, False, True],
# [ True, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, True],
# [ True, False, False, True],
# [ True, True, True, False]])
def f_find_centers_rwMat(rw_mat, k):
'''
% on the rw_mat matrix, find some nodes as the centroids for soft
    % clustering. If we just randomly pick some nodes as centroids, that is
% not good for fuzzy clusters.
% k is the number of centroids.
'''
ixs = []
# 1. find the most connected center node as the first centroid.
a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
# % most connected node.
ix = np.argmax(a)
ixs.append(ix)
# % 2. iteratively find the rest nodes
for i in range(1, k):
tmp = rw_mat[:, ixs]
b = np.sum(tmp, axis=1)
b[ixs] = np.inf
# % find the farthest node
ix = np.argmin(b)
ixs.append(ix)
return ixs
# test
#tmp = f_find_centers_rwMat(rw_mat, 10)
def getCutoff(rw_mat, avgNeighborsSize):
tmp = rw_mat.flatten('F')
a = np.flip(np.sort(tmp), 0)
len1 = len(rw_mat)
#cutoffs = []
all_neibs = int( avgNeighborsSize * len1 )
print( all_neibs)
ct = a[all_neibs]
return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''
def f_len_of_each_ele(c1):
#% Assume c1 is a 1-dimension cell array, and each element is a 1d double
#% array. This function counts the length of each double array.
lens = np.zeros(len(c1))
for i in range(0, len(c1)):
lens[i] = len(c1[i])
return lens
def f_eu_dist(X):
'''
calculate the euclidean distance between instances
'''
sim = np.zeros(( len(X), len(X) ))
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
np.fill_diagonal(sim, 0)
return sim
#test
#sim = f_eu_dist(X)
def f_eu_dist2(X1, X2):
'''
calculate the euclidean distance between instances from two datasets
'''
sim = np.zeros(( len(X1), len(X2) ))
for i in range(0, len(X1) ):
for j in range(0, len(X2) ):
tmp = distance.euclidean(X1[i], X2[j])
sim[i][j] = tmp
sim = -sim
return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
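# Vectorised equivalents of f_eu_dist / f_eu_dist2 above (an added illustrative
# sketch; nothing else in this file calls these helpers):
def f_eu_dist_vec(X):
    # negative pairwise Euclidean distances; the diagonal is already zero
    return -distance.squareform(distance.pdist(X, metric='euclidean'))
def f_eu_dist2_vec(X1, X2):
    # negative Euclidean distances between instances of two datasets
    return -distance.cdist(X1, X2, metric='euclidean')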
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
# X: data
# k: number of clusters
'''
The return variable clus stores the instance indices for each cluster.
    However, with this data structure it is not easy to find, for an instance,
    which clusters it belongs to, so we also need to convert clus to a
    true-false matrix.
'''
    if each_clus_sz is None:
        # on average, how many clusters does one inst belong to.
        #overlap_factor = 2;
        # the estimated size of each cluster. default is one third of the
        # number of instances.
        each_clus_sz = len(X) / 3
print('RWR-based fuzzy clustering starts...')
print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )
# sim = squareform(pdist(X));
# sim = -sim;
sim = np.zeros((len(X), len(X) ) )
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
print(' done calculating the Euclidean distance matrix')
# ---------------------------------------------------------------
aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
print(' done calculating the A-rank KNN graph')
# % -------- RWR --------
nSteps = 1000
lazi = 0.3
rw = RWR(ori_graph, nSteps, lazi)
# remove probability of returning start node
np.fill_diagonal(rw, 0)
rw_mat = rw
print(' done RWR')
# ---------------------------------------------------------------
ixs_centers = f_find_centers_rwMat(rw_mat, k)
ct = getCutoff(rw_mat, each_clus_sz)
rw_net = rw_mat > ct
    # % set the diagonal to 1
np.fill_diagonal(rw_net, True)
clus = []
| |
#!/usr/bin/env python3
# Copyright (c) 2018 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_true,
assert_false,
assert_raises_message,
connect_nodes_bi,
get_coinbase_address,
nuparams,
DEFAULT_FEE,
DEFAULT_FEE_ZATS,
NU5_BRANCH_ID,
)
from test_framework.util import wait_and_assert_operationid_status, start_nodes
from decimal import Decimal
my_memo_str = 'c0ffee' # stay awake
my_memo = '633066666565'
my_memo = my_memo + '0'*(1024-len(my_memo))
no_memo = 'f6' + ('0'*1022) # see section 5.5 of the protocol spec
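# The memo constants above are hex strings zero-padded to the fixed 512-byte
# memo field (1024 hex characters). An added helper showing the same
# construction for an arbitrary ASCII string (illustrative only; the test below
# keeps using the constants):
def hex_memo(s):
    return s.encode('ascii').hex().ljust(1024, '0')
# hex_memo('c0ffee') reproduces my_memo as defined above.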
class ListReceivedTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = True
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[[
nuparams(NU5_BRANCH_ID, 225),
]] * self.num_nodes
)
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
self.is_network_split = False
self.sync_all()
def generate_and_sync(self, new_height):
current_height = self.nodes[0].getblockcount()
assert(new_height > current_height)
self.sync_all()
self.nodes[0].generate(new_height - current_height)
self.sync_all()
assert_equal(new_height, self.nodes[0].getblockcount())
def test_received_sprout(self, height):
self.generate_and_sync(height+2)
zaddr1 = self.nodes[1].z_getnewaddress('sprout')
        # Send 10 ZEC each to zaddr1 and zaddrExt via z_shieldcoinbase
result = self.nodes[0].z_shieldcoinbase(get_coinbase_address(self.nodes[0]), zaddr1, 0, 1)
txid_shielding1 = wait_and_assert_operationid_status(self.nodes[0], result['opid'])
zaddrExt = self.nodes[2].z_getnewaddress('sprout')
result = self.nodes[0].z_shieldcoinbase(get_coinbase_address(self.nodes[0]), zaddrExt, 0, 1)
txid_shieldingExt = wait_and_assert_operationid_status(self.nodes[0], result['opid'])
self.sync_all()
# Decrypted transaction details should not be visible on node 0
pt = self.nodes[0].z_viewtransaction(txid_shielding1)
assert_equal(pt['txid'], txid_shielding1)
assert_equal(len(pt['spends']), 0)
assert_equal(len(pt['outputs']), 0)
# Decrypted transaction details should be correct on node 1
pt = self.nodes[1].z_viewtransaction(txid_shielding1)
assert_equal(pt['txid'], txid_shielding1)
assert_equal(len(pt['spends']), 0)
assert_equal(len(pt['outputs']), 1)
assert_equal(pt['outputs'][0]['type'], 'sprout')
assert_equal(pt['outputs'][0]['js'], 0)
assert_equal(pt['outputs'][0]['address'], zaddr1)
assert_equal(pt['outputs'][0]['value'], Decimal('10'))
assert_equal(pt['outputs'][0]['valueZat'], 1000000000)
assert_equal(pt['outputs'][0]['memo'], no_memo)
jsOutputPrev = pt['outputs'][0]['jsOutput']
# Second transaction should not be known to node 1
assert_raises_message(
JSONRPCException,
"Invalid or non-wallet transaction id",
self.nodes[1].z_viewtransaction,
txid_shieldingExt)
        # Second transaction should be visible on node 2
pt = self.nodes[2].z_viewtransaction(txid_shieldingExt)
assert_equal(pt['txid'], txid_shieldingExt)
assert_equal(len(pt['spends']), 0)
assert_equal(len(pt['outputs']), 1)
assert_equal(pt['outputs'][0]['type'], 'sprout')
assert_equal(pt['outputs'][0]['js'], 0)
assert_equal(pt['outputs'][0]['address'], zaddrExt)
assert_equal(pt['outputs'][0]['value'], Decimal('10'))
assert_equal(pt['outputs'][0]['valueZat'], 1000000000)
assert_equal(pt['outputs'][0]['memo'], no_memo)
r = self.nodes[1].z_listreceivedbyaddress(zaddr1)
assert_equal(0, len(r), "Should have received no confirmed note")
c = self.nodes[1].z_getnotescount()
assert_equal(0, c['sprout'], "Count of confirmed notes should be 0")
# No confirmation required, one note should be present
r = self.nodes[1].z_listreceivedbyaddress(zaddr1, 0)
assert_equal(1, len(r), "Should have received one (unconfirmed) note")
assert_equal(txid_shielding1, r[0]['txid'])
assert_equal(10, r[0]['amount'])
assert_equal(1000000000, r[0]['amountZat'])
assert_false(r[0]['change'], "Note should not be change")
assert_equal(no_memo, r[0]['memo'])
assert_equal(0, r[0]['confirmations'])
assert_equal(-1, r[0]['blockindex'])
assert_equal(0, r[0]['blockheight'])
c = self.nodes[1].z_getnotescount(0)
assert_equal(1, c['sprout'], "Count of unconfirmed notes should be 1")
# Confirm transaction (10 ZEC shielded)
self.generate_and_sync(height+3)
# Require one confirmation, note should be present
r0 = self.nodes[1].z_listreceivedbyaddress(zaddr1)
        assert_equal(1, len(r0), "Should have received one confirmed note")
assert_equal(txid_shielding1, r0[0]['txid'])
assert_equal(10, r0[0]['amount'])
assert_equal(1000000000, r0[0]['amountZat'])
assert_false(r0[0]['change'], "Note should not be change")
assert_equal(no_memo, r0[0]['memo'])
assert_equal(1, r0[0]['confirmations'])
assert_equal(height + 3, r0[0]['blockheight'])
taddr = self.nodes[1].getnewaddress()
# Generate some change by sending part of zaddr1 back to taddr
opid = self.nodes[1].z_sendmany(zaddr1, [{'address': taddr, 'amount': 0.6}], 1)
txid = wait_and_assert_operationid_status(self.nodes[1], opid)
self.generate_and_sync(height+4)
# Decrypted transaction details should be correct
pt = self.nodes[1].z_viewtransaction(txid)
assert_equal(pt['txid'], txid)
assert_equal(len(pt['spends']), 1)
# TODO: enable once z_viewtransaction displays transparent elements
# assert_equal(len(pt['outputs']), 2)
assert_equal(len(pt['outputs']), 1)
assert_equal(pt['spends'][0]['type'], 'sprout')
assert_equal(pt['spends'][0]['txidPrev'], txid_shielding1)
assert_equal(pt['spends'][0]['js'], 0)
assert_equal(pt['spends'][0]['jsPrev'], 0)
assert_equal(pt['spends'][0]['jsOutputPrev'], jsOutputPrev)
assert_equal(pt['spends'][0]['address'], zaddr1)
assert_equal(pt['spends'][0]['value'], Decimal('10.0'))
assert_equal(pt['spends'][0]['valueZat'], 1000000000)
# We expect a transparent output and a Sprout output, but the RPC does
# not define any particular ordering of these within the returned JSON.
outputs = [{
'type': output['type'],
'address': output['address'],
'value': output['value'],
'valueZat': output['valueZat'],
} for output in pt['outputs']]
for (i, output) in enumerate(pt['outputs']):
if 'memo' in output:
outputs[i]['memo'] = output['memo']
# TODO: enable once z_viewtransaction displays transparent elements
# assert({
# 'type': 'transparent',
# 'address': taddr,
# 'value': Decimal('0.6'),
# 'valueZat': 60000000,
# } in outputs)
assert({
'type': 'sprout',
'address': zaddr1,
'value': Decimal('9.4') - DEFAULT_FEE,
'valueZat': 940000000 - DEFAULT_FEE_ZATS,
'memo': no_memo,
} in outputs)
# zaddr1 should have a note with change
r = self.nodes[1].z_listreceivedbyaddress(zaddr1, 0)
assert_equal(2, len(r), "zaddr1 Should have received 2 notes")
r = sorted(r, key = lambda received: received['amount'])
assert_equal(txid, r[0]['txid'])
assert_equal(Decimal('9.4')-DEFAULT_FEE, r[0]['amount'])
assert_equal(940000000-DEFAULT_FEE_ZATS, r[0]['amountZat'])
assert_true(r[0]['change'], "Note valued at (9.4-"+str(DEFAULT_FEE)+") should be change")
assert_equal(no_memo, r[0]['memo'])
# The old note still exists (it's immutable), even though it is spent
assert_equal(Decimal('10.0'), r[1]['amount'])
assert_equal(1000000000, r[1]['amountZat'])
assert_false(r[1]['change'], "Note valued at 10.0 should not be change")
assert_equal(no_memo, r[1]['memo'])
def test_received_sapling(self, height):
self.generate_and_sync(height+1)
taddr = self.nodes[1].getnewaddress()
zaddr1 = self.nodes[1].z_getnewaddress('sapling')
zaddrExt = self.nodes[2].z_getnewaddress('sapling')
txid_taddr = self.nodes[0].sendtoaddress(taddr, 4.0)
self.generate_and_sync(height+2)
# Send 1 ZEC to zaddr1
opid = self.nodes[1].z_sendmany(taddr, [
{'address': zaddr1, 'amount': 1, 'memo': my_memo},
{'address': zaddrExt, 'amount': 2},
], 1)
txid = wait_and_assert_operationid_status(self.nodes[1], opid)
self.sync_all()
# Decrypted transaction details should be correct
pt = self.nodes[1].z_viewtransaction(txid)
assert_equal(pt['txid'], txid)
assert_equal(len(pt['spends']), 0)
assert_equal(len(pt['outputs']), 2)
# Outputs are not returned in a defined order but the amounts are deterministic
outputs = sorted(pt['outputs'], key=lambda x: x['valueZat'])
assert_equal(outputs[0]['type'], 'sapling')
assert_equal(outputs[0]['address'], zaddr1)
assert_equal(outputs[0]['value'], Decimal('1'))
assert_equal(outputs[0]['valueZat'], 100000000)
assert_equal(outputs[0]['output'], 0)
assert_equal(outputs[0]['outgoing'], False)
assert_equal(outputs[0]['memo'], my_memo)
assert_equal(outputs[0]['memoStr'], my_memo_str)
assert_equal(outputs[1]['type'], 'sapling')
assert_equal(outputs[1]['address'], zaddrExt)
assert_equal(outputs[1]['value'], Decimal('2'))
assert_equal(outputs[1]['valueZat'], 200000000)
assert_equal(outputs[1]['output'], 1)
assert_equal(outputs[1]['outgoing'], True)
assert_equal(outputs[1]['memo'], no_memo)
assert 'memoStr' not in outputs[1]
r = self.nodes[1].z_listreceivedbyaddress(zaddr1)
assert_equal(0, len(r), "Should have received no confirmed note")
c = self.nodes[1].z_getnotescount()
assert_equal(0, c['sapling'], "Count of confirmed notes should be 0")
# No confirmation required, one note should be present
r = self.nodes[1].z_listreceivedbyaddress(zaddr1, 0)
assert_equal(1, len(r), "Should have received one (unconfirmed) note")
assert_equal(txid, r[0]['txid'])
assert_equal(1, r[0]['amount'])
assert_equal(100000000, r[0]['amountZat'])
assert_false(r[0]['change'], "Note should not be change")
assert_equal(my_memo, r[0]['memo'])
assert_equal(0, r[0]['confirmations'])
assert_equal(-1, r[0]['blockindex'])
assert_equal(0, r[0]['blockheight'])
c = self.nodes[1].z_getnotescount(0)
assert_equal(1, c['sapling'], "Count of unconfirmed notes should be 1")
# Confirm transaction (1 ZEC from taddr to zaddr1)
self.generate_and_sync(height+3)
# adjust confirmations
r[0]['confirmations'] = 1
# adjust blockindex
r[0]['blockindex'] = 1
# adjust height
r[0]['blockheight'] = height + 3
# Require one confirmation, note should be present
assert_equal(r, self.nodes[1].z_listreceivedbyaddress(zaddr1))
# Generate some change by sending part of zaddr1 to zaddr2
txidPrev = txid
zaddr2 = self.nodes[1].z_getnewaddress('sapling')
opid = self.nodes[1].z_sendmany(zaddr1, [{'address': zaddr2, 'amount': 0.6}], 1)
txid = wait_and_assert_operationid_status(self.nodes[1], opid)
self.sync_all()
self.generate_and_sync(height+4)
# Decrypted transaction details should be correct
pt = self.nodes[1].z_viewtransaction(txid)
assert_equal(pt['txid'], txid)
assert_equal(len(pt['spends']), 1)
assert_equal(len(pt['outputs']), 2)
assert_equal(pt['spends'][0]['type'], 'sapling')
assert_equal(pt['spends'][0]['txidPrev'], txidPrev)
assert_equal(pt['spends'][0]['spend'], 0)
assert_equal(pt['spends'][0]['outputPrev'], 0)
assert_equal(pt['spends'][0]['address'], zaddr1)
assert_equal(pt['spends'][0]['value'], Decimal('1.0'))
assert_equal(pt['spends'][0]['valueZat'], 100000000)
# Outputs are not returned in a defined order but the amounts are deterministic
outputs = sorted(pt['outputs'], key=lambda x: x['valueZat'])
assert_equal(outputs[0]['type'], 'sapling')
assert_equal(outputs[0]['address'], zaddr1)
assert_equal(outputs[0]['value'], Decimal('0.4') - DEFAULT_FEE)
assert_equal(outputs[0]['valueZat'], 40000000 - DEFAULT_FEE_ZATS)
assert_equal(outputs[0]['output'], 1)
assert_equal(outputs[0]['outgoing'], False)
assert_equal(outputs[0]['memo'], no_memo)
assert 'memoStr' not in outputs[0]
assert_equal(outputs[1]['type'], 'sapling')
assert_equal(outputs[1]['address'], zaddr2)
assert_equal(outputs[1]['value'], Decimal('0.6'))
assert_equal(outputs[1]['valueZat'], 60000000)
assert_equal(outputs[1]['output'], 0)
assert_equal(outputs[1]['outgoing'], False)
assert_equal(outputs[1]['memo'], no_memo)
assert 'memoStr' not in outputs[1]
# zaddr1 should have a note with change
r = self.nodes[1].z_listreceivedbyaddress(zaddr1, 0)
assert_equal(2, len(r), "zaddr1 Should have received 2 notes")
r = sorted(r, key = lambda received: received['amount'])
assert_equal(txid, r[0]['txid'])
assert_equal(Decimal('0.4')-DEFAULT_FEE, r[0]['amount'])
assert_equal(40000000-DEFAULT_FEE_ZATS, r[0]['amountZat'])
assert_equal(r[0]['change'], True, "Note valued at (0.4-"+str(DEFAULT_FEE)+") should be change")
assert_equal(no_memo, r[0]['memo'])
# The old note still exists (it's immutable), even though it is spent
assert_equal(Decimal('1.0'), r[1]['amount'])
assert_equal(100000000, r[1]['amountZat'])
assert_equal(r[1]['change'], False, "Note valued at 1.0 should not be change")
assert_equal(my_memo, r[1]['memo'])
# zaddr2 should not have change
r = self.nodes[1].z_listreceivedbyaddress(zaddr2, 0)
        assert_equal(len(r), 1, "zaddr2 should have received 1 note")
r = sorted(r, key = lambda received: received['amount'])
assert_equal(r[0]['txid'], txid)
assert_equal(r[0]['amount'], Decimal('0.6'))
assert_equal(r[0]['amountZat'], 60000000)
assert_equal(r[0]['change'], False, "Note valued at 0.6 should not be change")
assert_equal(r[0]['memo'], no_memo)
assert 0 <= r[0]['outindex'] < 2
c = self.nodes[1].z_getnotescount(0)
assert_equal(c['sapling'], 3, "Count of unconfirmed notes should be 3(2 in zaddr1 + 1 in zaddr2)")
# As part of UA support, a transparent address is now accepted
r = self.nodes[1].z_listreceivedbyaddress(taddr, 0)
assert_equal(len(r), 1)
assert_equal(r[0]['pool'], 'transparent')
assert_equal(r[0]['txid'], txid_taddr)
assert_equal(r[0]['amount'], Decimal('4'))
assert_equal(r[0]['amountZat'], 400000000)
assert_equal(r[0]['confirmations'], 3)
assert 0 <= r[0]['outindex'] < 2
# Test unified address
node = self.nodes[1]
# Create a unified address on one node, try z_listreceivedbyaddress on another node
account = self.nodes[0].z_getnewaccount()['account']
r = self.nodes[0].z_getaddressforaccount(account)
unified_addr = r['address']
# this address isn't in node1's wallet
assert_raises_message(
JSONRPCException,
"From address does not belong to this node",
node.z_listreceivedbyaddress, unified_addr, 0)
# create a UA on node1
r = node.z_getnewaccount()
account = r['account']
r = node.z_getaddressforaccount(account)
unified_addr = r['address']
receivers = node.z_listunifiedreceivers(unified_addr)
assert_equal(len(receivers), 3)
assert 'p2pkh' in receivers
assert 'sapling' in receivers
assert 'orchard' in receivers
assert_raises_message(
JSONRPCException,
"The provided address is a bare receiver from a Unified Address in this wallet.",
node.z_listreceivedbyaddress, receivers['p2pkh'], 0)
assert_raises_message(
JSONRPCException,
"The provided address is a bare receiver from a Unified Address in this | |
memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeTawInstancesResponse(AbstractModel):
"""DescribeTawInstances返回参数结构体
"""
def __init__(self):
r"""
        :param InstanceSet: Instance list
        :type InstanceSet: list of RumInstanceInfo
        :param TotalCount: Total number of instances
        :type TotalCount: int
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.InstanceSet = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("InstanceSet") is not None:
self.InstanceSet = []
for item in params.get("InstanceSet"):
obj = RumInstanceInfo()
obj._deserialize(item)
self.InstanceSet.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeUvListRequest(AbstractModel):
"""DescribeUvList请求参数结构体
"""
def __init__(self):
r"""
:param ProjectId: ID
:type ProjectId: int
        :param EndTime: End time
        :type EndTime: str
        :param StartTime: Start time
        :type StartTime: str
        :param Dimension: Query granularity. day: d, min: m
:type Dimension: str
"""
self.ProjectId = None
self.EndTime = None
self.StartTime = None
self.Dimension = None
def _deserialize(self, params):
self.ProjectId = params.get("ProjectId")
self.EndTime = params.get("EndTime")
self.StartTime = params.get("StartTime")
self.Dimension = params.get("Dimension")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeUvListResponse(AbstractModel):
"""DescribeUvList返回参数结构体
"""
def __init__(self):
r"""
        :param ProjectUvSet: UV list
        :type ProjectUvSet: list of RumUvInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.ProjectUvSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProjectUvSet") is not None:
self.ProjectUvSet = []
for item in params.get("ProjectUvSet"):
obj = RumUvInfo()
obj._deserialize(item)
self.ProjectUvSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeWhitelistsRequest(AbstractModel):
"""DescribeWhitelists请求参数结构体
"""
def __init__(self):
r"""
        :param InstanceID: Instance ID (instance-ID)
:type InstanceID: str
"""
self.InstanceID = None
def _deserialize(self, params):
self.InstanceID = params.get("InstanceID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeWhitelistsResponse(AbstractModel):
"""DescribeWhitelists返回参数结构体
"""
def __init__(self):
r"""
        :param WhitelistSet: List of whitelists
        :type WhitelistSet: list of Whitelist
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.WhitelistSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("WhitelistSet") is not None:
self.WhitelistSet = []
for item in params.get("WhitelistSet"):
obj = Whitelist()
obj._deserialize(item)
self.WhitelistSet.append(obj)
self.RequestId = params.get("RequestId")
class Filter(AbstractModel):
"""描述键值对过滤器,用于条件过滤查询。例如过滤ID、名称、状态等
· 若存在多个Filter时,Filter间的关系为逻辑与(AND)关系。
· 若同一个Filter存在多个Values,同一Filter下Values间的关系为逻辑或(OR)关系。
"""
def __init__(self):
r"""
        :param Values: One or more filter values.
        :type Values: list of str
        :param Name: Name of the filter key.
:type Name: str
"""
self.Values = None
self.Name = None
def _deserialize(self, params):
self.Values = params.get("Values")
self.Name = params.get("Name")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class LogExport(AbstractModel):
"""日志导出记录
"""
def __init__(self):
r"""
        :param CosPath: Log export path
        :type CosPath: str
        :param Count: Number of logs exported
        :type Count: int
        :param CreateTime: Creation time of the log export task
        :type CreateTime: str
        :param ExportID: Log export task ID
        :type ExportID: str
        :param FileName: Name of the exported log file
        :type FileName: str
        :param FileSize: Size of the log file
        :type FileSize: int
        :param Format: Log export format
        :type Format: str
        :param Order: Sort order of the log export by time
        :type Order: str
        :param Query: Query statement of the log export
        :type Query: str
        :param StartTime: Start time of the log export
        :type StartTime: str
        :param EndTime: End time of the log export
        :type EndTime: str
        :param Status: Log download status. Queuing: the export is queued; Processing: the export is in progress; Complete: the export is complete; Failed: the export failed; Expired: the exported log has expired (valid for three days).
        :type Status: str
"""
self.CosPath = None
self.Count = None
self.CreateTime = None
self.ExportID = None
self.FileName = None
self.FileSize = None
self.Format = None
self.Order = None
self.Query = None
self.StartTime = None
self.EndTime = None
self.Status = None
def _deserialize(self, params):
self.CosPath = params.get("CosPath")
self.Count = params.get("Count")
self.CreateTime = params.get("CreateTime")
self.ExportID = params.get("ExportID")
self.FileName = params.get("FileName")
self.FileSize = params.get("FileSize")
self.Format = params.get("Format")
self.Order = params.get("Order")
self.Query = params.get("Query")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Status = params.get("Status")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ModifyInstanceRequest(AbstractModel):
"""ModifyInstance请求参数结构体
"""
def __init__(self):
r"""
        :param InstanceId: ID of the instance to be modified
        :type InstanceId: str
        :param InstanceName: New instance name (up to 255 characters)
        :type InstanceName: str
        :param InstanceDesc: New instance description (up to 1024 characters)
:type InstanceDesc: str
"""
self.InstanceId = None
self.InstanceName = None
self.InstanceDesc = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.InstanceDesc = params.get("InstanceDesc")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ModifyInstanceResponse(AbstractModel):
"""ModifyInstance返回参数结构体
"""
def __init__(self):
r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyProjectLimitRequest(AbstractModel):
"""ModifyProjectLimit请求参数结构体
"""
def __init__(self):
r"""
        :param ProjectID: Project ID
        :type ProjectID: int
        :param ProjectInterface: Project API
        :type ProjectInterface: str
        :param ReportRate: Reporting rate. 10 means 10%
        :type ReportRate: int
        :param ReportType: Reporting type. 1: rate; 2: reported volume
        :type ReportType: int
        :param ID: Primary key ID
:type ID: int
"""
self.ProjectID = None
self.ProjectInterface = None
self.ReportRate = None
self.ReportType = None
self.ID = None
def _deserialize(self, params):
self.ProjectID = params.get("ProjectID")
self.ProjectInterface = params.get("ProjectInterface")
self.ReportRate = params.get("ReportRate")
self.ReportType = params.get("ReportType")
self.ID = params.get("ID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ModifyProjectLimitResponse(AbstractModel):
"""ModifyProjectLimit返回参数结构体
"""
def __init__(self):
r"""
        :param Msg: Returned message
        Note: this field may return null, indicating that no valid values can be obtained.
        :type Msg: str
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Msg = None
self.RequestId = None
def _deserialize(self, params):
self.Msg = params.get("Msg")
self.RequestId = params.get("RequestId")
class ModifyProjectRequest(AbstractModel):
"""ModifyProject请求参数结构体
"""
def __init__(self):
r"""
        :param ID: Project ID
        :type ID: int
        :param Name: Project name (optional; must not be empty and up to 200 characters)
        :type Name: str
        :param URL: Project webpage URL (optional; up to 256 characters)
        :type URL: str
        :param Repo: Project repository URL (optional; up to 256 characters)
        :type Repo: str
        :param InstanceID: ID of the instance to which the project is to be transferred (optional)
        :type InstanceID: str
        :param Rate: Project sampling rate (optional)
        :type Rate: str
        :param EnableURLGroup: Whether to enable URL aggregation (optional)
        :type EnableURLGroup: int
        :param Type: Project type (accepted values: "web", "mp", "android", "ios", "node", "hippy", "weex", "viola", "rn")
        :type Type: str
        :param Desc: Project description (optional; up to 1000 characters)
:type Desc: str
"""
self.ID = None
self.Name = None
self.URL = None
self.Repo = None
self.InstanceID = None
self.Rate = None
self.EnableURLGroup = None
self.Type = None
self.Desc = None
def _deserialize(self, params):
self.ID = params.get("ID")
self.Name = params.get("Name")
self.URL = params.get("URL")
self.Repo = params.get("Repo")
self.InstanceID = params.get("InstanceID")
self.Rate = params.get("Rate")
self.EnableURLGroup = params.get("EnableURLGroup")
self.Type = params.get("Type")
self.Desc = params.get("Desc")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ModifyProjectResponse(AbstractModel):
"""ModifyProject返回参数结构体
"""
def __init__(self):
r"""
        :param Msg: Operation message
        :type Msg: str
        :param ID: Project ID
        :type ID: int
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Msg = None
self.ID = None
self.RequestId = None
def _deserialize(self, params):
self.Msg = params.get("Msg")
self.ID = params.get("ID")
self.RequestId = params.get("RequestId")
class ProjectLimit(AbstractModel):
"""项目接口限制类型
"""
def __init__(self):
r"""
        :param ProjectInterface: API
        :type ProjectInterface: str
        :param ReportRate: Reporting rate
        :type ReportRate: int
        :param ReportType: Reporting type. 1: reporting rate; 2: reported volume limit
        :type ReportType: int
        :param ID: Primary key ID
        :type ID: int
        :param ProjectID: Project ID
:type ProjectID: int
"""
self.ProjectInterface = None
self.ReportRate = None
self.ReportType = None
self.ID = None
self.ProjectID = None
def _deserialize(self, params):
self.ProjectInterface = params.get("ProjectInterface")
self.ReportRate = params.get("ReportRate")
self.ReportType = params.get("ReportType")
self.ID = params.get("ID")
self.ProjectID = params.get("ProjectID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ReleaseFile(AbstractModel):
"""发布文件列表(SOURCEMAP)
"""
def __init__(self):
r"""
        :param Version: File version
        :type Version: str
        :param FileKey: Unique file key
        :type FileKey: str
        :param FileName: File name
        :type FileName: str
        :param FileHash: File hash
        :type FileHash: str
        :param ID: File ID
        Note: this field may return null, indicating that no valid values can be obtained.
:type ID: int
"""
self.Version = None
self.FileKey = None
self.FileName = None
self.FileHash = None
self.ID = None
def _deserialize(self, params):
self.Version = params.get("Version")
self.FileKey = params.get("FileKey")
self.FileName = params.get("FileName")
self.FileHash = params.get("FileHash")
self.ID = params.get("ID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ResumeInstanceRequest(AbstractModel):
"""ResumeInstance请求参数结构体
"""
def __init__(self):
r"""
        :param InstanceId: ID of the instance to be resumed
:type InstanceId: str
"""
self.InstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class ResumeInstanceResponse(AbstractModel):
"""ResumeInstance返回参数结构体
"""
def __init__(self):
r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class RumAreaInfo(AbstractModel):
"""Rum片区信息
"""
def __init__(self):
r"""
        :param AreaId: Region ID
        :type AreaId: int
        :param AreaStatus: Region status (1 = valid, 2 = invalid)
        :type AreaStatus: int
        :param AreaName: Region name
        :type AreaName: str
        :param AreaKey: Region key
:type AreaKey: str
"""
self.AreaId | |
% iIii1I11I1II1 / O0
if 54 - 54: iII111i - I1Ii111
if 88 - 88: iII111i * OoO0O00 % OoooooooOO / oO0o
if 7 - 7: i1IIi
if 30 - 30: oO0o . i1IIi / I11i
if 23 - 23: i1IIi + oO0o % iII111i - OoO0O00 - i1IIi
if 74 - 74: Ii1I + I11i . OoooooooOO - I1ii11iIi11i
iiI = iiI . encode ( )
iiI += IiIi1iiI11
OoOooO00 = [ oO00Oooo0o0o0 . print_eid_tuple ( ) ]
lprint ( " Changed RLOC-set, Map-Notifying old RLOC-set" )
if 2 - 2: oO0o - o0oOOo0O0Ooo
for oOo00O in i1I11iI11i1 :
if ( oOo00O . map_notify_requested == False ) : continue
if ( oOo00O . rloc . is_exact_match ( source ) ) : continue
lisp_build_map_notify ( lisp_sockets , iiI , OoOooO00 , 1 , oOo00O . rloc ,
LISP_CTRL_PORT , Ooo00OOoOO . nonce , Ooo00OOoOO . key_id ,
Ooo00OOoOO . alg_id , Ooo00OOoOO . auth_len , IIiiIiI , False )
lisp_notify_subscribers ( lisp_sockets , iiI , oO00Oooo0o0o0 . eid , IIiiIiI )
if ( len ( OOoooO ) != 0 ) :
lisp_queue_multicast_map_notify ( lisp_sockets , OOoooO )
if ( Ooo00OOoOO . merge_register_requested ) : return
if ( Ooo00OOoOO . map_notify_requested and IIiiIiI != None ) :
lisp_build_map_notify ( lisp_sockets , IIii , OOo00OOooOooO ,
Ooo00OOoOO . record_count , source , sport , Ooo00OOoOO . nonce ,
Ooo00OOoOO . key_id , Ooo00OOoOO . alg_id , Ooo00OOoOO . auth_len ,
IIiiIiI , True )
return
def lisp_process_multicast_map_notify(packet, source):
    oOoOOo = lisp_map_notify("")
    packet = oOoOOo.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return

    oOoOOo.print_notify()
    if (oOoOOo.record_count == 0): return

    I1i = oOoOOo.eid_records

    for IiIIi1IiiIiI in range(oOoOOo.record_count):
        iiI = lisp_eid_record()
        I1i = iiI.decode(I1i)
        if (packet == None): return
        iiI.print_record(" ", False)

        #
        # Look up the map-cache entry; create an entry only if gleaning is
        # allowed for this (EID, group).
        #
        IiiiiII1i = lisp_map_cache_lookup(iiI.eid, iiI.group)
        if (IiiiiII1i == None):
            IiII1II1I, O0o000, o00oo0 = lisp_allow_gleaning(iiI.eid, iiI.group, None)
            if (IiII1II1I == False): continue

            IiiiiII1i = lisp_mapping(iiI.eid, iiI.group, [])
            IiiiiII1i.add_cache()

        if (IiiiiII1i.gleaned):
            lprint("Ignore Map-Notify for gleaned {}".format(green(
                IiiiiII1i.print_eid_tuple(), False)))
            continue

        IiiiiII1i.mapping_source = None if source == "lisp-etr" else source
        IiiiiII1i.map_cache_ttl = iiI.store_ttl()

        #
        # An empty RLOC-set in the Map-Notify clears the stored RLOC-set.
        #
        if (len(IiiiiII1i.rloc_set) != 0 and iiI.rloc_count == 0):
            IiiiiII1i.rloc_set = []
            IiiiiII1i.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, IiiiiII1i)
            lprint("Update {} map-cache entry with no RLOC-set".format(green(
                IiiiiII1i.print_eid_tuple(), False)))
            continue

        O0OOO0oOO = IiiiiII1i.rtrs_in_rloc_set()

        #
        # Walk the RLOC records of this EID record.
        #
        for oOoOoO0O in range(iiI.rloc_count):
            iIii1IiIiI = lisp_rloc_record()
            I1i = iIii1IiIiI.decode(I1i, None)
            iIii1IiIiI.print_record(" ")
            if (iiI.group.is_null()): continue
            if (iIii1IiIiI.rle == None): continue

            OooOooo = IiiiiII1i.rloc_set[0].
# pylint: disable=R0902,R0904,R0914
"""
All static loads are defined in this file. This includes:
* LOAD
* GRAV
* ACCEL
* ACCEL1
* FORCE / MOMENT
* FORCE1 / MOMENT1
* FORCE2 / MOMENT2
* MOMENT
* PLOAD
* PLOAD2
* PLOAD4
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from numpy import array, cross, allclose, unique
from numpy.linalg import norm # type: ignore
#from pyNastran.bdf.errors import CrossReferenceError
from pyNastran.utils.numpy_utils import integer_types, float_types
from pyNastran.bdf.cards.loads.loads import Load, LoadCombination
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard, expand_thru, expand_thru_by # _node_ids,
from pyNastran.bdf.cards.collpase_card import collapse_thru_by
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank, string, string_or_blank,
integer_or_string, fields, integer_string_or_blank, integer_or_double)
from pyNastran.bdf.field_writer_8 import print_card_8, print_float_8, set_string8_blank_if_default
from pyNastran.bdf.field_writer_16 import (
print_card_16, print_float_16, set_string16_blank_if_default)
from pyNastran.bdf.field_writer_double import print_card_double, print_scientific_double
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
class LOAD(LoadCombination):
"""
+------+-----+------+------+----+-----+----+----+----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+======+=====+======+======+====+=====+====+====+====+
| LOAD | SID | S | S1 | L1 | S2 | L2 | S3 | L3 |
+------+-----+------+------+----+-----+----+----+----+
| | S4 | L4 | etc. | | | | | |
+------+-----+------+------+----+-----+----+----+----+
| LOAD | 101 | -0.5 | 1.0 | 3 | 6.2 | 4 | | |
+------+-----+------+------+----+-----+----+----+----+
"""
type = 'LOAD'
@classmethod
def _init_from_empty(cls):
sid = 1
scale = 1.
scale_factors = [1.]
load_ids = [1]
return cls(sid, scale, scale_factors, load_ids, comment='')
def __init__(self, sid, scale, scale_factors, load_ids, comment=''):
"""
Creates a LOAD card
Parameters
----------
sid : int
load id
scale : float
overall scale factor
scale_factors : List[float]
individual scale factors (corresponds to load_ids)
load_ids : List[int]
individual load_ids (corresponds to scale_factors)
comment : str; default=''
a comment for the card
.. note:: MSC can handle self-referencing loads, NX cannot
"""
LoadCombination.__init__(self, sid, scale, scale_factors, load_ids,
comment=comment)
def get_load_types(self):
"""
.. note:: requires a cross referenced load
"""
load_types = []
for loads in self.load_ids_ref:
for load in loads:
if isinstance(load, LOAD):
lid = load.lid
if isinstance(lid, list):
load_types += load.type
else: # int
load_types += [load.type] + load.get_load_types()
elif isinstance(load, (Load0, Load1, Load2, PLOAD4, GRAV)):
load_types += [load.type]
else:
raise NotImplementedError(load)
load_types = list(set(load_types))
#print("load_types = ", load_types)
return load_types
def get_reduced_loads(self, resolve_load_card=False, filter_zero_scale_factors=False):
"""
Get all load objects in a simplified form, which means all
scale factors are already applied and only base objects
(no LOAD cards) will be returned.
Parameters
----------
        resolve_load_card : bool; default=False
            Nastran requires that LOAD cards not reference other LOAD cards.
            This option resolves such nested references anyway.
filter_zero_scale_factors : bool; default=False
Nastran does not filter loads with a 0.0 scale factor. So, if you
have a 0.0 load, but are missing load ids, Nastran will throw a
fatal error.
.. todo:: lots more object types to support
"""
scale_factors = []
loads = []
simple_loads = [
'FORCE', 'FORCE1', 'FORCE2',
'MOMENT', 'MOMENT1', 'MOMENT2',
'PLOAD1', 'PLOAD2', 'PLOAD4',
'GRAV', 'ACCEL', 'ACCEL1']
load_scale = self.scale # global
for (loads_pack, i_scale) in zip(self.load_ids, self.scale_factors):
scale = i_scale * load_scale # actual scale = global * local
if isinstance(loads_pack, integer_types):
                raise RuntimeError('the loads have not been cross-referenced')
if scale == 0.0 and filter_zero_scale_factors:
continue
for load in loads_pack:
                if load.type in simple_loads:
loads.append(load)
scale_factors.append(scale) # local
elif isinstance(load, LOAD):
if not resolve_load_card:
msg = (
'A LOAD card cannot reference another LOAD card\n'
'current:\n%s\n'
'new:\n%s' % (str(self), str(load))
)
raise RuntimeError(msg)
load_data = load.get_reduced_loads(
resolve_load_card=True,
filter_zero_scale_factors=filter_zero_scale_factors)
(reduced_scale_factors, reduced_loads) = load_data
loads += reduced_loads
scale_factors += [scale * j_scale
for j_scale in reduced_scale_factors]
else:
                    msg = ('%s is not supported in get_reduced_loads method'
                           % load.__class__.__name__)
raise NotImplementedError(msg)
return (scale_factors, loads)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
load_ids2 = []
msg = ', which is required by LOAD=%s' % (self.sid)
for load_id in self.load_ids:
if load_id == self.sid:
msg = 'Type=%s sid=%s load_id=%s creates a recursion error' % (
self.type, self.sid, load_id)
raise RuntimeError(msg)
load_id2 = model.Load(load_id, consider_load_combinations=True, msg=msg)
assert isinstance(load_id2, list), load_id2
load_ids2.append(load_id2)
self.load_ids_ref = load_ids2
def safe_cross_reference(self, model: BDF, xref_errors, debug=True):
load_ids2 = []
msg = ', which is required by LOAD=%s' % (self.sid)
for load_id in self.load_ids:
try:
load_id2 = model.Load(load_id, consider_load_combinations=True, msg=msg)
except KeyError:
if debug:
                    msg = 'Could not find load_id=%i, which is required by %s=%s' % (
                        load_id, self.type, self.sid)
print(msg)
continue
load_ids2.append(load_id2)
self.load_ids_ref = load_ids2
def raw_fields(self):
list_fields = ['LOAD', self.sid, self.scale]
load_ids = self.get_load_ids()
for (scale_factor, load_id) in zip(self.scale_factors, load_ids):
list_fields += [scale_factor, self.LoadID(load_id)]
if len(load_ids) != len(self.scale_factors):
            msg = 'nload_ids=%s nscale_factors=%s are not the same\n' % (
                len(load_ids), len(self.scale_factors))
            msg += 'load_ids=%s\n' % (load_ids)
            msg += 'scale_factors=%s\n' % (self.scale_factors)
msg += print_card_8(list_fields)
msg += str(self.get_stats())
raise IndexError(msg)
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.load_ids = self.get_load_ids()
self.load_ids_ref = None
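# Illustrative sketch (not part of pyNastran): building the combination shown in the
# LOAD docstring above (LOAD 101 -0.5 1.0 3 6.2 4). The referenced load sets 3 and 4
# are hypothetical and would be defined on their own cards; writing the card without
# cross-referencing is assumed to work here, as uncross_reference() above implies.
def _example_load_combination():
    load = LOAD(sid=101, scale=-0.5, scale_factors=[1.0, 6.2], load_ids=[3, 4])
    # Total applied load = -0.5 * (1.0 * loadset_3 + 6.2 * loadset_4); this is the
    # same scaling that get_reduced_loads() resolves after cross-referencing.
    return load.write_card(size=8)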
class CLOAD(LoadCombination):
"""
Static Load Combination for Superelement Loads (Superposition)
references excite ids (e.g., an LSEQ); looks like a LOAD
"""
type = 'CLOAD'
@classmethod
def _init_from_empty(cls):
sid = 1
scale = 1.
scale_factors = [1.]
load_ids = [1]
return cls(sid, scale, scale_factors, load_ids, comment='')
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
load_ids2 = []
msg = ', which is required by CLOAD=%s' % (self.sid)
for load_id in self.load_ids:
if load_id == self.sid:
msg = 'Type=%s sid=%s load_id=%s creates a recursion error' % (
self.type, self.sid, load_id)
raise RuntimeError(msg)
#print(model.load_combinations)
load_id2 = []
for loadset, load_combinations in model.load_combinations.items():
for load in load_combinations:
if load.type in ['CLOAD']:
continue
if load_id == load.excite_id:
load_id2.append(load)
#load_id2 = model.Load(load_id, consider_load_combinations=True, msg=msg)
assert isinstance(load_id2, list), load_id2
assert len(load_id2) > 0, f'could not find references for CLOAD load_id={load_id}'
load_ids2.append(load_id2)
self.load_ids_ref = load_ids2
def safe_cross_reference(self, model: BDF, xref_errors, debug=True):
self.cross_reference(model)
def get_load_ids(self):
if self.load_ids_ref is None:
return self.load_ids
excite_ids = []
#print(self.load_ids_ref)
for loads in self.load_ids_ref:
excite_idsi = set([])
for load in loads:
excite_id = load.excite_id
excite_idsi.add(excite_id)
assert len(excite_idsi) == 1, excite_idsi
excite_ids.append(excite_idsi.pop())
assert len(excite_ids) > 0, excite_ids
return excite_ids
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.load_ids = self.get_load_ids()
self.load_ids_ref = None
def raw_fields(self):
list_fields = ['CLOAD', self.sid, self.scale]
load_ids = self.get_load_ids()
for (scale_factor, load_id) in zip(self.scale_factors, load_ids):
load_idi = self.LoadID(load_id)
list_fields += [scale_factor, load_idi]
if len(load_ids) != len(self.scale_factors):
            msg = 'nload_ids=%s nscale_factors=%s are not the same\n' % (
                len(load_ids), len(self.scale_factors))
            msg += 'load_ids=%s\n' % (load_ids)
msg += 'scale_factors=%s\n' % (self.scale_factors)
msg += print_card_8(list_fields)
msg += str(self.get_stats())
raise IndexError(msg)
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class GRAV(BaseCard):
"""
Defines acceleration vectors for gravity or other acceleration loading.
+------+-----+-----+------+-----+-----+------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+======+=====+=====+======+=====+=====+======+=====+
| GRAV | SID | CID | A | N1 | N2 | N3 | MB |
+------+-----+-----+------+-----+-----+------+-----+
| GRAV | 1 | 3 | 32.2 | 0.0 | 0.0 | -1.0 | |
+------+-----+-----+------+-----+-----+------+-----+
"""
type = 'GRAV'
@classmethod
def _init_from_empty(cls):
sid = 1
scale = 1.
N = [1., 1., 1.]
return GRAV(sid, scale, N, cid=0, mb=0, comment='')
def __init__(self, sid, scale, N, cid=0, mb=0, comment=''):
"""
        Creates a GRAV card
Parameters
----------
sid : int
load id
scale : float
scale factor for load
N : (3, ) float ndarray
the acceleration vector in the cid frame
cid : int; default=0
the coordinate system for the load
mb : int; default=0
???
comment : str; default=''
a comment for the card
"""
if comment:
self.comment = comment
#: Set identification number
self.sid = sid
#: Coordinate system identification number.
self.cid = cid
#: | |
tensors (keys == different regularization types, e.g. 'entropy').
"""
return dict()
def tf_loss(self, states, internals, actions, terminal, reward, update):
"""
        Creates and returns the single loss Tensor representing the total loss for a batch,
        including the mean loss per sample and the regularization losses of the batch.
Args:
states (dict): Dict of state tensors (each key represents one state space component).
internals: List of prior internal state tensors.
actions (dict): Dict of action tensors (each key represents one action space component).
terminal: Terminal boolean tensor (shape=(batch-size,)).
reward: Reward float tensor (shape=(batch-size,)).
update: Single boolean tensor indicating whether this call happens during an update.
Returns:
Single float-value loss tensor.
"""
# Losses per samples
loss_per_instance = self.fn_loss_per_instance(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
update=update
)
# Mean loss
loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=0)
# Summary for (mean) loss without any regularizations.
if 'losses' in self.summary_labels:
summary = tf.summary.scalar(name='loss-without-regularization', tensor=loss)
self.summaries.append(summary)
# Add the different types of regularization losses to the total.
losses = self.fn_regularization_losses(states=states, internals=internals, update=update)
if len(losses) > 0:
loss += tf.add_n(inputs=list(losses.values()))
if 'regularization' in self.summary_labels:
for name, loss_val in losses.items():
summary = tf.summary.scalar(name="regularization/"+name, tensor=loss_val)
self.summaries.append(summary)
# Summary for the total loss (including regularization).
if 'losses' in self.summary_labels or 'total-loss' in self.summary_labels:
summary = tf.summary.scalar(name='total-loss', tensor=loss)
self.summaries.append(summary)
return loss
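    # Reading aid (not part of the original source): the value returned above is
    #   total_loss = mean_i(loss_per_instance_i) + sum_r(regularization_loss_r)
    # where the regularization terms are whatever fn_regularization_losses() reported.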
def get_optimizer_kwargs(self, states, internals, actions, terminal, reward, update):
"""
Returns the optimizer arguments including the time, the list of variables to optimize,
and various argument-free functions (in particular `fn_loss` returning the combined
0-dim batch loss tensor) which the optimizer might require to perform an update step.
Args:
states (dict): Dict of state tensors (each key represents one state space component).
internals: List of prior internal state tensors.
actions (dict): Dict of action tensors (each key represents one action space component).
terminal: Terminal boolean tensor (shape=(batch-size,)).
reward: Reward float tensor (shape=(batch-size,)).
update: Single boolean tensor indicating whether this call happens during an update.
Returns:
Dict to be passed into the optimizer op (e.g. 'minimize') as kwargs.
"""
kwargs = dict()
kwargs['time'] = self.timestep
kwargs['variables'] = self.get_variables()
kwargs['fn_loss'] = (
lambda: self.fn_loss(states=states, internals=internals, actions=actions,
terminal=terminal, reward=reward, update=update)
)
if self.global_model is not None:
kwargs['global_variables'] = self.global_model.get_variables()
return kwargs
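    # Shape of the returned dict, as assembled above ('global_variables' only appears
    # when a global model is set, i.e. in distributed mode):
    #   {'time': <timestep variable>, 'variables': [...], 'fn_loss': <callable>,
    #    'global_variables': [...]}
    # Passing fn_loss as a callable lets optimizers that need several loss evaluations
    # (rather than one precomputed tensor) invoke it lazily.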
def tf_optimization(self, states, internals, actions, terminal, reward, update):
"""
Creates the TensorFlow operations for performing an optimization update step based
on the given input states and actions batch.
Args:
states (dict): Dict of state tensors (each key represents one state space component).
internals: List of prior internal state tensors.
actions (dict): Dict of action tensors (each key represents one action space component).
terminal: Terminal boolean tensor (shape=(batch-size,)).
reward: Reward float tensor (shape=(batch-size,)).
update: Single boolean tensor indicating whether this call happens during an update.
Returns:
The optimization operation.
"""
# No optimization (non-learning model)
if self.optimizer is None:
return tf.no_op()
optimizer_kwargs = self.get_optimizer_kwargs(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
update=update
)
return self.optimizer.minimize(**optimizer_kwargs)
def create_output_operations(self, states, internals, actions, terminal, reward, update, deterministic):
"""
Calls all the relevant TensorFlow functions for this model and hence creates all the
TensorFlow operations involved.
Args:
states (dict): Dict of state tensors (each key represents one state space component).
internals: List of prior internal state tensors.
actions (dict): Dict of action tensors (each key represents one action space component).
terminal: Terminal boolean tensor (shape=(batch-size,)).
reward: Reward float tensor (shape=(batch-size,)).
update: Single boolean tensor indicating whether this call happens during an update.
            deterministic: Boolean tensor indicating whether exploration should be skipped
                when actions are calculated.
"""
# Create graph by calling the functions corresponding to model.act() / model.update(), to initialize variables.
# TODO: Could call reset here, but would have to move other methods below reset.
self.fn_actions_and_internals(
states=states,
internals=internals,
update=update,
deterministic=deterministic
)
self.fn_loss_per_instance(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
update=update
)
# Tensor fetched for model.act()
operations = list()
if self.variable_noise is not None and self.variable_noise > 0.0:
# Add variable noise
noise_deltas = list()
for variable in self.get_variables():
noise_delta = tf.random_normal(shape=util.shape(variable), mean=0.0, stddev=self.variable_noise)
noise_deltas.append(noise_delta)
operations.append(variable.assign_add(delta=noise_delta))
# Retrieve actions and internals
with tf.control_dependencies(control_inputs=operations):
self.actions_output, self.internals_output = self.fn_actions_and_internals(
states=states,
internals=internals,
update=update,
deterministic=deterministic
)
# Increment timestep
increment_timestep = tf.shape(input=next(iter(states.values())))[0]
increment_timestep = self.timestep.assign_add(delta=increment_timestep)
operations = [increment_timestep]
# Subtract variable noise
if self.variable_noise is not None and self.variable_noise > 0.0:
for variable, noise_delta in zip(self.get_variables(), noise_deltas):
operations.append(variable.assign_sub(delta=noise_delta))
with tf.control_dependencies(control_inputs=operations):
# Trivial operation to enforce control dependency
self.timestep_output = self.timestep + 0
# Tensor fetched for model.observe()
increment_episode = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('int'))
increment_episode = self.episode.assign_add(delta=increment_episode)
with tf.control_dependencies(control_inputs=(increment_episode,)):
self.increment_episode = self.episode + 0
# TODO: add up rewards per episode and add summary_label 'episode-reward'
# Tensor(s) fetched for model.update()
self.optimization = self.fn_optimization(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
update=update
)
self.loss_per_instance = self.fn_loss_per_instance(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
update=update
)
def get_variables(self, include_non_trainable=False):
"""
Returns the TensorFlow variables used by the model.
Returns:
List of variables.
"""
if include_non_trainable:
# Optimizer variables and timestep/episode only included if 'include_non_trainable' set
model_variables = [self.all_variables[key] for key in sorted(self.all_variables)]
states_preprocessing_variables = [
variable for name in self.states_preprocessing.keys()
for variable in self.states_preprocessing[name].get_variables()
]
explorations_variables = [
variable for name in self.explorations.keys()
for variable in self.explorations[name].get_variables()
]
if self.reward_preprocessing is not None:
reward_preprocessing_variables = self.reward_preprocessing.get_variables()
else:
reward_preprocessing_variables = list()
if self.optimizer is None:
optimizer_variables = list()
else:
optimizer_variables = self.optimizer.get_variables()
variables = model_variables
variables.extend([v for v in states_preprocessing_variables if v not in variables])
variables.extend([v for v in explorations_variables if v not in variables])
variables.extend([v for v in reward_preprocessing_variables if v not in variables])
variables.extend([v for v in optimizer_variables if v not in variables])
return variables
else:
return [self.variables[key] for key in sorted(self.variables)]
def get_summaries(self):
"""
Returns the TensorFlow summaries reported by the model
Returns:
List of summaries
"""
return self.summaries
def reset(self):
"""
Resets the model to its initial state on episode start.
Returns:
tuple:
Current episode, timestep counter and the shallow-copied list of internal state initialization Tensors.
"""
# TODO preprocessing reset call moved from agent
episode, timestep = self.monitored_session.run(fetches=(self.episode, self.timestep))
return episode, timestep, list(self.internals_init)
def act(self, states, internals, deterministic=False):
"""
Does a forward pass through the model to retrieve action (outputs) given inputs for state (and internal
state, if applicable (e.g. RNNs))
Args:
states (dict): Dict of state tensors (each key represents one state space component).
internals: List of incoming internal state tensors.
deterministic (bool): If True, will not apply exploration after actions are calculated.
Returns:
tuple:
- Actual action-outputs (batched if state input is a batch).
- Actual values of internal states (if applicable) (batched if state input is a batch).
- The timestep (int) after calculating the (batch of) action(s).
"""
fetches = [self.actions_output, self.internals_output, self.timestep_output]
name = next(iter(self.states_spec))
batched = (np.asarray(states[name]).ndim != len(self.states_spec[name]['shape']))
if batched:
feed_dict = {state_input: states[name] for name, state_input in self.states_input.items()}
feed_dict.update({internal_input: internals[n] for n, internal_input in enumerate(self.internals_input)})
else:
feed_dict = {state_input: (states[name],) for name, state_input in self.states_input.items()}
feed_dict.update({internal_input: (internals[n],) for n, internal_input in enumerate(self.internals_input)})
feed_dict[self.deterministic_input] = deterministic
feed_dict[self.update_input] = False
actions, internals, timestep = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
# Extract the first (and only) action/internal from the batch to make return values non-batched
if not batched:
actions = {name: action[0] for name, action in actions.items()}
internals = [internal[0] for internal in internals]
if self.summary_configuration_op is not None:
summary_values = self.session.run(self.summary_configuration_op)
self.summary_writer.add_summary(summary_values)
self.summary_writer.flush()
# Only do this operation once to reduce duplicate data in Tensorboard
self.summary_configuration_op = None
return actions, internals, timestep
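    # Usage sketch (hypothetical key and shape): for a single, non-batched observation
    #   actions, internals, timestep = model.act(states={'obs': observation}, internals=[])
    # the inputs are wrapped into a batch of one above and unwrapped again on return.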
def observe(self, terminal, reward):
"""
Adds an observation (reward and is-terminal) to the model without updating its trainable variables.
Args:
terminal (bool): Whether the episode has terminated.
reward (float): The observed reward value.
Returns:
The value of the model-internal episode counter.
"""
terminal = np.asarray(terminal)
batched = (terminal.ndim == 1)
if batched:
feed_dict = {self.terminal_input: terminal, self.reward_input: reward, }
else:
feed_dict = {self.terminal_input: (terminal,), self.reward_input: (reward,)}
feed_dict[self.update_input] = False # don't update, just "observe"
episode = self.monitored_session.run(fetches=self.increment_episode, feed_dict=feed_dict)
return episode
def update(self, states, internals, actions, terminal, reward, return_loss_per_instance=False):
"""
Runs the self.optimization in the session to update the | |
import os
import pickle
import plotly.graph_objects as go
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath)
import countries # noqa: E402
from regions import Electoral_Region # noqa: E402
import electoral_systems # noqa: E402
class Election():
"""
    Class representing a parliamentary election that was held in the past.
...
Attributes
----------
date: str
The date on which the election was held, in the format 'YYYY-MM-DD'.
country: countries.Country
The country where the election was held.
regions: dict
A dictionary containing information about the election regions.
Keys are region levels, values are dictionaries.
The keys of these dictionaries are region names, and their values are
        the corresponding Electoral_Region objects.
parties: list
A list of the parties taking part in the election.
colors: dict
A dictionary whose keys are party names and values are the
corresponding colors to be used on the plots (Hex color code).
electoral_system: electoral_systems.System
An object of the class electoral_systems.System containing the
information about the system used on the election.
Methods
-------
    get_region(level, name): Electoral_Region
Given a regional level and a region name, return the Electoral_Regions.region
object corresponding to that region.
    get_regions(level): dict
        Given a region level, return a dict containing all the Electoral_Region
objects corresponding to that level. Keys of the dictionary are region names.
get_valid_parties(threshold): list
For a particular election, given a national-level threshold, return
a list of parties that have a number of votes above that threshold.
"""
def __init__(self, country: countries.Country, date: str = None):
self.date = date
self.country = country
self.maps = {}
for level in range(len(self.regions)):
self.maps[level] = go.Figure(go.Choroplethmapbox(
geojson=self.country.get_geojson(level),
locations=[x for x in self.regions[level]],
z=[0] * len(self.regions[level]),
colorscale="Reds",
zmin=0, zmax=1,
marker_line_width=1,
hoverinfo='none',
))
@property
def regions(self):
"""
A dictionary. Keys are region levels, values are dictionaries.
The keys of these dictionaries are region names, and their values are
        the corresponding Electoral_Region objects.
"""
return self._regions
@regions.setter
def regions(self, value):
if not type(value) == dict:
raise TypeError("Election's 'region' attribute must be a dictionary.")
for key in value:
if not 0 <= key <= 5:
raise ValueError("Keys representing levels must have values between 0 and 5.")
self._regions = value
@property
def parties(self):
"""
A list of the parties taking part in the election.
"""
return self._parties
@parties.setter
def parties(self, value):
if not type(value) == list:
raise TypeError("Election's 'parties' attribute must be a list.")
self._parties = value
@property
def colors(self):
"""
A dictionary whose keys are party names and values are the
corresponding colors to be used on the plots (Hex color code).
"""
return self._colors
@colors.setter
def colors(self, value):
if not type(value) == dict:
raise TypeError("Election's 'colors' attribute must be a dictionary.")
self._colors = value
@property
def electoral_system(self):
"""
An object of the class electoral_systems.System containing the
information about the system used on the election.
"""
return self._electoral_system
@electoral_system.setter
def electoral_system(self, value):
if not type(value) == electoral_systems.System:
raise TypeError("The Election's electoral_system must be an object of electoral_systems.System.")
self._electoral_system = value
def get_region(self, level, name):
"""
        Given a regional level and a region name, return the Electoral_Region
object corresponding to that region.
"""
return self.regions[level][name]
def get_regions(self, level):
"""
        Given a region level, return a dict containing all the Electoral_Region
objects corresponding to that level. Keys of the dictionary are region names.
"""
return self.regions[level]
def get_valid_parties(self, threshold):
"""
For a particular election, given a national-level threshold, return
a list of parties that have a number of votes above that threshold.
"""
total_votes = sum(self._regions[0][self.country.name].votes.values())
if threshold == 'n/2s':
vote_threshold = total_votes / self._regions[0][self.country.name].n_seats
else:
vote_threshold = total_votes * int(threshold) / 100
parties = [p for p, v in self._regions[0][self.country.name].votes.items() if v >= vote_threshold]
return parties
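    # Worked example (hypothetical numbers): with 25,000,000 total level-0 votes and
    # 350 seats, threshold='3' keeps parties with at least 750,000 votes, while
    # threshold='n/2s' (one seat's worth of votes) keeps those with at least
    # 25,000,000 / 350, i.e. roughly 71,429 votes.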
def _parse_data(self, filename, max_level):
"""
        Extract the data from the pickle file, initialize the Electoral_Region
objects and format the data for it to be used as attributes of an object of
the class Election.
"""
with open(filename, 'rb') as f:
data = pickle.load(f)
level_0_electoral_region = dict()
level_0_electoral_region[data['data'][0]['region_name']] = Electoral_Region(
self,
data['data'][0]['region_name'],
data['data'][0]['level'],
data['data'][0]['census'],
data['data'][0]['n_seats'],
data['data'][0]['votes'],
data['data'][0]['nota'],
data['data'][0]['split_votes'],
)
electoral_regions = {0: level_0_electoral_region}
for level in range(1, max_level+1):
level_electoral_regions = dict()
for region, results in data['data'][level].items():
level_electoral_regions[results['region_name']] = Electoral_Region(
self,
results['region_name'],
results['level'],
results['census'],
results['n_seats'],
results['votes'],
results['nota'],
results['split_votes'],
)
electoral_regions[level] = level_electoral_regions
return {'parties': data['parties'], 'regions': electoral_regions}
#########
# SPAIN #
#########
# Define some variables that are common to every Spain_Election
spain_country = countries.Spain()
spain_colors = { # See https://en.wikipedia.org/wiki/Category:Spain_political_party_colour_templates
'AMAIUR': '#087178',
'ARALAR-ZUTIK': '#BD0000', # 2004
'BLOC-EV': '#4E9E41', # 2004
'BLOC-VERDS': '#4E9E41',
'BNG': '#ADCFEF',
'B.N.G.': '#ADCFEF',
'BNG-NÓS': '#ADCFEF',
'NÓS': '#ADCFEF',
'CA': '#006633', # Coalición Andalucista, 2008
'CC': '#FFD700',
'CC-PNC': '#FFD700',
'CCa-PNC-NC': '#FFD700',
'CCa-PNC': '#FFD700',
'CC-NC-PNC': '#FFD700',
'CDC': '#18307B',
'CHA': '#008A21', # Chunta Aragonesista, 2004
'CIU': '#18307B',
'CiU': '#18307B',
'DL': '#18307B',
'Cs': '#EB6109',
"C's": '#EB6109',
'COMPROMÍS 2019': '#DA5C31',
'COMPROMÍS-Q': '#DA5C31',
'CUP-PR': '#FFED00',
'EA': '#77AC1C', # Eusko-Alkartasuna, 2004
'EAJ-PNV': '#4AAE4A',
'Eb': '#DDDDDD', # Escaños en blanco, 2011
'CENB': '#DDDDDD', # 2004
'ECP-GUANYEM EL CANVI': '#5A205A',
'ECP': '#5A205A',
'EH Bildu': '#B5CF18',
'EN COMÚ': '#5A205A',
'EQUO': '#8ABA18',
'ERC': '#8ABA18',
'ERC-CATSÍ': '#FFB232',
'ERC-CATSI': '#FFB232',
'ERC-SOBIRANISTES': '#FFB232',
'ESQUERRA': '#FFB232',
'FAC': '#10286B',
'FRONT REPUBLICÀ': '#EB2071', # 2019
'GIL': '#029138', # 2000
'IC-V': '#4E9E41', # Iniciativa per Catalunya, 2000
'IU': '#D56545',
'I.U.': '#D56545',
'IU-LV': '#D56545',
'IU-UPeC': '#D56545',
'JxCAT-JUNTS': '#5AB6A1',
'MÁS PAÍS-EQUO': '#0FDEC4',
'MÉS': '#D8DE40',
'MÉS COMPROMÍS': '#DA5C31',
'Na-Bai': '#F75E42',
'NA-BAI': '#F75E42',
'GBAI': '#F75E42',
'MÁS PAÍS': '#0FDEC4', # 2019
'NA+': '#819DA3',
'NC-CCN': '#BAF73E',
'Nca': '#639E42',
'PA': '#005931', # Partido Andalucista, 2004
'PAR': '#FFCC66', # Partido Aragonés, 2008
'PACMA': '#ADBE18',
'PP': '#007FFF',
'P.P.': '#007FFF',
'PRC': '#C2CE0C',
'P.R.C.': '#C2CE0C',
'PSM-EN,EU,EV,ER': '#FF9933', # Progressistes per les Illes Balears, 2004
'PSOE': '#FF0000',
'P.S.O.E.': '#FF0000',
'PSOE-PROGR.': '#FF0000',
'PODEMOS': '#5A205A',
'PODEMOS-En': '#5A205A',
'PODEMOS-EN MAREA-ANOVA-EU': '#5A205A',
'PODEMOS-EU-MAREAS EN COMÚN-EQUO': '#5A205A',
'PODEMOS-COM': '#5A205A',
'PODEMOS-COMPROMÍS-EUPV': '#5A205A',
'PODEMOS-EU': '#5A205A',
'PODEMOS-IU': '#5A205A',
'PODEMOS-IU-EQUO': '#5A205A',
'PxC': '#444042', # 2011
'RECORTES CERO-GV': '#00862A', # 2019
'RECORTES CERO-GRUPO VERDE': '#00862A',
'RECORTES CE': '#00862A',
'unio.cat': '#18307B',
'UPL': '#B41062', # Unión del Pueblo Leonés, 2000
'UPYD': '#E9008C',
'UPyD': '#E9008C',
'UV': '#1F4473', # Unió Valenciana, 2000
'VERDES': '#099E40', # 2008
'LV-E': '#099E40', # 2004
'VOX': '#63BE21',
'¡TERUEL EXISTE!': '#037252',
}
spain_ccaa_and_provinces = {}
for x in ['Almería', 'Cádiz', 'Córdoba', 'Granada', 'Huelva', 'Jaén', 'Málaga', 'Sevilla']:
spain_ccaa_and_provinces[x] = 'Andalucía'
for x in ['Huesca', 'Teruel', 'Zaragoza']:
spain_ccaa_and_provinces[x] = 'Aragón'
spain_ccaa_and_provinces['Cantabria'] = 'Cantabria'
for x in ['Ávila', 'Burgos', 'León', 'Palencia', 'Salamanca', 'Segovia', 'Soria', 'Valladolid', 'Zamora']:
spain_ccaa_and_provinces[x] = 'Castilla y León'
for x in ['Albacete', 'Ciudad Real', 'Cuenca', 'Guadalajara', 'Toledo']:
spain_ccaa_and_provinces[x] = 'Castilla-La Mancha'
for x in ['Barcelona', 'Girona', 'Lleida', 'Tarragona']:
spain_ccaa_and_provinces[x] = 'Cataluña'
spain_ccaa_and_provinces['Ceuta'] = 'Ceuta y Melilla'
spain_ccaa_and_provinces['Melilla'] = 'Ceuta y Melilla'
spain_ccaa_and_provinces['Madrid'] = 'Comunidad de Madrid'
spain_ccaa_and_provinces['Navarra'] = 'Comunidad Foral de Navarra'
for x in ['Alacant', 'Castelló', 'València']:
spain_ccaa_and_provinces[x] = 'Comunidad Valenciana'
spain_ccaa_and_provinces['Badajoz'] = 'Extremadura'
spain_ccaa_and_provinces['Cáceres'] = 'Extremadura'
for x in ['A Coruña', 'Lugo', 'Ourense', 'Pontevedra']:
spain_ccaa_and_provinces[x] = 'Galicia'
spain_ccaa_and_provinces['Illes Balears'] = 'Islas Baleares'
spain_ccaa_and_provinces['Las Palmas'] = 'Islas Canarias'
spain_ccaa_and_provinces['Santa Cruz de Tenerife'] = 'Islas Canarias'
spain_ccaa_and_provinces['La Rioja'] = 'La Rioja'
for x in ['Araba', 'Bizkaia', 'Gipuzcoa']:
spain_ccaa_and_provinces[x] = 'País Vasco'
spain_ccaa_and_provinces['Asturias'] = 'Principado de Asturias'
spain_ccaa_and_provinces['Murcia'] = 'Región de Murcia'
class Spain_Election(Election):
"""
Parent class to all the classes that represent elections that were held
in Spain.
"""
def __init__(self, data_file: str):
"""
Parameters
----------
data_file: str
The path of the pickle file containing the electoral results.
"""
date = data_file.split('_')[-1].split('.')[0] # The date of the election is encoded in the filename
parsed_data = self._parse_data(data_file, max_level=2)
self.regions = parsed_data['regions']
self.parties = parsed_data['parties']
self.electoral_system = electoral_systems.System(name='dHondt', level=2, threshold=3)
self.colors = spain_colors
self._build_region_tree()
super(Spain_Election, self).__init__(country=spain_country, date=date)
def _build_region_tree(self):
self.regions[0]['Spain'].subregions = self.regions[1].values()
for region_name, region_value in self.regions[2].items():
if hasattr(self.regions[1][spain_ccaa_and_provinces[region_name]], 'subregions'):
self.regions[1][spain_ccaa_and_provinces[region_name]].subregions.append(region_value)
else:
self.regions[1][spain_ccaa_and_provinces[region_name]].subregions = [region_value]
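# Usage sketch (not part of the original module): load a past election and query it.
# It requires the bundled pickle data files; the threshold value is only illustrative.
def _example_query_spain_2019():
    election = Spain_2019_11()
    national = election.get_region(0, 'Spain')            # level 0 is the whole country
    parties = election.get_valid_parties(threshold='3')   # parties above a 3% threshold
    return national.n_seats, parties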
class Spain_2019_11(Spain_Election):
"""
    Class representing the elections that were held in Spain on 2019-11-10.
"""
def __init__(self):
filename = 'data/Spain/election_data_2019-11-10.pkl'
filename = os.path.join(os.path.dirname(__file__), filename)
super(Spain_2019_11, self).__init__(data_file=filename)
return
class Spain_2019_04(Spain_Election):
"""
    Class representing the elections that were held in Spain on 2019-04-28.
"""
def __init__(self):
filename = 'data/Spain/election_data_2019-04-28.pkl'
filename = os.path.join(os.path.dirname(__file__), filename)
super(Spain_2019_04, self).__init__(data_file=filename)
return
class Spain_2016_06(Spain_Election):
"""
    Class representing the elections that were held in Spain on 2016-06-26.
"""
def __init__(self):
filename = 'data/Spain/election_data_2016-06-26.pkl'
filename = os.path.join(os.path.dirname(__file__), filename)
super(Spain_2016_06, self).__init__(data_file=filename)
return
class Spain_2015_12(Spain_Election):
"""
    Class representing the elections that were held in Spain on 2015-12-20.
"""
def __init__(self):
filename = | |
"""
:program:`javaproperties`
-------------------------
NAME
^^^^
:program:`javaproperties` — Basic manipulation of Java ``.properties`` files
SYNOPSIS
^^^^^^^^
.. code-block:: shell
javaproperties get [<OPTIONS>] <file> <key> ...
javaproperties select [<OPTIONS>] <file> <key> ...
javaproperties set [<OPTIONS>] <file> <key> <value>
javaproperties delete [<OPTIONS>] <file> <key> ...
javaproperties format [<OPTIONS>] [<file>]
:command:`get`
^^^^^^^^^^^^^^
.. code-block:: shell
javaproperties get [<OPTIONS>] <file> <key> ...
Print out the values of the given keys in the given ``.properties`` file. Each
value is printed out on a separate line with escape sequences interpolated.
If you want the output to also be in ``.properties`` format, see :ref:`select`.
Options
'''''''
.. program:: javaproperties get
.. option:: -d <value>, --default-value <value>
Default value for undefined keys. If this option is not specified, keys
requested on the command line that are not defined in either the main
``.properties`` file or the :option:`--defaults` file will (unless the
:option:`--quiet` option is given) cause a warning to be printed to stderr
and the command to exit with a failure status.
.. option:: -D <file>, --defaults <file>
``.properties`` file of default values. If this option is specified, keys
requested on the command line that are not defined in the main
``.properties`` file will be looked up in this file.
.. option:: -e, --escaped
Parse the keys and default value specified on the command line for
``.properties``-style escape sequences (specifically, those supported by
`javaproperties.unescape`)
.. option:: -E <encoding>, --encoding <encoding>
Specifies the encoding of the input file(s); default value: ``iso-8859-1``
(a.k.a. Latin-1). Output always uses the locale's encoding.
.. option:: -q, --quiet
.. versionadded:: 0.7.0
Do not warn about or fail due to missing keys
.. _select:
:command:`select`
^^^^^^^^^^^^^^^^^
.. code-block:: shell
javaproperties select [<OPTIONS>] <file> <key> ...
Print out the key-value entries in the given ``.properties`` file for the given
keys. The output is in ``.properties`` format, reformatted as though by
:ref:`format`.
Options
'''''''
.. program:: javaproperties select
.. option:: -A, --ascii
.. versionadded:: 0.6.0
Escape all non-ASCII characters in the output with ``\\uXXXX`` escape
sequences. This overrides :option:`--unicode`. This is the default
behavior.
.. option:: -d <value>, --default-value <value>
Default value for undefined keys. If this option is not specified, keys
requested on the command line that are not defined in either the main
``.properties`` file or the :option:`--defaults` file will (unless the
:option:`--quiet` option is given) cause a warning to be printed to stderr
and the command to exit with a failure status.
.. option:: -D <file>, --defaults <file>
``.properties`` file of default values. If this option is specified, keys
requested on the command line that are not defined in the main
``.properties`` file will be looked up in this file.
.. option:: -e, --escaped
Parse the keys and default value specified on the command line for
``.properties``-style escape sequences (specifically, those supported by
`javaproperties.unescape`)
.. option:: -E <encoding>, --encoding <encoding>
Specifies the encoding of the input and output files; default value:
``iso-8859-1`` (a.k.a. Latin-1)
.. option:: -o <file>, --outfile <file>
Write output to this file instead of standard output
.. option:: -s <sep>, --separator <sep>
Use ``<sep>`` as the key-value separator in the output; default value:
``=``
.. option:: -q, --quiet
.. versionadded:: 0.7.0
Do not warn about or fail due to missing keys
.. option:: -U, --unicode
.. versionadded:: 0.6.0
Output non-ASCII characters literally, except for characters that are not
supported by the output encoding, which are escaped with ``\\uXXXX`` escape
sequences. This overrides :option:`--ascii`.
:command:`set`
^^^^^^^^^^^^^^
.. code-block:: shell
javaproperties set [<OPTIONS>] <file> <key> <value>
Set the value of ``<key>`` in the ``.properties`` file ``<file>`` to
``<value>`` and output the results. The other entries in the file (including
comments, possibly not including the timestamp; see below) will be left as-is.
Options
'''''''
.. program:: javaproperties set
.. option:: -A, --ascii
.. versionadded:: 0.6.0
Escape all non-ASCII characters in the new key & value with ``\\uXXXX``
escape sequences on output. This overrides :option:`--unicode`. This is
the default behavior.
.. option:: -e, --escaped
Parse ``<key>`` and ``<value>`` for ``.properties``-style escape sequences
(specifically, those supported by `javaproperties.unescape`)
.. option:: -E <encoding>, --encoding <encoding>
Specifies the encoding of the input and output files; default value:
``iso-8859-1`` (a.k.a. Latin-1)
.. option:: -o <file>, --outfile <file>
Write output to this file instead of standard output
.. option:: -s <sep>, --separator <sep>
Separate ``<key>`` and ``<value>`` in the output with ``<sep>``; default
value: ``=``
.. option:: -T, --preserve-timestamp
Do not modify the timestamp in the ``.properties`` file. By default, if a
timestamp is found, it is updated to the current time, even if the rest of
the file is unchanged.
.. option:: -U, --unicode
.. versionadded:: 0.6.0
Output non-ASCII characters in the new key & value literally, except for
characters that are not supported by the output encoding, which are escaped
with ``\\uXXXX`` escape sequences. This overrides :option:`--ascii`.
:command:`delete`
^^^^^^^^^^^^^^^^^
.. code-block:: shell
javaproperties delete [<OPTIONS>] <file> <key> ...
Remove all entries for the given keys from the given ``.properties`` file and
output the results. The other entries in the file (including comments,
possibly not including the timestamp; see below) will be left as-is.
Options
'''''''
.. program:: javaproperties delete
.. option:: -e, --escaped
Parse the keys specified on the command line for ``.properties``-style
escape sequences (specifically, those supported by
`javaproperties.unescape`)
.. option:: -E <encoding>, --encoding <encoding>
Specifies the encoding of the input and output files; default value:
``iso-8859-1`` (a.k.a. Latin-1)
.. option:: -o <file>, --outfile <file>
Write output to this file instead of standard output
.. option:: -T, --preserve-timestamp
Do not modify the timestamp in the ``.properties`` file. By default, if a
timestamp is found, it is updated to the current time, even if the rest of
the file is unchanged.
.. _format:
:command:`format`
^^^^^^^^^^^^^^^^^
.. code-block:: shell
javaproperties format [<OPTIONS>] [<file>]
Normalize the formatting of the given ``.properties`` file (or standard input
if no file is given) and output the results. All comments, excess whitespace,
invalid escapes, and duplicate keys are removed, and the entries are sorted
lexicographically.
Options
'''''''
.. program:: javaproperties format
.. option:: -A, --ascii
.. versionadded:: 0.6.0
Escape all non-ASCII characters in the output with ``\\uXXXX`` escape
sequences. This overrides :option:`--unicode`. This is the default
behavior.
.. option:: -E <encoding>, --encoding <encoding>
Specifies the encoding of the input and output files; default value:
``iso-8859-1`` (a.k.a. Latin-1)
.. option:: -o <file>, --outfile <file>
Write output to this file instead of standard output
.. option:: -s <sep>, --separator <sep>
Use ``<sep>`` as the key-value separator in the output; default value:
``=``
.. option:: -U, --unicode
.. versionadded:: 0.6.0
Output non-ASCII characters literally, except for characters that are not
supported by the output encoding, which are escaped with ``\\uXXXX`` escape
sequences. This overrides :option:`--ascii`.
"""
import click
from javaproperties import (
KeyValue,
dump,
java_timestamp,
join_key_value,
load,
parse,
to_comment,
unescape,
)
from .util import command, encoding_option, infile_type, outfile_type
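# Reference sketch (not part of the CLI): the library helpers the commands below are
# built on. join_key_value() renders a single .properties line with escapes applied,
# and to_comment(java_timestamp()) renders the timestamp comment that `select` writes.
def _example_library_helpers():
    header = to_comment(java_timestamp())
    line = join_key_value("key", "some value", separator="=", ensure_ascii=True)
    return header, line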
@command(group=True)
def javaproperties():
"""Basic manipulation of Java .properties files"""
pass
@javaproperties.command()
@click.option(
"-d", "--default-value", metavar="VALUE", help="Default value for undefined keys"
)
@click.option(
"-D",
"--defaults",
metavar="FILE",
type=infile_type,
help=".properties file of default values",
)
@click.option(
"-e", "--escaped", is_flag=True, help="Parse command-line keys & values for escapes"
)
@encoding_option
@click.option("-q", "--quiet", is_flag=True, help="Don't warn on missing keys")
@click.argument("file", type=infile_type)
@click.argument("key", nargs=-1, required=True)
@click.pass_context
def get(ctx, default_value, defaults, escaped, file, key, encoding, quiet):
"""Query values from a Java .properties file"""
ok = True
for k, v in getselect(file, key, defaults, default_value, encoding, escaped):
if v is not None:
click.echo(v)
elif not quiet:
click.echo(f"{ctx.command_path}: {k}: key not found", err=True)
ok = False
ctx.exit(0 if ok else 1)
@javaproperties.command()
@click.option(
"-A/-U",
"--ascii/--unicode",
"ensure_ascii",
default=True,
help="Whether to escape non-ASCII characters or output raw",
)
@click.option(
"-d", "--default-value", metavar="VALUE", help="Default value for undefined keys"
)
@click.option(
"-D",
"--defaults",
metavar="FILE",
type=infile_type,
help=".properties file of default values",
)
@click.option(
"-e", "--escaped", is_flag=True, help="Parse command-line keys & values for escapes"
)
@encoding_option
@click.option(
"-o", "--outfile", type=outfile_type, default="-", help="Write output to this file"
)
@click.option("-q", "--quiet", is_flag=True, help="Don't warn on missing keys")
@click.option(
"-s",
"--separator",
default="=",
show_default=True,
help="Key-value separator to use in output",
)
@click.argument("file", type=infile_type)
@click.argument("key", nargs=-1, required=True)
@click.pass_context
def select(
ctx,
default_value,
defaults,
escaped,
separator,
file,
key,
encoding,
outfile,
ensure_ascii,
quiet,
):
"""Extract key-value pairs from a Java .properties file"""
ok = True
with click.open_file(
outfile,
"w",
encoding=encoding,
errors="javapropertiesreplace",
) as fpout:
print(to_comment(java_timestamp()), file=fpout)
for k, v in getselect(file, key, defaults, default_value, encoding, escaped):
if v is not None:
print(
join_key_value(
k, v, separator=separator, ensure_ascii=ensure_ascii
),
file=fpout,
)
elif not quiet:
click.echo(f"{ctx.command_path}: {k}: key not found", err=True)
ok = False
ctx.exit(0 if ok else 1)
@javaproperties.command("set")
@click.option(
"-A/-U",
"--ascii/--unicode",
"ensure_ascii",
default=True,
help="Whether to escape non-ASCII characters or output raw",
)
@click.option(
| |
        # Initialize the superclass. :)
super(Mol2_Reader,self).__init__(fnm)
## The parameter dictionary (defined in this file)
self.pdict = mol2_pdict
## The atom numbers in the interaction (stored in the parser)
self.atom = []
## The mol2 file provides a list of atom names
self.atomnames = []
## The section that we're in
self.section = None
# The name of the molecule
self.mol = None
def feed(self, line):
s = line.split()
self.ln += 1
# In mol2 files, the only defined interaction type is the Coulomb interaction.
if line.strip().lower() == '@<tripos>atom':
self.itype = 'COUL'
self.section = 'Atom'
elif line.strip().lower() == '@<tripos>bond':
self.itype = 'None'
self.section = 'Bond'
elif line.strip().lower() == '@<tripos>substructure':
self.itype = 'None'
self.section = 'Substructure'
elif line.strip().lower() == '@<tripos>molecule':
self.itype = 'None'
self.section = 'Molecule'
elif self.section == 'Molecule' and self.mol is None:
self.mol = '_'.join(s)
elif not is_mol2_atom(line):
self.itype = 'None'
if is_mol2_atom(line) and self.itype == 'COUL':
#self.atomnames.append(s[self.pdict[self.itype]['Atom'][0]])
#self.adict.setdefault(self.mol,[]).append(s[self.pdict[self.itype]['Atom'][0]])
self.atomnames.append(s[0])
self.adict.setdefault(self.mol,[]).append(s[0])
if self.itype in self.pdict:
if 'Atom' in self.pdict[self.itype] and match(' *[0-9]', line):
# List the atoms in the interaction.
#self.atom = [s[i] for i in self.pdict[self.itype]['Atom']]
self.atom = [s[0]]
# The suffix of the parameter ID is built from the atom #
# types/classes involved in the interaction.
self.suffix = ':' + '-'.join([self.mol,''.join(self.atom)])
#self.suffix = '.'.join(self.atom)
self.molatom = (self.mol, self.atom if type(self.atom) is list else [self.atom])
class FrcMod_Reader(BaseReader):
"""Finite state machine for parsing FrcMod force field file."""
def __init__(self,fnm):
# Initialize the superclass. :)
super(FrcMod_Reader,self).__init__(fnm)
## The parameter dictionary (defined in this file)
self.pdict = frcmod_pdict
## The atom numbers in the interaction (stored in the parser)
self.atom = []
## Whether we're inside the dihedral section
self.dihe = False
## The frcmod file never has any atoms in it
self.adict = {None:None}
def Split(self, line):
return split(' +(?!-(?![0-9.]))', line.strip().replace('\n',''))
def Whites(self, line):
return findall(' +(?!-(?![0-9.]))', line.replace('\n',''))
def build_pid(self, pfld):
""" Returns the parameter type (e.g. K in BONDSK) based on the
current interaction type.
Both the 'pdict' dictionary (see gmxio.pdict) and the
interaction type 'state' (here, BONDS) are needed to get the
parameter type.
If, however, 'pdict' does not contain the ptype value, a suitable
substitute is simply the field number.
Note that if the interaction type state is not set, then it
defaults to the file name, so a generic parameter ID is
'filename.line_num.field_num'
"""
if self.dihe and not self.haveAtomLine:
pfld += 1
if hasattr(self, 'overpfx'):
return self.overpfx + ':%i:' % pfld + self.oversfx
ptype = self.pdict.get(self.itype,{}).get(pfld,':%i.%i' % (self.ln,pfld))
answer = self.itype
answer += ptype
answer += '/'+self.suffix
return answer
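    # Illustrative shape of the returned ID (the concrete ptype letters come from
    # frcmod_pdict, defined elsewhere in this module): a bond line whose atom field is
    # 'c3-c3' yields something like 'BONDS<ptype>/c3-c3', and the ':%i.%i' fallback
    # gives 'BONDS:<line>.<field>/c3-c3' when no ptype is known for that field.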
def feed(self, line):
s = self.Split(line)
self.ln += 1
if len(line.strip()) == 0:
return
if match('^dihe', line.strip().lower()):
self.dihe = True
return
elif match('^mass$', line.strip().lower()):
self.dihe = False
self.itype = 'MASS'
return
elif match('^bond$', line.strip().lower()):
self.dihe = False
self.itype = 'BONDS'
return
elif match('^angle$', line.strip().lower()):
self.dihe = False
self.itype = 'ANGLES'
return
elif match('^improper$', line.strip().lower()):
self.dihe = False
self.itype = 'IDIHS'
return
elif match('^nonbon$', line.strip().lower()):
self.dihe = False
self.itype = 'VDW'
return
elif len(s) == 0:
self.dihe = False
return
if self.dihe:
if '-' in s[0]:
self.haveAtomLine = True
self.itype = 'PDIHS%i' % int(np.abs(float(s[4])))
else:
self.haveAtomLine = False
self.itype = 'PDIHS%i' % int(np.abs(float(s[3])))
else:
self.haveAtomLine = True
if self.itype in self.pdict:
if 'Atom' in self.pdict[self.itype] and self.haveAtomLine:
# List the atoms in the interaction.
self.atom = [s[i].replace(" -","-") for i in self.pdict[self.itype]['Atom']]
# The suffix of the parameter ID is built from the atom #
# types/classes involved in the interaction.
self.suffix = ''.join(self.atom)
#=============================================================================================
# AMBER parmtop loader (from 'zander', by <NAME>)
#=============================================================================================
# A regex for extracting print format info from the FORMAT lines.
FORMAT_RE_PATTERN=re.compile("([0-9]+)([a-zA-Z]+)([0-9]+)\.?([0-9]*)")
# Pointer labels which map to pointer numbers at top of prmtop files
POINTER_LABELS = """
NATOM, NTYPES, NBONH, MBONA, NTHETH, MTHETA,
NPHIH, MPHIA, NHPARM, NPARM, NEXT, NRES,
NBONA, NTHETA, NPHIA, NUMBND, NUMANG, NPTRA,
NATYP, NPHB, IFPERT, NBPER, NGPER, NDPER,
MBPER, MGPER, MDPER, IFBOX, NMXRS, IFCAP
"""
# Pointer labels (above) as a list, not string.
POINTER_LABEL_LIST = POINTER_LABELS.replace(',', '').split()
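# Reading aid (not in the original source): POINTER_LABEL_LIST preserves the column
# order of the %FLAG POINTERS block, so POINTER_LABEL_LIST.index('NATOM') == 0 and
# POINTER_LABEL_LIST.index('NRES') == 11; _getPointerValue() below relies on exactly
# this ordering.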
class PrmtopLoader(object):
"""Parsed AMBER prmtop file.
ParmtopLoader reads, parses and manages content from a AMBER prmtop file.
EXAMPLES
Parse a prmtop file of alanine dipeptide in implicit solvent.
>>> import os, os.path
>>> directory = os.path.join(os.getenv('YANK_INSTALL_DIR'), 'test', 'systems', 'alanine-dipeptide-gbsa')
>>> prmtop_filename = os.path.join(directory, 'alanine-dipeptide.prmtop')
>>> prmtop = PrmtopLoader(prmtop_filename)
Parse a prmtop file of alanine dipeptide in explicit solvent.
>>> import os, os.path
>>> directory = os.path.join(os.getenv('YANK_INSTALL_DIR'), 'test', 'systems', 'alanine-dipeptide-explicit')
>>> prmtop_filename = os.path.join(directory, 'alanine-dipeptide.prmtop')
>>> prmtop = PrmtopLoader(prmtop_filename)
"""
def __init__(self, inFilename):
"""
Create a PrmtopLoader object from an AMBER prmtop file.
ARGUMENTS
inFilename (string) - AMBER 'new-style' prmtop file, probably generated with one of the AMBER tleap/xleap/sleap
"""
self._prmtopVersion=None
self._flags=[]
self._raw_format={}
self._raw_data={}
fIn=open(inFilename)
for line in fIn:
if line.startswith('%VERSION'):
tag, self._prmtopVersion = line.rstrip().split(None, 1)
elif line.startswith('%FLAG'):
tag, flag = line.rstrip().split(None, 1)
self._flags.append(flag)
self._raw_data[flag] = []
elif line.startswith('%FORMAT'):
format = line.rstrip()
index0=format.index('(')
index1=format.index(')')
format = format[index0+1:index1]
m = FORMAT_RE_PATTERN.search(format)
self._raw_format[self._flags[-1]] = (format, m.group(1), m.group(2), m.group(3), m.group(4))
elif self._flags \
and 'TITLE'==self._flags[-1] \
and not self._raw_data['TITLE']:
self._raw_data['TITLE'] = line.rstrip()
else:
flag=self._flags[-1]
(format, numItems, itemType,
itemLength, itemPrecision) = self._getFormat(flag)
iLength=int(itemLength)
line = line.rstrip()
for index in range(0, len(line), iLength):
item = line[index:index+iLength]
if item:
self._raw_data[flag].append(item.strip())
fIn.close()
def _getFormat(self, flag=None):
if not flag:
flag=self._flags[-1]
return self._raw_format[flag]
def _getPointerValue(self, pointerLabel):
"""Return pointer value given pointer label
Parameter:
- pointerLabel: a string matching one of the following:
NATOM : total number of atoms
NTYPES : total number of distinct atom types
NBONH : number of bonds containing hydrogen
MBONA : number of bonds not containing hydrogen
NTHETH : number of angles containing hydrogen
MTHETA : number of angles not containing hydrogen
NPHIH : number of dihedrals containing hydrogen
MPHIA : number of dihedrals not containing hydrogen
NHPARM : currently not used
NPARM : currently not used
NEXT : number of excluded atoms
NRES : number of residues
NBONA : MBONA + number of constraint bonds
NTHETA : MTHETA + number of constraint angles
NPHIA : MPHIA + number of constraint dihedrals
NUMBND : number of unique bond types
NUMANG : number of unique angle types
NPTRA : number of unique dihedral types
NATYP : number of atom types in parameter file, see SOLTY below
NPHB : number of distinct 10-12 hydrogen bond pair types
IFPERT : set to 1 if perturbation info is to be read in
NBPER : number of bonds to be perturbed
NGPER : number of angles to be perturbed
NDPER : number of dihedrals to be perturbed
MBPER : number of bonds with atoms completely in perturbed group
MGPER : number of angles with atoms completely in perturbed group
MDPER : number of dihedrals with atoms completely in perturbed groups
IFBOX : set to 1 if standard periodic box, 2 when truncated octahedral
NMXRS : number of atoms in the largest residue
IFCAP : set to 1 if the CAP option from edit was specified
"""
index = POINTER_LABEL_LIST.index(pointerLabel)
return float(self._raw_data['POINTERS'][index])
def getNumAtoms(self):
"""Return the number of atoms in the system"""
return int(self._getPointerValue('NATOM'))
def getNumTypes(self):
"""Return the number of AMBER atoms types in the system"""
return int(self._getPointerValue('NTYPES'))
def getIfBox(self):
"""Return True if the system was build with periodic boundary conditions (PBC)"""
return int(self._getPointerValue('IFBOX'))
def getIfCap(self):
"""Return True if the system was build with the cap option)"""
return int(self._getPointerValue('IFCAP'))
def getIfPert(self):
"""Return True if the system was build with the perturbation parameters)"""
return int(self._getPointerValue('IFPERT'))
def getMasses(self):
"""Return a list of atomic masses in the system"""
try:
return self._massList
except AttributeError:
pass
self._massList=[]
raw_masses=self._raw_data['MASS']
for ii in range(self.getNumAtoms()):
self._massList.append(float(raw_masses[ii]))
self._massList = self._massList
return self._massList
def getCharges(self):
"""Return a list of atomic charges in the system"""
try:
return self._chargeList
except AttributeError:
pass
self._chargeList=[]
raw_charges=self._raw_data['CHARGE']
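# prmtop CHARGE entries are stored pre-multiplied by 18.2223 (the square root
# of AMBER's electrostatic constant), so dividing converts back to units of
# the elementary charge.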
for ii in range(self.getNumAtoms()):
self._chargeList.append(float(raw_charges[ii])/18.2223)
self._chargeList = self._chargeList
return self._chargeList
def getAtomName(self, iAtom):
"""Return the atom name for iAtom"""
atomNames = self.getAtomNames()
return atomNames[iAtom]
def getAtomNames(self):
"""Return the list of the system atom names"""
return self._raw_data["ATOM_NAME"]
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for failure module.
"""
import re
import sys
import StringIO
import traceback
import pdb
from twisted.trial import unittest, util
from twisted.python import failure
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTestCase(unittest.TestCase):
def testFailAndTrap(self):
"""Trapping a failure."""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_notTrapped(self):
"""Making sure trap doesn't trap what it shouldn't."""
try:
raise ValueError()
except:
f = failure.Failure()
self.assertRaises(failure.Failure, f.trap, OverflowError)
def assertStartsWith(self, s, prefix):
"""
Assert that s starts with a particular prefix.
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def test_printingSmokeTest(self):
"""
None of the print* methods fail when called.
"""
f = getDivisionFailure()
out = StringIO.StringIO()
f.printDetailedTraceback(out)
self.assertStartsWith(out.getvalue(), '*--- Failure')
out = StringIO.StringIO()
f.printBriefTraceback(out)
self.assertStartsWith(out.getvalue(), 'Traceback')
out = StringIO.StringIO()
f.printTraceback(out)
self.assertStartsWith(out.getvalue(), 'Traceback')
def test_printingCapturedVarsSmokeTest(self):
"""
None of the print* methods fail when called on a L{Failure} constructed
with C{captureVars=True}.
Local variables on the stack can be seen in the detailed traceback.
"""
exampleLocalVar = 'xyzzy'
f = getDivisionFailure(captureVars=True)
out = StringIO.StringIO()
f.printDetailedTraceback(out)
self.assertStartsWith(out.getvalue(), '*--- Failure')
self.assertNotEqual(None, re.search('exampleLocalVar.*xyzzy',
out.getvalue()))
out = StringIO.StringIO()
f.printBriefTraceback(out)
self.assertStartsWith(out.getvalue(), 'Traceback')
out = StringIO.StringIO()
f.printTraceback(out)
self.assertStartsWith(out.getvalue(), 'Traceback')
def test_printingCapturedVarsCleanedSmokeTest(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
exampleLocalVar = 'xyzzy'
f = getDivisionFailure(captureVars=True)
f.cleanFailure()
out = StringIO.StringIO()
f.printDetailedTraceback(out)
self.assertNotEqual(None, re.search('exampleLocalVar.*xyzzy',
out.getvalue()))
def test_printingNoVars(self):
"""
Calling C{Failure()} with no arguments does not capture any locals or
globals, so L{printDetailedTraceback} cannot show them in its output.
"""
out = StringIO.StringIO()
f = getDivisionFailure()
f.printDetailedTraceback(out)
# There should be no variables in the detailed output. Variables are
# printed on lines with 2 leading spaces.
linesWithVars = [line for line in out.getvalue().splitlines()
if line.startswith(' ')]
self.assertEqual([], linesWithVars)
self.assertSubstring(
'Capture of Locals and Globals disabled', out.getvalue())
def test_printingCaptureVars(self):
"""
Calling C{Failure(captureVars=True)} captures the locals and globals
for its stack frames, so L{printDetailedTraceback} will show them in
its output.
"""
out = StringIO.StringIO()
f = getDivisionFailure(captureVars=True)
f.printDetailedTraceback(out)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in out.getvalue().splitlines()
if line.startswith(' ')]
self.assertNotEqual([], linesWithVars)
def testExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def testRaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def testLackOfTB(self):
f = getDivisionFailure()
f.cleanFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
testLackOfTB.todo = "the traceback is not preserved, exarkun said he'll try to fix this! god knows how"
_stringException = "bugger off"
def _getStringFailure(self):
try:
raise self._stringException
except:
f = failure.Failure()
return f
def test_raiseStringExceptions(self):
# String exceptions used to completely break f.raiseException
f = self._getStringFailure()
try:
f.raiseException()
except:
self.assertEqual(sys.exc_info()[0], self._stringException)
else:
raise AssertionError("Should have raised")
test_raiseStringExceptions.suppress = [
util.suppress(message='raising a string exception is deprecated')]
def test_printStringExceptions(self):
"""
L{Failure.printTraceback} should write out stack and exception
information, even for string exceptions.
"""
failure = self._getStringFailure()
output = StringIO.StringIO()
failure.printTraceback(file=output)
lines = output.getvalue().splitlines()
# The last line should be the value of the raised string
self.assertEqual(lines[-1], self._stringException)
test_printStringExceptions.suppress = [
util.suppress(message='raising a string exception is deprecated')]
if sys.version_info[:2] >= (2, 6):
skipMsg = ("String exceptions aren't supported anymore starting "
"Python 2.6")
test_raiseStringExceptions.skip = skipMsg
test_printStringExceptions.skip = skipMsg
def testConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
returns the traceback object that it captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertNotEqual(None, expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertEqual(f.getTracebackObject(), None)
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(unittest.TestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(unittest.TestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
C{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertEqual(failure.Failure._findFailure(), None)
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
self.assertEqual(sys.exc_info()[-1], None) #environment sanity check
self.assertEqual(failure.Failure._findFailure(), None)
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
#!/usr/bin/env python
"""
analyse Elasticsearch query
"""
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from datetime import datetime
# Preprocess terms for TF-IDF
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
# LOG
import logging
from logging.handlers import RotatingFileHandler
# Word embedding for evaluation
from sentence_transformers import SentenceTransformer
from sklearn.manifold import TSNE
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
import scipy.spatial as sp
# Spatial entity as descriptor :
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
# venn
from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud
import operator
# Global var on Levels on spatial and temporal axis
spatialLevels = ['city', 'state', 'country']
temporalLevels = ['day', 'week', 'month', 'period']
def elasticsearch_query(query_fname, logger):
"""
Build an ES query and return a defaultdict with results
:return: tweetsByCityAndDate
"""
# Elastic search credentials
client = Elasticsearch("http://localhost:9200")
es_logger.setLevel(logging.WARNING)
index = "twitter"
# Define a Query
query = open(query_fname, "r").read()
result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000)
# Append all pages from the scroll search : avoid the 10k limitation of Elasticsearch
results = avoid10kquerylimitation(result, client, logger)
# Initiate a dict for each city append all Tweets content
tweetsByCityAndDate = defaultdict(list)
for hits in results:
# parse Java date : EEE MMM dd HH:mm:ss Z yyyy
inDate = hits["_source"]["created_at"]
parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y")
try:  # geocoding may be bad
geocoding = hits["_source"]["rest"]["features"][0]["properties"]
except:
continue  # skip this iteration
if "country" in hits["_source"]["rest"]["features"][0]["properties"]:
# locations do not necessarily have an associated state
try:
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no state in geocoding
try:
logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state")
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no city as well : only country
# print(json.dumps(hits["_source"], indent=4))
try: #
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except:
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str("none")
try:
tweetsByCityAndDate[cityStateCountry].append(
{
"tweet": preprocessTweets(hits["_source"]["full_text"]),
"created_at": parseDate
}
)
except:
print(json.dumps(hits["_source"], indent=4))
# biotexInputBuilder(tweetsByCityAndDate)
# pprint(tweetsByCityAndDate)
return tweetsByCityAndDate
def avoid10kquerylimitation(result, client, logger):
"""
Elasticsearch limits query results to 10,000. To avoid this limit, we paginate the results with the scroll API.
This method appends all pages from the scroll search.
:param result: a result of an Elasticsearch query
:return:
"""
scroll_size = result['hits']['total']["value"]
logger.info("Number of elasticsearch scroll: " + str(scroll_size))
results = []
# Progress bar
pbar = tqdm(total=scroll_size)
while (scroll_size > 0):
try:
scroll_id = result['_scroll_id']
res = client.scroll(scroll_id=scroll_id, scroll='60s')
results += res['hits']['hits']
scroll_size = len(res['hits']['hits'])
pbar.update(scroll_size)
except:
pbar.close()
logger.error("elasticsearch search scroll failed")
break
pbar.close()
return results
def preprocessTweets(text):
"""
1 - Clean up tweet text, cf. https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1
2 - Language detection (not implemented here)
3 - Stop-word removal (not implemented here)
:param text:
:return: textclean, the cleaned tweet text
"""
## 1 clean up tweets
# remove URLs
textclean = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', text)
textclean = re.sub(r'http\S+', '', textclean)
# remove usernames
# textclean = re.sub('@[^\s]+', '', textclean)
# remove the # in #hashtag
# textclean = re.sub(r'#([^\s]+)', r'\1', textclean)
return textclean
def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger):
"""
Create a matrix of :
- line : (city,day)
- column : terms
- value of cells : TF (term frequency)
Help found here :
http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html
https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
:param tweetsofcity:
:param matrixAggDay_fout: file to save
:param matrixOccurence_fout: file to save
:return:
"""
# initiate matrix of tweets aggregate by day
# col = ['city', 'day', 'tweetsList', 'bow']
col = ['city', 'day', 'tweetsList']
matrixAggDay = pd.DataFrame(columns=col)
cityDayList = []
logger.info("start full_text concatenation for city & day")
pbar = tqdm(total=len(tweetsofcity))
for city in tweetsofcity:
# create a table with 2 columns : tweet and created_at for a specific city
matrix = pd.DataFrame(tweetsofcity[city])
# Aggregate the list of tweets by single day for specific cities
## Loop on days for a city
period = matrix['created_at'].dt.date
period = period.unique()
period.sort()
for day in period:
# aggregate city and date document
document = '. \n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist())
# Bag of Words and preprocessing
# preproccesFullText = preprocessTerms(document)
tweetsOfDayAndCity = {
'city': city,
'day': day,
'tweetsList': document
}
cityDayList.append(city + "_" + str(day))
try:
matrixAggDay = matrixAggDay.append(tweetsOfDayAndCity, ignore_index=True)
except:
print("full_text empty after pre-process: "+document)
continue
pbar.update(1)
pbar.close()
if save_intermediaire_files:
logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout))
matrixAggDay.to_csv(matrixAggDay_fout)
# Count terms with scikit-learn
cd = CountVectorizer(
stop_words='english',
#preprocessor=sklearn_vectorizer_no_number_preprocessor,
#min_df=2, # token at least present in 2 cities : reduce size of matrix
max_features=25000,
ngram_range=(1, 1),
token_pattern='[a-zA-Z0-9#@]+', # remove user names, i.e. terms starting with @, for personal data reasons
# strip_accents= "ascii" # remove token with special character (trying to keep only english word)
)
cd.fit(matrixAggDay['tweetsList'])
res = cd.transform(matrixAggDay["tweetsList"])
countTerms = res.todense()
# create matrix
## get terms :
# voc = cd.vocabulary_
# listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])}
listOfTerms = cd.get_feature_names()
## initiate matrix with counts for each term
matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)
# save to file
if save_intermediaire_files:
logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout))
matrixOccurence.to_csv(matrixOccurence_fout)
return matrixOccurence
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return: matrix filtered
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
###Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
##period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
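# Typical call (hypothetical arguments):
#   spatiotemporelFilter(matrixOccurence, listOfcities=['Paris'],
#                        spatialLevel='city', period=[date(2020, 1, 1)])
# keeps only rows whose index starts with 'Paris_' and whose date part is
# '2020-01-01'.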
def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city',
period='all', temporalLevel='day'):
"""
Aggregate on the spatial and temporal levels, then compute TF-IDF
:param matrixOcc: Matrix with TF already computed
:param listOfcities: filter on these cities
:param spatialLevel: city / state / country / world
:param period: Filter on this period
:param temporalLevel: day / week (month not yet implemented)
:return:
"""
matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities,
spatialLevel='state', period=period)
# Aggregate by level
## Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
zip(*matrixOcc.index.map(splitindex))
if temporalLevel == 'day':
## In space
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("state").sum()
elif spatialLevel == 'country' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("country").sum()
elif temporalLevel == "week":
matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime
## in space and time
if spatialLevel == 'country':
matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'state':
matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'city':
matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="W")]).sum()
# Compute TF-IDF
## compute TF : for each doc, divide the count by the sum of all counts
### Sum of all counts by row
matrixOcc['sumCount'] = matrixOcc.sum(axis=1)
### Divide each cell by these sums
listOfTerms = matrixOcc.keys()
matrixOcc = matrixOcc.loc[:, listOfTerms].div(matrixOcc['sumCount'], axis=0)
## Compute IDF : create a vector of length = nb of terms with IDF values
idf = pd.Series(index=matrixOcc.keys(), dtype=float)
### N : nb of documents <=> nb of rows :
N = matrixOcc.shape[0]
### DFt : nb of document that contains the term
DFt = matrixOcc.astype(bool).sum(axis=0) # Tip : convert all values to boolean; float 0.0 becomes False, everything else True
#### Replace 0 with NaN, because otherwise the log would be infinite
DFt.replace(0, np.nan, inplace=True)
### compute log(N/DFt)
idf = np.log10(N / (DFt))
# idf = np.log10( N / (DFt * 10))
## compute TF-IDF
matrixTFIDF = matrixOcc * idf
# matrixTFIDF = matrixOcc * idf * idf
## remove terms if for all documents
self.flakes('''
try:
pass
finally:
continue
''', m.ContinueInFinally)
def test_breakOutsideLoop(self):
self.flakes('''
break
''', m.BreakOutsideLoop)
self.flakes('''
def f():
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
pass
else:
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
pass
else:
if 1:
if 2:
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
def f():
break
''', m.BreakOutsideLoop)
self.flakes('''
while True:
class A:
break
''', m.BreakOutsideLoop)
self.flakes('''
try:
pass
finally:
break
''', m.BreakOutsideLoop)
def test_breakInsideLoop(self):
self.flakes('''
while True:
break
''')
self.flakes('''
for i in range(10):
break
''')
self.flakes('''
while True:
if 1:
break
''')
self.flakes('''
for i in range(10):
if 1:
break
''')
self.flakes('''
while True:
while True:
pass
else:
break
else:
pass
''')
self.flakes('''
while True:
try:
pass
finally:
while True:
break
''')
self.flakes('''
while True:
try:
pass
finally:
break
''')
self.flakes('''
while True:
try:
pass
finally:
if 1:
if 2:
break
''')
def test_defaultExceptLast(self):
"""
A default except block should be last.
YES:
try:
...
except Exception:
...
except:
...
NO:
try:
...
except:
...
except Exception:
...
"""
self.flakes('''
try:
pass
except ValueError:
pass
''')
self.flakes('''
try:
pass
except ValueError:
pass
except:
pass
''')
self.flakes('''
try:
pass
except:
pass
''')
self.flakes('''
try:
pass
except ValueError:
pass
else:
pass
''')
self.flakes('''
try:
pass
except:
pass
else:
pass
''')
self.flakes('''
try:
pass
except ValueError:
pass
except:
pass
else:
pass
''')
def test_defaultExceptNotLast(self):
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
else:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
else:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
else:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
else:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
finally:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast)
self.flakes('''
try:
pass
except:
pass
except ValueError:
pass
except:
pass
except ValueError:
pass
else:
pass
finally:
pass
''', m.DefaultExceptNotLast, m.DefaultExceptNotLast)
@skipIf(version_info < (3,), "Python 3 only")
def test_starredAssignmentNoError(self):
"""
Python 3 extended iterable unpacking
"""
self.flakes('''
a, *b = range(10)
''')
self.flakes('''
*a, b = range(10)
''')
self.flakes('''
a, *b, c = range(10)
''')
self.flakes('''
(a, *b) = range(10)
''')
self.flakes('''
(*a, b) = range(10)
''')
self.flakes('''
(a, *b, c) = range(10)
''')
self.flakes('''
[a, *b] = range(10)
''')
self.flakes('''
[*a, b] = range(10)
''')
self.flakes('''
[a, *b, c] = range(10)
''')
# Taken from test_unpack_ex.py in the cPython source
s = ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \
", *rest = range(1<<8)"
self.flakes(s)
s = "(" + ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \
", *rest) = range(1<<8)"
self.flakes(s)
s = "[" + ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \
", *rest] = range(1<<8)"
self.flakes(s)
@skipIf(version_info < (3, ), "Python 3 only")
def test_starredAssignmentErrors(self):
"""
SyntaxErrors (not encoded in the ast) surrounding Python 3 extended
iterable unpacking
"""
# Taken from test_unpack_ex.py in the cPython source
s = ", ".join("a%d" % i for i in range(1 << 8)) + \
", *rest = range(1<<8 + 1)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "(" + ", ".join("a%d" % i for i in range(1 << 8)) + \
", *rest) = range(1<<8 + 1)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "[" + ", ".join("a%d" % i for i in range(1 << 8)) + \
", *rest] = range(1<<8 + 1)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \
", *rest = range(1<<8 + 2)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "(" + ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \
", *rest) = range(1<<8 + 2)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
s = "[" + ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \
", *rest] = range(1<<8 + 2)"
self.flakes(s, m.TooManyExpressionsInStarredAssignment)
# No way we can actually test this!
# s = "*rest, " + ", ".join("a%d" % i for i in range(1<<24)) + \
# ", *rest = range(1<<24 + 1)"
# self.flakes(s, m.TooManyExpressionsInStarredAssignment)
self.flakes('''
a, *b, *c = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
a, *b, c, *d = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
*a, *b, *c = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
(a, *b, *c) = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
(a, *b, c, *d) = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
(*a, *b, *c) = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
[a, *b, *c] = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
[a, *b, c, *d] = range(10)
''', m.TwoStarredExpressions)
self.flakes('''
[*a, *b, *c] = range(10)
''', m.TwoStarredExpressions)
@skip("todo: Too hard to make this warn but other cases stay silent")
def test_doubleAssignment(self):
"""
If a variable is re-assigned to without being used, a warning should be
emitted.
"""
self.flakes('''
x = 10
x = 20
''', m.RedefinedWhileUnused)
def test_doubleAssignmentConditionally(self):
"""
If a variable is re-assigned within a conditional, no warning is
emitted.
"""
self.flakes('''
x = 10
if True:
x = 20
''')
def test_doubleAssignmentWithUse(self):
"""
If a variable is re-assigned to after being used, no warning is
emitted.
"""
self.flakes('''
x = 10
y = x * 2
x = 20
''')
def test_comparison(self):
"""
If a defined name is used on either side of any of the six comparison
operators, no warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x < y
x <= y
x == y
x != y
x >= y
x > y
''')
def test_identity(self):
"""
If a defined name is used on either side of an identity test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x is y
x is not y
''')
def test_containment(self):
"""
If a defined name is used on either side of a containment test, no
warning is emitted.
"""
self.flakes('''
x = 10
y = 20
x in y
x not in y
''')
def test_loopControl(self):
"""
break and continue statements are supported.
"""
self.flakes('''
for x in [1, 2]:
break
''')
self.flakes('''
for x in [1, 2]:
continue
''')
def test_ellipsis(self):
"""
Ellipsis in a slice is supported.
"""
self.flakes('''
[1, 2][...]
''')
def test_extendedSlice(self):
"""
Extended slices are supported.
"""
self.flakes('''
x = 3
[1, 2][x,:]
''')
def test_varAugmentedAssignment(self):
"""
Augmented assignment of a variable is supported.
We don't care about var refs.
"""
self.flakes('''
foo = 0
foo += 1
''')
def test_attrAugmentedAssignment(self):
"""
Augmented assignment of attributes is supported.
We don't care about attr refs.
"""
self.flakes('''
foo = None
foo.bar += foo.baz
''')
def test_globalDeclaredInDifferentScope(self):
"""
A 'global' can be declared in one scope and reused in another.
"""
self.flakes('''
def f(): global foo
def g(): foo = 'anything'; foo.is_used()
''')
class TestUnusedAssignment(TestCase):
"""
Tests for warning about unused assignments.
"""
def test_unusedVariable(self):
"""
Warn when a variable in a function is assigned a value that's never
used.
"""
self.flakes('''
def a():
b = 1
''', m.UnusedVariable)
def test_unusedVariableAsLocals(self):
"""
When using locals(), it is perfectly valid to have unused variables
"""
self.flakes('''
def a():
b = 1
return locals()
''')
def test_unusedVariableNoLocals(self):
"""
Using locals() in the wrong scope should not matter
# NNDB/model.py
from peewee import *
from peewee import (FloatField, ProgrammingError, IntegerField, BooleanField,
AsIs)
# Param, Passthrough)
from peewee import fn
import numpy as np
import inspect
import sys
from playhouse.postgres_ext import PostgresqlExtDatabase, ArrayField, BinaryJSONField, JSONField, HStoreField
from playhouse.hybrid import hybrid_property
#from playhouse.shortcuts import RetryOperationalError #peewee==2.10.1
from IPython import embed
from warnings import warn
import os
networks_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../networks'))
sys.path.append(networks_path)
from run_model import QuaLiKizNDNN, QuaLiKizComboNN, QuaLiKizMultiNN
import json
import pandas as pd
import subprocess
import socket
import re
import traceback
import operator
from functools import reduce
from itertools import chain
from collections import OrderedDict
import scipy.io as io
#class RetryPostgresqlExtDatabase(RetryOperationalError, PostgresqlExtDatabase):
# pass
#db = RetryPostgresqlExtDatabase(database='nndb', host='gkdb.org')
db = PostgresqlExtDatabase(database='nndb', host='gkdb.org')
class BaseModel(Model):
"""A base model that will use our Postgresql database"""
class Meta:
database = db
schema = 'develop'
class TrainScript(BaseModel):
script = TextField()
version = TextField()
@classmethod
def from_file(cls, pwd):
with open(pwd, 'r') as script:
script = script.read()
train_script_query = TrainScript.select().where(TrainScript.script == script)
if train_script_query.count() == 0:
with db.atomic() as txn:
stdout = subprocess.check_output('git rev-parse HEAD',
shell=True)
version = stdout.decode('UTF-8').strip()
train_script = TrainScript(
script=script,
version=version
)
train_script.save()
elif train_script_query.count() == 1:
train_script = train_script_query.get()
else:
raise Exception('multiple train scripts found. Could not choose')
return train_script
class Filter(BaseModel):
script = TextField()
description = TextField(null=True)
min = FloatField(null=True)
max = FloatField(null=True)
remove_negative = BooleanField(null=True)
remove_zeros = BooleanField(null=True)
gam_filter = BooleanField(null=True)
ck_max = FloatField(null=True)
diffsep_max = FloatField(null=True)
@classmethod
def from_file(cls, pwd):
with db.atomic() as txn:
with open(pwd, 'r') as script:
script = script.read()
filter = Filter(script=script)
filter.save()
@classmethod
def find_by_path_name(cls, name):
split = re.split('(?:(unstable)_|)(sane|test|training)_(?:gen(\d+)_|)(\d+)D_nions0_flat_filter(\d+).h5', name)
try:
if len(split) != 7:
raise
filter_id = int(split[5])
except:
raise Exception('Could not find filter ID from name "{!s}"'.format(name))
return filter_id
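# Example: for a (hypothetical) file name
#   'unstable_training_gen2_7D_nions0_flat_filter8.h5'
# the regex splits into ['', 'unstable', 'training', '2', '7', '8', ''],
# so find_by_path_name returns the filter ID 8.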
class ComboNetwork(BaseModel):
target_names = ArrayField(TextField)
recipe = TextField()
feature_names = ArrayField(TextField)
networks = ArrayField(IntegerField)
@hybrid_property
def hidden_neurons(self):
return [Network.get_by_id(nn).hyperparameters.get().hidden_neurons for nn in self.networks]
@hidden_neurons.expression
def hidden_neurons(cls):
raise NotImplementedError('Cannot use in SQL query')
def to_QuaLiKizComboNN(self):
network_ids = self.networks
networks = [Network.get_by_id(num).to_QuaLiKizNDNN() for num in network_ids]
recipe = self.recipe
for ii in range(len(network_ids)):
recipe = recipe.replace('nn' + str(ii), 'args[' + str(ii) + ']')
exec('def combo_func(*args): return ' + recipe, globals())
return QuaLiKizComboNN(self.target_names, networks, combo_func)
to_QuaLiKizNN = to_QuaLiKizComboNN
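# Sketch of the recipe rewriting above, for a hypothetical ComboNetwork with
# networks=[12, 34] and recipe='nn0 / nn1': the loop rewrites the recipe to
# 'args[0] / args[1]', so the generated combo_func divides the outputs of the
# two member networks element-wise.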
@classmethod
def find_divsum_candidates(cls):
query = (Network
.select()
.where(Network.target_names[0] % '%_div_%')
.where(SQL('array_length(target_names, 1) = 1'))
)
for network in query:
try:
cls.divsum_from_div_id(network.id)
except Exception:
traceback.print_exc()
@classmethod
def divsum_from_div_id(cls, network_id):
query = (Network
.select()
.where(Network.id == network_id)
)
nn = query.get()
if len(nn.target_names) != 1:
raise Exception('Divsum network needs div network, not {!s}'.format(nn.target_names))
target_name = nn.target_names[0]
print('Trying to make combine Network {:d} with target {!s}'.format(nn.id, target_name))
splitted = re.compile('(.*)_(div|plus)_(.*)').split(target_name)
if len(splitted) != 5:
raise Exception('Could not split {!s} in divsum parts'.format(target_name))
partner_target_sets = []
formula_sets = []
if splitted[2] == 'div':
if splitted[1].startswith('efi') and splitted[3].startswith('efe'):
# If it is efi / efe
partner_targets = [[splitted[1] + '_plus_' + splitted[3]]]
formulas = OrderedDict([(splitted[1], '(nn{0:d} * nn{1:d}) / (nn{0:d} + 1)'),
(splitted[3], '(nn{1:d}) / (nn{0:d} + 1)')])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
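# Algebra check for this branch, with nn0 == efi/efe and nn1 == efi + efe:
#   nn0*nn1 / (nn0 + 1) = (efi/efe)*(efi+efe) / ((efi+efe)/efe) = efi
#   nn1 / (nn0 + 1)     = (efi+efe) / ((efi+efe)/efe)           = efe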
elif splitted[1].startswith('efe') and splitted[3].startswith('efi'):
# If it is efe / efi
partner_targets = [[splitted[3]]]
formulas = OrderedDict([
(splitted[3], 'nn{1:d}'),
(splitted[1], '(nn{0:d} * nn{1:d})')
])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
elif splitted[1].startswith('pfe') and splitted[3].startswith('efi'):
# If it is pfe / efi
pfe = splitted[1]
efi = splitted[3]
split_efi = re.compile('(?=.*)(.)(|ITG|ETG|TEM)(_GB|SI|cm)').split(efi)
efe = ''.join(*[[split_efi[0]] + ['e'] + split_efi[2:]])
# Triplet style: pfe / efi == nn0, pfe + efi + efe == nn1, efi / efe == nn2
partner_targets = [[pfe + '_plus_' + efi + '_plus_' + efe],
[efi + '_div_' + efe]
]
formulas = OrderedDict([
(efi, '(nn{1:d} * nn{2:d}) / (1 + nn{0:d} + nn{2:d})'),
(efe, 'nn{1:d} / (1 + nn{0:d} + nn{2:d})'),
(pfe, '(nn{0:d} * nn{1:d}) / (1 + nn{0:d} + nn{2:d})')
])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
# Simple style: pfe / efi == nn0, efi == nn1, efe / efi == nn2
partner_targets = [[efi],
[efe + '_div_' + efi]
]
formulas = OrderedDict([
(efi, 'nn{1:d}'),
(efe, '(nn{1:d} * nn{2:d})'),
(pfe, '(nn{0:d} * nn{1:d})')
])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
elif splitted[1].startswith('efi') and splitted[3].startswith('pfe'):
# If it is efi / pfe
efi = splitted[1]
pfe = splitted[3]
split_efi = re.compile('(?=.*)(.)(|ITG|ETG|TEM)(_GB|SI|cm)').split(efi)
efe = ''.join(*[[split_efi[0]] + ['e'] + split_efi[2:]])
# Triplet style: efi / pfe == nn0, pfe + efi + efe == nn1, efi / efe == nn2
partner_targets = [[pfe + '_plus_' + efi + '_plus_' + efe],
[efi + '_div_' + efe]
]
formulas = OrderedDict([
(efi, '(nn{0:d} * nn{1:d} * nn{2:d}) / (nn{0:d} + nn{2:d} + nn{0:d} * nn{2:d})'),
(efe, '(nn{0:d} * nn{1:d}) / (nn{0:d} + nn{2:d} + nn{0:d} * nn{2:d})'),
(pfe, '(nn{1:d} * nn{2:d}) / (nn{0:d} + nn{2:d} + nn{0:d} * nn{2:d})')
])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
# Heatflux style: efi / pfe == nn0, efi + efe == nn1, efi / efe == nn2
partner_targets = [[efi + '_plus_' + efe],
[efi + '_div_' + efe]
]
formulas = OrderedDict([
(efi, '(nn{1:d} * nn{2:d}) / (1 + nn{2:d})'),
(efe, '(nn{1:d}) / (1 + nn{2:d})'),
(pfe, '(nn{1:d} * nn{2:d}) / (nn{0:d} * (1 + nn{2:d}))')
])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
elif splitted[1].startswith('pfe') and splitted[3].startswith('efe'):
# If it is pfe / efe
pfe = splitted[1]
efe = splitted[3]
split_efe = re.compile('(?=.*)(.)(|ITG|ETG|TEM)(_GB|SI|cm)').split(efe)
efi = ''.join(*[[split_efe[0]] + ['i'] + split_efe[2:]])
# Triplet style: pfe / efe == nn0, pfe + efi + efe == nn1, efi / efe == nn2
partner_targets = [[pfe + '_plus_' + efi + '_plus_' + efe],
[efi + '_div_' + efe]
]
formulas = OrderedDict([
(efi, '(nn{1:d} * nn{2:d}) / (1 + nn{0:d} + nn{2:d})'),
(efe, '(nn{1:d}) / (1 + nn{0:d} + nn{2:d})'),
(pfe, '(nn{0:d} * nn{1:d}) / (1 + nn{0:d} + nn{2:d})')
])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
# Heatflux style: pfe / efe == nn0, efi + efe == nn1, efi / efe == nn2
partner_targets = [[efi + '_plus_' + efe],
[efi + '_div_' + efe]
]
formulas = OrderedDict([
(efi, '(nn{1:d} * nn{2:d}) / (1 + nn{2:d})'),
(efe, '(nn{1:d}) / (1 + nn{2:d})'),
(pfe, '(nn{0:d} * nn{1:d} * nn{2:d}) / (1 + nn{2:d})')
])
partner_target_sets.append(partner_targets)
formula_sets.append(formulas)
else:
raise NotImplementedError("Div style network {:d} with target {!s} and first part '{!s}'".format(network_id, target_name, splitted[0]))
else:
raise Exception('Divsum network needs div network, not {!s}'.format(nn.target_names))
for formulas, partner_targets in zip(formula_sets, partner_target_sets):
nns = [nn]
skip = False
for partner_target in partner_targets:
if len(partner_target) > 1:
raise Exception('Multiple partner targets!')
query = Network.find_similar_topology_by_id(network_id, match_train_dim=False)
query &= Network.find_similar_networkpar_by_id(network_id, match_train_dim=False)
query &= (Network
.select()
.where(Network.target_names == AsIs(partner_target))
)
if query.count() > 1:
print('Found {:d} matches for {!s}'.format(query.count(), partner_target))
try:
sort = sorted([(el.postprocess.get().rms, el.id) for el in query])
except Postprocess.DoesNotExist as ee:
net_id = re.search('PARAMS: \[(.*)\]', ee.args[0])[1]
table_field = re.search('WHERE \("t1"."(.*)"', ee.args[0])[1]
raise Exception('{!s} {!s} does not exist! Run postprocess.py'.format(table_field, net_id))
print('Selected {1:d} with RMS val {0:.2f}'.format(*sort[0]))
query = (Network
.select()
.where(Network.id == sort[0][1])
)
elif query.count() == 0:
print('No match for {!s}! Skipping..'.format(partner_target))
skip = True
if query.count() > 0:
nns.append(query.get())
if skip is not True:
recipes = OrderedDict()
network_ids = [nn.id for nn in nns]
for target, formula in formulas.items():
recipes[target] = formula.format(*list(range(len(nns))))
combonets = []
purenets = []
for target, recipe in recipes.items():
if all([el not in recipe for el in ['+', '-', '/', '*']]):
net_num = int(recipe.replace('nn', ''))
net_id = network_ids[net_num]
purenets.append(Network.get_by_id(net_id))
else:
query = (ComboNetwork.select()
.where((ComboNetwork.recipe == recipe) &
(ComboNetwork.networks == AsIs(network_ids)))
)
if query.count() == 0:
combonet = cls(target_names=[target],
feature_names=nn.feature_names,
recipe=recipe,
networks=network_ids)
#raise Exception(combonet.recipe + ' ' + str(combonet.networks))
combonet.save()
print('Created ComboNetwork {:d} with recipe {!s} and networks {!s}'.format(combonet.id, recipe, network_ids))
elif query.count() == 1:
combonet = query.get()
print('Network with recipe {!s} and networks {!s} already exists! Skipping!'.format(recipe, network_ids))
else:
raise NotImplementedError('Duplicate recipes! How could this happen..?')
combonets.append(combonet)
flatten = lambda l: [item for sublist in l for item in sublist]
if len(combonets) > 1:
combo_network_partners = AsIs([combonet.id for combonet in combonets[1:]])
else:
combo_network_partners = None
if len(purenets) > 0:
network_partners = AsIs([purenet.id for purenet in purenets])
else:
network_partners = None
try:
net = MultiNetwork.get(MultiNetwork.combo_network == combonets[0],
MultiNetwork.combo_network_partners == combo_network_partners,
MultiNetwork.network_partners == network_partners,
MultiNetwork.target_names == AsIs(list(recipes.keys())),
MultiNetwork.feature_names == AsIs(nn.feature_names)
)
except MultiNetwork.DoesNotExist:
net = MultiNetwork(combo_network = combonets[0],
combo_network_partners = combo_network_partners,
network_partners = network_partners,
target_names = AsIs(list(recipes.keys())),
feature_names = AsIs(nn.feature_names)
)
net.save()
print('Created MultiNetwork with id: {:d}'.format(net.id))
"OMOVS",
"OMRAH",
"ONCER",
"ONCES",
"ONCET",
"ONCUS",
"ONELY",
"ONERS",
"ONERY",
"ONIUM",
"ONKUS",
"ONLAY",
"ONNED",
"ONTIC",
"OOBIT",
"OOHED",
"OOMPH",
"OONTS",
"OOPED",
"OORIE",
"OOSES",
"OOTID",
"OOZED",
"OOZES",
"OPAHS",
"OPALS",
"OPENS",
"OPEPE",
"OPING",
"OPPOS",
"OPSIN",
"OPTED",
"OPTER",
"ORACH",
"ORACY",
"ORALS",
"ORANG",
"ORANT",
"ORATE",
"ORBED",
"ORCAS",
"ORCIN",
"ORDOS",
"OREAD",
"ORFES",
"ORGIA",
"ORGIC",
"ORGUE",
"ORIBI",
"ORIEL",
"ORIXA",
"ORLES",
"ORLON",
"ORLOP",
"ORMER",
"ORNIS",
"ORPIN",
"ORRIS",
"ORTHO",
"ORVAL",
"ORZOS",
"OSCAR",
"OSHAC",
"OSIER",
"OSMIC",
"OSMOL",
"OSSIA",
"OSTIA",
"OTAKU",
"OTARY",
"OTTAR",
"OTTOS",
"OUBIT",
"OUCHT",
"OUENS",
"OUIJA",
"OULKS",
"OUMAS",
"OUNDY",
"OUPAS",
"OUPED",
"OUPHE",
"OUPHS",
"OURIE",
"OUSEL",
"OUSTS",
"OUTBY",
"OUTED",
"OUTRE",
"OUTRO",
"OUTTA",
"OUZEL",
"OUZOS",
"OVALS",
"OVELS",
"OVENS",
"OVERS",
"OVIST",
"OVOLI",
"OVOLO",
"OVULE",
"OWCHE",
"OWIES",
"OWLED",
"OWLER",
"OWLET",
"OWNED",
"OWRES",
"OWRIE",
"OWSEN",
"OXBOW",
"OXERS",
"OXEYE",
"OXIDS",
"OXIES",
"OXIME",
"OXIMS",
"OXLIP",
"OXTER",
"OYERS",
"OZEKI",
"OZZIE",
"PAALS",
"PAANS",
"PACAS",
"PACED",
"PACER",
"PACES",
"PACEY",
"PACHA",
"PACKS",
"PACOS",
"PACTA",
"PACTS",
"PADIS",
"PADLE",
"PADMA",
"PADRE",
"PADRI",
"PAEAN",
"PAEDO",
"PAEON",
"PAGED",
"PAGER",
"PAGES",
"PAGLE",
"PAGOD",
"PAGRI",
"PAIKS",
"PAILS",
"PAINS",
"PAIRE",
"PAIRS",
"PAISA",
"PAISE",
"PAKKA",
"PALAS",
"PALAY",
"PALEA",
"PALED",
"PALES",
"PALET",
"PALIS",
"PALKI",
"PALLA",
"PALLS",
"PALLY",
"PALMS",
"PALMY",
"PALPI",
"PALPS",
"PALSA",
"PAMPA",
"PANAX",
"PANCE",
"PANDA",
"PANDS",
"PANDY",
"PANED",
"PANES",
"PANGA",
"PANGS",
"PANIM",
"PANKO",
"PANNE",
"PANNI",
"PANTO",
"PANTS",
"PANTY",
"PAOLI",
"PAOLO",
"PAPAS",
"PAPAW",
"PAPES",
"PAPPI",
"PAPPY",
"PARAE",
"PARAS",
"PARCH",
"PARDI",
"PARDS",
"PARDY",
"PARED",
"PAREN",
"PAREO",
"PARES",
"PAREU",
"PAREV",
"PARGE",
"PARGO",
"PARIS",
"PARKI",
"PARKS",
"PARKY",
"PARLE",
"PARLY",
"PARMA",
"PAROL",
"PARPS",
"PARRA",
"PARRS",
"PARTI",
"PARTS",
"PARVE",
"PARVO",
"PASEO",
"PASES",
"PASHA",
"PASHM",
"PASKA",
"PASPY",
"PASSE",
"PASTS",
"PATED",
"PATEN",
"PATER",
"PATES",
"PATHS",
"PATIN",
"PATKA",
"PATLY",
"PATTE",
"PATUS",
"PAUAS",
"PAULS",
"PAVAN",
"PAVED",
"PAVEN",
"PAVER",
"PAVES",
"PAVID",
"PAVIN",
"PAVIS",
"PAWAS",
"PAWAW",
"PAWED",
"PAWER",
"PAWKS",
"PAWKY",
"PAWLS",
"PAWNS",
"PAXES",
"PAYED",
"PAYOR",
"PAYSD",
"PEAGE",
"PEAGS",
"PEAKS",
"PEAKY",
"PEALS",
"PEANS",
"PEARE",
"PEARS",
"PEART",
"PEASE",
"PEATS",
"PEATY",
"PEAVY",
"PEAZE",
"PEBAS",
"PECHS",
"PECKE",
"PECKS",
"PECKY",
"PEDES",
"PEDIS",
"PEDRO",
"PEECE",
"PEEKS",
"PEELS",
"PEENS",
"PEEOY",
"PEEPE",
"PEEPS",
"PEERS",
"PEERY",
"PEEVE",
"PEGGY",
"PEGHS",
"PEINS",
"PEISE",
"PEIZE",
"PEKAN",
"PEKES",
"PEKIN",
"PEKOE",
"PELAS",
"PELAU",
"PELES",
"PELFS",
"PELLS",
"PELMA",
"PELON",
"PELTA",
"PELTS",
"PENDS",
"PENDU",
"PENED",
"PENES",
"PENGO",
"PENIE",
"PENIS",
"PENKS",
"PENNA",
"PENNI",
"PENTS",
"PEONS",
"PEONY",
"PEPLA",
"PEPOS",
"PEPPY",
"PEPSI",
"PERAI",
"PERCE",
"PERCS",
"PERDU",
"PERDY",
"PEREA",
"PERES",
"PERIS",
"PERKS",
"PERMS",
"PERNS",
"PEROG",
"PERPS",
"PERRY",
"PERSE",
"PERST",
"PERTS",
"PERVE",
"PERVO",
"PERVS",
"PERVY",
"PESOS",
"PESTS",
"PESTY",
"PETAR",
"PETER",
"PETIT",
"PETRE",
"PETRI",
"PETTI",
"PETTO",
"PEWEE",
"PEWIT",
"PEYSE",
"PHAGE",
"PHANG",
"PHARE",
"PHARM",
"PHEER",
"PHENE",
"PHEON",
"PHESE",
"PHIAL",
"PHISH",
"PHIZZ",
"PHLOX",
"PHOCA",
"PHONO",
"PHONS",
"PHOTS",
"PHPHT",
"PHUTS",
"PHYLA",
"PHYLE",
"PIANI",
"PIANS",
"PIBAL",
"PICAL",
"PICAS",
"PICCY",
"PICKS",
"PICOT",
"PICRA",
"PICUL",
"PIEND",
"PIERS",
"PIERT",
"PIETA",
"PIETS",
"PIEZO",
"PIGHT",
"PIGMY",
"PIING",
"PIKAS",
"PIKAU",
"PIKED",
"PIKER",
"PIKES",
"PIKEY",
"PIKIS",
"PIKUL",
"PILAE",
"PILAF",
"PILAO",
"PILAR",
"PILAU",
"PILAW",
"PILCH",
"PILEA",
"PILED",
"PILEI",
"PILER",
"PILES",
"PILIS",
"PILLS",
"PILOW",
"PILUM",
"PILUS",
"PIMAS",
"PIMPS",
"PINAS",
"PINED",
"PINES",
"PINGO",
"PINGS",
"PINKO",
"PINKS",
"PINNA",
"PINNY",
"PINON",
"PINOT",
"PINTA",
"PINTS",
"PINUP",
"PIONS",
"PIONY",
"PIOUS",
"PIOYE",
"PIOYS",
"PIPAL",
"PIPAS",
"PIPED",
"PIPES",
"PIPET",
"PIPIS",
"PIPIT",
"PIPPY",
"PIPUL",
"PIRAI",
"PIRLS",
"PIRNS",
"PIROG",
"PISCO",
"PISES",
"PISKY",
"PISOS",
"PISSY",
"PISTE",
"PITAS",
"PITHS",
"PITON",
"PITOT",
"PITTA",
"PIUMS",
"PIXES",
"PIZED",
"PIZES",
"PLAAS",
"PLACK",
"PLAGE",
"PLANS",
"PLAPS",
"PLASH",
"PLASM",
"PLAST",
"PLATS",
"PLATT",
"PLATY",
"PLAYA",
"PLAYS",
"PLEAS",
"PLEBE",
"PLEBS",
"PLENA",
"PLEON",
"PLESH",
"PLEWS",
"PLICA",
"PLIES",
"PLIMS",
"PLING",
"PLINK",
"PLOAT",
"PLODS",
"PLONG",
"PLONK",
"PLOOK",
"PLOPS",
"PLOTS",
"PLOTZ",
"PLOUK",
"PLOWS",
"PLOYE",
"PLOYS",
"PLUES",
"PLUFF",
"PLUGS",
"PLUMS",
"PLUMY",
"PLUOT",
"PLUTO",
"PLYER",
"POACH",
"POAKA",
"POAKE",
"POBOY",
"POCKS",
"POCKY",
"PODAL",
"PODDY",
"PODEX",
"PODGE",
"PODGY",
"PODIA",
"POEMS",
"POEPS",
"POETS",
"POGEY",
"POGGE",
"POGOS",
"POHED",
"POILU",
"POIND",
"POKAL",
"POKED",
"POKES",
"POKEY",
"POKIE",
"POLED",
"POLER",
"POLES",
"POLEY",
"POLIO",
"POLIS",
"POLJE",
"POLKS",
"POLLS",
"POLLY",
"POLOS",
"POLTS",
"POLYS",
"POMBE",
"POMES",
"POMMY",
"POMOS",
"POMPS",
"PONCE",
"PONCY",
"PONDS",
"PONES",
"PONEY",
"PONGA",
"PONGO",
"PONGS",
"PONGY",
"PONKS",
"PONTS",
"PONTY",
"PONZU",
"POODS",
"POOED",
"POOFS",
"POOFY",
"POOHS",
"POOJA",
"POOKA",
"POOKS",
"POOLS",
"POONS",
"POOPS",
"POOPY",
"POORI",
"POORT",
"POOTS",
"POOVE",
"POOVY",
"POPES",
"POPPA",
"POPSY",
"PORAE",
"PORAL",
"PORED",
"PORER",
"PORES",
"PORGE",
"PORGY",
"PORIN",
"PORKS",
"PORKY",
"PORNO",
"PORNS",
"PORNY",
"PORTA",
"PORTS",
"PORTY",
"POSED",
"POSES",
"POSEY",
"POSHO",
"POSTS",
"POTAE",
"POTCH",
"POTED",
"POTES",
"POTIN",
"POTOO",
"POTSY",
"POTTO",
"POTTS",
"POTTY",
"POUFF",
"POUFS",
"POUKE",
"POUKS",
"POULE",
"POULP",
"POULT",
"POUPE",
"POUPT",
"POURS",
"POUTS",
"POWAN",
"POWIN",
"POWND",
"POWNS",
"POWNY",
"POWRE",
"POXED",
"POXES",
"POYNT",
"POYOU",
"POYSE",
"POZZY",
"PRAAM",
"PRADS",
"PRAHU",
"PRAMS",
"PRANA",
"PRANG",
"PRAOS",
"PRASE",
"PRATE",
"PRATS",
"PRATT",
"PRATY",
"PRAUS",
"PRAYS",
"PREDY",
"PREED",
"PREES",
"PREIF",
"PREMS",
"PREMY",
"PRENT",
"PREON",
"PREOP",
"PREPS",
"PRESA",
"PRESE",
"PREST",
"PREVE",
"PREXY",
"PREYS",
"PRIAL",
"PRICY",
"PRIEF",
"PRIER",
"PRIES",
"PRIGS",
"PRILL",
"PRIMA",
"PRIMI",
"PRIMP",
"PRIMS",
"PRIMY",
"PRINK",
"PRION",
"PRISE",
"PRISS",
"PROAS",
"PROBS",
"PRODS",
"PROEM",
"PROFS",
"PROGS",
"PROIN",
"PROKE",
"PROLE",
"PROLL",
"PROMO",
"PROMS",
"PRONK",
"PROPS",
"PRORE",
"PROSO",
"PROSS",
"PROST",
"PROSY",
"PROTO",
"PROUL",
"PROWS",
"PROYN",
"PRUNT",
"PRUTA",
"PRYER",
"PRYSE",
"PSEUD",
"PSHAW",
"PSION",
"PSOAE",
"PSOAI",
"PSOAS",
"PSORA",
"PSYCH",
"PSYOP",
"PUBCO",
"PUBES",
"PUBIS",
"PUCAN",
"PUCER",
"PUCES",
"PUCKA",
"PUCKS",
"PUDDY",
"PUDGE",
"PUDIC",
"PUDOR",
"PUDSY",
"PUDUS",
"PUERS",
"PUFFA",
"PUFFS",
"PUGGY",
"PUGIL",
"PUHAS",
"PUJAH",
"PUJAS",
"PUKAS",
"PUKED",
"PUKER",
"PUKES",
"PUKEY",
"PUKKA",
"PUKUS",
"PULAO",
"PULAS",
"PULED",
"PULER",
"PULES",
"PULIK",
"PULIS",
"PULKA",
"PULKS",
"PULLI",
"PULLS",
"PULLY",
"PULMO",
"PULPS",
"PULUS",
"PUMAS",
"PUMIE",
"PUMPS",
"PUNAS",
"PUNCE",
"PUNGA",
"PUNGS",
"PUNJI",
"PUNKA",
"PUNKS",
"PUNKY",
"PUNNY",
"PUNTO",
"PUNTS",
"PUNTY",
"PUPAE",
"PUPAS",
"PUPUS",
"PURDA",
"PURED",
"PURES",
"PURIN",
"PURIS",
"PURLS",
"PURPY",
"PURRS",
"PURSY",
"PURTY",
"PUSES",
"PUSLE",
"PUSSY",
"PUTID",
"PUTON",
"PUTTI",
"PUTTO",
"PUTTS",
"PUZEL",
"PWNED",
"PYATS",
"PYETS",
"PYGAL",
"PYINS",
"PYLON",
"PYNED",
"PYNES",
"PYOID",
"PYOTS",
"PYRAL",
"PYRAN",
"PYRES",
"PYREX",
"PYRIC",
"PYROS",
"PYXED",
"PYXES",
"PYXIE",
"PYXIS",
"PZAZZ",
"QADIS",
"QAIDS",
"QAJAQ",
"QANAT",
"QAPIK",
"QIBLA",
"QOPHS",
"QORMA",
"QUADS",
"QUAFF",
"QUAGS",
"QUAIR",
"QUAIS",
"QUAKY",
"QUALE",
"QUANT",
"QUARE",
"QUASS",
"QUATE",
"QUATS",
"QUAYD",
"QUAYS",
"QUBIT",
"QUEAN",
"QUEME",
"QUENA",
"QUERN",
"QUEYN",
"QUEYS",
"QUICH",
"QUIDS",
"QUIFF",
"QUIMS",
"QUINA",
"QUINE",
"QUINO",
"QUINS",
"QUINT",
"QUIPO",
"QUIPS",
"QUIPU",
"QUIRE",
"QUIRT",
"QUIST",
"QUITS",
"QUOAD",
"QUODS",
"QUOIF",
"QUOIN",
"QUOIT",
"QUOLL",
"QUONK",
"QUOPS",
"QURSH",
"QUYTE",
"RABAT",
"RABIC",
"RABIS",
"RACED",
"RACES",
"RACHE",
"RACKS",
"RACON",
"RADGE",
"RADIX",
"RADON",
"RAFFS",
"RAFTS",
"RAGAS",
"RAGDE",
"RAGED",
"RAGEE",
"RAGER",
"RAGES",
"RAGGA",
"RAGGS",
"RAGGY",
"RAGIS",
"RAGUS",
"RAHED",
"RAHUI",
"RAIAS",
"RAIDS",
"RAIKS",
"RAILE",
"RAILS",
"RAINE",
"RAINS",
"RAIRD",
"RAITA",
"RAITS",
"RAJAS",
"RAJES",
"RAKED",
"RAKEE",
"RAKER",
"RAKES",
"RAKIA",
"RAKIS",
"RAKUS",
"RALES",
"RAMAL",
"RAMEE",
"RAMET",
"RAMIE",
"RAMIN",
"RAMIS",
"RAMMY",
"RAMPS",
"RAMUS",
"RANAS",
"RANCE",
"RANDS",
"RANEE",
"RANGA",
"RANGI",
"RANGS",
"RANGY",
"RANID",
"RANIS",
"RANKE",
"RANKS",
"RANTS",
"RAPED",
"RAPER",
"RAPES",
"RAPHE",
"RAPPE",
"RARED",
"RAREE",
"RARES",
"RARKS",
"RASED",
"RASER",
"RASES",
"RASPS",
"RASSE",
"RASTA",
"RATAL",
"RATAN",
"RATAS",
"RATCH",
"RATED",
"RATEL",
"RATER",
"RATES",
"RATHA",
"RATHE",
"RATHS",
"RATOO",
"RATOS",
"RATUS",
"RAUNS",
"RAUPO",
"RAVED",
"RAVEL",
"RAVER",
"RAVES",
"RAVEY",
"RAVIN",
"RAWER",
"RAWIN",
"RAWLY",
"RAWNS",
"RAXED",
"RAXES",
"RAYAH",
"RAYAS",
"RAYED",
"RAYLE",
"RAYNE",
"RAZED",
"RAZEE",
"RAZER",
"RAZES",
"RAZOO",
"READD",
"READS",
"REAIS",
"REAKS",
"REALO",
"REALS",
"REAME",
"REAMS",
"REAMY",
"REANS",
"REAPS",
"REARS",
"REAST",
"REATA",
"REATE",
"REAVE",
"REBBE",
"REBEC",
"REBID",
"REBIT",
"REBOP",
"REBUY",
"RECAL",
"RECCE",
"RECCO",
"RECCY",
"RECIT",
"RECKS",
"RECON",
"RECTA",
"RECTI",
"RECTO",
"REDAN",
"REDDS",
"REDDY",
"REDED",
"REDES",
"REDIA",
"REDID",
"REDIP",
"REDLY",
"REDON",
"REDOS",
"REDOX",
"REDRY",
"REDUB",
"REDUX",
"REDYE",
"REECH",
"REEDE",
"REEDS",
"REEFS",
"REEFY",
"REEKS",
"REEKY",
"REELS",
"REENS",
"REEST",
"REEVE",
"REFED",
"REFEL",
"REFFO",
"REFIS",
"REFIX",
"REFLY",
"REFRY",
"REGAR",
"REGES",
"REGGO",
"REGIE",
"REGMA",
"REGNA",
"REGOS",
"REGUR",
"REHEM",
"REIFS",
"REIFY",
"REIKI",
"REIKS",
"REINK",
"REINS",
"REIRD",
"REIST",
"REIVE",
"REJIG",
"REJON",
"REKED",
"REKES",
"REKEY",
"RELET",
"RELIE",
"RELIT",
"RELLO",
"REMAN",
"REMAP",
"REMEN",
"REMET",
"REMEX",
"REMIX",
"RENAY",
"RENDS",
"RENEY",
"RENGA",
"RENIG",
"RENIN",
"RENNE",
"RENOS",
"RENTE",
"RENTS",
"REOIL",
"REORG",
"REPEG",
"REPIN",
"REPLA",
"REPOS",
"REPOT",
"REPPS",
"REPRO",
"RERAN",
"RERIG",
"RESAT",
"RESAW",
"RESAY",
"RESEE",
"RESES",
"RESEW",
"RESID",
"RESIT",
"RESOD",
"RESOW",
"RESTO",
"RESTS",
"RESTY",
"RESUS",
"RETAG",
"RETAX",
"RETEM",
"RETIA",
"RETIE",
"RETOX",
"REVET",
"REVIE",
"REWAN",
"REWAX",
"REWED",
"REWET",
"REWIN",
"REWON",
"REWTH",
"REXES",
"REZES",
"RHEAS",
"RHEME",
"RHEUM",
"RHIES",
"RHIME",
"RHINE",
"RHODY",
"RHOMB",
"RHONE",
"RHUMB",
"RHYNE",
"RHYTA",
"RIADS",
"RIALS",
"RIANT",
"RIATA",
"RIBAS",
"RIBBY",
"RIBES",
"RICED",
"RICER",
"RICES",
"RICEY",
"RICHT",
"RICIN",
"RICKS",
"RIDES",
"RIDGY",
"RIDIC",
"RIELS",
"RIEMS",
"RIEVE",
"RIFER",
"RIFFS",
"RIFTE",
"RIFTS",
"RIFTY",
"RIGGS",
"RIGOL",
"RILED",
"RILES",
"RILEY",
"RILLE",
"RILLS",
"RIMAE",
"RIMED",
"RIMER",
"RIMES",
"RIMUS",
"RINDS",
"RINDY",
"RINES",
"RINGS",
"RINKS",
"RIOJA",
"RIOTS",
"RIPED",
"RIPES",
"RIPPS",
"RISES",
"RISHI",
"RISKS",
"RISPS",
"RISUS",
"RITES",
"RITTS",
"RITZY",
"RIVAS",
"RIVED",
"RIVEL",
"RIVEN",
"RIVES",
"RIYAL",
"RIZAS",
"ROADS",
"ROAMS",
"ROANS",
"ROARS",
"ROARY",
"ROATE",
"ROBED",
"ROBES",
"ROBLE",
"ROCKS",
"RODED",
"RODES",
"ROGUY",
"ROHES",
"ROIDS",
"ROILS",
"ROILY",
"ROINS",
"ROIST",
"ROJAK",
"ROJIS",
"ROKED",
"ROKER",
"ROKES",
"ROLAG",
"ROLES",
"ROLFS",
"ROLLS",
"ROMAL",
"ROMAN",
"ROMEO",
"ROMPS",
"RONDE",
"RONDO",
"RONEO",
"RONES",
"RONIN",
"RONNE",
"RONTE",
"RONTS",