the-stack_0_25329
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration types."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Optional
from absl import logging
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.proto import config_pb2
# Define types here to avoid type errors between OSS and internal code.
ModelSpec = config_pb2.ModelSpec
SlicingSpec = config_pb2.SlicingSpec
CrossSlicingSpec = config_pb2.CrossSlicingSpec
BinarizationOptions = config_pb2.BinarizationOptions
ConfidenceIntervalOptions = config_pb2.ConfidenceIntervalOptions
AggregationOptions = config_pb2.AggregationOptions
MetricConfig = config_pb2.MetricConfig
MetricsSpec = config_pb2.MetricsSpec
MetricDirection = config_pb2.MetricDirection
GenericChangeThreshold = config_pb2.GenericChangeThreshold
GenericValueThreshold = config_pb2.GenericValueThreshold
MetricThreshold = config_pb2.MetricThreshold
Options = config_pb2.Options
PerSliceMetricThreshold = config_pb2.PerSliceMetricThreshold
PerSliceMetricThresholds = config_pb2.PerSliceMetricThresholds
CrossSliceMetricThreshold = config_pb2.CrossSliceMetricThreshold
CrossSliceMetricThresholds = config_pb2.CrossSliceMetricThresholds
EvalConfig = config_pb2.EvalConfig
PaddingOptions = config_pb2.PaddingOptions
def verify_eval_config(eval_config: EvalConfig,
baseline_required: Optional[bool] = None):
"""Verifies eval config."""
if not eval_config.model_specs:
raise ValueError(
'At least one model_spec is required: eval_config=\n{}'.format(
eval_config))
model_specs_by_name = {}
baseline = None
for spec in eval_config.model_specs:
if spec.label_key and spec.label_keys:
raise ValueError('only one of label_key or label_keys should be used at '
'a time: model_spec=\n{}'.format(spec))
if spec.prediction_key and spec.prediction_keys:
raise ValueError(
'only one of prediction_key or prediction_keys should be used at '
'a time: model_spec=\n{}'.format(spec))
if spec.example_weight_key and spec.example_weight_keys:
raise ValueError(
'only one of example_weight_key or example_weight_keys should be '
'used at a time: model_spec=\n{}'.format(spec))
if spec.name in model_specs_by_name:
raise ValueError(
'more than one model_spec found for model "{}": {}'.format(
spec.name, [spec, model_specs_by_name[spec.name]]))
model_specs_by_name[spec.name] = spec
if spec.is_baseline:
if baseline is not None:
raise ValueError('only one model_spec may be a baseline, found: '
'{} and {}'.format(spec, baseline))
baseline = spec
if len(model_specs_by_name) > 1 and '' in model_specs_by_name:
raise ValueError('A name is required for all ModelSpecs when multiple '
'models are used: eval_config=\n{}'.format(eval_config))
if baseline_required and not baseline:
raise ValueError(
'A baseline ModelSpec is required: eval_config=\n{}'.format(
eval_config))
def update_eval_config_with_defaults(
eval_config: EvalConfig,
maybe_add_baseline: Optional[bool] = None,
maybe_remove_baseline: Optional[bool] = None,
has_baseline: Optional[bool] = False,
rubber_stamp: Optional[bool] = False) -> EvalConfig:
"""Returns a new config with default settings applied.
a) Add or remove a model_spec according to "has_baseline".
b) Fix the model names (model_spec.name) to tfma.CANDIDATE_KEY and
tfma.BASELINE_KEY.
c) Update the metrics_specs with the fixed model name.
Args:
eval_config: Original eval config.
maybe_add_baseline: DEPRECATED. True to add a baseline ModelSpec to the
config as a copy of the candidate ModelSpec that should already be
present. This is only applied if a single ModelSpec already exists in the
config and that spec doesn't have a name associated with it. When applied
the model specs will use the names tfma.CANDIDATE_KEY and
tfma.BASELINE_KEY. Only one of maybe_add_baseline or maybe_remove_baseline
should be used.
maybe_remove_baseline: DEPRECATED. True to remove a baseline ModelSpec from
the config if it already exists. Removal of the baseline also removes any
change thresholds. Only one of maybe_add_baseline or maybe_remove_baseline
should be used.
has_baseline: True to add a baseline ModelSpec to the config as a copy of
the candidate ModelSpec that should already be present. This is only
applied if a single ModelSpec already exists in the config and that spec
doesn't have a name associated with it. When applied the model specs will
use the names tfma.CANDIDATE_KEY and tfma.BASELINE_KEY. False to remove a
baseline ModelSpec from the config if it already exists. Removal of the
baseline also removes any change thresholds. Only one of has_baseline or
maybe_remove_baseline should be used.
rubber_stamp: True if this model is being rubber stamped. When a model is
rubber stamped diff thresholds will be ignored if an associated baseline
model is not passed.
"""
if (not has_baseline and has_change_threshold(eval_config) and
not rubber_stamp):
# TODO(b/173657964): Raise an error instead of logging an error.
logging.error('There are change thresholds, but the baseline is missing. '
'This is allowed only when rubber stamping (first run).')
updated_config = EvalConfig()
updated_config.CopyFrom(eval_config)
# if user requests CIs but doesn't set method, use JACKKNIFE
if (eval_config.options.compute_confidence_intervals.value and
eval_config.options.confidence_intervals.method ==
ConfidenceIntervalOptions.UNKNOWN_CONFIDENCE_INTERVAL_METHOD):
updated_config.options.confidence_intervals.method = (
config_pb2.ConfidenceIntervalOptions.JACKKNIFE)
if maybe_add_baseline and maybe_remove_baseline:
raise ValueError('only one of maybe_add_baseline and maybe_remove_baseline '
'should be used')
if maybe_add_baseline or maybe_remove_baseline:
logging.warning(
""""maybe_add_baseline" and "maybe_remove_baseline" are deprecated,
please use "has_baseline" instead.""")
if has_baseline:
raise ValueError(
""""maybe_add_baseline" and "maybe_remove_baseline" are ignored if
"has_baseline" is set.""")
if has_baseline is not None:
if has_baseline:
maybe_add_baseline = True
else:
maybe_remove_baseline = True
# Has a baseline model.
if (maybe_add_baseline and len(updated_config.model_specs) == 1 and
not updated_config.model_specs[0].name):
baseline = updated_config.model_specs.add()
baseline.CopyFrom(updated_config.model_specs[0])
baseline.name = constants.BASELINE_KEY
baseline.is_baseline = True
updated_config.model_specs[0].name = constants.CANDIDATE_KEY
logging.info(
'Adding default baseline ModelSpec based on the candidate ModelSpec '
'provided. The candidate model will be called "%s" and the baseline '
'will be called "%s": updated_config=\n%s', constants.CANDIDATE_KEY,
constants.BASELINE_KEY, updated_config)
# Does not have a baseline.
if maybe_remove_baseline:
tmp_model_specs = []
for model_spec in updated_config.model_specs:
if not model_spec.is_baseline:
tmp_model_specs.append(model_spec)
del updated_config.model_specs[:]
updated_config.model_specs.extend(tmp_model_specs)
for metrics_spec in updated_config.metrics_specs:
for metric in metrics_spec.metrics:
if metric.threshold.ByteSize():
metric.threshold.ClearField('change_threshold')
for per_slice_threshold in metric.per_slice_thresholds:
if per_slice_threshold.threshold.ByteSize():
per_slice_threshold.threshold.ClearField('change_threshold')
for cross_slice_threshold in metric.cross_slice_thresholds:
if cross_slice_threshold.threshold.ByteSize():
cross_slice_threshold.threshold.ClearField('change_threshold')
for threshold in metrics_spec.thresholds.values():
if threshold.ByteSize():
threshold.ClearField('change_threshold')
for per_slice_thresholds in metrics_spec.per_slice_thresholds.values():
for per_slice_threshold in per_slice_thresholds.thresholds:
if per_slice_threshold.threshold.ByteSize():
per_slice_threshold.threshold.ClearField('change_threshold')
for cross_slice_thresholds in metrics_spec.cross_slice_thresholds.values(
):
for cross_slice_threshold in cross_slice_thresholds.thresholds:
if cross_slice_threshold.threshold.ByteSize():
cross_slice_threshold.threshold.ClearField('change_threshold')
logging.info(
'Request was made to ignore the baseline ModelSpec and any change '
'thresholds. This is likely because a baseline model was not provided: '
'updated_config=\n%s', updated_config)
if not updated_config.model_specs:
updated_config.model_specs.add()
model_names = []
for spec in updated_config.model_specs:
model_names.append(spec.name)
if len(model_names) == 1 and model_names[0]:
logging.info(
'ModelSpec name "%s" is being ignored and replaced by "" because a '
'single ModelSpec is being used', model_names[0])
updated_config.model_specs[0].name = ''
model_names = ['']
for spec in updated_config.metrics_specs:
if not spec.model_names:
spec.model_names.extend(model_names)
elif len(model_names) == 1:
del spec.model_names[:]
spec.model_names.append('')
return updated_config
def has_change_threshold(eval_config: EvalConfig) -> bool:
"""Checks whether the eval_config has any change thresholds.
Args:
eval_config: the TFMA eval_config.
Returns:
True when there are change thresholds otherwise False.
"""
for metrics_spec in eval_config.metrics_specs:
for metric in metrics_spec.metrics:
if metric.threshold.change_threshold.ByteSize():
return True
for per_slice_threshold in metric.per_slice_thresholds:
if per_slice_threshold.threshold.change_threshold.ByteSize():
return True
for cross_slice_threshold in metric.cross_slice_thresholds:
if cross_slice_threshold.threshold.change_threshold.ByteSize():
return True
for threshold in metrics_spec.thresholds.values():
if threshold.change_threshold.ByteSize():
return True
for per_slice_thresholds in metrics_spec.per_slice_thresholds.values():
for per_slice_threshold in per_slice_thresholds.thresholds:
if per_slice_threshold.threshold.change_threshold.ByteSize():
return True
for cross_slice_thresholds in metrics_spec.cross_slice_thresholds.values():
for cross_slice_threshold in cross_slice_thresholds.thresholds:
if cross_slice_threshold.threshold.change_threshold.ByteSize():
return True
return False
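# --- Illustrative usage sketch (not part of the original module). It shows how a
# config with a single unnamed candidate ModelSpec is expanded with a default
# baseline and then verified; the field values below are made up for illustration.
def _example_update_and_verify():
  eval_config = EvalConfig()
  spec = eval_config.model_specs.add()
  spec.label_key = 'label'
  # has_baseline=True copies the candidate spec into a baseline and assigns the
  # names tfma.CANDIDATE_KEY and tfma.BASELINE_KEY.
  updated = update_eval_config_with_defaults(eval_config, has_baseline=True)
  verify_eval_config(updated, baseline_required=True)
  return updated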
|
the-stack_0_25330
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import unittest
import PyOpenColorIO as OCIO
class ExponentWithLinearTransformTest(unittest.TestCase):
TEST_ID = 'sample exponent linear'
TEST_GAMMA = [1, 2, 3, 4]
TEST_OFFSET = [0.1, 0.2, 0.3, 0.4]
TEST_NEGATIVE_STYLE = OCIO.NEGATIVE_MIRROR
TEST_DIRECTION = OCIO.TRANSFORM_DIR_INVERSE
def setUp(self):
self.exp_tr = OCIO.ExponentWithLinearTransform()
def tearDown(self):
self.exp_tr = None
def test_direction(self):
"""
Test the setDirection() and getDirection() methods.
"""
# Default initialized direction is forward.
self.assertEqual(self.exp_tr.getDirection(),
OCIO.TRANSFORM_DIR_FORWARD)
for direction in OCIO.TransformDirection.__members__.values():
# Setting the unknown direction preserves the current direction.
if direction != OCIO.TRANSFORM_DIR_UNKNOWN:
self.exp_tr.setDirection(direction)
self.assertEqual(self.exp_tr.getDirection(), direction)
# Wrong type tests.
for invalid in (None, 1, 'test'):
with self.assertRaises(TypeError):
self.exp_tr.setDirection(invalid)
def test_format_metadata(self):
"""
Test the getFormatMetadata() method.
"""
format_metadata = self.exp_tr.getFormatMetadata()
format_metadata.setName(self.TEST_ID)
self.assertIsInstance(format_metadata, OCIO.FormatMetadata)
self.assertEqual(format_metadata.getName(), self.TEST_ID)
def test_gamma(self):
"""
Test the setGamma() and getGamma() methods.
"""
# Default initialized gamma values are [1, 1, 1, 1].
self.assertEqual(self.exp_tr.getGamma(), [1, 1, 1, 1])
self.exp_tr.setGamma(self.TEST_GAMMA)
self.assertEqual(self.exp_tr.getGamma(), self.TEST_GAMMA)
# Wrong type tests.
for invalid in (None, 'hello', [1, 2, 3]):
with self.assertRaises(TypeError):
self.exp_tr.setGamma(invalid)
def test_validate_gamma(self):
"""
Test the validate() method.
"""
# Validate should pass with default values.
self.exp_tr.validate()
# Validate should pass with positive values.
self.exp_tr.setGamma([1, 2, 3, 4])
self.exp_tr.validate()
# Validate should fail with values below the lower bound of 1.
self.exp_tr.setGamma([-1, -2, -3, -4])
with self.assertRaises(OCIO.Exception):
self.exp_tr.validate()
# Validate should fail with values above the upper bound of 10.
self.exp_tr.setGamma([11, 1, 1, 1])
with self.assertRaises(OCIO.Exception):
self.exp_tr.validate()
def test_negative_style(self):
"""
Test the setNegativeStyle() and getNegativeStyle() methods.
"""
# Default initialized negative style is linear.
self.assertEqual(self.exp_tr.getNegativeStyle(), OCIO.NEGATIVE_LINEAR)
# These negative extrapolations are not valid for
# MonCurve exponent style.
exception_negatives = [OCIO.NEGATIVE_CLAMP, OCIO.NEGATIVE_PASS_THRU]
for negative_style in OCIO.NegativeStyle.__members__.values():
if negative_style not in exception_negatives:
self.exp_tr.setNegativeStyle(negative_style)
self.assertEqual(
self.exp_tr.getNegativeStyle(), negative_style)
else:
with self.assertRaises(OCIO.Exception):
self.exp_tr.setNegativeStyle(negative_style)
def test_offset(self):
"""
Test the setOffset() and getOffset() methods.
"""
# Default initialized offset values are [0.0, 0.0, 0.0, 0.0]
self.assertListEqual(self.exp_tr.getOffset(), [0.0, 0.0, 0.0, 0.0])
# Test by setting offset values to TEST_OFFSET.
self.exp_tr.setOffset(self.TEST_OFFSET)
self.assertListEqual(self.exp_tr.getOffset(), self.TEST_OFFSET)
def test_validate_offset(self):
"""
Test the validate() method.
"""
# Validate should pass with default values.
self.exp_tr.validate()
# Validate should pass with positive values.
self.exp_tr.setOffset([0.1, 0.2, 0.3, 0.4])
self.exp_tr.validate()
# Validate should fail with values below the lower bound of 0.
self.exp_tr.setOffset([-1, -2, -3, -4])
with self.assertRaises(OCIO.Exception):
self.exp_tr.validate()
# Validate should fail with values above the upper bound of 0.9.
self.exp_tr.setOffset([1, 1, 1, 1])
with self.assertRaises(OCIO.Exception):
self.exp_tr.validate()
def test_constructor_with_keyword(self):
"""
Test ExponentWithLinearTransform constructor with keywords and validate its values.
"""
# With keywords in their proper order.
exp_tr = OCIO.ExponentWithLinearTransform(
gamma=self.TEST_GAMMA,
offset=self.TEST_OFFSET,
negativeStyle=self.TEST_NEGATIVE_STYLE,
direction=self.TEST_DIRECTION)
self.assertEqual(exp_tr.getGamma(), self.TEST_GAMMA)
self.assertEqual(exp_tr.getOffset(), self.TEST_OFFSET)
self.assertEqual(exp_tr.getNegativeStyle(), self.TEST_NEGATIVE_STYLE)
self.assertEqual(exp_tr.getDirection(), self.TEST_DIRECTION)
# With keywords not in their proper order.
exp_tr2 = OCIO.ExponentWithLinearTransform(
direction=self.TEST_DIRECTION,
negativeStyle=self.TEST_NEGATIVE_STYLE,
gamma=self.TEST_GAMMA,
offset=self.TEST_OFFSET)
self.assertEqual(exp_tr2.getGamma(), self.TEST_GAMMA)
self.assertEqual(exp_tr2.getOffset(), self.TEST_OFFSET)
self.assertEqual(exp_tr2.getNegativeStyle(), self.TEST_NEGATIVE_STYLE)
self.assertEqual(exp_tr2.getDirection(), self.TEST_DIRECTION)
def test_constructor_with_positional(self):
"""
Test ExponentWithLinearTransform constructor without keywords and validate its values.
"""
exp_tr = OCIO.ExponentWithLinearTransform(
self.TEST_GAMMA,
self.TEST_OFFSET,
self.TEST_NEGATIVE_STYLE,
self.TEST_DIRECTION)
self.assertEqual(exp_tr.getGamma(), self.TEST_GAMMA)
self.assertEqual(exp_tr.getOffset(), self.TEST_OFFSET)
self.assertEqual(exp_tr.getNegativeStyle(), self.TEST_NEGATIVE_STYLE)
self.assertEqual(exp_tr.getDirection(), self.TEST_DIRECTION)
def test_constructor_wrong_parameter_type(self):
"""
Test ExponentWithLinearTransform constructor with a wrong parameter type.
"""
for invalid in (None, 1, self.TEST_ID):
with self.assertRaises(TypeError):
exp_tr = OCIO.ExponentWithLinearTransform(invalid)
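# --- Illustrative sketch (not part of the original tests): a hypothetical way to
# apply ExponentWithLinearTransform to RGB values through a processor, assuming a
# raw config is sufficient. Gamma/offset values are arbitrary but fall within the
# validated ranges exercised above.
def _example_apply_transform():
    config = OCIO.Config.CreateRaw()
    tr = OCIO.ExponentWithLinearTransform(
        gamma=[2.2, 2.2, 2.2, 1.0],
        offset=[0.055, 0.055, 0.055, 0.0])
    cpu = config.getProcessor(tr).getDefaultCPUProcessor()
    return cpu.applyRGB([0.18, 0.18, 0.18])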
|
the-stack_0_25334
|
DOMAIN = "meteo_system_weather"
CONF_STATION_NAME = "station_name"
CONF_WEATHER_STATIONS = "weather_stations"
ENTITY_LAST_UPDATE = "last_update"
ENTITY_TEMP = "temperature"
ENTITY_HUMIDITY = "humidity"
ENTITY_PERCEIVED_TEMP = "perceived_temperature"
ENTITY_TEMP_COMMENT = "temperature_comment"
ENTITY_PRESSURE = "pressure"
ENTITY_WIND = "wind"
ENTITY_WIND_DIRECTION = "wind_direction"
ENTITY_WIND_COMMENT = "wind_comment"
ENTITY_RAIN = "rain"
ENTITY_RAIN_COMMENT = "rain_comment"
ENTITY_RAIN_INTENSITY = "rain_intensity"
ENTITY_STATION_STATUS = "station_status"
|
the-stack_0_25335
|
from jsonschema import Draft7Validator, validators
def extend_with_default(validator_class):
"""
Validates and fills in default values
"""
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for property, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for error in validate_properties(
validator,
properties,
instance,
schema,
):
yield error
return validators.extend(
validator_class,
{"properties": set_defaults},
)
DefaultValidatingDraft7Validator = extend_with_default(Draft7Validator)
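# --- Illustrative usage sketch (not part of the original module): the schema and
# instance below are made up. Validation fills the missing "timeout" property with
# its declared default, in place.
if __name__ == "__main__":
    schema = {
        "type": "object",
        "properties": {"timeout": {"type": "number", "default": 30}},
    }
    obj = {}
    DefaultValidatingDraft7Validator(schema).validate(obj)
    print(obj)  # -> {'timeout': 30}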
|
the-stack_0_25336
|
"""Functions for histograms.
These functions are used for low-level work
with histograms and their contents.
They are not needed for normal usage.
"""
import collections
import copy
import itertools
import operator
import sys
if sys.version_info.major == 3:
from functools import reduce as _reduce
else:
_reduce = reduce
import lena.core
import lena.structures.graph
class HistCell(collections.namedtuple("HistCell", ("edges, bin, index"))):
"""A namedtuple with fields *edges, bin, index*."""
# from Aaron Hall's answer https://stackoverflow.com/a/28568351/952234
__slots__ = ()
def cell_to_string(
cell_edges, var_context=None, coord_names=None,
coord_fmt="{}_lte_{}_lt_{}", coord_join="_", reverse=False):
"""Transform cell edges into a string.
*cell_edges* is a tuple of pairs *(lower bound, upper bound)*
for each coordinate.
*coord_names* is a list of coordinates names.
*coord_fmt* is a string,
which defines how to format individual coordinates.
*coord_join* is a string, which joins coordinate pairs.
If *reverse* is True, coordinates are joined in reverse order.
"""
# todo: do we really need var_context?
# todo: even if so, why isn't that a {}? Is that dangerous?
if coord_names is None:
if var_context is None:
coord_names = [
"coord{}".format(ind) for ind in range(len(cell_edges))
]
else:
if "combine" in var_context:
coord_names = [var["name"]
for var in var_context["combine"]]
else:
coord_names = [var_context["name"]]
if len(cell_edges) != len(coord_names):
raise lena.core.LenaValueError(
"coord_names must have same length as cell_edges, "
"{} and {} given".format(coord_names, cell_edges)
)
coord_strings = [coord_fmt.format(edge[0], coord_names[ind], edge[1])
for (ind, edge) in enumerate(cell_edges)]
if reverse:
coord_strings = reversed(coord_strings)
coord_str = coord_join.join(coord_strings)
return coord_str
def _check_edges_increasing_1d(arr):
if len(arr) <= 1:
raise lena.core.LenaValueError("size of edges should be more than one,"
" {} provided".format(arr))
increasing = (tup[0] < tup[1] for tup in zip(arr, arr[1:]))
if not all(increasing):
raise lena.core.LenaValueError(
"expected strictly increasing values, "
"{} provided".format(arr)
)
def check_edges_increasing(edges):
"""Assure that multidimensional *edges* are increasing.
If length of *edges* or its subarray is less than 2
or if some subarray of *edges*
contains not strictly increasing values,
:exc:`.LenaValueError` is raised.
"""
if not len(edges):
raise lena.core.LenaValueError("edges must be non-empty")
elif not hasattr(edges[0], '__iter__'):
_check_edges_increasing_1d(edges)
return
for arr in edges:
if len(arr) <= 1:
raise lena.core.LenaValueError(
"size of edges should be more than one. "
"{} provided".format(arr)
)
_check_edges_increasing_1d(arr)
def get_bin_edges(index, edges):
"""Return edges of the bin for the given *edges* of a histogram.
In one-dimensional case *index* must be an integer and a tuple
of *(x_low_edge, x_high_edge)* for that bin is returned.
In a multidimensional case *index* is a container of numeric indices
in each dimension.
A list of bin edges in each dimension is returned."""
# todo: maybe give up this 1- and multidimensional unification
# and write separate functions for each case.
if not hasattr(edges[0], '__iter__'):
# 1-dimensional edges
if hasattr(index, '__iter__'):
index = index[0]
return (edges[index], edges[index+1])
# multidimensional edges
return [(edges[coord][i], edges[coord][i+1])
for coord, i in enumerate(index)]
def get_bin_on_index(index, bins):
"""Return bin corresponding to multidimensional *index*.
*index* can be a number or a list/tuple.
If *index* length is less than dimension of *bins*,
a subarray of *bins* is returned.
In case of an index error, :exc:`.LenaIndexError` is raised.
Example:
>>> from lena.structures import histogram, get_bin_on_index
>>> hist = histogram([0, 1], [0])
>>> get_bin_on_index(0, hist.bins)
0
>>> get_bin_on_index((0, 1), [[0, 1], [0, 0]])
1
>>> get_bin_on_index(0, [[0, 1], [0, 0]])
[0, 1]
"""
if not isinstance(index, (list, tuple)):
index = [index]
subarr = bins
for ind in index:
try:
subarr = subarr[ind]
except IndexError:
raise lena.core.LenaIndexError(
"bad index: {}, bins = {}".format(index, bins)
)
return subarr
def get_bin_on_value_1d(val, arr):
"""Return index for value in one-dimensional array.
*arr* must contain strictly increasing values
(not necessarily equidistant),
it is not checked.
"Linear binary search" is used,
that is our array search by default assumes
the array to be split on equidistant steps.
Example:
>>> from lena.structures import get_bin_on_value_1d
>>> arr = [0, 1, 4, 5, 7, 10]
>>> get_bin_on_value_1d(0, arr)
0
>>> get_bin_on_value_1d(4.5, arr)
2
>>> # upper range is excluded
>>> get_bin_on_value_1d(10, arr)
5
>>> # underflow
>>> get_bin_on_value_1d(-10, arr)
-1
"""
# may also use numpy.searchsorted
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.searchsorted.html
ind_min = 0
ind_max = len(arr) - 1
while True:
if ind_max - ind_min <= 1:
# lower bound is close
if val < arr[ind_min]:
return ind_min - 1
# upper bound is open
elif val >= arr[ind_max]:
return ind_max
else:
return ind_min
if val == arr[ind_min]:
return ind_min
if val < arr[ind_min]:
return ind_min - 1
elif val >= arr[ind_max]:
return ind_max
else:
shift = int(
(ind_max - ind_min) * (
float(val - arr[ind_min]) / (arr[ind_max] - arr[ind_min])
))
ind_guess = ind_min + shift
if ind_min == ind_guess:
ind_min += 1
continue
# ind_max is always more that ind_guess,
# because val < arr[ind_max] (see the formula for shift).
# This branch is not needed and can't be tested.
# But for the sake of numerical inaccuracies, let us keep this
# so that we never get into an infinite loop.
elif ind_max == ind_guess:
ind_max -= 1
continue
if val < arr[ind_guess]:
ind_max = ind_guess
else:
ind_min = ind_guess
def get_bin_on_value(arg, edges):
"""Get the bin index for *arg* in a multidimensional array *edges*.
*arg* is a 1-dimensional array of numbers
(or a number for 1-dimensional *edges*),
and corresponds to a point in N-dimensional space.
*edges* is an array of N-1 dimensional arrays (lists or tuples) of numbers.
Each 1-dimensional subarray consists of increasing numbers.
*arg* and *edges* must have the same length
(otherwise :exc:`.LenaValueError` is raised).
*arg* and *edges* must be iterable and support *len()*.
Return list of indices in *edges* corresponding to *arg*.
If any coordinate is out of its corresponding edge range,
its index will be ``-1`` for underflow
or ``len(edge)-1`` for overflow.
Examples:
>>> from lena.structures import get_bin_on_value
>>> edges = [[1, 2, 3], [1, 3.5]]
>>> get_bin_on_value((1.5, 2), edges)
[0, 0]
>>> get_bin_on_value((1.5, 0), edges)
[0, -1]
>>> # the upper edge is excluded
>>> get_bin_on_value((3, 2), edges)
[2, 0]
>>> # one-dimensional edges
>>> edges = [1, 2, 3]
>>> get_bin_on_value(2, edges)
[1]
"""
# arg is a one-dimensional index
if not isinstance(arg, (tuple, list)):
return [get_bin_on_value_1d(arg, edges)]
# arg is a multidimensional index
if len(arg) != len(edges):
raise lena.core.LenaValueError(
"argument should have same dimension as edges. "
"arg = {}, edges = {}".format(arg, edges)
)
indices = []
for ind, array in enumerate(edges):
cur_bin = get_bin_on_value_1d(arg[ind], array)
indices.append(cur_bin)
return indices
def get_example_bin(struct):
"""Return bin with zero index on each axis of the histogram bins.
For example, if the histogram is two-dimensional, return hist[0][0].
*struct* can be a :class:`.histogram`
or an array of bins.
"""
if isinstance(struct, lena.structures.histogram):
return lena.structures.get_bin_on_index([0] * struct.dim, struct.bins)
else:
bins = struct
while isinstance(bins, list):
bins = bins[0]
return bins
def hist_to_graph(hist, make_value=None, get_coordinate="left", scale=None):
"""Convert a :class:`.histogram` to a :class:`.Graph`.
*make_value* is a function to set the value of a graph's point.
By default it is bin content.
*make_value* accepts a single value (bin content) without context.
This option could be used to create graph's error bars.
For example, to create a graph with errors
from a histogram where bins contain
a named tuple with fields *mean*, *mean_error* and a context
one could use
>>> make_value = lambda bin_: (bin_.mean, bin_.mean_error)
*get_coordinate* defines what will be the coordinate
of a graph's point created from a histogram's bin.
It can be "left" (default), "right" and "middle".
*scale* becomes the graph's scale (unknown by default).
Return the resulting graph.
"""
gr = lena.structures.graph.Graph(scale=scale)
## Could have allowed get_coordinate to be callable
# (for generality), but 1) first find a use case,
# 2) histogram bins could be adjusted in the first place.
if get_coordinate == "left":
get_coord = lambda edges: tuple(coord[0] for coord in edges)
elif get_coordinate == "right":
get_coord = lambda edges: tuple(coord[1] for coord in edges)
# *middle* between the two edges, not the *center* of the bin
# as a whole (because the graph corresponds to a point)
elif get_coordinate == "middle":
get_coord = lambda edges: tuple(0.5*(coord[0] + coord[1])
for coord in edges)
else:
raise lena.core.LenaValueError(
'get_coordinate must be one of "left", "right" or "middle"; '
'"{}" provided'.format(get_coordinate)
)
for value, edges in _iter_bins_with_edges(hist.bins, hist.edges):
coord = get_coord(edges)
# todo: unclear when bin_context is present.
bin_value = lena.flow.get_data(value)
# todo: maybe it should be only a tuple?
if not hasattr(bin_value, "__iter__"):
bin_value = (bin_value,)
if make_value is None:
graph_value = bin_value
else:
graph_value = make_value(bin_value)
gr.fill((coord, graph_value))
return gr
def init_bins(edges, value=0, deepcopy=False):
"""Initialize cells of the form *edges* with the given *value*.
Return bins filled with copies of *value*.
*Value* must be copyable, usual numbers will suit.
If the value is mutable, use *deepcopy =* ``True``
(or the content of cells will be identical).
Examples:
>>> edges = [[0, 1], [0, 1]]
>>> # one cell
>>> init_bins(edges)
[[0]]
>>> # no need to use floats,
>>> # because integers will automatically be cast to floats
>>> # when used together
>>> init_bins(edges, 0.0)
[[0.0]]
>>> init_bins([[0, 1, 2], [0, 1, 2]])
[[0, 0], [0, 0]]
>>> init_bins([0, 1, 2])
[0, 0]
"""
nbins = len(edges) - 1
if not isinstance(edges[0], (list, tuple)):
# edges is one-dimensional
if deepcopy:
return [copy.deepcopy(value) for _ in range(nbins)]
else:
return [value] * nbins
for ind, arr in enumerate(edges):
if ind == nbins:
if deepcopy:
return [copy.deepcopy(value) for _ in range(len(arr)-1)]
else:
return list([value] * (len(arr)-1))
bins = []
for _ in range(len(arr)-1):
bins.append(init_bins(edges[ind+1:], value, deepcopy))
return bins
def integral(bins, edges):
"""Compute integral (scale for a histogram).
*bins* contain values, and *edges* form the mesh
for the integration.
Their format is defined in :class:`.histogram` description.
"""
total = 0
for ind, bin_content in iter_bins(bins):
# print(bins, edges)
# print(ind, bin_content)
bin_lengths = [
edges[coord][i+1] - edges[coord][i]
for coord, i in enumerate(ind)
]
# print(bin_lengths)
# product
vol = _reduce(operator.mul, bin_lengths, 1)
cell_integral = vol * bin_content
total += cell_integral
return total
def iter_bins(bins):
"""Iterate on *bins*. Yield *(index, bin content)*.
Edges with higher index are iterated first
(that is z, then y, then x for a 3-dimensional histogram).
"""
# if not isinstance(bins, (list, tuple)):
if not hasattr(bins, '__iter__'):
# cell
yield ((), bins)
else:
for ind, _ in enumerate(bins):
for sub_ind, val in iter_bins(bins[ind]):
yield (((ind,) + sub_ind), val)
def _iter_bins_with_edges(bins, edges):
"""Yield *(bin content, bin edges)* pairs.
*Bin edges* is a tuple, such that at index *i*
its element is bin's *(lower bound, upper bound)*
along *i*-th the coordinate.
"""
# todo: only a list or also a tuple, an array?
if not isinstance(edges[0], list):
edges = [edges]
bins_sizes = [len(edge)-1 for edge in edges]
indices = [list(range(nbins)) for nbins in bins_sizes]
for index in itertools.product(*indices):
bin_ = lena.structures.get_bin_on_index(index, bins)
edges_low = []
edges_high = []
for var, var_ind in enumerate(index):
edges_low.append(edges[var][var_ind])
edges_high.append(edges[var][var_ind+1])
yield (bin_, tuple(zip(edges_low, edges_high)))
# old interface:
# yield (bin_, (edges_low, edges_high))
def iter_cells(hist, ranges=None, coord_ranges=None):
"""Iterate cells of a histogram *hist*, possibly in a subrange.
For each bin, yield a :class:`HistCell`
containing *bin edges, bin content* and *bin index*.
The order of iteration is the same as for :func:`iter_bins`.
*ranges* are the ranges of bin indices to be used
for each coordinate
(the lower value is included, the upper value is excluded).
*coord_ranges* set real coordinate ranges based on histogram edges.
They need not coincide exactly with bin edges.
If one of the ranges for the given coordinate
is outside the histogram edges,
then only existing histogram edges within the range are selected.
If the coordinate range is completely outside histogram edges,
nothing is yielded.
If a lower or upper *coord_range*
falls within a bin, this bin is yielded.
Note that if a coordinate range falls on a bin edge,
the number of generated bins can be unstable
because of limited float precision.
*ranges* and *coord_ranges* are tuples of tuples of limits
in corresponding dimensions.
For one-dimensional histogram it must be a tuple
containing a tuple, for example
*((None, None),)*.
``None`` as an upper or lower *range* means no limit
(*((None, None),)* is equivalent to *((0, len(bins)),)*
for a 1-dimensional histogram).
If a *range* index is lower than 0 or higher than possible index,
:exc:`.LenaValueError` is raised.
If both *coord_ranges* and *ranges* are provided,
:exc:`.LenaTypeError` is raised.
"""
# for bin_ind, bin_ in iter_bins(hist.bins):
# yield HistCell(get_bin_edges(bin_ind, hist.edges), bin_, bin_ind)
# if bins and edges are calculated each time, save the result now
bins, edges = hist.bins, hist.edges
# todo: hist.edges must be same
# for 1- and multidimensional histograms.
if hist.dim == 1:
edges = (edges,)
if coord_ranges is not None:
if ranges is not None:
raise lena.core.LenaTypeError(
"only ranges or coord_ranges can be provided, not both"
)
ranges = []
if not isinstance(coord_ranges[0], (tuple, list)):
coord_ranges = (coord_ranges, )
for coord, coord_range in enumerate(coord_ranges):
# todo: (dis?)allow None as an infinite range.
# todo: raise or transpose unordered coordinates?
# todo: change the order of function arguments.
lower_bin_ind = get_bin_on_value_1d(coord_range[0], edges[coord])
if lower_bin_ind == -1:
lower_bin_ind = 0
upper_bin_ind = get_bin_on_value_1d(coord_range[1], edges[coord])
max_ind = len(edges[coord])
if upper_bin_ind == max_ind:
upper_bin_ind -= 1
if lower_bin_ind >= max_ind or upper_bin_ind <= 0:
# histogram edges are outside the range.
return
ranges.append((lower_bin_ind, upper_bin_ind))
if not ranges:
ranges = ((None, None),) * hist.dim
real_ind_ranges = []
for coord, coord_range in enumerate(ranges):
low, up = coord_range
if low is None:
low = 0
else:
# negative indices should not be supported
if low < 0:
raise lena.core.LenaValueError(
"low must be not less than 0 if provided"
)
max_ind = len(edges[coord]) - 1
if up is None:
up = max_ind
else:
# huge indices should not be supported as well.
if up > max_ind:
raise lena.core.LenaValueError(
"up must not be greater than len(edges)-1, if provided"
)
real_ind_ranges.append(list(range(low, up)))
indices = list(itertools.product(*real_ind_ranges))
for ind in indices:
yield HistCell(get_bin_edges(ind, edges),
get_bin_on_index(ind, bins),
ind)
def _make_hist_context(hist):
hc = {
"dim": hist.dim,
"nbins": hist.nbins,
"ranges": hist.ranges
}
# do we really add scale to context?
# If that is important, we must always calculate that.
# If that is not important, then why adding that?
# if hist._scale is not None:
# hc["scale"] = hist._scale
return hc
# todo: make private and completely refactor this function.
def make_hist_context(hist, context):
"""Update *context* with the context
of a :class:`.histogram` *hist*.
Deep copy of updated context is returned.
"""
all_context = copy.deepcopy(context)
hist_context = {
"histogram": {
"dim": hist.dim,
"nbins": hist.nbins,
"ranges": hist.ranges
}
}
all_context.update(hist_context)
return all_context
# return copy.deepcopy(all_context)
def unify_1_md(bins, edges):
"""Unify 1- and multidimensional bins and edges.
Return a tuple of *(bins, edges)*.
Bins and multidimensional *edges* are returned unchanged,
while one-dimensional *edges* are wrapped in a list.
"""
if hasattr(edges[0], '__iter__'):
# if isinstance(edges[0], (list, tuple)):
return (bins, edges)
else:
return (bins, [edges])
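# --- Illustrative usage sketch (not part of the original module): build empty bins
# for a small 2-dimensional mesh, locate the bin containing a point, fill it, and
# integrate. The edges and values are made up for illustration.
if __name__ == "__main__":
    edges = [[0, 1, 2], [0, 1]]
    bins = init_bins(edges, 0.0)                 # [[0.0], [0.0]]
    index = get_bin_on_value((1.5, 0.5), edges)  # [1, 0]
    bins[index[0]][index[1]] = 3.0
    print(integral(bins, edges))                 # each cell has volume 1.0, so 3.0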
|
the-stack_0_25337
|
from confluent_kafka import Consumer
import socket
conf = {'bootstrap.servers': 'localhost:9092',
'group.id': socket.gethostname(),
'enable.auto.commit': False,
'auto.offset.reset': 'earliest'}
consumer = Consumer(conf)
consumer.subscribe(["chatroom1"])
try:
    while True:
        msg = consumer.poll(1.0)
        if msg is None:
            continue
        if msg.error():
            print("Consumer error: {}".format(msg.error()))
            continue
        print('Received message: {}'.format(msg.key().decode('utf-8') + ":" + msg.value().decode('utf-8')))
except KeyboardInterrupt:
    pass
finally:
    # Close the consumer so the group rebalances promptly.
    consumer.close()
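# --- Illustrative counterpart sketch (not part of the original script): a minimal
# producer publishing a keyed, UTF-8 encoded message to the same broker and topic
# assumed above; the key and value are made up for illustration.
def _example_produce():
    from confluent_kafka import Producer
    producer = Producer({'bootstrap.servers': 'localhost:9092'})
    producer.produce('chatroom1', key='alice', value='hello from alice')
    producer.flush()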
|
the-stack_0_25338
|
import bpy
from ... base_types import AnimationNode, VectorizedSocket
class ShapeKeyOutputNode(bpy.types.Node, AnimationNode):
bl_idname = "an_ShapeKeyOutputNode"
bl_label = "Shape Key Output"
bl_width_default = 160
codeEffects = [VectorizedSocket.CodeEffect]
useShapeKeyList: VectorizedSocket.newProperty()
useValueList: VectorizedSocket.newProperty()
def create(self):
self.newInput(VectorizedSocket("Shape Key", "useShapeKeyList",
("Shape Key", "shapeKey", dict(defaultDrawType = "PROPERTY_ONLY")),
("Shape Keys", "shapeKeys"),
codeProperties = dict(allowListExtension = False)))
self.newInput(VectorizedSocket("Float", "useValueList",
("Value", "value", dict(minValue = 0, maxValue = 1)),
("Values", "values")))
self.newInput("Float", "Slider Min", "sliderMin")
self.newInput("Float", "Slider Max", "sliderMax")
self.newInput("Boolean", "Mute", "mute")
self.newInput("Text", "Name", "name")
self.newOutput(VectorizedSocket("Shape Key", "useShapeKeyList",
("Shape Key", "shapeKey"),
("Shape Keys", "shapeKeys")))
for socket in self.inputs[1:]:
socket.useIsUsedProperty = True
socket.isUsed = False
for socket in self.inputs[2:]:
socket.hide = True
def getExecutionCode(self, required):
yield "if shapeKey is not None:"
s = self.inputs
if s[1].isUsed: yield " shapeKey.value = value"
if s[2].isUsed: yield " shapeKey.slider_min = sliderMin"
if s[3].isUsed: yield " shapeKey.slider_max = sliderMax"
if s[4].isUsed: yield " shapeKey.mute = mute"
if s[5].isUsed: yield " shapeKey.name = name"
yield " pass"
def getBakeCode(self):
yield "if shapeKey is not None:"
s = self.inputs
if s[1].isUsed: yield " shapeKey.keyframe_insert('value')"
if s[2].isUsed: yield " shapeKey.keyframe_insert('slider_min')"
if s[3].isUsed: yield " shapeKey.keyframe_insert('slider_max')"
if s[4].isUsed: yield " shapeKey.keyframe_insert('mute')"
|
the-stack_0_25339
|
import os
import shutil
from pathlib import Path
import base64
import yaml
import json
import toml
import tomlkit
import traceback
import basedosdados
import basedosdados as bd
from basedosdados import Storage
from basedosdados import Dataset
from basedosdados.upload.base import Base
def decode_base64(message):
    # decoding the base64 string
    base64_bytes = message.encode("ascii")
    message_bytes = base64.b64decode(base64_bytes)
    return message_bytes.decode("ascii")
def create_config_folder(config_folder):
## if ~/.basedosdados folder exists delete
if os.path.exists(Path.home() / config_folder):
shutil.rmtree(Path.home() / config_folder, ignore_errors=True)
## create ~/.basedosdados folder
os.mkdir(Path.home() / config_folder)
os.mkdir(Path.home() / config_folder / "credentials")
def save_json(json_obj, file_path, file_name):
### function to save json file
with open(f"{file_path}/{file_name}", "w", encoding="utf-8") as f:
json.dump(json_obj, f, ensure_ascii=False, indent=2)
def create_json_file(message_base64, file_name, config_folder):
### decode base64 script and load as a json object
json_obj = json.loads(decode_base64(message_base64))
prod_file_path = Path.home() / config_folder / "credentials"
### save the json credential in the .basedosdados/credentials/
save_json(json_obj, prod_file_path, file_name)
def save_toml(config_dict, file_name, config_folder):
### save the config.toml in .basedosdados
file_path = Path.home() / config_folder
with open(file_path / file_name, "w") as toml_file:
toml.dump(config_dict, toml_file)
def load_configs(dataset_id, table_id):
### get the config file in .basedosdados/config.toml
configs_path = Base()._load_config()
### get the path to metadata_path, where the folder bases with metadata information
metadata_path = configs_path["metadata_path"]
### get the path to table_config.yaml
table_path = f"{metadata_path}/{dataset_id}/{table_id}"
return (
### load the table_config.yaml
yaml.load(open(f"{table_path}/table_config.yaml", "r"), Loader=yaml.FullLoader),
### return the path to .basedosdados configs
configs_path,
)
def create_config_tree(prod_base64, staging_base64, config_dict):
### execute the creation of .basedosdados
create_config_folder(".basedosdados")
### create the prod.json secret
create_json_file(prod_base64, "prod.json", ".basedosdados")
### create the staging.json secret
create_json_file(staging_base64, "staging.json", ".basedosdados")
### create the config.toml
save_toml(config_dict, "config.toml", ".basedosdados")
def replace_project_id_publish_sql(configs_path, dataset_id, table_id):
print("REPLACE PUBLISH.SQL")
### load the paths to metadata and configs folder
table_config, configs_path = load_configs(dataset_id, table_id)
metadata_path = configs_path["metadata_path"]
table_path = f"{metadata_path}/{dataset_id}/{table_id}"
### load the source project id to staging and pro data in bigquery
user_staging_id = table_config["project_id_staging"]
user_prod_id = table_config["project_id_prod"]
### load the destination project id to staging and prod data in bigquery
bq_prod_id = configs_path["gcloud-projects"]["prod"]["name"]
bq_staging_id = configs_path["gcloud-projects"]["staging"]["name"]
print("user_prod_id: ", user_prod_id)
print("user_staging_id: ", user_staging_id)
print("bq_prod_id: ", bq_prod_id)
print("bq_staging_id: ", bq_staging_id)
### load publish.sql file with the query for create the VIEW in production
sql_file = Path(table_path + "/publish.sql").open("r").read()
### replace the project id name of the source for the production (basedosdados project)
sql_final = sql_file.replace(f"{user_prod_id}.", f"{bq_prod_id}.", 1)
sql_final = sql_final.replace(f"{user_staging_id}.", f"{bq_staging_id}.")
print(sql_final)
### write the replaced file
Path(table_path + "/publish.sql").open("w").write(sql_final)
def pretty_log(dataset_id, table_id, source_bucket_name):
if "basedosdados" in source_bucket_name:
source_len = len(source_bucket_name) - 9
else:
source_len = len(source_bucket_name)
print(
"\n###================================================================================###",
"\n### ###",
"\n### Data successfully synced and created in bigquery ###",
"\n### ###",
f"\n### Dataset : {dataset_id}",
" " * (48 - len(dataset_id)),
"###",
f"\n### Table : {table_id}",
" " * (48 - len(table_id)),
"###",
f"\n### Source Bucket: {source_bucket_name}",
" " * (48 - source_len),
"###",
"\n### ###",
"\n###================================================================================###\n",
)
def sync_bucket(
source_bucket_name,
dataset_id,
table_id,
destination_bucket_name,
backup_bucket_name,
mode="staging",
):
"""Copies proprosed data between storage buckets.
Creates a backup of old data, then delete it and copies new data into the destination bucket.
Args:
source_bucket_name (str):
The bucket name from which to copy data.
dataset_id (str):
Dataset id available in basedosdados. It should always come with table_id.
table_id (str):
Table id available in basedosdados.dataset_id.
It should always come with dataset_id.
destination_bucket_name (str):
The bucket name which data will be copied to.
If None, defaults to the bucket initialized when instantiating the Storage object
(check it with the Storage.bucket property).
backup_bucket_name (str):
The bucket name for where backup data will be stored.
mode (str): Optional.
Folder of which dataset to update.
Raises:
ValueError:
If there are no files corresponding to the given dataset_id and table_id on the source bucket
"""
ref = Storage(dataset_id=dataset_id, table_id=table_id)
prefix = f"{mode}/{dataset_id}/{table_id}/"
source_ref = (
ref.client["storage_staging"]
.bucket(source_bucket_name)
.list_blobs(prefix=prefix)
)
destination_ref = ref.bucket.list_blobs(prefix=prefix)
if len(list(source_ref)) == 0:
raise ValueError("No objects found on the source bucket")
# MAKE A BACKUP OF OLD DATA
if len(list(destination_ref)):
print(
f"\n########################################### COPY BACKUP ###########################################\n"
)
ref.copy_table(
source_bucket_name=destination_bucket_name,
destination_bucket_name=backup_bucket_name,
)
print(
f"\n########################################## DELETE OLD DATA ##########################################\n"
)
# DELETE OLD DATA FROM PROD
ref.delete_table(not_found_ok=True)
print(
f"\n########################################### COPY NEW DATA ###########################################\n"
)
# COPIES DATA TO DESTINATION
ref.copy_table(source_bucket_name=source_bucket_name)
def get_table_dataset_id():
### load the change files in PR || diff between PR and master
changes = json.load(Path("/github/workspace/files.json").open("r"))
print(changes)
### create a dict to save the dataset and source_bucket related to each table_id
dataset_table_ids = {}
### create a list to save the table folder path, for each table changed in the commit
table_folders = []
for change_file in changes:
### get the directory path for a table with changes
file_dir = Path(change_file).parent
### append the table directory if it was not already appended
if file_dir not in table_folders:
table_folders.append(file_dir)
### construct the iterable for the table_config paths
table_config_paths = [Path(root / "table_config.yaml") for root in table_folders]
### iterate through each config path
for filepath in table_config_paths:
### check if the table_config.yaml exists in the changed folder
if filepath.is_file():
### load the found table_config.yaml
table_config = yaml.load(open(filepath, "r"), Loader=yaml.SafeLoader)
### add the dataset and source_bucket for each table_id
dataset_table_ids[table_config["table_id"]] = {
"dataset_id": table_config["dataset_id"],
"source_bucket_name": table_config["source_bucket_name"],
}
else:
print(
"\n###==============================================================================================###",
f"\n{str(filepath)} does not exist on current commit",
"\n###==============================================================================================###\n",
)
return dataset_table_ids
def push_table_to_bq(
dataset_id,
table_id,
source_bucket_name="basedosdados-dev",
destination_bucket_name="basedosdados",
backup_bucket_name="basedosdados-staging",
):
### Copies proposed data between storage buckets.
### Creates a backup of old data, then deletes it and copies new data into the destination bucket.
sync_bucket(
source_bucket_name,
dataset_id,
table_id,
destination_bucket_name,
backup_bucket_name,
)
### load the table_config.yaml to get the metadata IDs
table_config, configs_path = load_configs(dataset_id, table_id)
### adjust the correct project ID in publish sql
replace_project_id_publish_sql(configs_path, dataset_id, table_id)
### create Table object of selected table and dataset ID
tb = bd.Table(table_id, dataset_id)
### delete table from staging and prod if exists
tb.delete("all")
### create the staging table in bigquery
tb.create(
path=None,
if_table_exists="replace",
if_storage_data_exists="pass",
if_table_config_exists="pass",
)
### publish the table in prod bigquery
tb.publish(if_exists="replace")
### updates the table description
tb.update("prod")
### updates the dataset description
Dataset(dataset_id).update("prod")
def main():
# print(json.load(Path("/github/workspace/files.json").open("r")))
print(os.environ.get("INPUT_PROJECT_ID"))
print(Path.home())
### json with information of .basedosdados/config.toml
config_dict = {
"metadata_path": "/github/workspace/bases",
"templates_path": "/github/workspace/python-package/basedosdados/configs/templates",
"bucket_name": "basedosdados",
"gcloud-projects": {
"staging": {
"name": "basedosdados-staging",
"credentials_path": "/github/home/.basedosdados/credentials/staging.json",
},
"prod": {
"name": "basedosdados",
"credentials_path": "/github/home/.basedosdados/credentials/prod.json",
},
},
}
### load the secret of prod and staging data
prod_base64 = os.environ.get("INPUT_GCP_TABLE_APPROVE_PROD")
staging_base64 = os.environ.get("INPUT_GCP_TABLE_APPROVE_STAGING")
### create config and credential folders
create_config_tree(prod_base64, staging_base64, config_dict)
### find the dataset and tables of the PR
dataset_table_ids = get_table_dataset_id()
print(f"Tables found: {dataset_table_ids}")
### iterate over each table in dataset of the PR
for table_id in dataset_table_ids.keys():
dataset_id = dataset_table_ids[table_id]["dataset_id"]
source_bucket_name = dataset_table_ids[table_id]["source_bucket_name"]
### push the table to bigquery
try:
push_table_to_bq(
dataset_id,
table_id,
source_bucket_name,
destination_bucket_name=os.environ.get("INPUT_DESTINATION_BUCKET_NAME"),
backup_bucket_name=os.environ.get("INPUT_BACKUP_BUCKET_NAME"),
)
pretty_log(dataset_id, table_id, source_bucket_name)
except Exception as error:
print(
"\n###====================================================================================================###",
f"\n{dataset_id}.{table_id}",
)
traceback.print_exc()
print(
"\n###====================================================================================================###\n",
)
if __name__ == "__main__":
main()
|
the-stack_0_25341
|
"""
Common format for graph data structures in ComptoxAI.
Q: Why do we define so many internal representations of graphs? Shouldn't a
single format be sufficient?
A: The graphs used by ComptoxAI are sufficiently large that it is only
reasonable to store a single copy of the graph in memory at a given time,
unless the user explicitly asks otherwise. Some of the representations are
better for data storage, for information retrieval, for deep learning, etc., so
the best way to handle internal format is to provide a single interface to the
underlying data and a single function that handles conversion between those
types. In the future, we may modify this to a more complex architecture that is
more sustainable.
"""
from abc import abstractmethod
from typing import Iterable, List, Tuple, Union
import networkx as nx
import numpy as np
import pandas as pd
import neo4j
from py2neo import Graph, Subgraph, Node, Relationship
from json import JSONEncoder, dump
from networkx.readwrite.json_graph import node_link_data
from ..cypher import queries
from ..utils import load_config
def _execute_cypher_transaction(tx, query, **kwargs):
if kwargs:
verbose = kwargs['verbose']
else:
verbose = False
records = []
for record in tx.run(query):
if verbose:
print(record)
records.append(record)
return records
class GraphDataMixin(object):
"""
Abstract base class specifying a common interface for all graph data.
"""
@property
@abstractmethod
def nodes(self):
pass
@property
@abstractmethod
def edges(self):
pass
@property
@abstractmethod
def is_heterogeneous(self):
pass
@abstractmethod
def add_node(self, node: tuple):
pass
@abstractmethod
def add_edge(self, edge: tuple):
pass
@abstractmethod
def add_nodes(self, nodes: List[tuple]):
pass
@abstractmethod
def add_edges(self, edges: List[tuple]):
pass
@abstractmethod
def save_graph(self):
pass
class GraphSAGEData(GraphDataMixin):
"""
Internal representation of a GraphSAGE formatted graph/dataset.
This is essentially a NetworkX graph with a few extra data components
needed to run the GraphSAGE algorithms. It also provides a more flexible
way to work with node features, which are stored in a separate NumPy array
(which, unfortunately, isn't natively compatible with heterogeneous
graphs).
Parameters
----------
graph : nx.DiGraph
A NetworkX directed graph containing the nodes and edges that define
the topology of the ComptoxAI graph database. Nodes are identified by
the ID assigned to them by Neo4j.
node_map : Iterable, default=None
An iterable where each element maps a Neo4j node id (int) to a
consecutively numbered index, used to map nodes to columns of the
(optional) matrix of node features. If None, a node map will be
generated from scratch.
edge_map : Iterable, default=None
Currently not implemented (:TODO:)
node_classes : list of str, default=None
Membership for classes to be used in supervised learning tasks. NOTE:
there is a semantic difference between the notion of 'node classes' in
an ontology / graph database (which specifies the semantic type(s) of
entities) versus in supervised learning (a target variable used to
learn a decision function), although they may be equivalent in some
settings.
edge_classes : list of str, default=None
Currently not implemented (:TODO:)
node_features : array-like, default=None
Array of node features.
edge_features : array-like, default=None
Array of edge features.
"""
format = 'graphsage'
def __init__(self, graph: nx.DiGraph, node_map: Iterable=None,
edge_map: Iterable=None, node_classes: List[str]=None,
edge_classes: List[str]=None,
node_features: Union[np.ndarray, pd.DataFrame]=None,
edge_features: Union[np.ndarray, pd.DataFrame]=None):
self._graph = graph
self._node_map = node_map
self._edge_map = edge_map
self._node_classes = node_classes
self._edge_classes = edge_classes
self._node_features = node_features
self._edge_features = edge_features
@property
def nodes(self):
return list(self._graph.nodes())
@property
def edges(self):
return self._graph.edges()
@property
def is_heterogeneous(self):
"""Return True if graph is heterogeneous, False otherwise.
"""
# GraphSAGE is (CURRENTLY) only compatible with non-heterogeneous
# graphs, so we always return False.
# TODO: Revisit to potentially extend to heterogeneous case.
return False
def add_node(self, node: int, **kwargs):
"""
Add a node to GraphSAGE.
A node is simply an ID corresponding to a node in the Neo4j graph.
Node features aren't tied to the NetworkX digraph under GraphSAGE,
instead, they are stored in _node_features.
Parameters
----------
node : int
A Neo4j node id
kwargs :
"""
self._graph.add_node(node, **kwargs)
def add_edge(self, edge: Tuple[int, str, int]):
"""
Add an edge to GraphSAGE.
Edge format:
3-tuple with format:
.. code-block::
(
{ID of u},
{relationship label (str)},
{ID of v}
)
If the edge does not have a label, you should use the empty string
('') as the second element of `edge`.
Parameters
----------
edge : Tuple[int, str, int]
A tuple to add to the GraphSAGE dataset.
"""
u, rel, v = edge
if rel != '':
self._graph.add_edge(u, v, rel=rel)
else:
self._graph.add_edge(u, v)
class Neo4jData(GraphDataMixin):
"""Internal representation of a connection to a Neo4j graph database
containing ComptoxAI data.
Importantly, this data structure does not load the complete contents of the
database into Python's memory space. This places significantly less demand
on system resources when not executing large queries or performing complex
data manipulations. This representation is also able to unload a fair deal
of logic onto Neo4j's standard library in implementing various standardized
operations.
The recommended way to instantiate this class is by calling
comptox_ai.Graph.from_neo4j(), which handles establishing a database driver
connection.
Parameters
----------
driver : neo4j.Driver
A driver connected to a Neo4j graph database containing ComptoxAI data.
"""
format = 'neo4j'
def __init__(self, database: Graph, verbose: bool = False):
self._graph = database.default_graph
n_size = len(self._graph.nodes)
e_size = len(self._graph.relationships)
if verbose:
if (n_size > 100000) or (e_size > 400000):
print("Warning: This is a very large graph! It may take a long time to load.")
if verbose:
print(" Reading {0} nodes...".format(n_size))
self._nodes = list(self._graph.nodes.match("owl__NamedIndividual"))
if verbose:
print(" Reading {0} edges...".format(e_size))
self._edges = list(self._graph.relationships.match())
if verbose:
print(" Building index of node IDs...")
self._node_ids = [n.identity for n in self._nodes]
if verbose:
print()
print("Done! The database connection is ready to use.")
@staticmethod
def standardize_node(n: Node):
return ((
n.identity,
list(n.labels - {'Resource', 'owl__NamedIndividual'})[0],
dict(n)
))
@staticmethod
def standardize_edge(e: Relationship):
return ((
e.start_node.identity,
list(e.types())[0],
e.end_node.identity,
dict(e)
))
@property
def nodes(self):
"""Get a list of all nodes corresponding to a named individual in the
ComptoxAI ontology.
Returns
-------
list of py2neo.Node
List of all Neo4j nodes corresponding to a named individual.
"""
return [self.standardize_node(n) for n in self._nodes]
@property
def edges(self):
return [self.standardize_edge(e) for e in self._edges]
def node_labels(self):
"""
Get all node labels from ns0.
Returns
-------
set of str
Set of ontology labels (as strings) present in the graph schema.
"""
all_lbl_set = self._graph.schema.node_labels
filter_lbls = [x for x in all_lbl_set if x[:5] == "ns0__"]
return set(filter_lbls)
def add_node(self, node: tuple):
"""
Add a node to the graph and synchronize it to the remote database.
Parameters
----------
node : tuple of (int, label, **props)
Node to add to the graph.
"""
n_id, n_label, n_props = node
n = Node(n_id, n_props)
n.update_labels([
'owl__NamedIndividual',
n_label,
'Resource'
])
self._graph.create(n)
def add_nodes(self, nodes: List[tuple]):
"""
Add a list of nodes to the graph and synchronize them to the remote
database.
"""
ns = []
# Since we have to synchronize changes as a single chunk, it's not as
# simple as calling add_node() for every element of `nodes`.
for n in nodes:
n_id, n_label, n_props = n
nn = Node(n_id, n_props)
nn.update_labels([
'owl__NamedIndividual',
n_label,
'Resource'
])
ns.append(nn)
self._graph.create(Subgraph(ns))
def add_edge(self, edge: tuple):
"""
Add an edge to the graph and synchronize it to the remote database.
"""
u, rel_type, v, props = edge
e = Relationship(u, rel_type, v, props)
self._graph.create(e)
def add_edges(self, edges: List[tuple]):
"""
Add a list of edges to the graph and synchronize them to the remote
database.
"""
es = []
# Since we have to synchronize changes as a single chunk, it's not as
# simple as calling add_edge() for every element of `edges`.
for e in edges:
u, rel_type, v, props = e
ee = Relationship(u, rel_type, v, props)
es.append(ee)
self._graph.create(Subgraph(es))
def run_query_in_session(self, query: str):
"""Submit a cypher query transaction to the connected graph database
driver and return the response to the calling function.
Parameters
----------
query : str
String representation of the cypher query to be executed.
Returns
-------
list of neo4j.Record
"""
#raise NotImplementedError
with self._driver.session() as session:
query_response = session.read_transaction(_execute_cypher_transaction, query)
return query_response
class NetworkXData(GraphDataMixin):
def __init__(self, graph: nx.DiGraph=None):
if graph is not None:
self._graph = graph
else:
self._graph = nx.DiGraph()
format = 'networkx'
class NetworkxJsonEncoder(JSONEncoder):
"""
When encoding JSON, sets are converted to lists.
"""
        def default(self, o):
            try:
                iterable = iter(o)
            except TypeError:
                pass
            else:
                return list(iterable)
            # Fall back to the base implementation for anything that is not
            # iterable (it raises TypeError for unserializable objects).
            return JSONEncoder.default(self, o)
@property
def nodes(self):
return self._graph.nodes()
@property
def edges(self):
return self._graph.edges()
def add_node(self, node: tuple):
n_id, n_label, n_props = node
n_props['LABELS'] = {'owl__NamedIndividual', n_label, 'Resource'}
# Use kwargs expansion to explode props
self._graph.add_node(n_id, **n_props)
def add_edge(self, edge: tuple):
"""
Add one edge to the graph from a tuple.
The tuple should be formatted as follows:
.. code-block::
(
{ID of u},
{relationship type},
{ID of v},
{dict of edge properties (leave empty if none)}
)
Parameters
----------
edge : tuple
Tuple containing edge data (see above for format specification).
"""
u, rel_type, v, e_props = edge
e_props['TYPE'] = rel_type
self._graph.add_edge(u, v, **e_props)
def add_nodes(self, nodes: List[tuple]):
for n in nodes:
self.add_node(n)
def add_edges(self, edges: List[tuple]):
"""
Add one or more edges to the graph from a list of tuples.
See Also
--------
add_edge : Add a single edge from a tuple
"""
for e in edges:
self.add_edge(e)
def save_graph(self, format=''):
"""
Save NetworkX representation of ComptoxAI's knowledge graph to disk in
JSON "node-link" format.
Notes
-----
Users should not need to interact with these JSON files directly, but
for reference they should be formatted similarly to the following
example:
.. code-block::
{
'directed': True,
'multigraph': False,
'graph': {},
'nodes': [
{
'ns0__xrefPubchemCID': 71392231,
'ns0__xrefPubChemSID': 316343675,
'ns0__inchi': 'InChI=1S/C8H12Cl2N4S2/c1-5(9)3-11-7(15)13-14-8(16)12-4-6(2)10/h1-4H2,(H2,11,13,15)(H2,12,14,16)',
'ns0__xrefCasRN': '61784-89-2',
'uri': 'http://jdr.bio/ontologies/comptox.owl#chem_n1n2bis2chloroprop2en1ylhydrazine12dicarbothioamide',
'ns0__xrefDtxsid': 'DTXSID70814050',
'ns0__inchiKey': 'UAZDGQNKXGUQPD-UHFFFAOYSA-N',
'LABELS': ['ns0__Chemical', 'Resource', 'owl__NamedIndividual'],
'id': 0
},
...
],
'links': [
{
'TYPE': 'ns0__keyEventTriggeredBy',
'source': 46954,
'target': 47667
},
...
]
}
Notice that ``'graph'`` is empty - the contents of the graph are
entirely specified in the ``'nodes'`` and ``'links'`` lists.
"""
with open('test_json.json', 'w') as fp:
dump(node_link_data(self._graph), fp, cls=self.NetworkxJsonEncoder)
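
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It shows the
# (id, label, properties) node tuples and (u, type, v, properties) edge tuples
# that the add_node()/add_edge() methods above expect. The IDs, label names,
# relationship type, and property values below are made up for demonstration.
if __name__ == "__main__":
    nx_data = NetworkXData()
    nx_data.add_node((0, 'ns0__Chemical', {'ns0__xrefDtxsid': 'DTXSID0000000'}))
    nx_data.add_node((1, 'ns0__Gene', {'ns0__geneSymbol': 'EXAMPLE1'}))
    nx_data.add_edge((0, 'ns0__chemicalAffectsGene', 1, {}))
    print(len(nx_data.nodes), "nodes,", len(nx_data.edges), "edges")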
|
the-stack_0_25342
|
# PyGame template.
# Import standard modules.
import boto3
import hashlib
import json
import itertools
import random
import numpy as np
import os
sqs_url = os.getenv("SIM_QUEUE")
print(sqs_url)
print(os.environ["SIM_QUEUE"])
sqs_region = sqs_url.split("sqs.")[1].split(".amazonaws.")[0]
client = boto3.client("sqs", region_name=sqs_region)
def get_social_conf(_sd_impact, _sd_start, _sd_stop, _sd_recovered, _know_rate_sick, _know_rate_recovered, _party_freq, _party_R_boost):
return dict(sd_impact=_sd_impact, sd_start=_sd_start, sd_stop=_sd_stop, sd_recovered=_sd_recovered, know_rate_sick=_know_rate_sick,
know_rate_recovered=_know_rate_recovered, party_freq=_party_freq, party_R_boost=_party_R_boost)
def get_deases_conf(_R_spread, _dd, _fatality, _initial_sick):
return dict(R_spread=_R_spread, desease_duration=_dd, fatality=_fatality, initial_sick=_initial_sick)
def post_simjob(scenario, run_nr, iter_num):
try:
scenario["run"] = run_nr
hash_dict = dict(scenario)
msg = json.dumps(hash_dict, sort_keys=True).encode("utf-8")
md5 = hashlib.md5(msg + b" iter : %i" % iter_num).hexdigest()
msg = msg[:-1] + (', "sim_md5": "%s"}' % md5).encode("utf-8")
except:
print(scenario, run_nr, iter_num)
raise
client.send_message(QueueUrl=sqs_url,
MessageBody=msg.decode("utf-8"))
def gen_social_opts():
sd_impacts = np.arange(0.1, 1.0, 0.1)
sd_starts = np.arange(0.05, 0.30, 0.05) #[0.05, 0.10, 0.15, 0.20, 0.25]
sd_stops = [0.01, 0.04, 0.09, 0.14, 0.19, 0.24]
sd_recovereds = [True, False]
know_rate_sicks = np.arange(0.1, 1.0, 0.1) #[0.1, 0.5, 0.9, 1.0]
know_rate_recs = np.arange(0.1, 1.0, 0.1) #[0.1, 0.5, 0.9, 1.0]
party_freqs = np.arange(1, 14, dtype=np.int32) #7, 13]
party_R_boosts = np.arange(1, 5, dtype=np.int32)
return dict(sd_impact=sd_impacts, sd_conf=list(itertools.product(*[sd_starts, sd_stops])),
sd_recovered=sd_recovereds, know_rate_sick=know_rate_sicks,
know_rate_recovered=know_rate_recs, party_conf=list(itertools.product(*[party_freqs, party_R_boosts])))
def gen_desease_opts():
R_spreads = np.arange(2.0, 3.0)
DDs = range(5, 15)
fatalities = np.arange(0.05, 0.20, 0.01)
initial_sicks = [5]
return dict(R_spread=R_spreads, desease_duration=DDs, fatality=fatalities)
def gen_env_opts():
agent_options = [500]
size_options = [(600, 600)]
return list(itertools.product(*[agent_options, size_options]))
social_opts = gen_social_opts()
desease_opts = gen_desease_opts()
env_opts = gen_env_opts()
scenarios = []
num_scen = 0
total_scens = len(social_opts) * len(desease_opts) # * len(env_opts)
print("Generating: %i scenarios" % total_scens)
base_social = get_social_conf(_sd_impact=0.9, _sd_start=0.05, _sd_stop=0.01, _sd_recovered=True,
_know_rate_sick=0.9, _know_rate_recovered=0.1, _party_freq=0, _party_R_boost=1)
base_desease = get_deases_conf(_R_spread=2.3, _dd=8, _fatality=0.1, _initial_sick=5)
run_nr = 1003
for onum, opts in enumerate([social_opts, desease_opts]):
for key in opts:
s = dict(base_social)
d = dict(base_desease)
for v in opts[key]:
t = s if onum == 0 else d
if key == "sd_conf":
if v[0] <= v[1]:
continue
t["sd_start"] = v[0]
t["sd_stop"] = v[1]
elif key == "party_conf":
t["party_freq"] = int(v[0])
t["party_R_boost"] = int(v[1])
else:
t[key] = v
for i in range(100):
scene = dict(num_agents=500, height=600, width=600, fps=10, agent_radius=3,
social_conf=s, desease_conf=d)
num_scen += 1
post_simjob(scene, run_nr, i)
run_nr += 1
for agents in range(500, 1000, 50):
for density in [1., 0.9, 0.75, 1.1, 1.25]:
scaling = np.sqrt(agents/500) * density
for i in range(100):
scene = dict(num_agents=agents, height=600*scaling, width=600*scaling, fps=10, agent_radius=3,
social_conf=base_social, desease_conf=base_desease)
num_scen += 1
post_simjob(scene, run_nr, i)
if agents == 500 and density == 1.:
print(run_nr)
run_nr += 1
print(num_scen)
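
# Illustrative sketch (not part of the original script): how a worker process
# might pull one queued scenario back off the same SQS queue and decode it.
# The helper name is hypothetical; only existing boto3 SQS calls
# (receive_message, delete_message) are used.
def receive_one_simjob():
    resp = client.receive_message(QueueUrl=sqs_url, MaxNumberOfMessages=1)
    for msg in resp.get("Messages", []):
        scenario = json.loads(msg["Body"])
        client.delete_message(QueueUrl=sqs_url, ReceiptHandle=msg["ReceiptHandle"])
        return scenario
    return None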
|
the-stack_0_25343
|
import sys
def villages(file_path):
"""
Day 12 | Part 1 & 2
http://adventofcode.com/2017/day/12
Finds how many numbers are connected to 0
Takes a file as argument and
creates 'output_121' containing the result for Part 1
and 'output_122' with the result for Part 2
"""
villages = {}
same_group = set()
group_count = 0
    # recursively collects, into `same_group`, every item reachable from the
    # given starting location
def find_connections(location):
same_group.add(location)
for i in villages[location]:
            # strip the trailing comma, if present
if i[-1] == ',':
i = i[0:-1]
if i in same_group:
pass
else:
find_connections(i)
with open(file_path, 'r') as f:
for line in f:
line = line.strip().split()
villages[line[0].strip()] = line[2::]
# Part 1
# finds how many items there are in the group where '0' is
find_connections('0')
same_group = set(same_group)
with open('output_121', 'w') as f:
f.write(str(len(same_group)))
# Part 2
while len(villages) > 0:
same_group = set()
item = list(villages.keys())
find_connections(item[0])
group_count += 1
for i in same_group:
del villages[i]
with open('output_122', 'w') as f:
f.write(str(group_count))
if __name__ == '__main__':
villages(sys.argv[-1])
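
# Example input format (Advent of Code 2017, day 12); each line lists a
# program and the programs it is directly connected to. Illustrative only:
#
#   0 <-> 2
#   1 <-> 1
#   2 <-> 0, 3, 4
#
# Running this script on such a file writes the size of the group containing
# '0' to 'output_121' and the total number of groups to 'output_122'.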
|
the-stack_0_25344
|
# ===--- gyb_stdlib_support.py -----------------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
TRAVERSALS = ['Forward', 'Bidirectional', 'RandomAccess']
def collectionForTraversal(traversal): # noqa (N802 function name should be lowercase)
if traversal == 'Forward':
return 'Collection'
elif traversal == 'Bidirectional':
return 'BidirectionalCollection'
elif traversal == 'RandomAccess':
return 'RandomAccessCollection'
else:
raise ValueError("Unknown traversal %r" % traversal)
def sliceTypeName(traversal, mutable, rangeReplaceable): # noqa (N802)
name = collectionForTraversal(traversal).replace('Collection', 'Slice')
if rangeReplaceable:
name = 'RangeReplaceable' + name
if mutable:
name = 'Mutable' + name
return name
def protocolsForCollectionFeatures(traversal, mutable, rangeReplaceable): # noqa (N802)
protocols = [collectionForTraversal(traversal)]
if mutable:
protocols.append('MutableCollection')
if rangeReplaceable:
protocols.append('RangeReplaceableCollection')
return protocols
def defaultIndicesForTraversal(traversal): # noqa (N802)
if traversal == 'Forward':
return 'DefaultIndices'
elif traversal == 'Bidirectional':
return 'DefaultBidirectionalIndices'
elif traversal == 'RandomAccess':
return 'DefaultRandomAccessIndices'
else:
raise ValueError("Unknown traversal %r" % traversal)
def documentationNameForTraversal(traversal): # noqa (N802)
if traversal == 'Forward':
return 'collection'
elif traversal == 'Bidirectional':
return 'bidirectional collection'
elif traversal == 'RandomAccess':
return 'random-access collection'
else:
raise ValueError("Unknown traversal %r" % traversal)
|
the-stack_0_25345
|
from Bio.PDB.Atom import Atom
from Bio.PDB.PDBIO import PDBIO
from Bio.PDB import PDBParser, Polypeptide
from Bio import SVDSuperimposer
import numpy as np
import os
import sys
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
VW_RADII = {
"ALA": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0
},
"CYS": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"SG": 1.8
},
"ASP": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"OD1": 1.5,
"OD2": 1.5
},
"GLU": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 1.7,
"OE1": 1.5,
"OE2": 1.5
},
"PHE": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"CD1": 1.9,
"CD2": 1.9,
"CE1": 1.9,
"CE2": 1.9,
"CZ": 1.9
},
"GLY": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4
},
"HIS": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"ND1": 1.7,
"CD2": 1.9,
"CE1": 1.9,
"NE2": 1.7
},
"ILE": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG1": 2.0,
"CG2": 2.0,
"CD1": 2.0
},
"LYS": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 2.0,
"CE": 2.0,
"NZ": 2.0
},
"LEU": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD1": 2.0,
"CD2": 2.0
},
"MET": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"SD": 1.8,
"CE": 2.0
},
"ASN": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"OD1": 1.6,
"ND2": 1.6
},
"PRO": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 2.0
},
"GLN": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 1.7,
"OE1": 1.6,
"NE2": 1.6
},
"ARG": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 2.0,
"CD": 2.0,
"NE": 1.7,
"CZ": 2.0,
"NH1": 2.0,
"NH2": 2.0
},
"SER": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"OG": 1.6
},
"THR": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"OG1": 1.6,
"CG2": 2.0
},
"VAL": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG1": 2.0,
"CG2": 2.0
},
"TRP": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"CD1": 1.9,
"CD2": 1.7,
"NE1": 1.7,
"CE2": 1.7,
"CE3": 1.9,
"CZ2": 1.9,
"CZ3": 1.9,
"CH2": 1.9
},
"TYR": {
"N": 1.7,
"CA": 2.0,
"C": 1.7,
"O": 1.4,
"CB": 2.0,
"CG": 1.7,
"CD1": 1.9,
"CD2": 1.9,
"CE1": 1.9,
"CE2": 1.9,
"CZ": 1.7,
"OH": 1.6
}
}
CHI_ANGLES = {"CHI1": {'CYS': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'SG']},
'ASP': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'SER': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'OG']},
'GLN': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'LYS': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'ILE': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG1']},
'PRO': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'THR': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'OG1']},
'PHE': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'ASN': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'HIS': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'LEU': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'ARG': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'TRP': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'VAL': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG1']},
'GLU': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'TYR': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']},
'MET': {'axis': ['CA', 'CB'], 'ref_plane': ['N', 'CA', 'CB', 'CG']}},
"CHI2": {
'ASP': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'OD1']},
'GLN': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'LYS': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'ILE': {'axis': ['CB', 'CG1'], 'ref_plane': ['CA', 'CB', 'CG1', 'CD1']},
'PRO': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'PHE': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'ASN': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'OD1']},
'HIS': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'ND1']},
'LEU': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'ARG': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'TRP': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'GLU': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD']},
'TYR': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'CD1']},
'MET': {'axis': ['CB', 'CG'], 'ref_plane': ['CA', 'CB', 'CG', 'SD']},
},
"CHI3": {
'ARG': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'NE']},
'GLN': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'OE1']},
'GLU': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'OE1']},
'LYS': {'axis': ['CG', 'CD'], 'ref_plane': ['CB', 'CG', 'CD', 'CE']},
'MET': {'axis': ['CG', 'SD'], 'ref_plane': ['CB', 'CG', 'SD', 'CE']},
},
"CHI4": {
'ARG': {'axis': ['CD', 'NE'], 'ref_plane': ['CG', 'CD', 'NE', 'CZ']},
        'LYS': {'axis': ['CD', 'CE'], 'ref_plane': ['CG', 'CD', 'CE', 'NZ']},
}
}
RESIDUE_ORDER = {'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG'],
'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2'],
'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG'],
'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE2', 'OE1'],
'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ'],
'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1'],
'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD'],
'THR': ['N', 'CA', 'C', 'O', 'CB', 'CG2', 'OG1'],
'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ'],
'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND2', 'OD1'],
'GLY': ['N', 'CA', 'C', 'O'],
'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD2', 'ND1', 'CE1', 'NE2'],
'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2'],
'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'NE1', 'CZ2', 'CZ3', 'CH2'],
'ALA': ['N', 'CA', 'C', 'O', 'CB'],
'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2'],
'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2'],
'TYR': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH'],
'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE']}
def load_rotamers(rotamer_loc="{}/rotamers.lib".format(DATA_DIR)):
_dunbrack = {}
with open(rotamer_loc) as fn:
for line in fn:
if line.startswith("#"):
continue
if not line.split()[0] in _dunbrack:
_dunbrack[line.split()[0]] = {}
if not int(line.split()[1]) in _dunbrack[line.split()[0]]:
_dunbrack[line.split()[0]][int(line.split()[1])] = {}
if not int(line.split()[2]) in _dunbrack[line.split()[0]][int(line.split()[1])]:
_dunbrack[line.split()[0]][int(line.split()[1])][int(line.split()[2])] = []
_dunbrack[line.split()[0]][int(line.split()[1])][int(line.split()[2])].append({
'prob': float(line.split()[8]),
'CHI1': float(line.split()[9]),
'CHI2': float(line.split()[10]),
'CHI3': float(line.split()[11]),
'CHI4': float(line.split()[12])
})
return _dunbrack
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
from scipy.linalg import expm, norm
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
def dihedral_from_vectors(v1, v2, v3, v4):
"""Praxeolitic formula
1 sqrt, 1 cross product"""
b0 = -1.0 * (v2 - v1)
b1 = v3 - v2
b2 = v4 - v3
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= np.linalg.norm(b1)
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = b0 - np.dot(b0, b1) * b1
w = b2 - np.dot(b2, b1) * b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)
def distance(x, y):
return np.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 + (x[2] - y[2]) ** 2)
def select_best_rotemer_based_on_clashes(pdb_object, chain, res_num, mutate_to, sample_residue, rotamers):
best_rotamer = None
lowest_energy = float('inf')
for rotamer in rotamers:
vdw_energy = 0
# Introduce the rotamer
for angle in ['CHI1', 'CHI2', 'CHI3', 'CHI4']:
if mutate_to not in CHI_ANGLES[angle]:
continue
dihedral_start = dihedral_from_vectors(
*[sample_residue[x] for x in CHI_ANGLES[angle][mutate_to]['ref_plane']])
rotation_angle = dihedral_start - np.deg2rad(rotamer[angle])
axis = CHI_ANGLES[angle][mutate_to]['axis']
# print(angle)
for atom in RESIDUE_ORDER[mutate_to][RESIDUE_ORDER[mutate_to].index(axis[1]) + 1:]:
sample_residue[atom] = np.dot(
rotation_matrix(sample_residue[axis[0]] - sample_residue[axis[1]], rotation_angle),
sample_residue[atom] - sample_residue[axis[1]]) + sample_residue[axis[1]]
for rotamer_atom, rotamer_vector in sample_residue.items():
for residues in list(pdb_object[0][chain].get_residues()):
for residue_atoms in list(residues.get_atoms()):
if residues.get_id()[1] == res_num: # Skip itself
continue
# print(residues.get_id()[1], residue_atoms.get_id())
# print(residues.get_resname(), residue_atoms.coord, rotamer_atom, rotamer_vector)
dist = distance(residue_atoms.coord, rotamer_vector)
if dist > 6:
continue
try:
vdw_radi = VW_RADII[residues.get_resname()][residue_atoms.get_id()] + VW_RADII[mutate_to][rotamer_atom]
except KeyError:
continue
# print(residues.get_id()[1], residue_atoms.get_id(), rotamer_atom, dist, ((vdw_radi / dist) ** 12 - (vdw_radi / dist) ** 6))
vdw_energy += ((vdw_radi / dist) ** 12 - (vdw_radi / dist) ** 6)
# print(rotamer, vdw_energy)
# print('________________________')
if vdw_energy < lowest_energy:
lowest_energy = vdw_energy
best_rotamer = rotamer
return best_rotamer
def mutate(pdb_obj, chain, res_num, mutate_to, rotamer_lib=None, mutation_type="best"):
_residue, _residue_idx = [(x, n) for n, x in enumerate(pdb_obj[0][chain].get_residues()) if x.get_id()[1] == res_num][0]
# print(_residue)
_residue_atoms = list(_residue.get_atoms())
for atom in _residue_atoms:
if atom.name not in ['C', 'N', 'CA', 'O']:
residue = atom.parent
residue.detach_child(atom.id)
polypeptide = Polypeptide.Polypeptide(pdb_obj[0][chain])
phi, psi = polypeptide.get_phi_psi_list()[_residue_idx]
if not phi:
phi = 0
if not psi:
psi = 0
phi, psi = round(np.rad2deg(phi), -1), round(np.rad2deg(psi), -1)
# print(phi, psi)
# print(_residue['N'].coord)
sample_residue = {}
with open('{}/{}.pdb'.format(DATA_DIR, mutate_to.upper())) as fn:
for line in fn:
sample_residue[line[12:16].strip()] = np.array([float(line[30:38]), float(line[38:46]), float(line[46:54])])
starting_points = np.mat([sample_residue["N"], sample_residue["CA"], sample_residue["C"]])
end_points = np.mat([_residue["N"].coord, _residue["CA"].coord, _residue["C"].coord])
sup = SVDSuperimposer.SVDSuperimposer()
sup.set(end_points, starting_points)
sup.run()
rot, tran = sup.get_rotran()
for atom, coords in sample_residue.items():
sample_residue[atom] = np.squeeze(np.asarray(np.dot(coords, rot) + tran))
# print(pymut.vector_distance(sample_residue['N'], _residue["N"].coord))
# print(f"Structure has {len(list(structure.get_atoms()))} atoms")
if mutate_to not in ["ALA", "GLY"]:
if not rotamer_lib:
rotamer_lib = load_rotamers()
if mutation_type == 'first':
selected_rotamer = sorted(rotamer_lib[mutate_to][phi][psi], key=lambda x: x['prob'], reverse=True)[0]
elif mutation_type == 'random':
p = np.array([x['prob'] for x in rotamer_lib[mutate_to][phi][psi]])
p /= p.sum()
selected_rotamer = np.random.choice(rotamer_lib[mutate_to][phi][psi], p=p)
elif mutation_type == 'best':
selected_rotamer = select_best_rotemer_based_on_clashes(pdb_obj, chain, _residue_idx, mutate_to, sample_residue, rotamer_lib[mutate_to][phi][psi])
# Introduce the rotamer
for angle in ['CHI1', 'CHI2', 'CHI3', 'CHI4']:
if mutate_to not in CHI_ANGLES[angle]:
continue
dihedral_start = dihedral_from_vectors(*[sample_residue[x] for x in CHI_ANGLES[angle][mutate_to]['ref_plane']])
rotation_angle = dihedral_start - np.deg2rad(selected_rotamer[angle])
axis = CHI_ANGLES[angle][mutate_to]['axis']
# print(angle)
for atom in RESIDUE_ORDER[mutate_to][RESIDUE_ORDER[mutate_to].index(axis[1]) + 1:]:
sample_residue[atom] = np.dot(
rotation_matrix(sample_residue[axis[0]] - sample_residue[axis[1]], rotation_angle),
sample_residue[atom] - sample_residue[axis[1]]) + sample_residue[axis[1]]
for atom, coord in sample_residue.items():
if atom not in ['C', 'N', 'CA', 'O']:
new_atom = Atom(
name=atom,
element=atom[0],
fullname="{}{}".format(" " * (4 - len(atom)), atom), # for writing the structure, should be 4-char long
coord=np.asarray(coord),
bfactor=1.0,
altloc=" ",
occupancy=1.0,
serial_number=9999 # does not matter much, only for writing the struct.
)
_residue.add(new_atom)
_residue.resname = mutate_to
return
def gener_all_rotamers(pdb_obj, chain, res_num, mutate_to, rotamer_lib=None):
_residue, _residue_idx = [(x, n) for n, x in enumerate(pdb_obj[0][chain].get_residues()) if x.get_id()[1] == res_num][0]
# print(_residue)
_residue_atoms = list(_residue.get_atoms())
for atom in _residue_atoms:
if atom.name not in ['C', 'N', 'CA', 'O']:
residue = atom.parent
residue.detach_child(atom.id)
polypeptide = Polypeptide.Polypeptide(pdb_obj[0][chain])
phi, psi = polypeptide.get_phi_psi_list()[_residue_idx]
if not phi:
phi = 0
if not psi:
psi = 0
phi, psi = round(np.rad2deg(phi), -1), round(np.rad2deg(psi), -1)
# print(phi, psi)
# print(_residue['N'].coord)
sample_residue = {}
with open('{}/{}.pdb'.format(DATA_DIR, mutate_to.upper())) as fn:
for line in fn:
sample_residue[line[12:16].strip()] = np.array([float(line[30:38]), float(line[38:46]), float(line[46:54])])
starting_points = np.mat([sample_residue["N"], sample_residue["CA"], sample_residue["C"]])
try:
end_points = np.mat([_residue["N"].coord, _residue["CA"].coord, _residue["C"].coord])
except KeyError:
sys.stderr.write("Missing backbone residues at {}".format(pdb_obj))
return
sup = SVDSuperimposer.SVDSuperimposer()
sup.set(end_points, starting_points)
sup.run()
rot, tran = sup.get_rotran()
for atom, coords in sample_residue.items():
sample_residue[atom] = np.squeeze(np.asarray(np.dot(coords, rot) + tran))
# print(pymut.vector_distance(sample_residue['N'], _residue["N"].coord))
# print(f"Structure has {len(list(structure.get_atoms()))} atoms")
if mutate_to not in ["ALA", "GLY"]:
if not rotamer_lib:
rotamer_lib = load_rotamers()
for selected_rotamer in rotamer_lib[mutate_to][phi][psi]:
_residue_atoms = list(_residue.get_atoms())
for atom in _residue_atoms:
if atom.name not in ['C', 'N', 'CA', 'O']:
_residue = atom.parent
_residue.detach_child(atom.id)
# Introduce the rotamer
for angle in ['CHI1', 'CHI2', 'CHI3', 'CHI4']:
if mutate_to not in CHI_ANGLES[angle]:
continue
dihedral_start = dihedral_from_vectors(*[sample_residue[x] for x in CHI_ANGLES[angle][mutate_to]['ref_plane']])
rotation_angle = dihedral_start - np.deg2rad(selected_rotamer[angle])
axis = CHI_ANGLES[angle][mutate_to]['axis']
# print(angle)
for atom in RESIDUE_ORDER[mutate_to][RESIDUE_ORDER[mutate_to].index(axis[1]) + 1:]:
sample_residue[atom] = np.dot(
rotation_matrix(sample_residue[axis[0]] - sample_residue[axis[1]], rotation_angle),
sample_residue[atom] - sample_residue[axis[1]]) + sample_residue[axis[1]]
for atom, coord in sample_residue.items():
if atom not in ['C', 'N', 'CA', 'O']:
new_atom = Atom(
name=atom,
element=atom[0],
fullname="{}{}".format(" " * (4 - len(atom)), atom),
# for writing the structure, should be 4-char long
coord=np.asarray(coord),
bfactor=1.0,
altloc=" ",
occupancy=1.0,
serial_number=9999 # does not matter much, only for writing the struct.
)
_residue.add(new_atom)
_residue.resname = mutate_to
yield pdb_obj
else:
for atom, coord in sample_residue.items():
if atom not in ['C', 'N', 'CA', 'O']:
new_atom = Atom(
name=atom,
element=atom[0],
fullname="{}{}".format(" " * (4 - len(atom)), atom), # for writing the structure, should be 4-char long
coord=np.asarray(coord),
bfactor=1.0,
altloc=" ",
occupancy=1.0,
serial_number=9999 # does not matter much, only for writing the struct.
)
_residue.add(new_atom)
_residue.resname = mutate_to
yield pdb_obj
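
# Illustrative usage sketch (not part of the original module). Assumes an
# 'input.pdb' file exists with a chain 'A' containing residue 42, and that the
# rotamer library and residue templates shipped in DATA_DIR are available.
if __name__ == "__main__":
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("prot", "input.pdb")
    # Replace residue 42 of chain A with tyrosine, picking the rotamer with the
    # lowest van der Waals clash energy.
    mutate(structure, chain="A", res_num=42, mutate_to="TYR", mutation_type="best")
    io = PDBIO()
    io.set_structure(structure)
    io.save("mutated.pdb")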
|
the-stack_0_25347
|
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos_lldp_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
dict_diff,
param_list_to_dict,
)
from ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.facts import (
Facts,
)
from ansible_collections.arista.eos.plugins.module_utils.network.eos.utils.utils import (
normalize_interface,
)
class Lldp_interfaces(ConfigBase):
"""
The eos_lldp_interfaces class
"""
gather_subset = ["!all", "!min"]
gather_network_resources = ["lldp_interfaces"]
def get_lldp_interfaces_facts(self, data=None):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(
self.gather_subset, self.gather_network_resources, data=data
)
lldp_interfaces_facts = facts["ansible_network_resources"].get(
"lldp_interfaces"
)
if not lldp_interfaces_facts:
return []
return lldp_interfaces_facts
def execute_module(self):
""" Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
result = {"changed": False}
warnings = list()
commands = list()
if self.state in self.ACTION_STATES:
existing_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
else:
existing_lldp_interfaces_facts = []
if self.state in self.ACTION_STATES or self.state == "rendered":
commands.extend(self.set_config(existing_lldp_interfaces_facts))
if commands and self.state in self.ACTION_STATES:
if not self._module.check_mode:
self._connection.edit_config(commands)
result["changed"] = True
if self.state in self.ACTION_STATES:
result["commands"] = commands
if self.state in self.ACTION_STATES or self.state == "gathered":
changed_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
elif self.state == "rendered":
result["rendered"] = commands
elif self.state == "parsed":
running_config = self._module.params["running_config"]
if not running_config:
self._module.fail_json(
msg="value of running_config parameter must not be empty for state parsed"
)
result["parsed"] = self.get_lldp_interfaces_facts(
data=running_config
)
if self.state in self.ACTION_STATES:
result["before"] = existing_lldp_interfaces_facts
if result["changed"]:
result["after"] = changed_lldp_interfaces_facts
elif self.state == "gathered":
result["gathered"] = changed_lldp_interfaces_facts
result["warnings"] = warnings
return result
def set_config(self, existing_lldp_interfaces_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
want = self._module.params["config"]
have = existing_lldp_interfaces_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
state = self._module.params["state"]
if (
state in ("merged", "replaced", "overridden", "rendered")
and not want
):
self._module.fail_json(
msg="value of config parameter must not be empty for state {0}".format(
state
)
)
want = param_list_to_dict(want, remove_key=False)
have = param_list_to_dict(have, remove_key=False)
if state == "overridden":
commands = self._state_overridden(want, have)
elif state == "deleted":
commands = self._state_deleted(want, have)
elif state == "merged" or state == "rendered":
commands = self._state_merged(want, have)
elif state == "replaced":
commands = self._state_replaced(want, have)
return commands
@staticmethod
def _state_replaced(want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for key, desired in want.items():
interface_name = normalize_interface(key)
if interface_name in have:
extant = have[interface_name]
else:
extant = dict(name=interface_name)
add_config = dict_diff(extant, desired)
del_config = dict_diff(desired, extant)
commands.extend(
generate_commands(interface_name, add_config, del_config)
)
return commands
@staticmethod
def _state_overridden(want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for key, extant in have.items():
if key in want:
desired = want[key]
else:
desired = dict(name=key)
add_config = dict_diff(extant, desired)
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(key, add_config, del_config))
return commands
@staticmethod
def _state_merged(want, have):
""" The command generator when state is merged
:rtype: A list
:returns: the commands necessary to merge the provided into
the current configuration
"""
commands = []
for key, desired in want.items():
interface_name = normalize_interface(key)
if interface_name in have:
extant = have[interface_name]
else:
extant = dict(name=interface_name)
add_config = dict_diff(extant, desired)
commands.extend(generate_commands(interface_name, add_config, {}))
return commands
@staticmethod
def _state_deleted(want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
commands = []
for key in want.keys():
interface_name = normalize_interface(key)
desired = dict(name=interface_name)
if interface_name in have:
extant = have[interface_name]
else:
continue
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(interface_name, {}, del_config))
return commands
def generate_commands(name, to_set, to_remove):
commands = []
for key, value in to_set.items():
if value is None:
continue
prefix = "" if value else "no "
commands.append("{0}lldp {1}".format(prefix, key))
for key in to_remove:
commands.append("lldp {0}".format(key))
if commands:
commands.insert(0, "interface {0}".format(name))
return commands
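
# Illustrative sketch (not part of the shipped module): generate_commands()
# builds the per-interface EOS command list. The interface name and settings
# below are made up for demonstration, and running this file standalone still
# requires the ansible.netcommon and arista.eos collections imported above.
if __name__ == "__main__":
    print(generate_commands("Ethernet1", {"receive": False}, ["transmit"]))
    # -> ['interface Ethernet1', 'no lldp receive', 'lldp transmit']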
|
the-stack_0_25348
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# reduce_mean paddle model generator
#
import numpy as np
import sys
from save_model import saveModel
def reduce_mean(name : str, x, axis=None, keepdim=False):
import paddle
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
out = paddle.mean(data_x, axis=axis, keepdim=keepdim)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())
outs = exe.run(
feed={'x': x},
fetch_list=[out])
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
return outs[0]
def main():
data = np.array([[[1.0,2.0], [3.0, 4.0]], [[5.0,6.0], [7.0, 8.0]]]).astype(np.float32)
reduce_mean("reduce_mean_test_0", data)
reduce_mean("reduce_mean_test_1", data, axis=0, keepdim=False)
reduce_mean("reduce_mean_test_2", data, axis=-1, keepdim=False)
reduce_mean("reduce_mean_test_3", data, axis=1, keepdim=True)
reduce_mean("reduce_mean_test_4", data, axis=[1,2], keepdim=False)
reduce_mean("reduce_mean_test_5", data, axis=[0,1], keepdim=True)
if __name__ == "__main__":
main()
|
the-stack_0_25350
|
from kivy.uix.label import Label
from kivy.properties import ListProperty
from kivy.factory import Factory
from kivy.lang import Builder
Builder.load_string("""
<MyLabel>:
bcolor: 1, 1, 1, 1
canvas.before:
Color:
rgba: self.bcolor
Rectangle:
pos: self.pos
size: self.size
""")
class MyLabel(Label):
bcolor = ListProperty([1,1,1,1])
Factory.register('KivyB', module='MyLabel')
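
# Minimal demo app (illustrative, not part of the original snippet; assumes a
# working Kivy display backend is available):
if __name__ == '__main__':
    from kivy.app import App

    class DemoApp(App):
        def build(self):
            # bcolor drives the Rectangle drawn in canvas.before
            return MyLabel(text='Hello', bcolor=[0.2, 0.4, 0.8, 1])

    DemoApp().run()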
|
the-stack_0_25353
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from copy import deepcopy
from datetime import datetime
import boto3
import pytz
import six
import sure # noqa
from botocore.exceptions import ClientError
from nose.tools import assert_raises
from moto import mock_emr
run_job_flow_args = dict(
Instances={
'InstanceCount': 3,
'KeepJobFlowAliveWhenNoSteps': True,
'MasterInstanceType': 'c3.medium',
'Placement': {'AvailabilityZone': 'us-east-1a'},
'SlaveInstanceType': 'c3.xlarge',
},
JobFlowRole='EMR_EC2_DefaultRole',
LogUri='s3://mybucket/log',
Name='cluster',
ServiceRole='EMR_DefaultRole',
VisibleToAllUsers=True)
input_instance_groups = [
{'InstanceCount': 1,
'InstanceRole': 'MASTER',
'InstanceType': 'c1.medium',
'Market': 'ON_DEMAND',
'Name': 'master'},
{'InstanceCount': 3,
'InstanceRole': 'CORE',
'InstanceType': 'c1.medium',
'Market': 'ON_DEMAND',
'Name': 'core'},
{'InstanceCount': 6,
'InstanceRole': 'TASK',
'InstanceType': 'c1.large',
'Market': 'SPOT',
'Name': 'task-1',
'BidPrice': '0.07'},
{'InstanceCount': 10,
'InstanceRole': 'TASK',
'InstanceType': 'c1.xlarge',
'Market': 'SPOT',
'Name': 'task-2',
'BidPrice': '0.05'},
]
@mock_emr
def test_describe_cluster():
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
args['Applications'] = [{'Name': 'Spark', 'Version': '2.4.2'}]
args['Configurations'] = [
{'Classification': 'yarn-site',
'Properties': {'someproperty': 'somevalue',
'someotherproperty': 'someothervalue'}}]
args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master']
args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave']
args['Instances']['Ec2KeyName'] = 'mykey'
args['Instances']['Ec2SubnetId'] = 'subnet-8be41cec'
args['Instances']['EmrManagedMasterSecurityGroup'] = 'master-security-group'
args['Instances']['EmrManagedSlaveSecurityGroup'] = 'slave-security-group'
args['Instances']['KeepJobFlowAliveWhenNoSteps'] = False
args['Instances']['ServiceAccessSecurityGroup'] = 'service-access-security-group'
args['Tags'] = [{'Key': 'tag1', 'Value': 'val1'},
{'Key': 'tag2', 'Value': 'val2'}]
cluster_id = client.run_job_flow(**args)['JobFlowId']
cl = client.describe_cluster(ClusterId=cluster_id)['Cluster']
cl['Applications'][0]['Name'].should.equal('Spark')
cl['Applications'][0]['Version'].should.equal('2.4.2')
cl['AutoTerminate'].should.equal(True)
config = cl['Configurations'][0]
config['Classification'].should.equal('yarn-site')
config['Properties'].should.equal(args['Configurations'][0]['Properties'])
attrs = cl['Ec2InstanceAttributes']
attrs['AdditionalMasterSecurityGroups'].should.equal(args['Instances']['AdditionalMasterSecurityGroups'])
attrs['AdditionalSlaveSecurityGroups'].should.equal(args['Instances']['AdditionalSlaveSecurityGroups'])
attrs['Ec2AvailabilityZone'].should.equal('us-east-1a')
attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName'])
attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId'])
attrs['EmrManagedMasterSecurityGroup'].should.equal(args['Instances']['EmrManagedMasterSecurityGroup'])
attrs['EmrManagedSlaveSecurityGroup'].should.equal(args['Instances']['EmrManagedSlaveSecurityGroup'])
attrs['IamInstanceProfile'].should.equal(args['JobFlowRole'])
attrs['ServiceAccessSecurityGroup'].should.equal(args['Instances']['ServiceAccessSecurityGroup'])
cl['Id'].should.equal(cluster_id)
cl['LogUri'].should.equal(args['LogUri'])
cl['MasterPublicDnsName'].should.be.a(six.string_types)
cl['Name'].should.equal(args['Name'])
cl['NormalizedInstanceHours'].should.equal(0)
# cl['ReleaseLabel'].should.equal('emr-5.0.0')
cl.shouldnt.have.key('RequestedAmiVersion')
cl['RunningAmiVersion'].should.equal('1.0.0')
# cl['SecurityConfiguration'].should.be.a(six.string_types)
cl['ServiceRole'].should.equal(args['ServiceRole'])
status = cl['Status']
status['State'].should.equal('TERMINATED')
# cluster['Status']['StateChangeReason']
status['Timeline']['CreationDateTime'].should.be.a('datetime.datetime')
# status['Timeline']['EndDateTime'].should.equal(datetime(2014, 1, 24, 2, 19, 46, tzinfo=pytz.utc))
status['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime')
dict((t['Key'], t['Value']) for t in cl['Tags']).should.equal(
dict((t['Key'], t['Value']) for t in args['Tags']))
cl['TerminationProtected'].should.equal(False)
cl['VisibleToAllUsers'].should.equal(True)
@mock_emr
def test_describe_job_flows():
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
expected = {}
for idx in range(400):
cluster_name = 'cluster' + str(idx)
args['Name'] = cluster_name
cluster_id = client.run_job_flow(**args)['JobFlowId']
expected[cluster_id] = {
'Id': cluster_id,
'Name': cluster_name,
'State': 'WAITING'
}
# need sleep since it appears the timestamp is always rounded to
# the nearest second internally
time.sleep(1)
timestamp = datetime.now(pytz.utc)
time.sleep(1)
for idx in range(400, 600):
cluster_name = 'cluster' + str(idx)
args['Name'] = cluster_name
cluster_id = client.run_job_flow(**args)['JobFlowId']
client.terminate_job_flows(JobFlowIds=[cluster_id])
expected[cluster_id] = {
'Id': cluster_id,
'Name': cluster_name,
'State': 'TERMINATED'
}
resp = client.describe_job_flows()
resp['JobFlows'].should.have.length_of(512)
for cluster_id, y in expected.items():
resp = client.describe_job_flows(JobFlowIds=[cluster_id])
resp['JobFlows'].should.have.length_of(1)
resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id)
resp = client.describe_job_flows(JobFlowStates=['WAITING'])
resp['JobFlows'].should.have.length_of(400)
for x in resp['JobFlows']:
x['ExecutionStatusDetail']['State'].should.equal('WAITING')
resp = client.describe_job_flows(CreatedBefore=timestamp)
resp['JobFlows'].should.have.length_of(400)
resp = client.describe_job_flows(CreatedAfter=timestamp)
resp['JobFlows'].should.have.length_of(200)
@mock_emr
def test_describe_job_flow():
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
args['AmiVersion'] = '3.8.1'
args['Instances'].update(
{'Ec2KeyName': 'ec2keyname',
'Ec2SubnetId': 'subnet-8be41cec',
'HadoopVersion': '2.4.0'})
args['VisibleToAllUsers'] = True
cluster_id = client.run_job_flow(**args)['JobFlowId']
jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
jf['AmiVersion'].should.equal(args['AmiVersion'])
jf.shouldnt.have.key('BootstrapActions')
esd = jf['ExecutionStatusDetail']
esd['CreationDateTime'].should.be.a('datetime.datetime')
# esd['EndDateTime'].should.be.a('datetime.datetime')
# esd['LastStateChangeReason'].should.be.a(six.string_types)
esd['ReadyDateTime'].should.be.a('datetime.datetime')
esd['StartDateTime'].should.be.a('datetime.datetime')
esd['State'].should.equal('WAITING')
attrs = jf['Instances']
attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName'])
attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId'])
attrs['HadoopVersion'].should.equal(args['Instances']['HadoopVersion'])
attrs['InstanceCount'].should.equal(args['Instances']['InstanceCount'])
for ig in attrs['InstanceGroups']:
# ig['BidPrice']
ig['CreationDateTime'].should.be.a('datetime.datetime')
# ig['EndDateTime'].should.be.a('datetime.datetime')
ig['InstanceGroupId'].should.be.a(six.string_types)
ig['InstanceRequestCount'].should.be.a(int)
ig['InstanceRole'].should.be.within(['MASTER', 'CORE'])
ig['InstanceRunningCount'].should.be.a(int)
ig['InstanceType'].should.be.within(['c3.medium', 'c3.xlarge'])
# ig['LastStateChangeReason'].should.be.a(six.string_types)
ig['Market'].should.equal('ON_DEMAND')
ig['Name'].should.be.a(six.string_types)
ig['ReadyDateTime'].should.be.a('datetime.datetime')
ig['StartDateTime'].should.be.a('datetime.datetime')
ig['State'].should.equal('RUNNING')
attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True)
# attrs['MasterInstanceId'].should.be.a(six.string_types)
attrs['MasterInstanceType'].should.equal(args['Instances']['MasterInstanceType'])
attrs['MasterPublicDnsName'].should.be.a(six.string_types)
attrs['NormalizedInstanceHours'].should.equal(0)
attrs['Placement']['AvailabilityZone'].should.equal(args['Instances']['Placement']['AvailabilityZone'])
attrs['SlaveInstanceType'].should.equal(args['Instances']['SlaveInstanceType'])
attrs['TerminationProtected'].should.equal(False)
jf['JobFlowId'].should.equal(cluster_id)
jf['JobFlowRole'].should.equal(args['JobFlowRole'])
jf['LogUri'].should.equal(args['LogUri'])
jf['Name'].should.equal(args['Name'])
jf['ServiceRole'].should.equal(args['ServiceRole'])
jf['Steps'].should.equal([])
jf['SupportedProducts'].should.equal([])
jf['VisibleToAllUsers'].should.equal(True)
@mock_emr
def test_list_clusters():
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
expected = {}
for idx in range(40):
cluster_name = 'jobflow' + str(idx)
args['Name'] = cluster_name
cluster_id = client.run_job_flow(**args)['JobFlowId']
expected[cluster_id] = {
'Id': cluster_id,
'Name': cluster_name,
'NormalizedInstanceHours': 0,
'State': 'WAITING'
}
# need sleep since it appears the timestamp is always rounded to
# the nearest second internally
time.sleep(1)
timestamp = datetime.now(pytz.utc)
time.sleep(1)
for idx in range(40, 70):
cluster_name = 'jobflow' + str(idx)
args['Name'] = cluster_name
cluster_id = client.run_job_flow(**args)['JobFlowId']
client.terminate_job_flows(JobFlowIds=[cluster_id])
expected[cluster_id] = {
'Id': cluster_id,
'Name': cluster_name,
'NormalizedInstanceHours': 0,
'State': 'TERMINATED'
}
args = {}
while 1:
resp = client.list_clusters(**args)
clusters = resp['Clusters']
len(clusters).should.be.lower_than_or_equal_to(50)
for x in clusters:
y = expected[x['Id']]
x['Id'].should.equal(y['Id'])
x['Name'].should.equal(y['Name'])
x['NormalizedInstanceHours'].should.equal(y['NormalizedInstanceHours'])
x['Status']['State'].should.equal(y['State'])
x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime')
if y['State'] == 'TERMINATED':
x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
else:
x['Status']['Timeline'].shouldnt.have.key('EndDateTime')
x['Status']['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime')
marker = resp.get('Marker')
if marker is None:
break
args = {'Marker': marker}
resp = client.list_clusters(ClusterStates=['TERMINATED'])
resp['Clusters'].should.have.length_of(30)
for x in resp['Clusters']:
x['Status']['State'].should.equal('TERMINATED')
resp = client.list_clusters(CreatedBefore=timestamp)
resp['Clusters'].should.have.length_of(40)
resp = client.list_clusters(CreatedAfter=timestamp)
resp['Clusters'].should.have.length_of(30)
@mock_emr
def test_run_job_flow():
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
cluster_id = client.run_job_flow(**args)['JobFlowId']
resp = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
resp['ExecutionStatusDetail']['State'].should.equal('WAITING')
resp['JobFlowId'].should.equal(cluster_id)
resp['Name'].should.equal(args['Name'])
resp['Instances']['MasterInstanceType'].should.equal(args['Instances']['MasterInstanceType'])
resp['Instances']['SlaveInstanceType'].should.equal(args['Instances']['SlaveInstanceType'])
resp['LogUri'].should.equal(args['LogUri'])
resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers'])
resp['Instances']['NormalizedInstanceHours'].should.equal(0)
resp['Steps'].should.equal([])
@mock_emr
def test_run_job_flow_with_invalid_params():
client = boto3.client('emr', region_name='us-east-1')
with assert_raises(ClientError) as e:
# cannot set both AmiVersion and ReleaseLabel
args = deepcopy(run_job_flow_args)
args['AmiVersion'] = '2.4'
args['ReleaseLabel'] = 'emr-5.0.0'
client.run_job_flow(**args)
e.exception.response['Error']['Code'].should.equal('ValidationException')
@mock_emr
def test_run_job_flow_in_multiple_regions():
regions = {}
for region in ['us-east-1', 'eu-west-1']:
client = boto3.client('emr', region_name=region)
args = deepcopy(run_job_flow_args)
args['Name'] = region
cluster_id = client.run_job_flow(**args)['JobFlowId']
regions[region] = {'client': client, 'cluster_id': cluster_id}
for region in regions.keys():
client = regions[region]['client']
resp = client.describe_cluster(ClusterId=regions[region]['cluster_id'])
resp['Cluster']['Name'].should.equal(region)
@mock_emr
def test_run_job_flow_with_new_params():
client = boto3.client('emr', region_name='us-east-1')
resp = client.run_job_flow(**run_job_flow_args)
resp.should.have.key('JobFlowId')
@mock_emr
def test_run_job_flow_with_visible_to_all_users():
client = boto3.client('emr', region_name='us-east-1')
for expected in (True, False):
args = deepcopy(run_job_flow_args)
args['VisibleToAllUsers'] = expected
resp = client.run_job_flow(**args)
cluster_id = resp['JobFlowId']
resp = client.describe_cluster(ClusterId=cluster_id)
resp['Cluster']['VisibleToAllUsers'].should.equal(expected)
@mock_emr
def test_run_job_flow_with_instance_groups():
input_groups = dict((g['Name'], g) for g in input_instance_groups)
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
args['Instances'] = {'InstanceGroups': input_instance_groups}
cluster_id = client.run_job_flow(**args)['JobFlowId']
groups = client.list_instance_groups(ClusterId=cluster_id)['InstanceGroups']
for x in groups:
y = input_groups[x['Name']]
x.should.have.key('Id')
x['RequestedInstanceCount'].should.equal(y['InstanceCount'])
x['InstanceGroupType'].should.equal(y['InstanceRole'])
x['InstanceType'].should.equal(y['InstanceType'])
x['Market'].should.equal(y['Market'])
if 'BidPrice' in y:
x['BidPrice'].should.equal(y['BidPrice'])
@mock_emr
def test_set_termination_protection():
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
args['Instances']['TerminationProtected'] = False
resp = client.run_job_flow(**args)
cluster_id = resp['JobFlowId']
resp = client.describe_cluster(ClusterId=cluster_id)
resp['Cluster']['TerminationProtected'].should.equal(False)
for expected in (True, False):
resp = client.set_termination_protection(JobFlowIds=[cluster_id],
TerminationProtected=expected)
resp = client.describe_cluster(ClusterId=cluster_id)
resp['Cluster']['TerminationProtected'].should.equal(expected)
@mock_emr
def test_set_visible_to_all_users():
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
args['VisibleToAllUsers'] = False
resp = client.run_job_flow(**args)
cluster_id = resp['JobFlowId']
resp = client.describe_cluster(ClusterId=cluster_id)
resp['Cluster']['VisibleToAllUsers'].should.equal(False)
for expected in (True, False):
resp = client.set_visible_to_all_users(JobFlowIds=[cluster_id],
VisibleToAllUsers=expected)
resp = client.describe_cluster(ClusterId=cluster_id)
resp['Cluster']['VisibleToAllUsers'].should.equal(expected)
@mock_emr
def test_terminate_job_flows():
client = boto3.client('emr', region_name='us-east-1')
resp = client.run_job_flow(**run_job_flow_args)
cluster_id = resp['JobFlowId']
resp = client.describe_cluster(ClusterId=cluster_id)
resp['Cluster']['Status']['State'].should.equal('WAITING')
resp = client.terminate_job_flows(JobFlowIds=[cluster_id])
resp = client.describe_cluster(ClusterId=cluster_id)
resp['Cluster']['Status']['State'].should.equal('TERMINATED')
# testing multiple end points for each feature
@mock_emr
def test_bootstrap_actions():
bootstrap_actions = [
{'Name': 'bs1',
'ScriptBootstrapAction': {
'Args': ['arg1', 'arg2'],
'Path': 's3://path/to/script'}},
{'Name': 'bs2',
'ScriptBootstrapAction': {
'Args': [],
'Path': 's3://path/to/anotherscript'}}
]
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
args['BootstrapActions'] = bootstrap_actions
cluster_id = client.run_job_flow(**args)['JobFlowId']
cl = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
for x, y in zip(cl['BootstrapActions'], bootstrap_actions):
x['BootstrapActionConfig'].should.equal(y)
resp = client.list_bootstrap_actions(ClusterId=cluster_id)
for x, y in zip(resp['BootstrapActions'], bootstrap_actions):
x['Name'].should.equal(y['Name'])
if 'Args' in y['ScriptBootstrapAction']:
x['Args'].should.equal(y['ScriptBootstrapAction']['Args'])
x['ScriptPath'].should.equal(y['ScriptBootstrapAction']['Path'])
@mock_emr
def test_instance_groups():
input_groups = dict((g['Name'], g) for g in input_instance_groups)
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
for key in ['MasterInstanceType', 'SlaveInstanceType', 'InstanceCount']:
del args['Instances'][key]
args['Instances']['InstanceGroups'] = input_instance_groups[:2]
cluster_id = client.run_job_flow(**args)['JobFlowId']
jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
base_instance_count = jf['Instances']['InstanceCount']
client.add_instance_groups(JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:])
jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
jf['Instances']['InstanceCount'].should.equal(sum(g['InstanceCount'] for g in input_instance_groups))
for x in jf['Instances']['InstanceGroups']:
y = input_groups[x['Name']]
        if 'BidPrice' in y:
            x['BidPrice'].should.equal(y['BidPrice'])
x['CreationDateTime'].should.be.a('datetime.datetime')
# x['EndDateTime'].should.be.a('datetime.datetime')
x.should.have.key('InstanceGroupId')
x['InstanceRequestCount'].should.equal(y['InstanceCount'])
x['InstanceRole'].should.equal(y['InstanceRole'])
x['InstanceRunningCount'].should.equal(y['InstanceCount'])
x['InstanceType'].should.equal(y['InstanceType'])
# x['LastStateChangeReason'].should.equal(y['LastStateChangeReason'])
x['Market'].should.equal(y['Market'])
x['Name'].should.equal(y['Name'])
x['ReadyDateTime'].should.be.a('datetime.datetime')
x['StartDateTime'].should.be.a('datetime.datetime')
x['State'].should.equal('RUNNING')
groups = client.list_instance_groups(ClusterId=cluster_id)['InstanceGroups']
for x in groups:
y = input_groups[x['Name']]
        if 'BidPrice' in y:
            x['BidPrice'].should.equal(y['BidPrice'])
# Configurations
# EbsBlockDevices
# EbsOptimized
x.should.have.key('Id')
x['InstanceGroupType'].should.equal(y['InstanceRole'])
x['InstanceType'].should.equal(y['InstanceType'])
x['Market'].should.equal(y['Market'])
x['Name'].should.equal(y['Name'])
x['RequestedInstanceCount'].should.equal(y['InstanceCount'])
x['RunningInstanceCount'].should.equal(y['InstanceCount'])
# ShrinkPolicy
x['Status']['State'].should.equal('RUNNING')
x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types)
# x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types)
x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime')
# x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
x['Status']['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime')
igs = dict((g['Name'], g) for g in groups)
client.modify_instance_groups(
InstanceGroups=[
{'InstanceGroupId': igs['task-1']['Id'],
'InstanceCount': 2},
{'InstanceGroupId': igs['task-2']['Id'],
'InstanceCount': 3}])
jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
jf['Instances']['InstanceCount'].should.equal(base_instance_count + 5)
igs = dict((g['Name'], g) for g in jf['Instances']['InstanceGroups'])
igs['task-1']['InstanceRunningCount'].should.equal(2)
igs['task-2']['InstanceRunningCount'].should.equal(3)
@mock_emr
def test_steps():
input_steps = [{
'HadoopJarStep': {
'Args': [
'hadoop-streaming',
'-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter.py#wordSplitter.py',
'-mapper', 'python wordSplitter.py',
'-input', 's3://elasticmapreduce/samples/wordcount/input',
'-output', 's3://output_bucket/output/wordcount_output',
'-reducer', 'aggregate'
],
'Jar': 'command-runner.jar',
},
'Name': 'My wordcount example',
}, {
'HadoopJarStep': {
'Args': [
'hadoop-streaming',
'-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter2.py#wordSplitter2.py',
'-mapper', 'python wordSplitter2.py',
'-input', 's3://elasticmapreduce/samples/wordcount/input2',
'-output', 's3://output_bucket/output/wordcount_output2',
'-reducer', 'aggregate'
],
'Jar': 'command-runner.jar',
},
'Name': 'My wordcount example2',
}]
# TODO: implementation and test for cancel_steps
client = boto3.client('emr', region_name='us-east-1')
args = deepcopy(run_job_flow_args)
args['Steps'] = [input_steps[0]]
cluster_id = client.run_job_flow(**args)['JobFlowId']
jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
jf['Steps'].should.have.length_of(1)
client.add_job_flow_steps(JobFlowId=cluster_id, Steps=[input_steps[1]])
jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
jf['Steps'].should.have.length_of(2)
for idx, (x, y) in enumerate(zip(jf['Steps'], input_steps)):
x['ExecutionStatusDetail'].should.have.key('CreationDateTime')
# x['ExecutionStatusDetail'].should.have.key('EndDateTime')
# x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason')
# x['ExecutionStatusDetail'].should.have.key('StartDateTime')
x['ExecutionStatusDetail']['State'].should.equal('STARTING' if idx == 0 else 'PENDING')
x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER')
x['StepConfig']['HadoopJarStep']['Args'].should.equal(y['HadoopJarStep']['Args'])
x['StepConfig']['HadoopJarStep']['Jar'].should.equal(y['HadoopJarStep']['Jar'])
if 'MainClass' in y['HadoopJarStep']:
x['StepConfig']['HadoopJarStep']['MainClass'].should.equal(y['HadoopJarStep']['MainClass'])
if 'Properties' in y['HadoopJarStep']:
x['StepConfig']['HadoopJarStep']['Properties'].should.equal(y['HadoopJarStep']['Properties'])
x['StepConfig']['Name'].should.equal(y['Name'])
expected = dict((s['Name'], s) for s in input_steps)
steps = client.list_steps(ClusterId=cluster_id)['Steps']
steps.should.have.length_of(2)
for x in steps:
y = expected[x['Name']]
x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER')
x['Config']['Args'].should.equal(y['HadoopJarStep']['Args'])
x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar'])
# x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass'])
# Properties
x['Id'].should.be.a(six.string_types)
x['Name'].should.equal(y['Name'])
x['Status']['State'].should.be.within(['STARTING', 'PENDING'])
# StateChangeReason
x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime')
# x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
# x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime')
x = client.describe_step(ClusterId=cluster_id, StepId=x['Id'])['Step']
x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER')
x['Config']['Args'].should.equal(y['HadoopJarStep']['Args'])
x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar'])
# x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass'])
# Properties
x['Id'].should.be.a(six.string_types)
x['Name'].should.equal(y['Name'])
x['Status']['State'].should.be.within(['STARTING', 'PENDING'])
# StateChangeReason
x['Status']['Timeline']['CreationDateTime'].should.be.a('datetime.datetime')
# x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
# x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime')
step_id = steps[0]['Id']
steps = client.list_steps(ClusterId=cluster_id, StepIds=[step_id])['Steps']
steps.should.have.length_of(1)
steps[0]['Id'].should.equal(step_id)
steps = client.list_steps(ClusterId=cluster_id, StepStates=['STARTING'])['Steps']
steps.should.have.length_of(1)
steps[0]['Id'].should.equal(step_id)
@mock_emr
def test_tags():
input_tags = [{'Key': 'newkey1', 'Value': 'newval1'},
{'Key': 'newkey2', 'Value': 'newval2'}]
client = boto3.client('emr', region_name='us-east-1')
cluster_id = client.run_job_flow(**run_job_flow_args)['JobFlowId']
client.add_tags(ResourceId=cluster_id, Tags=input_tags)
resp = client.describe_cluster(ClusterId=cluster_id)['Cluster']
resp['Tags'].should.have.length_of(2)
dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal(dict((t['Key'], t['Value']) for t in input_tags))
client.remove_tags(ResourceId=cluster_id, TagKeys=[t['Key'] for t in input_tags])
resp = client.describe_cluster(ClusterId=cluster_id)['Cluster']
resp['Tags'].should.equal([])
|
the-stack_0_25354
|
"""OpenAPI core validation response models module"""
from openapi_core.validation.models import BaseValidationResult
class ResponseValidationResult(BaseValidationResult):
def __init__(self, errors, data=None, headers=None):
super(ResponseValidationResult, self).__init__(errors)
self.data = data
self.headers = headers
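# A minimal usage sketch (illustrative only; the error list and payload below are
# hypothetical, not produced by this module):
#
#     result = ResponseValidationResult(errors=[], data={"id": 1}, headers={})
#     if not result.errors:
#         print(result.data)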
|
the-stack_0_25356
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from prompt_toolkit.history import FileHistory
from ..core.dsl_grammar import *
#
#
# HISTORY
#
#
class SelectiveFileHistory(FileHistory):
"""
    :class:`.SelectiveFileHistory` extends :class:`FileHistory` but stores only valid
    DSL queries, i.e. strings that start with one of the allowed DSL query keywords
    (e.g. 'search').
    NOTE: this approach can be refined in the future.
"""
def __init__(self, filename):
self.filename = filename
super(SelectiveFileHistory, self).__init__(filename)
def append_string(self, string):
" Add string to the history only if it is a valid DSL query"
l = G.allowed_starts_dsl_query()
for x in l:
if string.startswith(x):
self._loaded_strings.append(string)
self.store_string(string)
return
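# A minimal usage sketch (illustrative; the history file name and query strings are
# made up, and 'search ...' is assumed to be one of the allowed DSL query starts):
#
#     history = SelectiveFileHistory(".dsl_history")
#     history.append_string('search publications for "graphene"')  # stored
#     history.append_string("not a query")                         # silently skipped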
|
the-stack_0_25359
|
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np
from context import libspn as spn
import time
import argparse
import colorama as col
import sys
from tensorflow.python.client import timeline
import os
col.init()
red = col.Fore.RED
blue = col.Fore.BLUE
green = col.Fore.GREEN
yellow = col.Fore.YELLOW
magenta = col.Fore.MAGENTA
def print1(str, file, color=yellow):
if file:
print(str, file=file)
print(color + str + col.Style.RESET_ALL)
def print2(str, file):
if file:
print(str, file=file)
print(blue + str + col.Style.RESET_ALL)
class Ops:
def dense_sing(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
# Set node-type as single-node
node_type = spn.DenseSPNGenerator.NodeType.SINGLE
# Create a dense generator
gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
num_subsets=num_subsets,
num_mixtures=num_mixtures,
num_input_mixtures=num_input_mixtures,
balanced=balanced,
node_type=node_type,
                                    input_dist=(spn.DenseSPNGenerator.InputDist.RAW
                                                if input_dist == "RAW" else
                                                spn.DenseSPNGenerator.InputDist.MIXTURE))
# Generate a dense SPN, with single-op nodes, and all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate value ops based on inf_type and log
if log:
value_op = root.get_log_value(inference_type=inf_type)
else:
value_op = root.get_value(inference_type=inf_type)
return root, spn.initialize_weights(root), value_op
def dense_block(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
        # Set node-type as block-node
node_type = spn.DenseSPNGenerator.NodeType.BLOCK
# Create a dense generator
gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
num_subsets=num_subsets,
num_mixtures=num_mixtures,
num_input_mixtures=num_input_mixtures,
balanced=balanced,
node_type=node_type,
                                    input_dist=(spn.DenseSPNGenerator.InputDist.RAW
                                                if input_dist == "RAW" else
                                                spn.DenseSPNGenerator.InputDist.MIXTURE))
# Generate a dense SPN, with block-nodes, and all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate value ops based on inf_type and log
if log:
value_op = root.get_log_value(inference_type=inf_type)
else:
value_op = root.get_value(inference_type=inf_type)
return root, spn.initialize_weights(root), value_op
def dense_layer(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
        # Set node-type as layer-node
node_type = spn.DenseSPNGenerator.NodeType.LAYER
# Create a dense generator
gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
num_subsets=num_subsets,
num_mixtures=num_mixtures,
num_input_mixtures=num_input_mixtures,
balanced=balanced,
node_type=node_type,
                                    input_dist=(spn.DenseSPNGenerator.InputDist.RAW
                                                if input_dist == "RAW" else
                                                spn.DenseSPNGenerator.InputDist.MIXTURE))
# Generate a dense SPN, with layer-nodes, and all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate value ops based on inf_type and log
if log:
value_op = root.get_log_value(inference_type=inf_type)
else:
value_op = root.get_value(inference_type=inf_type)
return root, spn.initialize_weights(root), value_op
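    # Note (added for clarity): dense_sing, dense_block and dense_layer are identical
    # except for the NodeType passed to DenseSPNGenerator (SINGLE, BLOCK or LAYER).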
class OpTestResult:
"""Result of a single test of a single op."""
def __init__(self, op_name, on_gpu, spn_size, tf_size, memory_used, input_dist,
setup_time, weights_init_time, run_times, output_correct):
self.op_name = op_name
self.on_gpu = on_gpu
self.spn_size = spn_size
self.tf_size = tf_size
self.memory_used = memory_used
self.input_dist = input_dist
self.setup_time = setup_time
self.weights_init_time = weights_init_time
self.run_times = run_times
self.output_correct = output_correct
class TestResults:
"""Results for a single test for multiple ops and devices."""
def __init__(self, test_name, cpu_results, gpu_results):
self.test_name = test_name
self.cpu_results = cpu_results
self.gpu_results = gpu_results
def print(self, file):
def get_header(dev):
return ("%4s %11s %9s %8s %9s %11s %11s %17s %15s %14s %10s" %
(dev, 'op', 'SPN_size', 'TF_size', 'mem_used', 'input_dist',
'setup_time', 'weights_init_time', 'first_run_time',
'rest_run_time', 'correct'))
def get_res(res):
"""Helper function printing a single result."""
return ("%16s %7d %7d %11.4f %10s %11.2f %15.2f %15.2f %14.2f %12s" %
(res.op_name, res.spn_size, res.tf_size,
(0.0 if res.memory_used is None else res.memory_used / 1000000),
res.input_dist, res.setup_time * 1000, res.weights_init_time * 1000,
res.run_times[0] * 1000, np.mean(res.run_times[1:]) * 1000,
res.output_correct))
# Print results
print1("\n-----------------------", file)
print1("%s" % self.test_name, file)
print1("-----------------------", file)
print1(get_header("CPU"), file)
for res in sorted(self.cpu_results, key=lambda x: len(x.op_name)):
            print1(get_res(res), file, (red if res.input_dist == "RAW" else green))
print1(get_header("GPU"), file)
for res in sorted(self.gpu_results, key=lambda x: len(x.op_name)):
            print1(get_res(res), file, (red if res.input_dist == "RAW" else green))
class PerformanceTest:
def __init__(self, num_input_rows, num_input_vars, num_input_vals, num_decomps,
num_subsets, num_mixtures, num_input_mixtures, balanced, num_runs,
without_cpu, without_gpu, log_devs, profile, profiles_dir, file):
self.num_input_rows = num_input_rows
self.num_input_vars = num_input_vars
self.num_input_vals = num_input_vals
self.num_decomps = num_decomps
self.num_subsets = num_subsets
self.num_mixtures = num_mixtures
self.num_input_mixtures = num_input_mixtures
self.balanced = balanced
self.num_runs = num_runs
self.without_cpu = without_cpu
self.without_gpu = without_gpu
self.log_devs = log_devs
self.profile = profile
self.profiles_dir = profiles_dir
self.file = file
self.test_failed = False
print1("Params:", file)
print1("- num_input_rows=%s" % num_input_rows, file)
print1("- num_input_vars=%s" % num_input_vars, file)
print1("- num_input_vals=%s" % num_input_vals, file)
print1("- num_decomps=%s" % num_decomps, file)
print1("- num_subsets=%s" % num_subsets, file)
print1("- num_mixtures=%s" % num_mixtures, file)
print1("- num_input_mixtures=%s" % num_input_mixtures, file)
print1("- balanced=%s" % balanced, file)
print1("- num_runs=%s" % num_runs, file)
print1("", file=file)
def _run_op_test(self, op_fun, inputs, input_dist='MIXTURE',
inf_type=spn.InferenceType.MARGINAL, log=False, on_gpu=True):
"""Run a single test for a single op."""
# Preparations
op_name = op_fun.__name__
device_name = '/gpu:0' if on_gpu else '/cpu:0'
# Print
print2("--> %s: on_gpu=%s, inputs_shape=%s, input_dist=%s, inference=%s, \
node_type=%s, log=%s"
% (op_name, on_gpu, inputs.shape, input_dist, ("MPE" if inf_type ==
spn.InferenceType.MPE else "MARGINAL"),
("SINGLE" if op_name == "dense_sing" else "BLOCK" if
op_name == "dense_block" else "LAYER"), log), self.file)
# Compute true output
true_out = float(self.num_input_rows)
# Create graph
tf.reset_default_graph()
with tf.device(device_name):
# Create input
inputs_pl = spn.IndicatorLeaf(num_vars=self.num_input_vars,
num_vals=self.num_input_vals)
# Create dense SPN
start_time = time.time()
root, init_ops, ops = op_fun(inputs_pl, self.num_decomps, self.num_subsets,
self.num_mixtures, self.num_input_mixtures,
self.balanced, input_dist, inf_type, log)
setup_time = time.time() - start_time
if on_gpu:
max_bytes_used_op = tf.contrib.memory_stats.MaxBytesInUse()
# Get num of SPN ops
spn_size = root.get_num_nodes()
# Get num of graph ops
tf_size = len(tf.get_default_graph().get_operations())
# Run op multiple times
output_correct = True
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=False,
log_device_placement=self.log_devs)) as sess:
# Initialize weights of all the sum node types in the graph
start_time = time.time()
init_ops.run()
weights_init_time = time.time() - start_time
run_times = []
# Create feed dictionary
feed = {inputs_pl: inputs}
for n in range(self.num_runs):
# Run
start_time = time.time()
out = sess.run(ops, feed_dict=feed)
run_times.append(time.time() - start_time)
# Test value only for MARGINAL inference
if inf_type == spn.InferenceType.MARGINAL:
try:
np.testing.assert_almost_equal((np.exp(out).sum() if log else
out.sum()), true_out,
decimal=2)
except AssertionError:
output_correct = False
self.test_failed = True
if on_gpu:
memory_used = sess.run(max_bytes_used_op)
else:
memory_used = None
if self.profile:
# Add additional options to trace the session execution
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
out = sess.run(ops, feed_dict=feed, options=options,
run_metadata=run_metadata)
# Create the Timeline object, and write it to a json file
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
if not os.path.exists(self.profiles_dir):
os.makedirs(self.profiles_dir)
file_name = op_name
file_name += ("_GPU_" if on_gpu else "_CPU_")
file_name += input_dist
file_name += ("_ SINGLE" if op_name == "dense_sing" else
"_BLOCK" if op_name == "dense_block" else "_LAYER")
file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
spn.InferenceType.MPE else ("_MARGINAL-LOG" if log else
"_MARGINAL")
with open('%s/timeline_value_%s.json' % (self.profiles_dir,
file_name), 'w') as f:
f.write(chrome_trace)
# Return stats
return OpTestResult(op_name, on_gpu, spn_size, tf_size, memory_used,
input_dist, setup_time, weights_init_time, run_times,
output_correct)
def _run_test(self, test_name, op_funs, inputs, inf_type, log):
"""Run a single test for multiple ops and devices."""
cpu_results = []
gpu_results = []
for op_fun in op_funs:
if not self.without_cpu:
cpu_results.append( # Input Dist = RAW
self._run_op_test(op_fun, inputs, input_dist="RAW",
inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append( # Input Dist = MIXTURE
self._run_op_test(op_fun, inputs, input_dist="MIXTURE",
inf_type=inf_type, log=log, on_gpu=False))
if not self.without_gpu:
gpu_results.append( # Input Dist = RAW
self._run_op_test(op_fun, inputs, input_dist="RAW",
inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append( # Input Dist = MIXTURE
self._run_op_test(op_fun, inputs, input_dist="MIXTURE",
inf_type=inf_type, log=log, on_gpu=True))
return TestResults(test_name, cpu_results, gpu_results)
def run(self):
"""Run all tests."""
print1("Running tests:", self.file)
results = []
inputs = np.ones((self.num_input_rows, self.num_input_vars), dtype=np.int) * -1
r = self._run_test('InferenceType: MARGINAL',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MARGINAL, log=False)
results.append(r)
r = self._run_test('InferenceType: MARGINAL-LOG',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MARGINAL, log=True)
results.append(r)
r = self._run_test('InferenceType: MPE',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MPE, log=False)
results.append(r)
r = self._run_test('InferenceType: MPE-LOG',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MPE, log=True)
results.append(r)
# Print results
for res in results:
res.print(self.file)
if self.test_failed:
print("\n ATLEAST ONE TEST FAILED!")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--num-input-rows', default=200, type=int,
help="Num of rows of inputs")
parser.add_argument('--num-input-vars', default=5, type=int,
help="Num of input variables")
parser.add_argument('--num-input-vals', default=5, type=int,
help="Num of input values per variable")
parser.add_argument('--num-decomps', default=1, type=int,
help="Num of decompositions at each level")
parser.add_argument('--num-subsets', default=5, type=int,
help="Num of subsets in each desomposition")
parser.add_argument('--num-mixtures', default=5, type=int,
help="Num of mixtures for each subset")
parser.add_argument('--num-input-mixtures', default=5, type=int,
help="Num of input mixtures")
parser.add_argument('--balanced', default=True, action='store_true',
help="Generated dense SPN is balanced between decompositions")
parser.add_argument('--num-runs', default=50, type=int,
help="Num of times each test is run")
parser.add_argument('--log-devices', action='store_true',
help="Log on which device op is run. Affects run time!")
parser.add_argument('--without-cpu', action='store_true',
help="Do not run CPU tests")
parser.add_argument('--without-gpu', action='store_true',
help="Do not run GPU tests")
parser.add_argument('--profile', default=False, action='store_true',
help="Run test one more time and profile")
parser.add_argument('--profiles-dir', default='profiles', type=str,
help="Run test one more time and profile")
parser.add_argument('--save-to', default='', type=str,
help="Save results to file")
args = parser.parse_args()
    # To ensure that the SPN graph size is consistent between 'MIXTURE' and 'RAW' networks
if args.num_input_mixtures is not None:
if args.num_input_mixtures != args.num_input_vals:
sys.exit('ERROR: num_input_mixtures must be == num_input_vals')
else:
if args.num_mixtures != args.num_input_vals:
sys.exit('ERROR: num_mixtures must be == num_input_vals')
# Open a file
f = None
if args.save_to:
f = open(args.save_to, 'w')
try:
t = PerformanceTest(args.num_input_rows, args.num_input_vars,
args.num_input_vals, args.num_decomps, args.num_subsets,
args.num_mixtures, args.num_input_mixtures, args.balanced,
args.num_runs, args.without_cpu, args.without_gpu,
args.log_devices, args.profile, args.profiles_dir, f)
t.run()
finally:
if f is not None:
f.close()
if __name__ == '__main__':
main()
|
the-stack_0_25360
|
from NENV import *
import symtable
class NodeBase(Node):
pass
class Symtable_Node(NodeBase):
"""
"""
title = 'symtable'
type_ = 'symtable'
init_inputs = [
NodeInputBP(label='code'),
NodeInputBP(label='filename'),
NodeInputBP(label='compile_type'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, symtable.symtable(self.input(0), self.input(1), self.input(2)))
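# For reference, the wrapped call signature is symtable.symtable(code, filename,
# compile_type), e.g. symtable.symtable("a = 1", "<string>", "exec") returns the
# module-level SymbolTable; the node simply forwards its three inputs to that call.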
export_nodes(
Symtable_Node,
)
|
the-stack_0_25361
|
#!/usr/bin/env python
import os
import tempfile, yaml
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
json_tex = {
"project_name": "{{ cookiecutter.project_name }}",
"author_name": "{{ cookiecutter.author_name }}",
"email": "{{ cookiecutter.email }}",
"git_host_username": "{{ cookiecutter.git_host_username }}",
"repo_name": "{{ cookiecutter.project_name }}",
"description": "{{ cookiecutter.description }}",
"_copy_without_render": ["custom.sty"]
}
conf = {}
conf["default_context"] = json_tex
config_tex, tmp_path = tempfile.mkstemp("latex.yml")
try:
with open(config_tex, "wb") as tmp:
tmp.write(yaml.dump(conf).encode("utf-8"))
finally:
textmp_path = "https://github.com/benvial/textmp"
# https://github.com/benvial/jupyter_tools
os.system("cookiecutter -o tmp -f --no-input --config-file " + tmp_path + " " + textmp_path)
os.remove(tmp_path)
try:
os.system("rm -rf {}/reports/latex".format(PROJECT_DIRECTORY))
except OSError:
pass
os.system("mv tmp/{{ cookiecutter.project_name }} {}/reports/latex".format(PROJECT_DIRECTORY))
os.system("rm -rf tmp")
os.system("git clone https://github.com/benvial/jupyter_tools")
os.system("mv jupyter_tools notebooks")
os.system("rm -rf notebooks/.git notebooks/.gitignore" )
|
the-stack_0_25362
|
"""
The ska_tmc_cdm.schemas.subarray_node.scan module contains Marshmallow schema
that map ska_tmc_cdm.schemas.subarray_node.scan message classes to/from JSON.
"""
from marshmallow import fields, post_load, post_dump
from ska_tmc_cdm.messages.subarray_node.scan import ScanRequest
from ska_tmc_cdm.schemas import CODEC
from ska_tmc_cdm.schemas.shared import ValidatingSchema
__all__ = ["ScanRequestSchema"]
@CODEC.register_mapping(ScanRequest)
class ScanRequestSchema(ValidatingSchema): # pylint: disable=too-few-public-methods
"""
ScanRequestSchema is the Marshmallow schema that marshals a ScanRequest
to/from JSON.
"""
# Message metadata and tracing fields ------------------------------------
# schema ID, e.g., https://schema.skao.int/ska-tmc-scan/1.0
interface = fields.String()
# optional transaction ID, used to trace commands through the system
transaction_id = fields.String(required=False)
# Message content fields -------------------------------------------------
# holds numeric scan ID
scan_id = fields.Integer()
@post_load
def create_scanrequest(self, data, **_): # pylint: disable=no-self-use
"""
Convert parsed JSON back into a ScanRequest
:param data: dict containing parsed JSON values
:param _: kwargs passed by Marshmallow
:return: ScanRequest instance populated to match JSON
"""
interface = data["interface"]
transaction_id = data.get("transaction_id", None)
scan_id = data["scan_id"]
return ScanRequest(
interface=interface,
transaction_id=transaction_id,
scan_id=scan_id
)
@post_dump
def filter_nulls(self, data, **_):
"""
Filter out null values from JSON.
:param data: Marshmallow-provided dict containing parsed object values
:param _: kwargs passed by Marshmallow
:return: dict suitable for SubArrayNode configuration
"""
# filter out nulls
data = {k: v for k, v in data.items() if v is not None}
return data
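# A minimal usage sketch (illustrative; the JSON payload below is hypothetical):
#
#     schema = ScanRequestSchema()
#     request = schema.loads('{"interface": "https://schema.skao.int/ska-tmc-scan/1.0", '
#                            '"scan_id": 1}')
#     json_str = schema.dumps(request)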
|
the-stack_0_25366
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from ambari_commons import OSCheck
from resource_management import get_bare_principal
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
# Local Imports
from status_params import *
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.expect import expect
def configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled):
"""
Return a dictionary of additional configs to merge if Atlas HA is enabled.
:param atlas_hosts: List of hostnames that contain Atlas
:param metadata_port: Port number
:param is_atlas_ha_enabled: None, True, or False
:return: Dictionary with additional configs to merge to application-properties if HA is enabled.
"""
additional_props = {}
if atlas_hosts is None or len(atlas_hosts) == 0 or metadata_port is None:
return additional_props
# Sort to guarantee each host sees the same values, assuming restarted at the same time.
atlas_hosts = sorted(atlas_hosts)
# E.g., id1,id2,id3,...,idn
_server_id_list = ["id" + str(i) for i in range(1, len(atlas_hosts) + 1)]
atlas_server_ids = ",".join(_server_id_list)
additional_props["atlas.server.ids"] = atlas_server_ids
i = 0
for curr_hostname in atlas_hosts:
id = _server_id_list[i]
prop_name = "atlas.server.address." + id
prop_value = curr_hostname + ":" + metadata_port
additional_props[prop_name] = prop_value
i += 1
# This may override the existing property
if i == 1 or (i > 1 and is_atlas_ha_enabled is False):
additional_props["atlas.server.ha.enabled"] = "false"
elif i > 1:
additional_props["atlas.server.ha.enabled"] = "true"
return additional_props
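# For example (hypothetical hosts), configs_for_ha(["a1.example.com", "a2.example.com"],
# "21000", True) returns:
#   atlas.server.ids          = "id1,id2"
#   atlas.server.address.id1  = "a1.example.com:21000"
#   atlas.server.address.id2  = "a2.example.com:21000"
#   atlas.server.ha.enabled   = "true"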
# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
java_version = expect("/hostLevelParams/java_version", int)
if security_enabled:
_hostname_lowercase = config['hostname'].lower()
_atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
# stack version
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
metadata_home = os.environ['METADATA_HOME_DIR'] if 'METADATA_HOME_DIR' in os.environ else format('{stack_root}/current/atlas-server')
metadata_bin = format("{metadata_home}/bin")
python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
metadata_start_script = format("{metadata_bin}/atlas_start.py")
metadata_stop_script = format("{metadata_bin}/atlas_stop.py")
# metadata local directory structure
log_dir = config['configurations']['atlas-env']['metadata_log_dir']
# service locations
hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf") if 'HADOOP_HOME' in os.environ else '/etc/hadoop/conf'
# some commands may need to supply the JAAS location when running as atlas
atlas_jaas_file = format("{conf_dir}/atlas_jaas.conf")
# user
user_group = config['configurations']['cluster-env']['user_group']
# metadata env
java64_home = config['hostLevelParams']['java_home']
env_sh_template = config['configurations']['atlas-env']['content']
# credential provider
credential_provider = format( "jceks://file@{conf_dir}/atlas-site.jceks")
# command line args
ssl_enabled = default("/configurations/application-properties/atlas.enableTLS", False)
http_port = default("/configurations/application-properties/atlas.server.http.port", "21000")
https_port = default("/configurations/application-properties/atlas.server.https.port", "21443")
if ssl_enabled:
metadata_port = https_port
metadata_protocol = 'https'
else:
metadata_port = http_port
metadata_protocol = 'http'
metadata_host = config['hostname']
atlas_hosts = sorted(default('/clusterHostInfo/atlas_server_hosts', []))
metadata_server_host = atlas_hosts[0] if len(atlas_hosts) > 0 else "UNKNOWN_HOST"
# application properties
application_properties = dict(config['configurations']['application-properties'])
application_properties["atlas.server.bind.address"] = metadata_host
if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
metadata_server_url = application_properties["atlas.rest.address"]
else:
# In HDP 2.3 and 2.4 the property was computed and saved to the local config but did not exist in the database.
metadata_server_url = format('{metadata_protocol}://{metadata_server_host}:{metadata_port}')
application_properties["atlas.rest.address"] = metadata_server_url
# Atlas HA should populate
# atlas.server.ids = id1,id2,...,idn
# atlas.server.address.id# = host#:port
# User should not have to modify this property, but still allow overriding it to False if multiple Atlas servers exist
# This can be None, True, or False
is_atlas_ha_enabled = default("/configurations/application-properties/atlas.server.ha.enabled", None)
additional_ha_props = configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled)
for k,v in additional_ha_props.iteritems():
application_properties[k] = v
metadata_env_content = config['configurations']['atlas-env']['content']
metadata_opts = config['configurations']['atlas-env']['metadata_opts']
metadata_classpath = config['configurations']['atlas-env']['metadata_classpath']
data_dir = format("{stack_root}/current/atlas-server/data")
expanded_war_dir = os.environ['METADATA_EXPANDED_WEBAPP_DIR'] if 'METADATA_EXPANDED_WEBAPP_DIR' in os.environ else format("{stack_root}/current/atlas-server/server/webapp")
metadata_log4j_content = config['configurations']['atlas-log4j']['content']
metadata_solrconfig_content = default("/configurations/atlas-solrconfig/content", None)
atlas_log_level = config['configurations']['atlas-log4j']['atlas_log_level']
audit_log_level = config['configurations']['atlas-log4j']['audit_log_level']
# smoke test
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smoke_test_password = 'smoke'
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
security_check_status_file = format('{log_dir}/security_check.status')
if security_enabled:
smoke_cmd = format('curl -k --negotiate -u : -b ~/cookiejar.txt -c ~/cookiejar.txt -s -o /dev/null -w "%{{http_code}}" {metadata_protocol}://{metadata_host}:{metadata_port}/')
else:
smoke_cmd = format('curl -k -s -o /dev/null -w "%{{http_code}}" {metadata_protocol}://{metadata_host}:{metadata_port}/')
# hbase
hbase_conf_dir = "/etc/hbase/conf"
atlas_search_backend = default("/configurations/application-properties/atlas.graph.index.search.backend", "")
search_backend_solr = atlas_search_backend.startswith('solr')
# infra solr
infra_solr_znode = default("/configurations/infra-solr-env/infra_solr_znode", None)
infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
infra_solr_replication_factor = 2 if len(infra_solr_hosts) > 1 else 1
atlas_solr_shards = default("/configurations/atlas-env/atlas_solr-shards", 1)
has_infra_solr = len(infra_solr_hosts) > 0
# zookeeper
zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
# build a comma-separated host:port list of zookeeper hosts from clusterHostInfo
index = 0
zookeeper_quorum = ""
for host in zookeeper_hosts:
zookeeper_host = host
if zookeeper_port is not None:
zookeeper_host = host + ":" + str(zookeeper_port)
zookeeper_quorum += zookeeper_host
index += 1
if index < len(zookeeper_hosts):
zookeeper_quorum += ","
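# e.g. with zookeeper_hosts = ["zk1.example.com", "zk2.example.com"] and clientPort 2181
# (hypothetical values), zookeeper_quorum becomes "zk1.example.com:2181,zk2.example.com:2181"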
# Atlas Ranger plugin configurations
stack_supports_atlas_ranger_plugin = check_stack_feature(StackFeature.ATLAS_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
retry_enabled = default("/commandParams/command_retry_enabled", False)
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
enable_ranger_atlas = False
atlas_server_xmx = default("configurations/atlas-env/atlas_server_xmx", 2048)
atlas_server_max_new_size = default("configurations/atlas-env/atlas_server_max_new_size", 614)
hbase_master_hosts = default('/clusterHostInfo/hbase_master_hosts', [])
has_hbase_master = not len(hbase_master_hosts) == 0
ranger_admin_hosts = default('/clusterHostInfo/ranger_admin_hosts', [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
atlas_hbase_setup = format("{exec_tmp_dir}/atlas_hbase_setup.rb")
atlas_kafka_setup = format("{exec_tmp_dir}/atlas_kafka_acl.sh")
atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
hbase_user_keytab = default('/configurations/hbase-env/hbase_user_keytab', None)
hbase_principal_name = default('/configurations/hbase-env/hbase_principal_name', None)
enable_ranger_hbase = False
# ToDo: Kafka port to Atlas
# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
hosts_with_kafka = default('/clusterHostInfo/kafka_broker_hosts', [])
host_with_kafka = hostname in hosts_with_kafka
ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
has_ranger_tagsync = len(ranger_tagsync_hosts) > 0
rangertagsync_user = "rangertagsync"
kafka_keytab = default('/configurations/kafka-env/kafka_keytab', None)
kafka_principal_name = default('/configurations/kafka-env/kafka_principal_name', None)
default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_home = os.path.join(stack_root, "current", "kafka-broker")
kafka_conf_dir = os.path.join(kafka_home, "config")
kafka_zk_endpoint = default("/configurations/kafka-broker/zookeeper.connect", None)
kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
(config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
if security_enabled and stack_version_formatted != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] \
and check_stack_feature(StackFeature.KAFKA_KERBEROS, stack_version_formatted):
_hostname_lowercase = config['hostname'].lower()
_kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
kafka_jaas_principal = _kafka_principal_name.replace('_HOST', _hostname_lowercase)
kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
kafka_kerberos_params = "-Djava.security.auth.login.config={0}/kafka_jaas.conf".format(kafka_conf_dir)
else:
kafka_kerberos_params = ''
kafka_jaas_principal = None
kafka_keytab_path = None
if has_ranger_admin and stack_supports_atlas_ranger_plugin:
# for create_hdfs_directory
namenode_host = set(default("/clusterHostInfo/namenode_host", []))
has_namenode = not len(namenode_host) == 0
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
#create partial functions with common arguments for every HdfsResource call
#to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user = hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
repo_name = str(config['clusterName']) + '_atlas'
ssl_keystore_password = unicode(config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'])
ssl_truststore_password = unicode(config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'])
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
xa_audit_hdfs_is_enabled = default('/configurations/ranger-atlas-audit/xasecure.audit.destination.hdfs', False)
enable_ranger_atlas = config['configurations']['ranger-atlas-plugin-properties']['ranger-atlas-plugin-enabled']
enable_ranger_atlas = not is_empty(enable_ranger_atlas) and enable_ranger_atlas.lower() == 'yes'
enable_ranger_hbase = config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled']
enable_ranger_hbase = not is_empty(enable_ranger_hbase) and enable_ranger_hbase.lower() == 'yes'
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
downloaded_custom_connector = None
driver_curl_source = None
driver_curl_target = None
ranger_env = config['configurations']['ranger-env']
ranger_plugin_properties = config['configurations']['ranger-atlas-plugin-properties']
ranger_atlas_audit = config['configurations']['ranger-atlas-audit']
ranger_atlas_audit_attrs = config['configuration_attributes']['ranger-atlas-audit']
ranger_atlas_security = config['configurations']['ranger-atlas-security']
ranger_atlas_security_attrs = config['configuration_attributes']['ranger-atlas-security']
ranger_atlas_policymgr_ssl = config['configurations']['ranger-atlas-policymgr-ssl']
ranger_atlas_policymgr_ssl_attrs = config['configuration_attributes']['ranger-atlas-policymgr-ssl']
policy_user = config['configurations']['ranger-atlas-plugin-properties']['policy_user']
atlas_repository_configuration = {
'username' : config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
'password' : unicode(config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
'atlas.rest.address' : metadata_server_url,
'commonNameForCertificate' : config['configurations']['ranger-atlas-plugin-properties']['common.name.for.certificate'],
'ambari.service.check.user' : policy_user
}
if security_enabled:
atlas_repository_configuration['policy.download.auth.users'] = metadata_user
atlas_repository_configuration['tag.download.auth.users'] = metadata_user
atlas_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': atlas_repository_configuration,
'description': 'atlas repo',
'name': repo_name,
'type': 'atlas',
}
|
the-stack_0_25367
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-Head Attention layer definition."""
import math
from typing import Optional
from typing import Tuple
import paddle
from paddle import nn
from paddle.nn import initializer as I
__all__ = ["MultiHeadedAttention", "RelPositionMultiHeadedAttention"]
# Relative Positional Encodings
# https://www.jianshu.com/p/c0608efcc26f
# https://zhuanlan.zhihu.com/p/344604604
class MultiHeadedAttention(nn.Layer):
"""Multi-Head Attention layer."""
def __init__(self, n_head: int, n_feat: int, dropout_rate: float):
"""Construct an MultiHeadedAttention object.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
super().__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self,
query: paddle.Tensor,
key: paddle.Tensor,
value: paddle.Tensor
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Transform query, key and value.
Args:
query (paddle.Tensor): Query tensor (#batch, time1, size).
key (paddle.Tensor): Key tensor (#batch, time2, size).
value (paddle.Tensor): Value tensor (#batch, time2, size).
Returns:
paddle.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
paddle.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
paddle.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose([0, 2, 1, 3]) # (batch, head, time1, d_k)
k = k.transpose([0, 2, 1, 3]) # (batch, head, time2, d_k)
v = v.transpose([0, 2, 1, 3]) # (batch, head, time2, d_k)
return q, k, v
def forward_attention(self,
value: paddle.Tensor,
scores: paddle.Tensor,
mask: Optional[paddle.Tensor]) -> paddle.Tensor:
"""Compute attention context vector.
Args:
value (paddle.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (paddle.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (paddle.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
paddle.Tensor: Transformed value weighted
by the attention score, (#batch, time1, d_model).
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
scores = scores.masked_fill(mask, -float('inf'))
attn = paddle.softmax(
scores, axis=-1).masked_fill(mask,
0.0) # (batch, head, time1, time2)
else:
attn = paddle.softmax(
scores, axis=-1) # (batch, head, time1, time2)
p_attn = self.dropout(attn)
x = paddle.matmul(p_attn, value) # (batch, head, time1, d_k)
x = x.transpose([0, 2, 1, 3]).contiguous().view(
n_batch, -1, self.h * self.d_k) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self,
query: paddle.Tensor,
key: paddle.Tensor,
value: paddle.Tensor,
mask: Optional[paddle.Tensor]) -> paddle.Tensor:
"""Compute scaled dot product attention.
Args:
            query (paddle.Tensor): Query tensor (#batch, time1, size).
            key (paddle.Tensor): Key tensor (#batch, time2, size).
            value (paddle.Tensor): Value tensor (#batch, time2, size).
            mask (paddle.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
        Returns:
            paddle.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = paddle.matmul(q,
k.transpose([0, 1, 3, 2])) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
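# A minimal usage sketch (illustrative; shapes, dtypes and values are made up):
#
#     attn = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.1)
#     x = paddle.randn([8, 100, 256])              # (#batch, time, size)
#     mask = paddle.ones([8, 1, 100], dtype='bool')
#     out = attn(x, x, x, mask)                    # self-attention -> (#batch, time, 256)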
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding."""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
super().__init__(n_head, n_feat, dropout_rate)
# linear transformation for positional encoding
self.linear_pos = nn.Linear(n_feat, n_feat, bias_attr=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
#self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
#self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
#torch.nn.init.xavier_uniform_(self.pos_bias_u)
#torch.nn.init.xavier_uniform_(self.pos_bias_v)
pos_bias_u = self.create_parameter(
[self.h, self.d_k], default_initializer=I.XavierUniform())
self.add_parameter('pos_bias_u', pos_bias_u)
pos_bias_v = self.create_parameter(
(self.h, self.d_k), default_initializer=I.XavierUniform())
self.add_parameter('pos_bias_v', pos_bias_v)
def rel_shift(self, x, zero_triu: bool=False):
"""Compute relative positinal encoding.
Args:
x (paddle.Tensor): Input tensor (batch, head, time1, time1).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
paddle.Tensor: Output tensor. (batch, head, time1, time1)
"""
zero_pad = paddle.zeros(
(x.size(0), x.size(1), x.size(2), 1), dtype=x.dtype)
x_padded = paddle.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(x.size(0), x.size(1), x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x) # [B, H, T1, T1]
if zero_triu:
ones = paddle.ones((x.size(2), x.size(3)))
x = x * paddle.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self,
query: paddle.Tensor,
key: paddle.Tensor,
value: paddle.Tensor,
pos_emb: paddle.Tensor,
mask: Optional[paddle.Tensor]):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (paddle.Tensor): Query tensor (#batch, time1, size).
key (paddle.Tensor): Key tensor (#batch, time2, size).
value (paddle.Tensor): Value tensor (#batch, time2, size).
pos_emb (paddle.Tensor): Positional embedding tensor
(#batch, time1, size).
mask (paddle.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
paddle.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose([0, 2, 1, 3]) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose([0, 2, 1, 3]) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose([0, 2, 1, 3])
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose([0, 2, 1, 3])
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = paddle.matmul(q_with_bias_u, k.transpose([0, 1, 3, 2]))
# compute matrix b and matrix d
# (batch, head, time1, time2)
matrix_bd = paddle.matmul(q_with_bias_v, p.transpose([0, 1, 3, 2]))
# Remove rel_shift since it is useless in speech recognition,
# and it requires special attention for streaming.
# matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(
self.d_k) # (batch, head, time1, time2)
return self.forward_attention(v, scores, mask)
|
the-stack_0_25368
|
import json
import logging
from pandas import DataFrame, read_excel
from .gcs_to_bq_util import append_dataframe_to_bq
# This is implicitly depended on by pandas.read_excel
import xlrd # noqa: F401
from google.cloud import storage
_STATE_NAMES = [
"Alabama",
"Alaska",
"Arizona",
"Arkansas",
"California",
"Colorado",
"Connecticut",
"Delaware",
"Florida",
"Georgia",
"Hawaii",
"Idaho",
"Illinois",
"Indiana",
"Iowa",
"Kansas",
"Kentucky",
"Louisiana",
"Maine",
"Maryland",
"Massachusetts",
"Michigan",
"Minnesota",
"Mississippi",
"Missouri",
"Montana",
"Nebraska",
"Nevada",
"New Hampshire",
"New Jersey",
"New Mexico",
"New York",
"North Carolina",
"North Dakota",
"Ohio",
"Oklahoma",
"Oregon",
"Pennsylvania",
"Rhode Island",
"South Carolina",
"South Dakota",
"Tennessee",
"Texas",
"Utah",
"Vermont",
"Virginia",
"Washington",
"West Virginia",
"Wisconsin",
"Wyoming"
]
_FILEPATH = '{}-{}.xlsx'
def write_primary_care_access_to_bq(dataset, table_name, gcs_bucket, fileprefix):
"""Writes primary care access stats to BigQuery from bucket
dataset: The BigQuery dataset to write to
    table_name: The name of the BigQuery table to write to
gcs_bucket: The name of the gcs bucket to read the data from
fileprefix: The prefix of the files in the gcs landing bucket to read from"""
client = storage.Client()
bucket = client.get_bucket(gcs_bucket)
for state_name in _STATE_NAMES:
filename = _FILEPATH.format(fileprefix, state_name)
blob = bucket.blob(filename)
local_path = '/tmp/{}'.format(filename)
blob.download_to_filename(local_path)
try:
frame = read_excel(
io=local_path, sheet_name='Ranked Measure Data', skiprows=[0, 1])
data = []
for _, row in frame.iterrows():
data.append([row[0], row[1], row[2],
row[108], row[109], row[110]])
new_dataframe = DataFrame(
data=data,
columns=('county_fips_code',
'state_name',
'county_name',
'num_primary_care_physicians',
'primary_care_physicians_rate',
'primary_care_physicians_ratio'))
column_types = {
'county_fips_code': 'STRING',
'state_name': 'STRING',
'county_name': 'STRING',
'num_primary_care_physicians': 'FLOAT64',
'primary_care_physicians_rate': 'FLOAT64',
'primary_care_physicians_ratio': 'STRING'
}
append_dataframe_to_bq(
new_dataframe, dataset, table_name, column_types=column_types)
except json.JSONDecodeError as err:
msg = 'Unable to write to BigQuery due to improperly formatted data: {}'
logging.error(msg.format(err))
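# A minimal usage sketch (illustrative; dataset, table, bucket and prefix names are
# hypothetical). Files are expected in the bucket as '<fileprefix>-<State Name>.xlsx':
#
#     write_primary_care_access_to_bq(
#         dataset='health_data',
#         table_name='primary_care_access',
#         gcs_bucket='my-landing-bucket',
#         fileprefix='county_health_rankings')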
|
the-stack_0_25370
|
import pytest
from whoiscache.state import WhoisCacheState
from whoiscache import types as T
def test_macros():
"""Test macro state updates"""
state = WhoisCacheState()
add1 = ("ADD", '2', T.Macro(name='A', members=['a', 'b']))
add2 = ("ADD", '3', T.Macro(name='B', members=['b', 'c']))
state.apply_update(add1)
state.apply_update(add2)
expected = {
'A': set('ab'),
'B': set('bc'),
}
assert state.macros == expected
assert state.serial == '3'
# Update
state.apply_update(("DEL", '4', T.Macro(name='B', members=[])))
# Check results
expected = {
'A': set('ab'),
}
assert state.macros == expected
assert state.serial == '4'
def test_prefix4():
"""Test IPv4 prefixes updates"""
state = WhoisCacheState()
state.apply_update(("ADD", '1', T.Route(prefix='abc', origin='asn1')))
state.apply_update(("ADD", '2', T.Route(prefix='bcd', origin='asn2')))
state.apply_update(("ADD", '3', T.Route(prefix='def', origin='asn1')))
state.apply_update(("DEL", '4', T.Route(prefix='abc', origin='asn1')))
expected = {
'asn1': set(['def']),
'asn2': set(['bcd']),
}
assert state.prefix4 == expected
# Apply update
state.apply_update(("DEL", '5', T.Route(prefix='bcd', origin='asn2')))
expected = {
'asn1': set(['def']),
}
assert state.prefix4 == expected
def test_prefix6():
"""Test IPv6 prefixes updates"""
state = WhoisCacheState()
state.apply_update(("ADD", '1', T.Route6(prefix='abc', origin='asn1')))
state.apply_update(("ADD", '2', T.Route6(prefix='bcd', origin='asn2')))
state.apply_update(("ADD", '3', T.Route6(prefix='def', origin='asn1')))
state.apply_update(("DEL", '4', T.Route6(prefix='abc', origin='asn1')))
expected = {
'asn1': set(['def']),
'asn2': set(['bcd']),
}
assert state.prefix6 == expected
# Apply update
state.apply_update(("DEL", '5', T.Route6(prefix='bcd', origin='asn2')))
expected = {
'asn1': set(['def']),
}
assert state.prefix6 == expected
|
the-stack_0_25373
|
import re
from selenium.common import exceptions
from selenium import webdriver
from selenium.webdriver.common.by import By
initial_data = {
"product0": ('Cursor', 15, 0.1),
"product1": ('Grandma', 100, 1),
"product2": ('Farm', 1100, 8),
"product3": ('Mine', 12000, 47),
"product4": ('Factory', 130000, 260),
"product5": ('Bank', 1.4 * (10 ** 6), 1400),
"product6": ('Temple', 20 * (10 ** 6), 7800),
"product7": ('Wizard tower', 330 * (10 ** 6), 44000),
"product8": ('Shipment', 51 * (10 ** 8), 260000),
"product9": ('Alchemy lab', 75 * (10 ** 9), 16 * (10 ** 5)),
"product10": ('Portal', 1 * (10 ** 12), 10 * (10 ** 6)),
"product11": ('Time machine', 14 * (10 ** 12), 65 * (10 ** 6)),
"product12": ('Antimatter condenser', 170 * (10 ** 12), 430 * (10 ** 6)),
"product13": ('Prism', 21 * (10 ** 14), 29 * (10 ** 8)),
"product14": ('Chancemaker', 26 * (10 ** 15), 21 * (10 ** 9)),
"product15": ('Fractal engine', 310 * (10 ** 15), 150 * (10 ** 9)),
"product16": ('Javascript console', 71 * (10 ** 18), 11 * (10 ** 11)),
"product17": ('Idleverse', 12 * (10 ** 21), 83 * (10 ** 11))
}
MULTIPLIERS = {
'million': 10 ** 6,
'billion': 10 ** 9,
'trillion': 10 ** 12,
'quadrillion': 10 ** 15,
'quintillion': 10 ** 18,
'sextillion': 10 ** 21,
'septillion': 10 ** 24,
}
def multiply_by_name(money_str: str):
try:
money_str = money_str.replace(",", "")
money_str = money_str.replace("\n", " ")
money_arr = money_str.split(" ")
if len(money_arr) > 1:
if "ion" in money_arr[1]:
money = float(money_arr[0]) * MULTIPLIERS[money_arr[1]]
else:
money = float(money_arr[0])
else:
money = float(money_str)
return round(money, 1)
    except (ValueError, TypeError, KeyError):
print(f"ERROR: Couldn't convert string {money_str} into float!")
return 10 ** 10
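# Worked examples (values follow the MULTIPLIERS table above):
#   multiply_by_name("1.5 million")  -> 1500000.0
#   multiply_by_name("123,456")      -> 123456.0
#   multiply_by_name("not a number") -> 10 ** 10 (fallback after the error is printed)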
class Product:
def __init__(self, product, driver, wallet):
self.driver = driver
self.product = product
self.product_id = product.get_attribute('id')
self.type = 'building'
self.name = initial_data[self.product_id][0]
self.base_price = initial_data[self.product_id][1]
self.base_cps = initial_data[self.product_id][2]
self.wallet = wallet
self.owned = self.get_owned()
self.price = self.get_price()
self.cps_per_one = self.base_cps
self.cps_total = 0
self.text = ""
self.multiplier = 100 # Value in % is taken from "Stats" menu. Increases base_cps of buildings.
self.ratio = None
self.update_data()
def __repr__(self):
return f"* * * * * BUILDING * * * * *\n" \
f"Name: {self.name} | Owned: {self.owned} | Price: {self.price}\n" \
f"Ratio: {self.ratio} | TOTAL CpS {self.cps_total} | CpS one {self.cps_per_one}"
def buy(self):
self.product.click()
self.update_data()
def get_name(self):
return self.name
def update_data(self):
self.owned = self.get_owned()
self.multiplier = self.wallet.income_multiplier
self.price = self.get_price()
self.text = self.get_data()
if self.text == "No data":
# Not purchased yet. Need to adjust income according to multiplier in Stats menu.
self.cps_per_one *= self.multiplier / 100
else:
try:
data_split = self.text.split("\n")
# process first line of tooltip text.
pattern_one = re.compile(r'each\s[\w\s]*\sprod\w*\s(\d{1,3}[.,]?\d*\s?[\w]*)\scook\w*')
findings = pattern_one.finditer(data_split[0])
match = ""
for find in findings:
match = find.group(1)
self.cps_per_one = multiply_by_name(match)
# process second line of tooltip text.
pattern_total = re.compile(
r'\s(\d{1,3}[.,]?\d*\s?\w*i?o?n?)\s[\w\s]*\spro\w*\s(\d{1,3}[,.]?\d*\s?\w*)\scook\w*'
)
findings = pattern_total.finditer(data_split[1])
match_cookies = ""
for find in findings:
match_cookies = find.group(2)
self.cps_total = multiply_by_name(match_cookies)
except IndexError:
print(f"ERROR: Couldn't read data. Probably mouse movement caused distraction.")
# After values of have changed get updated value for price/cps ratio.
self.update_ratio()
def update_ratio(self):
self.ratio = self.price / self.cps_per_one
return self.ratio
def get_data(self):
data_text = "No data"
if not self.owned:
return "No data"
else:
self.mouse_over(self.product)
try:
data_text = self.driver.find_element(by=By.CSS_SELECTOR, value='#tooltip .data').text
except (exceptions.NoSuchElementException, exceptions.StaleElementReferenceException):
print(f"ERROR: Couldn't read tooltip data. Probably mouse interactions caused distraction.")
finally:
return data_text
def mouse_over(self, element):
try:
webdriver.ActionChains(self.driver).move_to_element(element).perform()
except (exceptions.NoSuchElementException, exceptions.StaleElementReferenceException):
print(f"ERROR in product.Product: Could not find or :hover over BUILDING {self.name}")
# Data for price and amount of owned is presented by lines in the form:
# Product name
# price
# owned
def get_price(self):
info_arr = [value for value in self.product.text.replace(",", "").split("\n")]
try:
price_str = info_arr[1]
price = multiply_by_name(price_str)
except IndexError:
return self.base_price
else:
return price
def get_owned(self):
info_arr = self.product.text.split("\n")
try:
owned = float(info_arr[2])
except IndexError:
return 0
else:
return owned
# No need to get description. There is nothing useful in that text.
# def get_description(self):
# if not self.owned:
# return "No description"
# else:
# self.mouse_over(self.product)
# try:
# tooltip_description_text = self.driver.find_element(By.CSS_SELECTOR, '#tooltip .description').text
# except (exceptions.NoSuchElementException, exceptions.StaleElementReferenceException):
# return "No description"
# else:
# return tooltip_description_text
|
the-stack_0_25374
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import os
import random
import warnings
from multiprocessing import cpu_count
import numpy as np
from scipy.stats import mode, pearsonr
from sklearn.metrics import (
confusion_matrix,
label_ranking_average_precision_score,
matthews_corrcoef,
mean_squared_error,
)
from tqdm.auto import tqdm, trange
import pandas as pd
import torch
from simpletransformers.classification.classification_utils import InputExample, convert_examples_to_features
from simpletransformers.classification.transformer_models.albert_model import AlbertForSequenceClassification
from simpletransformers.classification.transformer_models.bert_model import BertForSequenceClassification
from simpletransformers.classification.transformer_models.camembert_model import CamembertForSequenceClassification
from simpletransformers.classification.transformer_models.distilbert_model import DistilBertForSequenceClassification
from simpletransformers.classification.transformer_models.flaubert_model import FlaubertForSequenceClassification
from simpletransformers.classification.transformer_models.roberta_model import RobertaForSequenceClassification
from simpletransformers.classification.transformer_models.xlm_model import XLMForSequenceClassification
from simpletransformers.classification.transformer_models.xlm_roberta_model import XLMRobertaForSequenceClassification
from simpletransformers.classification.transformer_models.xlnet_model import XLNetForSequenceClassification
from simpletransformers.config.global_args import global_args
from simpletransformers.classification.classification_utils import LazyClassificationDataset
from simpletransformers.custom_models.models import ElectraForSequenceClassification
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertTokenizer,
BertConfig,
BertTokenizer,
CamembertConfig,
CamembertTokenizer,
DistilBertConfig,
DistilBertTokenizer,
ElectraConfig,
ElectraTokenizer,
FlaubertConfig,
FlaubertTokenizer,
LongformerConfig,
LongformerForSequenceClassification,
LongformerTokenizer,
RobertaConfig,
RobertaTokenizer,
XLMConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
class ClassificationModel:
def __init__(
self, model_type, model_name, num_labels=None, weight=None, args=None, use_cuda=True, cuda_device=-1, **kwargs,
):
"""
Initializes a ClassificationModel model.
Args:
model_type: The type of model (bert, xlnet, xlm, roberta, distilbert)
model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
num_labels (optional): The number of labels or classes in the dataset.
weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
MODEL_CLASSES = {
"albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"camembert": (CamembertConfig, CamembertForSequenceClassification, CamembertTokenizer),
"distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer),
"flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
"longformer": (LongformerConfig, LongformerForSequenceClassification, LongformerTokenizer),
"roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
"xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
"xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
"xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
}
if args and "manual_seed" in args:
random.seed(args["manual_seed"])
np.random.seed(args["manual_seed"])
torch.manual_seed(args["manual_seed"])
if "n_gpu" in args and args["n_gpu"] > 0:
torch.cuda.manual_seed_all(args["manual_seed"])
self.args = {
"sliding_window": False,
"tie_value": 1,
"stride": 0.8,
"regression": False,
"lazy_text_column": 0,
"lazy_text_a_column": None,
"lazy_text_b_column": None,
"lazy_labels_column": 1,
"lazy_header_row": True,
"lazy_delimiter": "\t",
}
self.args.update(global_args)
saved_model_args = self._load_model_args(model_name)
if saved_model_args:
self.args.update(saved_model_args)
if args:
self.args.update(args)
config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
if num_labels:
self.config = config_class.from_pretrained(model_name, num_labels=num_labels, **self.args["config"])
self.num_labels = num_labels
else:
self.config = config_class.from_pretrained(model_name, **self.args["config"])
self.num_labels = self.config.num_labels
self.weight = weight
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
" Make sure CUDA is available or set use_cuda=False."
)
else:
self.device = "cpu"
if self.weight:
self.model = model_class.from_pretrained(
model_name, config=self.config, weight=torch.Tensor(self.weight).to(self.device), **kwargs,
)
else:
self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)
self.results = {}
if not use_cuda:
self.args["fp16"] = False
self.tokenizer = tokenizer_class.from_pretrained(
model_name, do_lower_case=self.args["do_lower_case"], **kwargs
)
self.args["model_name"] = model_name
self.args["model_type"] = model_type
if model_type in ["camembert", "xlmroberta"]:
warnings.warn(
f"use_multiprocessing automatically disabled as {model_type}"
" fails when using multiprocessing for feature conversion."
)
self.args["use_multiprocessing"] = False
if self.args["wandb_project"] and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args["wandb_project"] = None
def train_model(
self,
train_df,
multi_label=False,
output_dir=None,
show_running_loss=True,
args=None,
eval_df=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_df'
Args:
train_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be trained on this Dataframe.
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_df (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update(args)
if self.args["silent"]:
show_running_loss = False
if self.args["evaluate_during_training"] and eval_df is None:
raise ValueError(
"evaluate_during_training is enabled but eval_df is not specified."
" Pass eval_df to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args["output_dir"]
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args["overwrite_output_dir"]:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Use --overwrite_output_dir to overcome.".format(output_dir)
)
self._move_model_to_device()
if isinstance(train_df, str):
if self.args["sliding_window"]:
raise ValueError("Lazy loading cannot be used with sliding window.")
train_dataset = LazyClassificationDataset(train_df, self.tokenizer, self.args)
else:
if "text" in train_df.columns and "labels" in train_df.columns:
train_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(train_df["text"].astype(str), train_df["labels"]))
]
elif "text_a" in train_df.columns and "text_b" in train_df.columns:
train_examples = [
InputExample(i, text_a, text_b, label)
for i, (text_a, text_b, label) in enumerate(
zip(train_df["text_a"].astype(str), train_df["text_b"].astype(str), train_df["labels"])
)
]
else:
warnings.warn(
"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
)
train_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(train_df.iloc[:, 0], train_df.iloc[:, 1]))
]
train_dataset = self.load_and_cache_examples(train_examples, verbose=verbose)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=self.args["train_batch_size"], num_workers=14
)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataloader,
output_dir,
multi_label=multi_label,
show_running_loss=show_running_loss,
eval_df=eval_df,
verbose=verbose,
**kwargs,
)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
self._save_model(model=self.model)
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args["model_type"], output_dir))
def train(
self,
train_dataloader,
output_dir,
multi_label=False,
show_running_loss=True,
eval_df=None,
verbose=True,
**kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args["tensorboard_dir"])
t_total = len(train_dataloader) // args["gradient_accumulation_steps"] * args["num_train_epochs"]
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args["weight_decay"],
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
warmup_steps = math.ceil(t_total * args["warmup_ratio"])
args["warmup_steps"] = warmup_steps if args["warmup_steps"] == 0 else args["warmup_steps"]
optimizer = AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], eps=args["adam_epsilon"])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args["warmup_steps"], num_training_steps=t_total
)
if args["fp16"]:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args["fp16_opt_level"])
if args["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args["num_train_epochs"]), desc="Epoch", disable=args["silent"], mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args["model_name"] and os.path.exists(args["model_name"]):
try:
# set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args["model_name"].split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args["gradient_accumulation_steps"])
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args["gradient_accumulation_steps"]
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args["evaluate_during_training"]:
training_progress_scores = self._create_training_progress_scores(multi_label, **kwargs)
if args["wandb_project"]:
wandb.init(project=args["wandb_project"], config={**args}, **args["wandb_kwargs"])
wandb.watch(self.model)
model.train()
for _ in train_iterator:
if epochs_trained > 0:
epochs_trained -= 1
continue
# epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(tqdm(train_dataloader, desc="Current iteration", disable=args["silent"])):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args["n_gpu"] > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
print("\rRunning loss: %f" % loss, end="")
if args["gradient_accumulation_steps"] > 1:
loss = loss / args["gradient_accumulation_steps"]
if args["fp16"]:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# torch.nn.utils.clip_grad_norm_(
# amp.master_params(optimizer), args["max_grad_norm"]
# )
else:
loss.backward()
# torch.nn.utils.clip_grad_norm_(
# model.parameters(), args["max_grad_norm"]
# )
tr_loss += loss.item()
if (step + 1) % args["gradient_accumulation_steps"] == 0:
if args["fp16"]:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args["max_grad_norm"])
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args["max_grad_norm"])
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args["logging_steps"] > 0 and global_step % args["logging_steps"] == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args["logging_steps"], global_step)
logging_loss = tr_loss
if args["wandb_project"]:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_lr()[0],
"global_step": global_step,
}
)
if args["save_steps"] > 0 and global_step % args["save_steps"] == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"] and (
args["evaluate_during_training_steps"] > 0
and global_step % args["evaluate_during_training_steps"] == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results, _, _ = self.eval_model(
eval_df,
verbose=verbose and args["evaluate_during_training_verbose"],
silent=args["evaluate_during_training_silent"],
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args["save_eval_checkpoints"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False,
)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if (
results[args["early_stopping_metric"]] - best_eval_metric
< args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(
f" Patience of {args['early_stopping_patience']} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if (
results[args["early_stopping_metric"]] - best_eval_metric
> args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(
f" Patience of {args['early_stopping_patience']} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args["save_model_every_epoch"] or args["evaluate_during_training"]:
os.makedirs(output_dir_current, exist_ok=True)
if args["save_model_every_epoch"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"]:
results, _, _ = self.eval_model(
eval_df,
verbose=verbose and args["evaluate_during_training_verbose"],
silent=args["evaluate_during_training_silent"],
**kwargs,
)
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if results[args["early_stopping_metric"]] - best_eval_metric < args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(f" Patience of {args['early_stopping_patience']} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args["early_stopping_metric"]] - best_eval_metric > args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(f" Patience of {args['early_stopping_patience']} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
return global_step, tr_loss / global_step
def eval_model(self, eval_df, multi_label=False, output_dir=None, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_df. Saves results to output_dir.
Args:
eval_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be evaluated on this Dataframe.
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results.
model_outputs: List of model outputs for each row in eval_df
wrong_preds: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args["output_dir"]
self._move_model_to_device()
result, model_outputs, wrong_preds = self.evaluate(
eval_df, output_dir, multi_label=multi_label, verbose=verbose, silent=silent, **kwargs
)
self.results.update(result)
if verbose:
logger.info(self.results)
return result, model_outputs, wrong_preds
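# Example (hedged sketch): evaluation with an extra metric passed as a keyword
# argument, as described in the docstring above; eval_df uses the same
# "text"/"labels" format as train_df.
# from sklearn.metrics import f1_score
# result, model_outputs, wrong_preds = model.eval_model(eval_df, f1=f1_score)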
def evaluate(self, eval_df, output_dir, multi_label=False, prefix="", verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_df.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
if isinstance(eval_df, str):
eval_dataset = LazyClassificationDataset(eval_df, self.tokenizer, self.args)
eval_examples = None
else:
if "text" in eval_df.columns and "labels" in eval_df.columns:
eval_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(eval_df["text"].astype(str), eval_df["labels"]))
]
elif "text_a" in eval_df.columns and "text_b" in eval_df.columns:
eval_examples = [
InputExample(i, text_a, text_b, label)
for i, (text_a, text_b, label) in enumerate(
zip(eval_df["text_a"].astype(str), eval_df["text_b"].astype(str), eval_df["labels"])
)
]
else:
warnings.warn(
"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
)
eval_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(eval_df.iloc[:, 0], eval_df.iloc[:, 1]))
]
if args["sliding_window"]:
eval_dataset, window_counts = self.load_and_cache_examples(
eval_examples, evaluate=True, verbose=verbose, silent=silent
)
else:
eval_dataset = self.load_and_cache_examples(
eval_examples, evaluate=True, verbose=verbose, silent=silent
)
os.makedirs(eval_output_dir, exist_ok=True)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args["eval_batch_size"])
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, disable=args["silent"] or silent):
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch)
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if multi_label:
logits = logits.sigmoid()
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args["sliding_window"]:
count = 0
window_ranges = []
for n_windows in window_counts:
window_ranges.append([count, count + n_windows])
count += n_windows
preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]
out_label_ids = [
out_label_ids[i] for i in range(len(out_label_ids)) if i in [window[0] for window in window_ranges]
]
model_outputs = preds
preds = [np.argmax(pred, axis=1) for pred in preds]
final_preds = []
for pred_row in preds:
mode_pred, counts = mode(pred_row)
if len(counts) > 1 and counts[0] == counts[1]:
final_preds.append(args["tie_value"])
else:
final_preds.append(mode_pred[0])
preds = np.array(final_preds)
elif not multi_label and args["regression"] is True:
preds = np.squeeze(preds)
model_outputs = preds
else:
model_outputs = preds
if not multi_label:
preds = np.argmax(preds, axis=1)
result, wrong = self.compute_metrics(preds, out_label_ids, eval_examples, **kwargs)
result["eval_loss"] = eval_loss
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
return results, model_outputs, wrong
def load_and_cache_examples(
self, examples, evaluate=False, no_cache=False, multi_label=False, verbose=True, silent=False
):
"""
Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
process_count = self.args["process_count"]
tokenizer = self.tokenizer
args = self.args
if not no_cache:
no_cache = args["no_cache"]
if not multi_label and args["regression"]:
output_mode = "regression"
else:
output_mode = "classification"
os.makedirs(self.args["cache_dir"], exist_ok=True)
mode = "dev" if evaluate else "train"
cached_features_file = os.path.join(
args["cache_dir"],
"cached_{}_{}_{}_{}_{}".format(
mode, args["model_type"], args["max_seq_length"], self.num_labels, len(examples),
),
)
if os.path.exists(cached_features_file) and (
(not args["reprocess_input_data"] and not no_cache)
or (mode == "dev" and args["use_cached_eval_features"] and not no_cache)
):
features = torch.load(cached_features_file)
if verbose:
logger.info(f" Features loaded from cache at {cached_features_file}")
else:
if verbose:
logger.info(f" Converting to features started. Cache is not used.")
if args["sliding_window"]:
logger.info(" Sliding window enabled")
features = convert_examples_to_features(
examples,
args["max_seq_length"],
tokenizer,
output_mode,
# XLNet has a CLS token at the end
cls_token_at_end=bool(args["model_type"] in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args["model_type"] in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
# RoBERTa uses an extra separator b/w pairs of sentences,
# cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
sep_token_extra=bool(args["model_type"] in ["roberta", "camembert", "xlmroberta"]),
# PAD on the left for XLNet
pad_on_left=bool(args["model_type"] in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args["model_type"] in ["xlnet"] else 0,
process_count=process_count,
multi_label=multi_label,
silent=args["silent"] or silent,
use_multiprocessing=args["use_multiprocessing"],
sliding_window=args["sliding_window"],
flatten=not evaluate,
stride=args["stride"],
add_prefix_space=bool(args["model_type"] in ["roberta", "camembert", "xlmroberta"]),
args=args,
)
if verbose and args["sliding_window"]:
logger.info(f" {len(features)} features created from {len(examples)} samples.")
if not no_cache:
torch.save(features, cached_features_file)
if args["sliding_window"] and evaluate:
window_counts = [len(sample) for sample in features]
features = [feature for feature_set in features for feature in feature_set]
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args["sliding_window"] and evaluate:
return dataset, window_counts
else:
return dataset
def compute_metrics(self, preds, labels, eval_examples=None, multi_label=False, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
preds: Model predictions
labels: Ground truth labels
eval_examples: List of examples on which evaluation was performed
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results. (Matthews correlation coefficient, tp, tn, fp, fn)
wrong: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
assert len(preds) == len(labels)
extra_metrics = {}
for metric, func in kwargs.items():
extra_metrics[metric] = func(labels, preds)
mismatched = labels != preds
if eval_examples:
wrong = [i for (i, v) in zip(eval_examples, mismatched) if v.any()]
else:
wrong = ["NA"]
if multi_label:
label_ranking_score = label_ranking_average_precision_score(labels, preds)
return {**{"LRAP": label_ranking_score}, **extra_metrics}, wrong
elif self.args["regression"]:
return {**extra_metrics}, wrong
mcc = matthews_corrcoef(labels, preds)
if self.model.num_labels == 2:
tn, fp, fn, tp = confusion_matrix(labels, preds, labels=[0, 1]).ravel()
return (
{**{"mcc": mcc, "tp": tp, "tn": tn, "fp": fp, "fn": fn}, **extra_metrics},
wrong,
)
else:
return {**{"mcc": mcc}, **extra_metrics}, wrong
def predict(self, to_predict, multi_label=False):
"""
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction.
Returns:
preds: A python list of the predictions (0 or 1) for each text.
model_outputs: A python list of the raw model outputs for each text.
"""
model = self.model
args = self.args
self._move_model_to_device()
if multi_label:
eval_examples = [
InputExample(i, text, None, [0 for i in range(self.num_labels)]) for i, text in enumerate(to_predict)
]
else:
if isinstance(to_predict[0], list):
eval_examples = [InputExample(i, text[0], text[1], 0) for i, text in enumerate(to_predict)]
else:
eval_examples = [InputExample(i, text, None, 0) for i, text in enumerate(to_predict)]
if args["sliding_window"]:
eval_dataset, window_counts = self.load_and_cache_examples(eval_examples, evaluate=True, no_cache=True)
else:
eval_dataset = self.load_and_cache_examples(
eval_examples, evaluate=True, multi_label=multi_label, no_cache=True
)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args["eval_batch_size"])
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
if self.config.output_hidden_states:
for batch in tqdm(eval_dataloader, disable=args["silent"]):
model.eval()
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch)
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
embedding_outputs, layer_hidden_states = outputs[2][0], outputs[2][1:]
if multi_label:
logits = logits.sigmoid()
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
all_layer_hidden_states = np.array([state.detach().cpu().numpy() for state in layer_hidden_states])
all_embedding_outputs = embedding_outputs.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
all_layer_hidden_states = np.append(
all_layer_hidden_states,
np.array([state.detach().cpu().numpy() for state in layer_hidden_states]),
axis=1,
)
all_embedding_outputs = np.append(
all_embedding_outputs, embedding_outputs.detach().cpu().numpy(), axis=0
)
else:
for batch in tqdm(eval_dataloader, disable=args["silent"]):
model.eval()
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch)
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if multi_label:
logits = logits.sigmoid()
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args["sliding_window"]:
count = 0
window_ranges = []
for n_windows in window_counts:
window_ranges.append([count, count + n_windows])
count += n_windows
preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]
model_outputs = preds
preds = [np.argmax(pred, axis=1) for pred in preds]
final_preds = []
for pred_row in preds:
mode_pred, counts = mode(pred_row)
if len(counts) > 1 and counts[0] == counts[1]:
final_preds.append(args["tie_value"])
else:
final_preds.append(mode_pred[0])
preds = np.array(final_preds)
elif not multi_label and args["regression"] is True:
preds = np.squeeze(preds)
model_outputs = preds
else:
model_outputs = preds
if multi_label:
if isinstance(args["threshold"], list):
threshold_values = args["threshold"]
preds = [
[self._threshold(pred, threshold_values[i]) for i, pred in enumerate(example)]
for example in preds
]
else:
preds = [[self._threshold(pred, args["threshold"]) for pred in example] for example in preds]
else:
preds = np.argmax(preds, axis=1)
if self.config.output_hidden_states:
return preds, model_outputs, all_embedding_outputs, all_layer_hidden_states
else:
return preds, model_outputs
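# Example (hedged sketch): prediction on raw strings returns class ids and raw model outputs.
# preds, raw_outputs = model.predict(["I loved this!", "Not worth watching."])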
def _threshold(self, x, threshold):
if x >= threshold:
return 1
return 0
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch):
if isinstance(batch[0], dict):
inputs = {key: value.squeeze().to(self.device) for key, value in batch[0].items()}
inputs["labels"] = batch[1].to(self.device)
else:
batch = tuple(t.to(self.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
# XLM, DistilBERT and RoBERTa don't use segment_ids
if self.args["model_type"] != "distilbert":
inputs["token_type_ids"] = batch[2] if self.args["model_type"] in ["bert", "xlnet", "albert"] else None
return inputs
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _create_training_progress_scores(self, multi_label, **kwargs):
extra_metrics = {key: [] for key in kwargs}
if multi_label:
training_progress_scores = {
"global_step": [],
"LRAP": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
if self.model.num_labels == 2:
training_progress_scores = {
"global_step": [],
"tp": [],
"tn": [],
"fp": [],
"fn": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
elif self.model.num_labels == 1:
training_progress_scores = {
"global_step": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
training_progress_scores = {
"global_step": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
return training_progress_scores
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args["output_dir"]
os.makedirs(output_dir, exist_ok=True)
if model and not self.args["no_save"]:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args["save_optimizer_and_scheduler"]:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
self._save_model_args(output_dir)
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def _save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "model_args.json"), "w") as f:
json.dump(self.args, f)
def _load_model_args(self, input_dir):
model_args_file = os.path.join(input_dir, "model_args.json")
if os.path.isfile(model_args_file):
with open(model_args_file, "r") as f:
model_args = json.load(f)
return model_args
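# Illustrative sketch (assumption): a model previously saved to "outputs/" can be
# reloaded by pointing model_name at that directory, since _load_model_args()
# reads model_args.json from it.
# reloaded = ClassificationModel("roberta", "outputs/", use_cuda=False)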
|
the-stack_0_25378
|
"""Create a flat plate with aspect ratio 2 and a 30-degree inclination."""
import numpy
import pathlib
import petibmpy
# Flat-plate's parameters.
L = 1.0 # chord length
AR = 2.0 # aspect ratio
xc, yc, zc = 0.0, 0.0, 0.0 # center's coordinates
aoa = 30.0 # angle of inclination in degrees
ds = 0.04 # mesh spacing
# Generate coordinates of an inclined line.
n = int(numpy.ceil(L / ds))  # number of segments along the chord (linspace needs an int)
s = numpy.linspace(xc - L / 2, xc + L / 2, num=n + 1)
x = xc + numpy.cos(numpy.radians(-aoa)) * s
y = yc + numpy.sin(numpy.radians(-aoa)) * s
# Extrude the line along the z direction.
zlim = (zc - L * AR / 2, zc + L * AR / 2)
nz = int(numpy.ceil(L * AR / ds))  # number of segments along the span
x, y, z = petibmpy.extrude2d(x, y, zlim, n=nz, force=True)
# Save coordinates to file.
simudir = pathlib.Path(__file__).absolute().parents[1]
filepath = simudir / f'flatplate_aoa{int(aoa)}.body'
petibmpy.write_body(filepath, x, y, z)
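# Quick sanity check (illustrative, not in the original script): the planform
# extents should roughly match the chord L and the span L * AR chosen above.
# print('chord ~', numpy.hypot(x.max() - x.min(), y.max() - y.min()))
# print('span  ~', z.max() - z.min())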
|
the-stack_0_25380
|
"""
Basic chain parser tool for SmartChain functions.
Author: Tim M. (TM2013)
Co-Author: Bitkapp (aka alaniz)
Organization: Project Radon
Date: 2/17/2016
Requirements:
BitcoinRPC
An RPC-enabled client
"""
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import logging
import time
try:
import cPickle as pickle
except:
import pickle
# Debug settings
debug = True
if debug:
logging.basicConfig()
logging.getLogger("BitcoinRPC").setLevel(logging.DEBUG)
# RPC Configuration
rpc_user = "user"
rpc_pass = "pass"
rpc_port = "port"
class DataBase():
def __init__(self):
# Initialise database and RPC connection
self.loadSync()
self.rpc = AuthServiceProxy(("http://%s:%[email protected]:%s/") % (rpc_user, rpc_pass, rpc_port))
def saveSync(self):
# Dump database into a pickle
pickle.dump(self.block_data, open('block_data.p','wb'))
def loadSync(self):
# Load database from pickle
try:
self.block_data = pickle.load(open('block_data.p','rb'))
except IOError as e:
# If no pickle exists initialise a new database
self.block_data = {}
def syncFromLastBlock(self):
block_height = self.rpc.getblockcount()
# Sync from last block of existing database
try:
if self.block_data:
last_block = max(self.block_data.keys())
for block in range(last_block+1, block_height):
self.block_data[block] = self.rpc.getblockbynumber(block)["tx"]
# Start new sync process if new database has been initialised
else:
for block in range(0, block_height):
self.block_data[block] = self.rpc.getblockbynumber(block)["tx"]
except KeyboardInterrupt as e:
self.saveSync()
def returnBlock(self, blocknumber):
# Returns block data from database for a particular block
try:
block = self.block_data[blocknumber]
return block
except KeyError as e:
raise KeyError('Local database is not synced to required block height.')
def continuousSync(self):
while True:
self.syncFromLastBlock()
self.saveSync()
time.sleep(60)
#d = DataBase()
#d.syncFromLastBlock()
#d.saveSync()
#d.continuousSync()
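# Illustrative sketch: once synced, the transactions of a given block can be read
# back from the local pickle-backed database.
#d = DataBase()
#txs = d.returnBlock(1000)  # transaction data for block 1000 (requires prior sync)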
|
the-stack_0_25384
|
import pymysql.cursors
class DB:
"""docstring for DB"""
def __init__(self):
pass
def db_connection(self, **args):
if "db" in args:
conn = pymysql.connect(host=args['host'],
user=args['user'],
password=args['password'],
db=args['db'],
charset="utf8mb4",
cursorclass=pymysql.cursors.DictCursor)
else:
conn = pymysql.connect(host=args['host'],
user=args['user'],
password=args['password'],
charset="utf8mb4",
cursorclass=pymysql.cursors.DictCursor)
return conn
def create_vivo_database(self, conn, dbname):
try:
# Create a cursor object
cursorObject = conn.cursor()
# Create database harvester
createDBQuery = "CREATE Database " + dbname + ";"
cursorObject.execute(createDBQuery)
except Exception as e:
print("Exeception occured:{}".format(e))
finally:
conn.close()
def create_vivo_table(self, conn, tablename):
try:
if tablename == "users":
createTableQuery = "CREATE TABLE `users`" \
"(" \
"`pid` varchar(20) NOT NULL," \
"`eid` varchar(20) NOT NULL," \
"`uid` varchar(20)," \
"`public` varchar(20) NOT NULL DEFAULT 'N'," \
"`keyword` LONGTEXT," \
"`update_date` DATE DEFAULT NULL," \
"PRIMARY KEY ( pid )" \
");"
elif tablename == "publications":
createTableQuery = "CREATE TABLE `publications` (" \
"`pid` varchar(20) NOT NULL," \
"`keyword` LONGTEXT," \
"`public` varchar(20) NOT NULL DEFAULT 'N'," \
"`update_date` DATE DEFAULT NULL," \
"PRIMARY KEY (`pid`)" \
");"
elif tablename == "relations":
createTableQuery = "CREATE TABLE `relations` (" \
"`rid` varchar(20) NOT NULL," \
"`public` varchar(20) NOT NULL DEFAULT 'N'," \
"`update_date` DATE DEFAULT NULL," \
"PRIMARY KEY (`rid`)" \
");"
cursorObject = conn.cursor()
cursorObject.execute(createTableQuery)
except Exception as e:
print("Exeception occured:{}".format(e))
def execute_query(self, conn, querystring, querytype):
try:
cursorObject = conn.cursor()
cursorObject.execute(querystring)
if querytype == "select":
rows = cursorObject.fetchall()
return rows
elif querytype == "update":
conn.commit()
except Exception as e:
print("Exeception occured:{}".format(e))
def check_exist(self, conn, tablename, keyname, value):
isExist = False
try:
querystring = "SELECT * from " + tablename + \
" where " + keyname + " = \"" + value + "\";"
cursorObject = conn.cursor()
cursorObject.execute(querystring)
rows = cursorObject.fetchall()
for row in rows:
isExist = True
except Exception as e:
print("MySQL Exeception occured:{}".format(e))
return isExist
def select_records(self, conn, tablename, keyname, value):
try:
querystring = "SELECT * from " + tablename + \
" where " + keyname + " = \"" + value + "\";"
cursorObject = conn.cursor()
cursorObject.execute(querystring)
rows = cursorObject.fetchall()
return rows
except Exception as e:
print("MySQL Exeception occured:{}".format(e))
return None
def delete_record(self, conn, tablename, keyname, value):
resp = False
try:
querystring = "DELETE from %s where %s = %s" % (
tablename, keyname, value,)
print(querystring)
cursorObject = conn.cursor()
cursorObject.execute(querystring)
conn.commit()
resp = True
except Exception as e:
print("MySQL Exeception occured:{}".format(e))
return resp
def update_user_privacy(self, conn, tablename, privacy, keyname, value):
resp = False
try:
querystring = "UPDATE %s set public = \"%s\" where %s = \"%s\";" % (
tablename, privacy, keyname, value,)
print(querystring)
cursorObject = conn.cursor()
cursorObject.execute(querystring)
conn.commit()
resp = True
except Exception as e:
print("MySQL Exeception occured:{}".format(e))
return resp
def insert_user(self, conn, username, elementid, uid, privacy):
resp = False
try:
querystring = "INSERT INTO users (pid, eid, uid, public) VALUES (\"%s\", \"%s\", \"%s\", \"%s\");" % (
username, elementid, uid, privacy,)
cursorObject = conn.cursor()
cursorObject.execute(querystring)
conn.commit()
resp = True
except Exception as e:
print("MySQL Exeception occured:{}".format(e))
return resp
def insert_publication(self, conn, pubid, privacy):
resp = False
try:
querystring = "INSERT INTO publications (pid, public) VALUES (\"%s\", \"%s\");" % (
pubid, privacy,)
cursorObject = conn.cursor()
cursorObject.execute(querystring)
conn.commit()
resp = True
except Exception as e:
print("MySQL Exeception occured:{}".format(e))
return resp
def insert_relation(self, conn, rid, privacy):
resp = False
try:
querystring = "INSERT INTO relations (rid, public) VALUES (\"%s\", \"%s\");" % (
rid, privacy,)
cursorObject = conn.cursor()
cursorObject.execute(querystring)
conn.commit()
resp = True
except Exception as e:
print("MySQL Exeception occured:{}".format(e))
return resp
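# Example usage (illustrative sketch; host and credentials are placeholders):
# db = DB()
# conn = db.db_connection(host="localhost", user="root", password="secret", db="harvester")
# rows = db.execute_query(conn, "SELECT * FROM users;", "select")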
|
the-stack_0_25388
|
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D schedule on x86"""
import tvm
from tvm import autotvm
from tvm.autotvm.task.nnvm_integration import deserialize_args
from tvm.autotvm.task import get_config
from .. import generic, tag
from .. import nn
from ..util import get_const_tuple
from ..nn.conv2d import conv2d, conv2d_NCHWc, \
conv2d_alter_layout, _get_workload as _get_conv2d_workload
from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload
from ..nn.depthwise_conv2d import depthwise_conv2d_NCHWc, depthwise_conv2d_nchw
from ..nn.pad import pad
from . import conv2d_avx_1x1, conv2d_avx_common
def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False):
"""
Get default schedule config for the workload
"""
if is_depthwise:
wkl = _get_depthwise_conv2d_workload(data, kernel, strides, padding, out_dtype)
from .depthwise_conv2d import _fallback_schedule
_fallback_schedule(cfg, wkl)
else:
wkl = _get_conv2d_workload(data, kernel, strides, padding, out_dtype)
is_kernel_1x1 = wkl.hkernel == 1 and wkl.wkernel == 1
if is_kernel_1x1:
conv2d_avx_1x1._fallback_schedule(cfg, wkl)
else:
conv2d_avx_common._fallback_schedule(cfg, wkl)
def _create_tuning_space(cfg, data, kernel, strides, padding, dilation, layout):
"""Create schedule configuration from input arguments"""
dshape = get_const_tuple(data.shape)
kshape = get_const_tuple(kernel.shape)
if layout == 'NCHW':
n, ic, h, w = dshape
oc, _, kh, kw = kshape
else:
raise ValueError("Not support this layout {} with "
"schedule template.".format(layout))
is_kernel_1x1 = kh == 1 and kw == 1
ph, pw = padding if isinstance(padding, (tuple, list)) else (padding, padding)
sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
oh = (h - kh + 2 * ph) // sh + 1
ow = (w - kw + 2 * pw) // sw + 1
# Create schedule config
cfg.define_split("tile_ic", ic, num_outputs=2)
cfg.define_split("tile_oc", oc, num_outputs=2)
cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
if is_kernel_1x1:
cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
else:
cfg.define_knob("unroll_kw", [True, False])
@autotvm.register_topi_compute(conv2d, 'cpu', 'direct')
def _declaration_conv(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
out_dtype = data.dtype if out_dtype is None else out_dtype
padding = padding if isinstance(padding, (tuple, list)) else (padding, padding)
strides = strides if isinstance(strides, (tuple, list)) else (strides, strides)
dilation = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
if layout == 'NCHW':
_create_tuning_space(cfg, data, kernel, strides, padding, dilation, layout)
if cfg.is_fallback:
_get_default_config(cfg, data, kernel, strides, padding, out_dtype)
return _declaration_conv_impl(cfg, data, kernel, strides,
padding, dilation, layout, out_dtype)
elif layout == 'HWCN':
return nn.conv2d_hwcn(data, kernel, strides, padding, dilation, out_dtype)
elif layout == 'NHWC':
return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype)
else:
raise ValueError("not support this layout {} yet".format(layout))
def _declaration_conv_impl(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
out_dtype = data.dtype if out_dtype is None else out_dtype
assert layout == 'NCHW', "only support NCHW convolution for AVX"
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(dilation, int):
dilation_h, dilation_w = dilation, dilation
else:
dilation_h, dilation_w = dilation
HPAD, WPAD = padding
HSTR, WSTR = strides
batch_size, in_channel, in_height, in_width = get_const_tuple(data.shape)
num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape)
pad_height = in_height + 2 * HPAD
pad_width = in_width + 2 * WPAD
dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
out_height = (in_height + 2 * HPAD - dilated_kernel_h) // HSTR + 1
out_width = (in_width + 2 * WPAD - dilated_kernel_w) // WSTR + 1
# pack data
DOPAD = (HPAD != 0 or WPAD != 0)
if DOPAD:
data_pad = pad(data, (0, 0, HPAD, WPAD), name="data_pad")
else:
data_pad = data
# fetch schedule
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
shape = (batch_size, in_channel // ic_bn, pad_height, ic_bn, pad_width)
data_vec = tvm.compute(shape,
lambda n, C, h, c, w: data_pad[n, C * ic_bn + c, h, w],
name='data_vec')
# pack kernel
shape = (num_filter//oc_bn, in_channel//ic_bn,
kernel_height, kernel_width, ic_bn, oc_bn)
kernel_vec = tvm.compute(shape,
lambda CO, CI, h, w, ci, co:
kernel[CO * oc_bn + co, CI * ic_bn + ci, h, w],
name='kernel_vec')
# convolution
oshape = (batch_size, num_filter//oc_bn, out_height, out_width, oc_bn)
unpack_shape = (batch_size, num_filter, out_height, out_width)
ic = tvm.reduce_axis((0, in_channel), name='ic')
kh = tvm.reduce_axis((0, kernel_height), name='kh')
kw = tvm.reduce_axis((0, kernel_width), name='kw')
conv = tvm.compute(oshape, lambda n, oc_chunk, oh, ow, oc_block:
tvm.sum(data_vec[n, ic//ic_bn, oh*HSTR+kh*dilation_h, ic%ic_bn,
ow*WSTR+kw*dilation_w].astype(out_dtype) *
kernel_vec[oc_chunk, ic//ic_bn, kh, kw, ic%ic_bn,
oc_block].astype(out_dtype),
axis=[ic, kh, kw]), name='conv')
unpack = tvm.compute(unpack_shape,
lambda n, c, h, w: conv[n, c // oc_bn, h, w, c % oc_bn]
.astype(out_dtype),
name='output_unpack',
tag='conv2d_nchw')
return unpack
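# Layout sketch (illustrative, not part of the TOPI code): the data_vec packing above,
# (n, C, h, c, w) with C = in_channel // ic_bn, matches this numpy reshape/transpose
# when the channel count divides evenly by ic_bn:
# import numpy as np
# def pack_data(a, ic_bn):
#     n, c, h, w = a.shape
#     return a.reshape(n, c // ic_bn, ic_bn, h, w).transpose(0, 1, 3, 2, 4)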
@autotvm.register_topi_schedule(generic.schedule_conv2d_nchw, 'cpu', ['direct'])
def schedule_conv2d(cfg, outs):
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'conv2d_nchw' in op.tag:
output = op.output(0)
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[1]
kernel = kernel_vec.op.input_tensors[0]
if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
_, _, kh, kw = get_const_tuple(kernel.shape)
is_kernel_1x1 = kh == 1 and kw == 1
args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]]
if is_kernel_1x1:
conv2d_avx_1x1._schedule_conv(*args)
else:
conv2d_avx_common._schedule_conv(*args)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
@generic.schedule_conv2d_nhwc.register("cpu")
def schedule_conv2d_nhwc(outs):
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
output_op = outs[0].op
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
else: # inject custom schedule
if len(op.axis) == 4: # schedule bias + bn + relu
n, h, w, c = op.axis
fused = s[op].fuse(n, h, w)
s[op].parallel(fused)
s[op].vectorize(c)
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'conv2d_nhwc' in op.tag:
conv = op.output(0)
kernel = op.input_tensors[1]
if isinstance(kernel.op, tvm.tensor.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data = op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
n_pad, h_pad, w_pad, c_pad = data_pad.op.axis
pad_fused = s[data_pad].fuse(n_pad, h_pad)
s[data_pad].parallel(pad_fused)
C = conv
n, h, w, c = C.op.axis
ry, rx, rc = C.op.reduce_axis
n_out, h_out, w_out, c_out = output_op.axis
s[C].vectorize(c)
if op != output_op: # fuse bias + bn + relu into conv
s[C].compute_at(s[output_op], c_out)
else:
fused = s[C].fuse(n, h, w)
s[C].parallel(fused)
scheduled_ops.append(op)
traverse(output_op)
return s
# Define template function for autotvm task
# We define schedule template in this function instead of
# declaration function since actual input arguments need
# to be altered by the schedule selected.
@autotvm.task.register("topi_x86_conv2d_NCHWc")
def _topi_nn_conv2d_NCHWc(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
data, kernel, strides, padding, dilation, origin_layout, dtype = deserialize_args(args)
raw_data_shape = get_const_tuple(data.shape)
raw_kernel_shape = get_const_tuple(kernel.shape)
# get config here
cfg = get_config()
_create_tuning_space(cfg, data, kernel, strides, padding, dilation, origin_layout)
# change shape with the value in config
ic_bn, oc_bn, ow_bn = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1],
cfg["tile_ow"].size[-1])
new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn,
raw_data_shape[2], raw_data_shape[3], ic_bn)
data_layout = "NCHW%dc" % ic_bn
out_layout = "NCHW%dc" % oc_bn
new_kernel_shape = (raw_kernel_shape[0] // oc_bn, raw_kernel_shape[1] // ic_bn,
raw_kernel_shape[2], raw_kernel_shape[3], ic_bn, oc_bn)
new_data = tvm.placeholder(new_data_shape, data.dtype)
new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype)
C = _declaration_conv_NCHWc(cfg, new_data, new_kernel, strides, padding, dilation,
data_layout, out_layout, dtype)
s = _schedule_conv2d_NCHWc(cfg, [C])
return s, [new_data, new_kernel, C]
@conv2d_alter_layout.register("cpu")
def _alter_conv2d_layout(attrs, inputs, tinfo):
import nnvm.symbol as sym
copy_inputs = [s for s in inputs]
new_attrs = {k : attrs[k] for k in attrs.keys()}
data, kernel = tinfo[0], tinfo[1]
batch_size, in_channel, height, width = get_const_tuple(data.shape)
groups = attrs.get_int("groups")
out_channel = attrs.get_int("channels")
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
layout = attrs['layout']
kh, kw = attrs.get_int_tuple("kernel_size")
dtype = data.dtype
out_dtype = dtype if attrs["out_dtype"] == "same" else attrs["out_dtype"]
is_depthwise = groups == in_channel and groups == out_channel
# only optimize for NCHW
if layout != 'NCHW':
return None
if groups != 1 and not is_depthwise:
return None
dispatch_ctx = autotvm.task.DispatchContext.current
target = tvm.target.current_target()
# query schedule and fallback if necessary
workload = autotvm.task.args_to_workload(
[data, kernel, strides, padding, dilation, out_dtype], depthwise_conv2d_nchw) \
if is_depthwise else \
autotvm.task.args_to_workload(
[data, kernel, strides, padding, dilation, layout, out_dtype], conv2d)
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback:
_get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depthwise)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
new_attrs['layout'] = 'NCHW%dc' % ic_bn
new_attrs['out_layout'] = 'NCHW%dc' % oc_bn
new_data = tvm.placeholder((batch_size, in_channel//ic_bn, height, width, ic_bn),
dtype=data.dtype)
if is_depthwise:
# channel, channel_multiplier, kh, kw -> out_channel_chunk, kh, kw, out_channel_block
# in which out_channel = merge(channel, channel_multiplier)
kernel_sym = copy_inputs[1]
kernel_sym = sym.reshape(kernel_sym, shape=(out_channel//oc_bn, oc_bn, kh, kw))
kernel_sym = sym.transpose(kernel_sym, axes=(0, 2, 3, 1))
copy_inputs[1] = kernel_sym
# Store altered operator's config
new_kernel = tvm.placeholder((out_channel//oc_bn, kh, kw, oc_bn), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, new_attrs['layout'],
new_attrs['out_layout'], out_dtype], depthwise_conv2d_NCHWc)
else:
out_channel, _, kh, kw = get_const_tuple(kernel.shape)
# (oc, ic, h, w) -> (OC, IC, h, w, ic, oc)
new_attrs['kernel_layout'] = 'OIHW%di%do' % (ic_bn, oc_bn)
# Store altered operator's config
new_kernel = tvm.placeholder((out_channel//oc_bn, in_channel//ic_bn, kh, kw, ic_bn, oc_bn),
dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, new_attrs['layout'],
new_attrs['out_layout'], out_dtype], conv2d_NCHWc)
dispatch_ctx.update(target, new_workload, cfg)
return sym.contrib.conv2d_NCHWc(*copy_inputs, **new_attrs)
@autotvm.register_topi_compute(conv2d_NCHWc, 'cpu', 'direct')
def _declaration_conv_NCHWc(cfg, data, kernel, strides,
padding, dilation, layout, out_layout, out_dtype):
# layout and out_layout are not used here,
# we keep them for debug convenience when dumping autotvm workload
HPAD, WPAD = padding if isinstance(padding, (tuple, list)) else (padding, padding)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
assert (dh, dw) == (1, 1), "Does not support dilation"
n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
in_channel = ic_chunk * ic_bn
if data.dtype == 'uint8':
oc_chunk, _, kernel_height, kernel_width, _, oc_bn, _ = get_const_tuple(kernel.shape)
else:
oc_chunk, _, kernel_height, kernel_width, _, oc_bn = get_const_tuple(kernel.shape)
num_filter = oc_chunk * oc_bn
if cfg.is_fallback:
_get_default_config(cfg, tvm.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
tvm.placeholder((num_filter, in_channel, kernel_height, kernel_width),
dtype=kernel.dtype),
strides, padding, out_dtype)
# output shape
out_height = (ih + 2 * HPAD - kernel_height) // HSTR + 1
out_width = (iw + 2 * WPAD - kernel_width) // WSTR + 1
oshape = (n, oc_chunk, out_height, out_width, oc_bn)
# DOPAD
DOPAD = (HPAD != 0 or WPAD != 0)
if DOPAD:
data_pad = pad(data, (0, 0, HPAD, WPAD, 0), name="data_pad")
else:
data_pad = data
ic = tvm.reduce_axis((0, in_channel), name='ic')
kh = tvm.reduce_axis((0, kernel_height), name='kh')
kw = tvm.reduce_axis((0, kernel_width), name='kw')
if data.dtype == 'uint8':
assert out_dtype == "int32", \
"INT8 convolution requires input dtype = uint8 and output dtype=int32"
        # Intel int8 instructions compute dot products over groups of 4 int8 values,
        # so the current implementation requires ic_bn to be a multiple of 4 (n_elems).
n_elems = 4
assert ic_bn % n_elems == 0
ic_outer = tvm.reduce_axis((0, in_channel//ic_bn), name='ic_outer')
ic_f_inner = tvm.reduce_axis((0, ic_bn//n_elems), name='ic_f_inner')
ic_s_inner = tvm.reduce_axis((0, n_elems), name='ic_s_inner')
return tvm.compute(oshape, lambda n, oc_chunk, oh, ow, oc_block:
tvm.sum(data_pad[n, ic_outer, oh*HSTR+kh, ow*WSTR+kw,
ic_f_inner * n_elems + ic_s_inner]
.astype(out_dtype) *
kernel[oc_chunk, ic_outer, kh, kw, ic_f_inner,
oc_block, ic_s_inner].astype(out_dtype),
axis=[kh, kw, ic_outer, ic_f_inner, ic_s_inner]),
name='conv2d_NCHWc_int8', tag="conv2d_NCHWc_int8")
# else: fp implementation
return tvm.compute(oshape, lambda n, oc_chunk, oh, ow, oc_block:
tvm.sum(data_pad[n, ic//ic_bn, oh*HSTR+kh, ow*WSTR+kw,
ic%ic_bn].astype(out_dtype) *
kernel[oc_chunk, ic//ic_bn, kh, kw, ic%ic_bn, oc_block],
axis=[ic, kh, kw]),
name='conv2d_NCHWc', tag="conv2d_NCHWc")
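def _out_size_sketch():
    # Illustrative sketch only (not used by the code above): the output
    # spatial-size arithmetic applied in the compute declaration, e.g. a 3x3
    # kernel with stride 1 and padding 1 preserves a 224x224 input.
    ih, HPAD, kernel_height, HSTR = 224, 1, 3, 1
    out_height = (ih + 2 * HPAD - kernel_height) // HSTR + 1
    assert out_height == 224
    return out_height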
@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc, 'cpu', ['direct'])
def _schedule_conv2d_NCHWc(cfg, outs):
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'conv2d_NCHWc' in op.tag:
conv_out = op.output(0)
kernel = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0] \
if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
else data_vec
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data_vec, conv_out, outs[0]]
if data.dtype == 'uint8':
# int8 conv kernel is 7-dim
_, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_NCHWc_int8(*args)
else:
conv2d_avx_common._schedule_conv_NCHWc_int8(*args)
else:
                _, _, kh, kw, _, _ = get_const_tuple(kernel.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_NCHWc(*args)
else:
conv2d_avx_common._schedule_conv_NCHWc(*args)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
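def _nchwc_layout_sketch():
    # Illustrative sketch only (not called anywhere): how a plain NCHW tensor
    # maps to the blocked NCHW[ic_bn]c layout that the compute and schedule
    # functions above operate on. numpy is used here purely to make the
    # reshape/transpose explicit; it is not a dependency of the code above.
    import numpy as np
    ic_bn = 4
    x = np.arange(2 * 8 * 5 * 5).reshape(2, 8, 5, 5)     # NCHW
    x_split = x.reshape(2, 8 // ic_bn, ic_bn, 5, 5)      # split C into (C//ic_bn, ic_bn)
    x_nchwc = x_split.transpose(0, 1, 3, 4, 2)           # -> (N, C//ic_bn, H, W, ic_bn)
    assert x_nchwc.shape == (2, 2, 5, 5, 4)
    return x_nchwc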
|
the-stack_0_25389
|
import os
from ..model import summary
from jinja2 import Environment, FileSystemLoader
def main(config_file):
template_folder = os.path.join(os.path.dirname(__file__), '..', 'view')
env = Environment(loader=FileSystemLoader(template_folder))
template = env.get_template('default.tpl')
child_template = 'index.tpl'
n_cell_types = summary.get_number_of_cell_types_with_results(config_file)
output = template.render(site=config_file['website'], number_of_cl=n_cell_types, tpl=child_template)
with open(os.path.join(config_file['website']['output'], "index.html"), "wb") as f:
f.write(output.encode("utf-8"))
print('index.html generated')
|
the-stack_0_25393
|
from aiowing import settings
from aiowing.apps.web.models import Record
if __name__ == '__main__':
with settings.manager.allow_sync():
Record.delete().execute()
records = []
for index in range(settings.RECORDS_COUNT):
            active = index % 2 == 0
records.append(dict(
active=active,
name='record %d' % index,
description='description %d' % index))
with settings.pool.atomic():
Record.insert_many(records).execute()
|
the-stack_0_25396
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import get_target_arch
from lib.util import electron_gyp, import_vs_env
CONFIGURATIONS = ['Release', 'Debug']
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
os.chdir(SOURCE_ROOT)
# Update the VS build env.
import_vs_env(get_target_arch())
ninja = os.path.join('vendor', 'depot_tools', 'ninja')
if sys.platform == 'win32':
ninja += '.exe'
args = parse_args()
for config in args.configuration:
build_path = os.path.join('out', config[0])
ret = subprocess.call([ninja, '-C', build_path, args.target])
if ret != 0:
sys.exit(ret)
def parse_args():
parser = argparse.ArgumentParser(description='Build project')
parser.add_argument('-c', '--configuration',
help='Build with Release or Debug configuration',
nargs='+',
default=CONFIGURATIONS,
required=False)
parser.add_argument('-t', '--target',
help='Build specified target',
default=electron_gyp()['project_name%'],
required=False)
return parser.parse_args()
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_25397
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional
from .. import config
from ..utils.logging import get_logger
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
NumpyFormatter,
PandasFormatter,
PythonFormatter,
format_table,
query_table,
)
logger = get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], type] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
"""
Register a Formatter object using a name and optional aliases.
This function must be used on a Formatter class.
"""
aliases = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
)
_FORMAT_TYPES[format_type] = formatter_cls
for alias in set(aliases + [format_type]):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
)
_FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
"""
Register an unavailable Formatter object using a name and optional aliases.
This function must be used on an Exception object that is raised when trying to get the unavailable formatter.
"""
aliases = aliases if aliases is not None else []
for alias in set(aliases + [format_type]):
_FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
_torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
_tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
_jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
"""If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
"""
Factory function to get a Formatter given its type name and keyword arguments.
    A formatter is an object that extracts and formats data from a pyarrow Table.
    It defines the formatting for rows, columns and batches.
If the formatter for a given type name doesn't exist or is not available, an error is raised.
"""
format_type = get_format_type_from_alias(format_type)
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
)
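def _formatter_usage_sketch():
    # Illustrative sketch only, not used by the library itself: aliases resolve
    # to their main format type, and get_formatter instantiates the registered
    # Formatter class (forwarding any keyword arguments). Assumes numpy is
    # installed, since NumpyFormatter is registered unconditionally above.
    assert get_format_type_from_alias("pd") == "pandas"
    formatter = get_formatter("np")
    assert isinstance(formatter, NumpyFormatter)
    return formatter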
|
the-stack_0_25398
|
from __future__ import unicode_literals
from collections import namedtuple
import json
import io
import os
import re
VALID_COUNTRY_CODE = re.compile(r'^\w{2,3}$')
VALIDATION_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
VALIDATION_DATA_PATH = os.path.join(VALIDATION_DATA_DIR, '%s.json')
FIELD_MAPPING = {
'A': 'street_address',
'C': 'city',
'D': 'city_area',
'N': 'name',
'O': 'company_name',
'S': 'country_area',
'X': 'sorting_code',
'Z': 'postal_code'}
KNOWN_FIELDS = set(FIELD_MAPPING.values()) | {'country_code'}
def load_validation_data(country_code='all'):
if not VALID_COUNTRY_CODE.match(country_code):
raise ValueError(
'%r is not a valid country code' % (country_code,))
country_code = country_code.lower()
path = VALIDATION_DATA_PATH % (country_code,)
if not os.path.exists(path):
raise ValueError(
'%r is not a valid country code' % (country_code,))
with io.open(path, encoding='utf-8') as data:
return json.load(data)
ValidationRules = namedtuple(
'ValidationRules', [
'country_name',
'address_format', 'address_latin_format',
'allowed_fields', 'required_fields', 'upper_fields',
'country_area_type', 'country_area_choices',
'city_type', 'city_choices',
'city_area_type', 'city_area_choices',
'postal_code_type', 'postal_code_matchers', 'postal_code_examples',
'postal_code_prefix'])
def _make_choices(rules, translated=False):
sub_keys = rules.get('sub_keys')
if not sub_keys:
return []
choices = []
sub_keys = sub_keys.split('~')
sub_names = rules.get('sub_names')
if sub_names:
choices += [
(key, value)
for key, value in zip(sub_keys, sub_names.split('~'))
if value]
else:
if not translated:
choices += [(key, key) for key in sub_keys]
if not translated:
sub_lnames = rules.get('sub_lnames')
if sub_lnames:
choices += [
(key, value)
for key, value in zip(sub_keys, sub_lnames.split('~'))
if value]
sub_lfnames = rules.get('sub_lfnames')
if sub_lfnames:
choices += [
(key, value)
for key, value in zip(sub_keys, sub_lfnames.split('~'))
if value]
return choices
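def _make_choices_sketch():
    # Illustrative sketch only: the '~'-separated convention in the address
    # metadata that _make_choices parses (the values below are made up).
    rules = {'sub_keys': 'CA~NY', 'sub_names': 'California~New York'}
    assert _make_choices(rules) == [('CA', 'California'), ('NY', 'New York')]
    return _make_choices(rules)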
def _match_choices(value, choices):
if value:
value = value.strip().lower()
for name, label in choices:
if name.lower() == value:
return name
if label.lower() == value:
return name
def _load_country_data(country_code):
database = load_validation_data('zz')
country_data = database['ZZ']
if country_code:
country_code = country_code.upper()
if country_code.lower() == 'zz':
raise ValueError(
'%r is not a valid country code' % (country_code,))
database = load_validation_data(country_code.lower())
country_data.update(database[country_code])
return country_data, database
def get_validation_rules(address):
country_code = address.get('country_code', '').upper()
country_data, database = _load_country_data(country_code)
country_name = country_data.get('name', '')
address_format = country_data['fmt']
address_latin_format = country_data.get('lfmt', address_format)
format_fields = re.finditer(r'%([ACDNOSXZ])', address_format)
allowed_fields = {FIELD_MAPPING[m.group(1)] for m in format_fields}
required_fields = {FIELD_MAPPING[f] for f in country_data['require']}
upper_fields = {FIELD_MAPPING[f] for f in country_data['upper']}
languages = []
if 'languages' in country_data:
languages = country_data['languages'].split('~')
languages.remove(country_data['lang'])
postal_code_matchers = []
if 'postal_code' in required_fields:
if 'zip' in country_data:
postal_code_matchers.append(
re.compile('^' + country_data['zip'] + '$'))
postal_code_examples = country_data.get('zipex')
city_choices = []
city_area_choices = []
country_area_type = country_data['state_name_type']
city_type = country_data['locality_name_type']
city_area_type = country_data['sublocality_name_type']
postal_code_type = country_data['zip_name_type']
postal_code_prefix = country_data.get('postprefix', '')
# second level of data is for administrative areas
country_area_choices = _make_choices(country_data)
for language in languages:
localized_country_data = database['%s--%s' % (
country_code, language)]
country_area_choices += _make_choices(
localized_country_data, translated=True)
country_area = _match_choices(
address.get('country_area'), country_area_choices)
if country_area:
# third level of data is for cities
country_area_data = database['%s/%s' % (
country_code, country_area)]
if 'zip' in country_area_data:
postal_code_matchers.append(
re.compile('^' + country_area_data['zip']))
if 'zipex' in country_area_data:
postal_code_examples = country_area_data['zipex']
city_choices = _make_choices(country_area_data)
for language in languages:
localized_country_area_data = database['%s/%s--%s' % (
country_code, country_area, language)]
city_choices += _make_choices(
localized_country_area_data, translated=True)
city = _match_choices(
address.get('city'), city_choices)
if city:
# fourth level of data is for dependent sublocalities
city_data = database['%s/%s/%s' % (
country_code, country_area, city)]
if 'zip' in city_data:
postal_code_matchers.append(
re.compile('^' + city_data['zip']))
if 'zipex' in city_data:
postal_code_examples = city_data['zipex']
city_area_choices = _make_choices(city_data)
for language in languages:
localized_city_data = database['%s/%s/%s--%s' % (
country_code, country_area, city, language)]
city_area_choices += _make_choices(
localized_city_data, translated=True)
return ValidationRules(
country_name,
address_format, address_latin_format,
allowed_fields, required_fields, upper_fields,
country_area_type, country_area_choices,
city_type, city_choices,
city_area_type, city_area_choices,
postal_code_type, postal_code_matchers, postal_code_examples,
postal_code_prefix)
class InvalidAddress(ValueError):
def __init__(self, message, errors):
super(InvalidAddress, self).__init__(message)
self.errors = errors
def _normalize_field(name, rules, data, choices, errors):
value = data.get(name)
if name in rules.upper_fields and value is not None:
value = value.upper()
data[name] = value
if name not in rules.allowed_fields:
data[name] = ''
elif not value and name in rules.required_fields:
errors[name] = 'required'
elif choices:
value = _match_choices(value, choices)
if value is not None:
data[name] = value
else:
errors[name] = 'invalid'
if not value:
data[name] = ''
def normalize_address(address):
errors = {}
try:
rules = get_validation_rules(address)
except ValueError:
errors['country_code'] = 'invalid'
else:
cleaned_data = address.copy()
country_code = cleaned_data.get('country_code')
if not country_code:
errors['country_code'] = 'required'
else:
cleaned_data['country_code'] = country_code.upper()
_normalize_field(
'country_area', rules, cleaned_data, rules.country_area_choices,
errors)
_normalize_field(
'city', rules, cleaned_data, rules.city_choices, errors)
_normalize_field(
'city_area', rules, cleaned_data, rules.city_area_choices, errors)
_normalize_field(
'postal_code', rules, cleaned_data, [], errors)
postal_code = cleaned_data.get('postal_code', '')
if rules.postal_code_matchers and postal_code:
for matcher in rules.postal_code_matchers:
if not matcher.match(postal_code):
errors['postal_code'] = 'invalid'
break
_normalize_field(
'street_address', rules, cleaned_data, [], errors)
_normalize_field(
'sorting_code', rules, cleaned_data, [], errors)
if errors:
raise InvalidAddress('Invalid address', errors)
return cleaned_data
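def _normalize_address_sketch():
    # Illustrative sketch only (requires the bundled JSON data files); the
    # field values below are made up. A postal code that does not match the
    # country/area pattern surfaces as a field-level error on the exception.
    address = {
        'country_code': 'US',
        'country_area': 'California',
        'city': 'Mountain View',
        'postal_code': '94043',
        'street_address': '1600 Amphitheatre Pkwy',
    }
    try:
        return normalize_address(address)
    except InvalidAddress as exc:
        return exc.errors  # e.g. {'postal_code': 'invalid'}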
def _format_address_line(line_format, address, rules):
def _get_field(name):
value = address.get(name, '')
if name in rules.upper_fields:
value = value.upper()
return value
replacements = {'%%%s' % code: _get_field(field_name)
for code, field_name in FIELD_MAPPING.items()}
fields = re.split('(%.)', line_format)
fields = [replacements.get(f, f) for f in fields]
return ''.join(fields).strip()
def get_field_order(address, latin=False):
"""
Returns expected order of address form fields as a list of lists.
Example for PL:
>>> get_field_order({'country_code': 'PL'})
[[u'name'], [u'company_name'], [u'street_address'], [u'postal_code', u'city']]
"""
rules = get_validation_rules(address)
address_format = (
rules.address_latin_format if latin else rules.address_format)
address_lines = address_format.split('%n')
replacements = {'%%%s' % code: field_name
for code, field_name in FIELD_MAPPING.items()}
all_lines = []
for line in address_lines:
fields = re.split('(%.)', line)
single_line = [replacements.get(field) for field in fields]
single_line = list(filter(None, single_line))
all_lines.append(single_line)
return all_lines
def format_address(address, latin=False):
rules = get_validation_rules(address)
address_format = (
rules.address_latin_format if latin else rules.address_format)
address_line_formats = address_format.split('%n')
address_lines = [
_format_address_line(lf, address, rules)
for lf in address_line_formats]
address_lines.append(rules.country_name)
address_lines = filter(None, address_lines)
return '\n'.join(address_lines)
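def _format_address_sketch():
    # Illustrative sketch only: format_address renders an address dict into the
    # country's multi-line layout (latin=True selects the latinized format when
    # one is defined). The field values are made up and the exact output
    # depends on the bundled data files.
    address = {
        'country_code': 'US',
        'country_area': 'CA',
        'city': 'Mountain View',
        'postal_code': '94043',
        'street_address': '1600 Amphitheatre Pkwy',
    }
    return format_address(address)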
def latinize_address(address, normalized=False):
if not normalized:
address = normalize_address(address)
cleaned_data = address.copy()
country_code = address.get('country_code', '').upper()
dummy_country_data, database = _load_country_data(country_code)
if country_code:
country_area = address['country_area']
if country_area:
key = '%s/%s' % (country_code, country_area)
country_area_data = database.get(key)
if country_area_data:
cleaned_data['country_area'] = country_area_data.get(
'lname',
country_area_data.get('name', country_area))
city = address['city']
key = '%s/%s/%s' % (country_code, country_area, city)
city_data = database.get(key)
if city_data:
cleaned_data['city'] = city_data.get(
'lname',
city_data.get('name', city))
city_area = address['city_area']
key = '%s/%s/%s/%s' % (
country_code, country_area, city, city_area)
city_area_data = database.get(key)
if city_area_data:
                cleaned_data['city_area'] = city_area_data.get(
'lname',
city_area_data.get('name', city_area))
return cleaned_data
|
the-stack_0_25401
|
#!/usr/bin/env python3
import sys
import json
import subprocess
import webbrowser
import tkinter as tk
from tkinter import StringVar, IntVar, DoubleVar, LEFT, RIGHT, BOTH, Toplevel
from tkinter.ttk import *
from tkinter import font as tkFont
from tkinter.messagebox import showinfo, showwarning, showerror
from tkinter.filedialog import askdirectory, askopenfilename
from config_default import configs
from constants import *
# This import may cause an unexpected crash on Mac due to an IDLE x Tkinter
# conflict (it should be fixed in the latest python3.9
# [https://www.python.org/download/mac/tcltk/], but I still encounter the
# issue); the specific reason remains unknown.
# from synthesization import synthesize
class ConfigGUI():
def __init__(self, configs):
# GUI window initialization
self.root = tk.Tk()
self.root.title("Bilingual Danmaku Synthesizer")
self.root.configure(bg=GUI_THEME_COLOR)
self.default_configs = configs
# Constants of font/size/dimension
self.helv_bold = tkFont.Font(family='Helvetica', weight='bold')
self.frame_padx = 10
self.lab_w = 15 # width of label
self.btn_w = 15 # width of button component
self.ent_w = 35 # width of entry component
self.top_pady = (10, 0)
self.btm_pady = (0, 10)
self.btn_padx = 15 # padx for the action buttons
# Build variables in input configs
self.video_path = StringVar(self.root, name=VIDEO_PATH)
self.audio_path = StringVar(self.root, name=AUDIO_PATH)
self.file_path = StringVar(self.root, name=FILE_PATH)
# Build variables in danmaku configs
self.CN_font_path = StringVar(self.root, name=FONTS + "_cn")
self.JP_font_path = StringVar(self.root, name=FONTS + "_jp")
self.EN_font_path = StringVar(self.root, name=FONTS + "_eng")
self.fontsize = DoubleVar(self.root, name=FONTSIZE)
self.comment_color = StringVar(self.root, name=COMMENT_COLOR)
self.translation_color = StringVar(self.root, name=TRANSLATION_COLOR)
self.duration = DoubleVar(self.root, name=DURATION)
self.fps = DoubleVar(self.root, name=FPS)
self.bkgd_r = DoubleVar(self.root, name="bkgd_r")
self.bkgd_g = DoubleVar(self.root, name="bkgd_g")
self.bkgd_b = DoubleVar(self.root, name="bkgd_b")
self.background_opacity = DoubleVar(self.root, name=BACKGROUND_OPACITY)
self.coverage = DoubleVar(self.root, name=COVERAGE)
self.time_range_beg = IntVar(self.root, name=TIME_RANGE + "_beg")
self.time_range_end = IntVar(self.root, name=TIME_RANGE + "_end")
# Build variables in output configs
self.codec = StringVar(self.root, name=CODEC)
self.bitrate = StringVar(self.root, name=BITRATE)
self.threads = IntVar(self.root, name=THREADS)
self.video_name = StringVar(self.root, name=VIDEO_NAME)
# Initialize variables
self.reset_vars()
# Notebook configs
self.add_config_notebook()
# Action buttons
self.add_action_buttons()
def add_config_notebook(self):
"""Add the notebook display different configs in various tabs."""
# Build notebook for configs
self.notebook = Notebook(self.root)
# Build GUI for different config tabs
self.input_configs = Frame(self.notebook)
self.build_input_config_ui()
self.danmaku_configs = Frame(self.notebook)
self.build_danmaku_config_ui()
self.output_configs = Frame(self.notebook)
self.build_output_config_ui()
# Build tabs
self.notebook.add(self.input_configs, text="Input Configs")
self.notebook.add(self.danmaku_configs, text="Danmaku Configs")
self.notebook.add(self.output_configs, text="Output Configs")
self.notebook.pack()
def add_action_buttons(self):
"""Add several action buttons outside the notebook."""
# Help and Reset button
Button(self.root, text="Help", command=self.help_support)\
.pack(side=LEFT, fill=BOTH, padx=self.btn_padx, pady=self.btm_pady)
Button(self.root, text="Reset All Configs", command=self.reset_vars)\
.pack(side=LEFT, fill=BOTH, padx=self.btn_padx, pady=self.btm_pady)
# Start button
Button(self.root, text="Start Synthesization", command=self.start_synthesization)\
.pack(side=RIGHT, fill=BOTH, padx=self.btn_padx, pady=self.btm_pady)
def help_support(self):
"""Function of providing window of help and support."""
helpWindow = tk.Toplevel(self.root)
helpWindow.title("Help and Support")
helpWindow.configure(bg=GUI_THEME_COLOR)
helpLinks = Frame(helpWindow)
helpLinks.pack(padx=self.frame_padx)
# padding of the hyperlinks
link_padx = 15
link_pady = 10
Label(helpLinks, text="Source Page: ", width=self.lab_w).grid(row=0, column=0, pady=link_pady)
sourceLink = tk.Label(helpLinks, text="Github Page", fg="blue", cursor="hand2")
sourceLink.grid(row=0, column=1, padx=link_padx, pady=link_pady)
sourceLink.bind("<Button-1>", lambda e: self.callback("https://github.com/yanyiju/bilingual-danmaku-synthesizer"))
Label(helpLinks, text="README Guide: ", width=self.lab_w).grid(row=1, column=0, pady=link_pady)
readmeLink = tk.Label(helpLinks, text="README(中文)", fg="blue", cursor="hand2")
readmeLink.grid(row=1, column=1, padx=link_padx, pady=link_pady)
readmeLink.bind("<Button-1>", lambda e: self.callback("https://github.com/yanyiju/bilingual-danmaku-synthesizer/blob/main/README.md"))
Label(helpLinks, text="Report Issue: ", width=self.lab_w).grid(row=2, column=0, pady=link_pady)
readmeLink = tk.Label(helpLinks, text="Github Issue Page", fg="blue", cursor="hand2")
readmeLink.grid(row=2, column=1, padx=link_padx, pady=link_pady)
readmeLink.bind("<Button-1>", lambda e: self.callback("https://github.com/yanyiju/bilingual-danmaku-synthesizer/issues"))
Label(helpWindow, text="MIT License © Copyright 2021 Yijun Yan").pack(pady=link_pady)
def reset_vars(self):
"""Function for resetting all variables under root."""
configs = self.default_configs
# Default input configs
self.root.setvar(name=VIDEO_PATH, value=configs[INPUT][VIDEO_PATH])
self.root.setvar(name=AUDIO_PATH, value=configs[INPUT][AUDIO_PATH])
self.root.setvar(name=FILE_PATH, value=configs[INPUT][FILE_PATH])
# Default danmaku configs
self.root.setvar(name=FONTS + "_cn", value=configs[DANMAKU][FONTS]["cn"])
self.root.setvar(name=FONTS + "_jp", value=configs[DANMAKU][FONTS]["jp"])
self.root.setvar(name=FONTS + "_eng", value=configs[DANMAKU][FONTS]["eng"])
self.root.setvar(name=FONTSIZE, value=configs[DANMAKU][FONTSIZE])
self.root.setvar(name=COMMENT_COLOR, value=configs[DANMAKU][COMMENT_COLOR])
self.root.setvar(name=TRANSLATION_COLOR, value=configs[DANMAKU][TRANSLATION_COLOR])
self.root.setvar(name=DURATION, value=configs[DANMAKU][DURATION])
self.root.setvar(name=FPS, value=configs[DANMAKU][FPS])
self.root.setvar(name="bkgd_r", value=configs[DANMAKU][BACKGROUND_RGB][0])
self.root.setvar(name="bkgd_g", value=configs[DANMAKU][BACKGROUND_RGB][1])
self.root.setvar(name="bkgd_b", value=configs[DANMAKU][BACKGROUND_RGB][2])
self.root.setvar(name=BACKGROUND_OPACITY, value=configs[DANMAKU][BACKGROUND_OPACITY])
self.root.setvar(name=COVERAGE, value=configs[DANMAKU][COVERAGE])
self.root.setvar(name=TIME_RANGE + "_beg", value=None)
self.root.setvar(name=TIME_RANGE + "_end", value=None)
# Default output configs
self.root.setvar(name=CODEC, value=configs[OUTPUT][CODEC])
self.root.setvar(name=BITRATE, value=configs[OUTPUT][BITRATE])
self.root.setvar(name=THREADS, value=configs[OUTPUT][THREADS])
self.root.setvar(name=VIDEO_NAME, value=configs[OUTPUT][VIDEO_NAME])
def build_input_config_ui(self):
"""Build the UI frame for the input configs."""
# Input property::video_path
Label(self.input_configs, text="Video Path: ", width=self.lab_w)\
.grid(row=0, column=0, padx=self.frame_padx, pady=self.top_pady)
Entry(self.input_configs, textvariable=self.video_path, width=self.ent_w)\
.grid(row=0, column=1, pady=self.top_pady)
Button(self.input_configs, text="Choose video", width=self.btn_w, command=self.getSelFilenameFunc(self.video_path))\
.grid(row=0, column=2, padx=self.btn_padx, pady=self.top_pady)
# Input property::audio_path
Label(self.input_configs, text="Audio Path: ", width=self.lab_w)\
.grid(row=1, column=0, padx=self.frame_padx)
Entry(self.input_configs, textvariable=self.audio_path, width=self.ent_w)\
.grid(row=1, column=1)
Button(self.input_configs, text="Choose audio", width=self.btn_w, command=self.getSelFilenameFunc(self.audio_path))\
.grid(row=1, column=2)
# Input property::file_path
Label(self.input_configs, text="TXT File Path: ", width=self.lab_w)\
.grid(row=2, column=0, padx=self.frame_padx)
Entry(self.input_configs, textvariable=self.file_path, width=self.ent_w)\
.grid(row=2, column=1)
Button(self.input_configs, text="Choose file directory", width=self.btn_w, command=self.getSelPathFunc(self.file_path))\
.grid(row=2, column=2)
def build_danmaku_config_ui(self):
"""Build the UI frame for the danmaku configs."""
# Danmaku property::fonts::cn
Label(self.danmaku_configs, text="Chinese Font: ", width=self.lab_w)\
.grid(row=0, column=0, padx=self.frame_padx, pady=self.top_pady)
Entry(self.danmaku_configs, textvariable=self.CN_font_path, width=self.ent_w)\
.grid(row=0, column=1, pady=self.top_pady)
Button(self.danmaku_configs, text="Choose Font File", width=self.btn_w, command=self.getSelFilenameFunc(self.CN_font_path))\
.grid(row=0, column=2, padx=self.btn_padx, pady=self.top_pady)
# Danmaku property::fonts::jp
Label(self.danmaku_configs, text="Japanese Font: ", width=self.lab_w)\
.grid(row=1, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.JP_font_path, width=self.ent_w)\
.grid(row=1, column=1)
Button(self.danmaku_configs, text="Choose Font File", width=self.btn_w, command=self.getSelFilenameFunc(self.JP_font_path))\
.grid(row=1, column=2)
# Danmaku property::fonts::eng
Label(self.danmaku_configs, text="English Font: ", width=self.lab_w)\
.grid(row=2, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.EN_font_path, width=self.ent_w)\
.grid(row=2, column=1)
Button(self.danmaku_configs, text="Choose Font File", width=self.btn_w, command=self.getSelFilenameFunc(self.EN_font_path))\
.grid(row=2, column=2)
# Danmaku property::fontsize
Label(self.danmaku_configs, text="Fontsize: ", width=self.lab_w).grid(row=3, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.fontsize, width=self.ent_w).grid(row=3, column=1)
# Danmaku property::comment_color
Label(self.danmaku_configs, text="Comment color: ", width=self.lab_w).grid(row=4, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.comment_color, width=self.ent_w).grid(row=4, column=1)
# Danmaku property::translation_color
Label(self.danmaku_configs, text="Translation color: ", width=self.lab_w).grid(row=5, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.translation_color, width=self.ent_w).grid(row=5, column=1)
# Danmaku property::duration
Label(self.danmaku_configs, text="Duration: ", width=self.lab_w).grid(row=6, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.duration, width=self.ent_w).grid(row=6, column=1)
# Danmaku property::fps
Label(self.danmaku_configs, text="FPS: ", width=self.lab_w).grid(row=7, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.fps, width=self.ent_w).grid(row=7, column=1)
# Danmaku property::background_rgb
Label(self.danmaku_configs, text="Background RGB: ", width=self.lab_w).grid(row=8, column=0, padx=self.frame_padx)
rgb_entry = Frame(self.danmaku_configs)
Label(rgb_entry, text="R").pack(side=LEFT)
Entry(rgb_entry, textvariable=self.bkgd_r, width=5).pack(side=LEFT)
Label(rgb_entry, text="G").pack(side=LEFT)
Entry(rgb_entry, textvariable=self.bkgd_g, width=5).pack(side=LEFT)
Label(rgb_entry, text="B").pack(side=LEFT)
Entry(rgb_entry, textvariable=self.bkgd_b, width=5).pack(side=LEFT)
rgb_entry.grid(row=8, column=1)
# Danmaku property::background_opacity
Label(self.danmaku_configs, text="Background Opacity: ", width=self.lab_w).grid(row=9, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.background_opacity, width=self.ent_w).grid(row=9, column=1)
# Danmaku property::coverage
Label(self.danmaku_configs, text="Coverage: ", width=self.lab_w).grid(row=10, column=0, padx=self.frame_padx)
Entry(self.danmaku_configs, textvariable=self.coverage, width=self.ent_w).grid(row=10, column=1)
        # Danmaku property::time_range
Label(self.danmaku_configs, text="Time Range: ", width=self.lab_w).grid(row=11, column=0, padx=self.frame_padx)
range_entry = Frame(self.danmaku_configs)
Label(range_entry, text="from").pack(side=LEFT)
Entry(range_entry, textvariable=self.time_range_beg, width=5).pack(side=LEFT)
Label(range_entry, text="to").pack(side=LEFT)
Entry(range_entry, textvariable=self.time_range_end, width=5).pack(side=LEFT)
range_entry.grid(row=11, column=1)
def build_output_config_ui(self):
"""Build the UI frame for the output configs."""
# Output property::codec
Label(self.output_configs, text="Codec: ", width=self.lab_w).grid(row=0, column=0, padx=self.frame_padx, pady=self.top_pady)
Entry(self.output_configs, textvariable=self.codec, width=self.ent_w).grid(row=0, column=1, pady=self.top_pady)
# Output property::bitrate
Label(self.output_configs, text="Bitrate: ", width=self.lab_w).grid(row=1, column=0, padx=self.frame_padx)
Entry(self.output_configs, textvariable=self.bitrate, width=self.ent_w).grid(row=1, column=1)
# Output property::threads
Label(self.output_configs, text="Threads: ", width=self.lab_w).grid(row=2, column=0, padx=self.frame_padx)
Entry(self.output_configs, textvariable=self.threads, width=self.ent_w).grid(row=2, column=1)
# Output property::video_name
Label(self.output_configs, text="Video Name: ", width=self.lab_w).grid(row=3, column=0, padx=self.frame_padx)
Entry(self.output_configs, textvariable=self.video_name, width=self.ent_w).grid(row=3, column=1)
def start_synthesization(self):
"""Function combined with the main action button."""
# showinfo(title="Start Synthesization", message="Your request is being processed. Please wait for several minutes.")
try:
self.openConfirmWindow(self.getFinalConfigs())
except Exception as e:
showerror(message=str(e))
def openConfirmWindow(self, configs):
"""Open a new window for user to confirm."""
confirmWindow = tk.Toplevel(self.root)
confirmWindow.title("Request Confirmation")
def getSynthesizeFunc(configs):
"""Return the synthesize function with user's configs."""
def synthesize_func():
# close the confirmation window
confirmWindow.destroy()
# dump the final configs into a json file
with open(CONFIG_GUI_PATH, 'w') as fp:
json.dump(configs, fp, indent=4)
# use subprocess to work around instead of directly using synthesize
subprocess.Popen("python3 app_gui.py", stdout=sys.stdout, shell=True) # unsynchronized cmd
# subprocess.call("python3 app_gui.py", shell=True) # synchronized cmd
return synthesize_func
# A Label widget to show in toplevel
tk.Label(confirmWindow, text="Are you good to go with the following configs?", font=self.helv_bold).pack()
Label(confirmWindow, text=json.dumps(configs, indent=4)).pack(padx=10, pady=10)
tk.Button(confirmWindow, text="I Confirmed", font=self.helv_bold, command=getSynthesizeFunc(configs)).pack(pady=self.btm_pady)
def getSelPathFunc(self, path):
"""Return path selection function."""
def selectPath():
path_ = askdirectory()
if path_ != "":
path.set(path_)
return selectPath
def getSelFilenameFunc(self, filename):
"""Return file selection function."""
def selectFilename():
filename_ = askopenfilename()
if filename_ != "":
filename.set(filename_)
return selectFilename
def getFinalConfigs(self):
"""Return the final configs after user submission."""
final_configs = dict(self.default_configs)
# Update variables in input configs
final_configs[INPUT][VIDEO_PATH] = self.root.getvar(name=VIDEO_PATH)
final_configs[INPUT][AUDIO_PATH] = self.root.getvar(name=AUDIO_PATH)
final_configs[INPUT][FILE_PATH] = self.root.getvar(name=FILE_PATH)
# Update variables in danmaku configs
final_configs[DANMAKU][FONTS]["cn"] = self.root.getvar(name=FONTS + "_cn")
final_configs[DANMAKU][FONTS]["jp"] = self.root.getvar(name=FONTS + "_jp")
final_configs[DANMAKU][FONTS]["eng"] = self.root.getvar(name=FONTS + "_eng")
final_configs[DANMAKU][FONTSIZE] = self.root.getvar(name=FONTSIZE)
final_configs[DANMAKU][COMMENT_COLOR] = self.root.getvar(name=COMMENT_COLOR)
final_configs[DANMAKU][TRANSLATION_COLOR] = self.root.getvar(name=TRANSLATION_COLOR)
final_configs[DANMAKU][DURATION] = self.root.getvar(name=DURATION)
final_configs[DANMAKU][FPS] = self.root.getvar(name=FPS)
final_configs[DANMAKU][BACKGROUND_RGB] = [\
self.root.getvar(name="bkgd_r"), \
self.root.getvar(name="bkgd_g"), \
self.root.getvar(name="bkgd_b") \
]
final_configs[DANMAKU][BACKGROUND_OPACITY] = self.root.getvar(name=BACKGROUND_OPACITY)
final_configs[DANMAKU][COVERAGE] = self.root.getvar(name=COVERAGE)
final_configs[DANMAKU][TIME_RANGE] = [\
self.root.getvar(name=TIME_RANGE + "_beg"), \
self.root.getvar(name=TIME_RANGE + "_end") \
]
# Update variables in output configs
final_configs[OUTPUT][CODEC] = self.root.getvar(name=CODEC)
final_configs[OUTPUT][BITRATE] = self.root.getvar(name=BITRATE)
final_configs[OUTPUT][THREADS] = self.root.getvar(name=THREADS)
final_configs[OUTPUT][VIDEO_NAME] = self.root.getvar(name=VIDEO_NAME)
return final_configs
def callback(self, url):
"""Used to open hyperlinks."""
webbrowser.open_new(url)
configGUI = ConfigGUI(configs)
configGUI.root.mainloop()
|
the-stack_0_25402
|
#!/bin/python3
# -*- coding: utf-8 -*-
"""
Main module -- nutshell.nutshell
==================================
This module contains ProductServer class, which receives product
requests and forwards them to product generators. A ProductServer
instance also manages disk resources defined in
:ref:`configuration`.
The module uses :any:`nutshell.product` for defining products (:any:`nutshell.product.Info`)
and :any:`nutshell.request` for generating them using :any:`nutshell.request.Generator` .
HTTP server provided by :any:`nutshell.httpd` essentially forwards
HTTP requests to :any:`nutshell.ProductServer`.
"""
__version__ = '1.0'
__author__ = '[email protected]'
import os
import time
import subprocess # for shell escape
from pathlib import Path
import shutil # for copy cmd only...
from http import HTTPStatus
#import http.server
#HTTPresponses = http.server.SimpleHTTPRequestHandler.responses
import logging
logging.basicConfig(format='%(levelname)s\t %(name)s: %(message)s')
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
#logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
#logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s : %(message)s', datefmt='%Y%m%d%H:%M:%S')
from . import nutils
from . import product
from . import request
class ProductServer:
"""Service designed for generating image and data products served as files
"""
PRODUCT_ROOT = '.'
CACHE_ROOT = '.'
STORAGE_ROOT = '.'
TIME_DIR_SYNTAX = '{YEAR}/{MONTH}/{DAY}'
SHELL_GENERATOR_SCRIPT = 'generate.sh'
SHELL_INPUT_SCRIPT = 'input.sh'
TIMEOUT = 90
# HTTP Server Options (forward defs HTTP server, so perhaps later moved to NutServer )
HTTP_PORT = 8088
HTTP_NAME = ''
HTTP_PREFIX = 'nutshell/' # TODO
HTTP_ROOT = '.'
#HTML_TEMPLATE = 'template.html'
HTML_TEMPLATE = 'index.html'
stdout = subprocess.PIPE
stderr = subprocess.PIPE
#verbosity = 5
logger = None
counter = 0
#error_code_regexp = re.compile("^\s*([0-9]+)\\.(zip|gz)$")
supported_instructions = ['DELETE','EXISTS','MAKE','GENERATE','INPUTS','SHORTCUT','LATEST','LINK','MOVE','COPY']
def init_path(self, dirname, verify=False):
""" Expand relative path to absolute, optionally check that exists. """
#if (hasattr(self, dirname)):
path = Path(getattr(self, dirname)).absolute()
self.logger.warning(' {0} => {1}'.format(dirname, path))
if (verify) and not (path.exists()):
raise FileNotFoundError(__name__ + str(path))
setattr(self, dirname, str(path)) # TODO -> Path obj
# #else:
# raise KeyError
def __init__(self, conffile = ''):
self.logger = logging.getLogger("NutShell2")
if (conffile):
self.read_conf(conffile)
if __name__ == '__main__':
self.stdout = os.sys.stdout # discarded
self.stderr = os.sys.stderr
# self._init_dir('PRODUCT_ROOT')
# self._init_dir('CACHE_ROOT')
# self._init_dir('HTTP_ROOT') # not here!
# self._init_dir('HTTP_ROOT'+'/'+'HTML_TEMPLATE') # not here!
#def read_conf(self, conffile = 'nutshell.cnf', strict=True):
def read_conf(self, conffile = None):
"""
        Read the given conf file, if it exists. Raise an error if strict.
        The entries are copied to the respective members of self.
"""
strict = True
if not conffile:
conffile = 'nutshell.cnf'
strict = False
if (os.path.exists(conffile)):
self.logger.info("Reading conf file {0} ".format(conffile))
result = nutils.read_conf(conffile)
# print(result)
nutils.set_entries(self, result)
return True
elif strict:
self.logger.error("Conf file not found: " + conffile)
raise FileNotFoundError("Conf file not found: ", conffile)
else:
self.logger.debug("Local conf file not found (ok): " + conffile)
#print ("Conf file not found: ", conffile)
return False
    # Rename... Misleading name.
def get_status(self):
return nutils.get_entries(self)
    def get_cache_root(self):
        """Return the absolute path (Path) to the CACHE_ROOT directory."""
return Path(self.CACHE_ROOT).absolute()
    def get_storage_root(self):
        """Return the absolute path (Path) to the STORAGE_ROOT directory."""
return Path(self.STORAGE_ROOT).absolute()
def get_product_dir(self, product_info):
"""Return the directory containing product generator script
(generate.sh) and possible configurations etc"""
return product_info.PRODUCT_ID.replace('.', os.sep)
def get_time_dir(self, timestamp):
if (type(timestamp) != str):
timestamp = timestamp.TIMESTAMP # product_info.TIMESTAMP
if (timestamp):
if (timestamp == 'LATEST'):
return ''
else:
timevars = product.parse_timestamp2(timestamp)
# print timevars
return self.TIME_DIR_SYNTAX.format(**timevars) # + os.sep
else:
return ''
def get_generator_dir(self, product_info):
path = Path(self.PRODUCT_ROOT, *product_info.PRODUCT_ID.split('.'))
return str(path.absolute())
# return self.PRODUCT_ROOT+os.sep+product_info.PRODUCT_ID.replace('.', os.sep)
# Generalize?
    def ensure_output_dir(self, outdir):
        """
        Create a writable output directory if it does not exist.
        Currently uses mode 0o777 (the umask is cleared temporarily).
        """
# Note: https://docs.python.org/3/library/os.html
# version 3.7: The mode argument no longer affects the file permission bits of newly-created intermediate-level directories
try:
m = os.umask(0)
#os.makedirs(str(outdir), 0o775, True)
os.makedirs(outdir, 0o777, True)
finally:
os.umask(m)
return outdir
#
def get_input_list(self, product_info, directives, log):
""" Used for reading dynamic input configuration generated by input.sh.
directives determine how the product is generated.
"""
# TODO: directives
input_query = request.InputQuery(self, product_info) # TODO: directives
if (not input_query.script.exists()):
log.debug("No input script: {0}".format(input_query.script))
return input_query
# TODO generalize (how)
log.debug(input_query.env)
input_query.run(log_basename = input_query) # ??
if (input_query.returncode == 0):
log.info(type(input_query.stdout))
if (input_query.stdout == ''):
log.warning("empty stdout of input declaration script {0}:".format(input_query.script))
else:
nutils.read_conf_text(input_query.stdout.split('\n'), input_query.inputs)
log.info(input_query.inputs)
else:
            log.warning("input script execution failed with error code={0}: {1}".format(input_query.returncode, input_query.script))
log.warning(input_query.stdout)
log.warning(input_query.stderr)
log.warning(input_query.log)
# else:
# log.critical("input script reported no error info")
return input_query
def retrieve_inputs(self, product_generator):
inputs = {}
if (product_generator.inputs):
product_generator.log.debug('Retrieving inputs for: ' + str(product_generator.path.name))
for i in product_generator.inputs:
#product_generator.log.info('INPUTFILE: ' + i)
input = product_generator.inputs[i] # <filename>.h5
#product_generator.error(i, input)
product_generator.log.info('Make input: {0} ({1})'.format(i, input))
input_prod_info = product.Info(filename = input)
product_generator.log.info('Make input: {0} ({1})'.format(i, input_prod_info.PRODUCT_ID))
#r = self.make_request(input_prod_info, ['MAKE'], [], product_generator.log.getChild("input[{0}]".format(i)))
r = self.make_request(input_prod_info, log = product_generator.log.getChild("input[{0}]".format(i)))
# r = self.make_request(input_prod_info, log = product_generator.log)
if (r.path):
inputs[i] = str(r.path) # sensitive
product_generator.log.debug('Success: ' + str(r.path))
else:
product_generator.log.warning('SKIPPED: ' + i)
if (not inputs):
product_generator.log.warning('All input queries returned empty')
product_generator.inputs = inputs
product_generator.env['INPUTKEYS'] = ','.join(sorted(product_generator.inputs.keys()))
product_generator.env.update(product_generator.inputs)
def query_file(self, pr):
"""
        Check if the file exists or is under generation.
A file is interpreted as being under generation if a corresponding,
"relatively new" empty file exists.
:param pr[nutshell.product.Info]: description of the product (string or nutshell.product.Info)
"""
if (pr.path.exists()):
pr.log.debug('File exists: {0}'.format(pr.path))
stat = pr.path.stat()
age_mins = round((time.time() - stat.st_mtime) / 60)
if (stat.st_size > 0): # Non-empty
pr.product_obj = pr.path
pr.log.info('File found (age {1}mins, size {2}): {0}'.format(pr.path, age_mins, stat.st_size))
pr.set_status(HTTPStatus.OK)
elif (age_mins > 10):
pr.log.warning("Empty file found, but over 10 mins old...")
# set status? WAIT?
else:
pr.log.warning('BUSY (empty file, under generation?)') # TODO raise (prevent deletion)
pr.product_obj = '' # BUSY
total_time = 0
for i in range(1,10):
i = i*i
total_time += i
stat = pr.path.stat()
if (stat.st_size > 0):
pr.set_status(HTTPStatus.OK)
pr.log.info("OK, finally received: {0}".format(pr.path))
return
else:
pr.log.info("Sleep {0} seconds...".format(i))
time.sleep(i)
if (total_time > self.TIMEOUT):
pr.log.warning("timeout ({0}s) exceeded".format(self.TIMEOUT))
break
pr.set_status(HTTPStatus.REQUEST_TIMEOUT)
# pr.set_status(HTTPStatus.SERVICE_UNAVAILABLE) # 503
# return
elif (pr.path_storage.exists()):
pr.log.info('Stored file exists: {0}'.format(pr.path_storage))
pr.log.info('Linking to: {0}'.format(pr.path))
# LINK
# pr.path.symlink_to(pr.path_storage.resolve())
self.ensure_output_dir(pr.path.parent)
nutils.symlink(pr.path, pr.path_storage)
pr.set_status(HTTPStatus.OK)
else:
pr.log.debug('File not found: {0}'.format(pr.path))
if (pr.product_info.TIMESTAMP == 'LATEST'):
pr.log.warning("LATEST-file not found (cannot generate it)")
pr.set_status(HTTPStatus.NOT_FOUND)
# return
def make_prod(self, pr, directives = None, TEST=False):
"""
Main function.
:param pr[nutshell.product.Info]: description of the product
"""
#self.query_file(pr)
#if (pr.status == HTTPStatus.OK):
# pr.returncode = 0
# return
# only TEST at this point
if (pr.script.exists()):
pr.log.debug('Generator script ok: {0}'.format(pr.script))
else:
pr.log.warning('Generator script not found: {0}'.format(pr.script))
# Consider case of copied valid product (without local generator)
pr.path = ''
if (not TEST):
pr.set_status(HTTPStatus.NOT_IMPLEMENTED) # Not Implemented
return
# TODO: if not stream?
pr.log.debug('Ensuring cache dir for: {0}'.format(pr.path))
self.ensure_output_dir(pr.path_tmp.parent)
self.ensure_output_dir(pr.path.parent)
if (not pr.path.exists()):
pr.path.touch()
# Runs input.sh
#if (MAKE or INPUTS or CHECK):
pr.log.debug('Querying input list (dependencies)')
input_info = pr.get_input_list(directives)
if (input_info.returncode != 0):
pr.log.debug('Input script problem, return code: {0}'.format(input_info.returncode))
if (not TEST):
pr.set_status(HTTPStatus.PRECONDITION_FAILED)
pr.remove_files()
return
if (not TEST):
self.retrieve_inputs(pr)
# MAIN
pr.log.info('Generating: {0}'.format(pr.path.name))
pr.log.debug('Environment: {0}'.format(pr.env))
try:
pr.run2(directives)
except KeyboardInterrupt:
pr.log.warning('Hey, HEY! Keyboard interrupt on main level')
pr.status = HTTPStatus.REQUEST_TIMEOUT
pr.remove_files()
raise
if (pr.returncode != 0):
pr.log.error("Error ({0}): '{1}'".format(pr.returncode, pr.error_info))
pr.remove_files()
return pr
if (not pr.path_tmp.exists()):
pr.log.error("generator did not create desired file")
pr.remove_files()
return pr
if (pr.path_tmp.stat().st_size == 0):
pr.log.error("generator failed (empty file intact)")
pr.remove_files()
return pr
pr.log.debug("Finally, move main product from tmp")
if (pr.path_tmp.is_symlink()):
pr.path.unlink()
pr.path.symlink_to(pr.path_tmp.resolve())
else:
pr.path_tmp.replace(pr.path)
        globber = "{0}*".format(pr.path_tmp.stem)  # note: dot omitted on purpose
        pr.log.debug("Move remaining (auxiliary) files from tmp: {0}".format(globber))
#pr.log.warning(pr.path_tmp.parent)
#pr.log.warning(pr.path_tmp.parent.glob(globber))
for p in pr.path_tmp.parent.glob(globber):
pr.log.debug("Moving {0}".format(p))
#pr.log.debug("move {0}".format(p))
p.replace(pr.path.parent.joinpath(p.name))
pr.log.debug("Removing tmp dir: {0}".format(pr.path_tmp.parent))
try:
os.rmdir(pr.path_tmp.parent)
except Exception as err:
pr.log.error("RmDir failed: {0}".format(err))
pr.set_status(HTTPStatus.OK)
def make_request(self, product_info, instructions = ['MAKE'], directives = None, log = None):
"""
Main function.
:param product_info: description of the product (string or nutshell.product.Info)
        :param instructions: what should be done about the product
        (``MAKE``, ``DELETE``, ``CHECK``, ``RETURN``), see :ref:`commands`.
        :param directives: how the product is generated etc
        :param log: optional logging.logger
        :returns: Instance of request.Generator that contains the path of
        the file (if successfully generated) and information about the process.
"""
if (type(instructions) == str):
instructions = instructions.split(',')
if (type(instructions) == list):
#instructions = set(instructions)
instructions = nutils.read_conf_text(instructions)
if (type(directives) == str):
directives = directives.split(',')
if (type(directives) == list):
directives = nutils.read_conf_text(directives)
pr = request.Generator(self, product_info, log) #, instructions, directives, log)
# Future option
#if ('GENERATE' in instructions):
# instructions['DELETE'] = True
# instructions['MAKE'] = True
# Boolean:
LATEST = ('LATEST' in instructions)
SHORTCUT = ('SHORTCUT' in instructions)
LINK = instructions.get('LINK') # in instructions)
COPY = instructions.get('COPY') # in instructions) # directives)
MOVE = instructions.get('MOVE') # in instructions) # directives)
if (SHORTCUT or LATEST or LINK or COPY or MOVE):
instructions['MAKE'] = True
#instructions['CHECK'] = True
# TODO: redesign
DELETE = ('DELETE' in instructions) #or ('GENERATE' in instructions)
EXISTS = ('EXISTS' in instructions)
MAKE = ('MAKE' in instructions)
GENERATE = ('GENERATE' in instructions)
INPUTS = ('INPUTS' in instructions)
TEST = ('CHECK' in instructions)
# LOG = ('LOG' in pr.directives)
# DEBUG = ('DEBUG' in pr.directives)
# MAIN
if (DELETE or GENERATE):
if (pr.path.is_file()):
pr.path.unlink()
if (pr.path.exists()):
pr.log.warning(f"Could not delete file: {pr.path}")
pr.set_status(HTTPStatus.CONFLICT)
else:
pr.set_status(HTTPStatus.OK)
# else ?
if (EXISTS or MAKE):
self.query_file(pr)
"""
            Check if the file exists or is under generation.
A file is interpreted as being under generation if a corresponding,
"relatively new" empty file exists.
"""
if (pr.status == HTTPStatus.OK):
pr.returncode = 0
#GENERATE = False
EXISTS = False # No second check needed
#return
elif MAKE:
GENERATE = True
EXISTS = True
else:
pr.set_status(HTTPStatus.NOT_FOUND)
# MAIN
if (GENERATE):
pr.log.info("Making/generating... {0}".format(pr.path.name))
self.make_prod(pr, directives, TEST)
if (EXISTS):
pr.log.info(f"Exists?... {pr.path.name}")
if (pr.path.exists()):
pr.set_status(HTTPStatus.OK)
else:
pr.set_status(HTTPStatus.NOT_FOUND)
elif (INPUTS):
pr.log.info("Inputs... {0}".format(pr.path.name))
input_info = pr.get_input_list(directives)
print(input_info.inputs)
else:
            pr.log.info(f"No further main instructions for {pr.path.name}")
#pr.log.info("NOT Making... {0}".format(pr.path.name))
if (pr.status != HTTPStatus.OK):
pr.log.warning("Action status: {0} for: {1}".format(pr.status, instructions))
pr.log.warning(pr.status)
pr.log.warning("Action failed: {0}".format(pr.path))
return pr
try:
if SHORTCUT: #and pr.product_info.TIMESTAMP:
pr.log.info('SHORTCUT: {0}'.format(pr.path_static))
self.ensure_output_dir(pr.path_static.parent)
nutils.symlink(pr.path_static, pr.path)
if LATEST:
pr.log.info('LATEST: {0} '.format(pr.path_latest))
self.ensure_output_dir(pr.path_latest.parent)
nutils.symlink(pr.path_latest, pr.path, True)
except Exception as err:
pr.set_status(HTTPStatus.INTERNAL_SERVER_ERROR)
pr.log.warning("Routine linking file failed: {0}".format(err))
try:
if LINK:
#COPY = directives['COPY']
pr.log.info('Linking: {1} <= {0}'.format(pr.path, LINK) )
path = Path(LINK)
if (path.is_dir()): # shutil does not need this...
if (not path.exists()):
self.ensure_output_dir(path)
path = path.joinpath(pr.path.name)
if (path.exists()):
path.unlink()
#shutil.copy(str(pr.path), str(COPY))
nutils.symlink(path, pr.path, True)
if COPY:
#COPY = directives['COPY']
pr.log.info('Copying: {1} <= {0}'.format(pr.path, COPY) )
path = Path(COPY)
if (path.is_dir()): # shutil does not need this here either...
if (not path.exists()):
self.ensure_output_dir(path)
path = path.joinpath(pr.path.name)
if (path.exists()):
path.unlink()
shutil.copy(str(pr.path), str(COPY))
if MOVE:
#MOVE = directives['MOVE']
pr.log.info('Moving: {1} <= {0}'.format(pr.path, MOVE) )
# product_request.path.rename(options.MOVE) does not accept plain dirname
path = Path(MOVE)
if (path.is_dir()): # ...but shutil needs this
if (not path.exists()):
self.ensure_output_dir(path)
path = path.joinpath(pr.path.name)
if (path.exists()):
path.unlink()
shutil.move(str(pr.path), str(MOVE))
except Exception as err:
pr.set_status(HTTPStatus.INTERNAL_SERVER_ERROR)
pr.log.warning("Copying/moving/linking file failed: {0}".format(err))
pr.log.info('Success: {0}'.format(pr.path))
return pr
@classmethod
def get_arg_parser(cls, parser = None):
"""Populates parser with options of this class"""
parser = product.Info.get_arg_parser(parser)
# parser = argparse.ArgumentParser()
#supported_instructions = 'DELETE,EXISTS,MAKE,GENERATE,INPUTS,SHORTCUT,LATEST,LINK,MOVE,COPY'
parser.add_argument("-c", "--conf", dest="CONF",
default=None, # "nutshell.cnf", #ProductServer.CONF_FILE?
help="Read config file",
metavar="<file>")
parser.add_argument("-a", "--instructions", metavar="<string>",
dest="INSTRUCTIONS",
default="",
help=f"Comma-separated string of instructions: {ProductServer.supported_instructions}")
parser.add_argument("-r", "--request", metavar="<string>",
dest="INSTRUCTIONS",
default="",
help="(Deprecating) Use --instructions")
parser.add_argument("-d", "--delete",
dest="DELETE",
action="store_true",
#default=False,
help="Delete product file, same as --instructions DELETE")
parser.add_argument("-e", "--exists",
dest="EXISTS",
action="store_true",
#default=False,
help="Check only if product exists")
parser.add_argument("-i", "--inputList",
dest="INPUTS",
action="store_true",
help="list input for a product, same as --instructions INPUTS")
parser.add_argument("-m", "--make",
dest="MAKE",
action="store_true",
#default=False,
help="Make product, same as --instructions MAKE")
parser.add_argument("-g", "--generate",
dest="GENERATE",
action="store_true",
help="Generate product, same as --instructions DELETE,MAKE")
parser.add_argument("-t", "--timeout",
dest="TIMEOUT",
default=90,
type=int,
help="Time limit for generating or waiting for a product")
return parser
if __name__ == '__main__':
NUTSHELL_DIR = None
if ("NUTSHELL_DIR" in os.environ):
NUTSHELL_DIR = os.environ["NUTSHELL_DIR"]
product_server = ProductServer()
logger = logging.getLogger('NutShell')
logger.setLevel(logging.INFO)
#logger.debug("parsing arguments")
# ProductInfo.get_arg_parser(parser)
parser = ProductServer.get_arg_parser()
#supported_actions = 'DELETE,MAKE,GENERATE,INPUTS,SHORTCUT,LATEST,LINK,MOVE,COPY'
parser.add_argument("PRODUCTS",
nargs='*',
help="Products to be requested")
parser.add_argument("-S", "--shortcut",
dest="SHORTCUT",
action="store_true",
help="Add link from static to timestamped dir, equals -r SHORTCUT")
parser.add_argument("-Z", "--latest",
dest="LATEST",
action="store_true",
help="Add link with timestamp replaced with 'LATEST', same as -r LATEST")
parser.add_argument("-M", "--move", metavar="<path>",
dest="MOVE",
default='',
help="Move resulting file, equals -r MOVE=<path>")
parser.add_argument("-L", "--link", metavar="<path>",
dest="LINK",
default='',
help="Link resulting file, equals -r LINK=<path>")
parser.add_argument("-C", "--copy", metavar="<path>",
dest="COPY",
help="Copy resulting file, equals -r COPY=<path>")
# Raise? Could be http default directives?
parser.add_argument("-D", "--directives",
dest="DIRECTIVES",
default='',
help="pipe-separated app instructions: DOUBLE|SCHEME=TILE|...")
options = parser.parse_args()
if (not options):
parser.print_help()
exit(1)
if (options.VERBOSE):
options.LOG_LEVEL = "DEBUG"
if (options.LOG_LEVEL):
if hasattr(logging, options.LOG_LEVEL):
logger.setLevel(getattr(logging, options.LOG_LEVEL))
else:
logger.setLevel(int(options.LOG_LEVEL))
logger.debug(options)
logger.debug('NUTSHELL_DIR={0}'.format(NUTSHELL_DIR))
product_server.verbosity = int(options.VERBOSE)
product_server.logger = logger # NEW
product_info = product.Info()
if (options.PRODUCT):
options.PRODUCTS.append(options.PRODUCT)
if (not options.PRODUCTS):
logger.debug('Product(s) not defined')
parser.print_help()
exit(1)
logger.info('Products: {0}'.format(options.PRODUCTS))
if (options.CONF):
product_server.read_conf(options.CONF)
else:
if not product_server.read_conf(): # "nutshell.cnf", False): # Local, lenient
if NUTSHELL_DIR:
#logger.info('Reading ' + NUTSHELL_DIR + "/nutshell.cnf")
product_server.read_conf(NUTSHELL_DIR + "/nutshell.cnf") #, False)
logger.debug(product_server.get_status())
instructions = {} # []
if (options.INSTRUCTIONS):
# instructions.extend(options.INSTRUCTIONS.split(','))
instructions = nutils.read_conf_text(options.INSTRUCTIONS.split(',')) # No values using ','?
# , 'TEST'
#for i in ['DELETE', 'MAKE', 'GENERATE', 'INPUTS', 'SHORTCUT', 'LATEST', 'LINK', 'MOVE', 'COPY']:
for i in ProductServer.supported_instructions:
value = getattr(options, i)
if (value): # Boolean ok, no numeric args expected, especially not zero
instructions[i] = value
if (not instructions):
instructions['MAKE'] = True
logger.info(f'Requests: {instructions}')
directives = {}
if (options.DIRECTIVES):
        # directives = nutils.read_conf_text(options.DIRECTIVES.split(','))  # what about a comma inside an argument?
directives = nutils.read_conf_text(options.DIRECTIVES.split('|'))
logger.info('Directives: {0}'.format(directives))
fail=False
for PRODUCT in options.PRODUCTS:
logger.info('PRODUCT={0}'.format(PRODUCT))
product_info.set_product(filename = PRODUCT)
if (product_info.PRODUCT_ID and product_info.FORMAT):
product_request = product_server.make_request(product_info, instructions, directives) #, logger.getChild("make_request")
if ('INPUTS' in instructions): # or (options.VERBOSE > 6):
#nutils.print_dict(product_request.inputs)
logger.warning("Inputs:")
logger.info(product_request.inputs)
logger.info(product_request.status)
if (product_request.status != HTTPStatus.OK):
fail = True
else:
logger.warning('Could not parse product')
fail = True
#exit(1)
if (fail):
exit(1)
else:
exit(0)
|
the-stack_0_25403
|
import logging
from scapy.all import sendp, Ether, IP, UDP, Raw, conf
def wake(mac):
if logging.getLogger().getEffectiveLevel() > logging.DEBUG:
conf.verb = 0
logging.info("Sending WOL packet to %s" % (mac, ))
    # Build the WOL magic packet: 6 x 0xFF followed by the MAC repeated 16 times.
    # str.decode("hex") exists only on Python 2; bytes.fromhex also works on Python 3.
    mac_bytes = bytes.fromhex(mac.replace(":", "").replace("-", ""))
    sendp(Ether(dst='ff:ff:ff:ff:ff:ff') / IP(dst='255.255.255.255', flags="DF") / UDP(dport=9, sport=39227) / Raw(b'\xff' * 6 + mac_bytes * 16))
if __name__ == '__main__':
import sys
wake(sys.argv[1])
|
the-stack_0_25404
|
import random
import string
from model.contact import Contact
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # The getopt module has no usage() helper; report the error and a usage hint instead.
    print(err)
    print("Usage: %s [-n <number of contacts>] [-f <output file>]" % sys.argv[0])
    sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " " * 10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def random_digits(maxlen):
digits = string.digits
return "".join([random.choice(digits) for i in range(random.randrange(maxlen))])
def random_date():
return str(random.randrange(1, 12, 1))
def random_year():
return str(random.randrange(1900, 2020, 1))
def random_email(maxlen):
symbols = string.ascii_letters + string.digits
email_name = "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
return email_name + ".gmail.com"
def random_month():
months = ["September", "October", "November", "May", "June"]
month = random.choice(months)
return month
testdata = [
Contact(first_name=random_string("FN", 10), middle_name=random_string("MN", 5), last_name=random_string("LN", 15),
nickname=random_string("N", 10), title=random_string("T", 10), company=random_string("C", 10),
address=random_string("C", 20), home_phone=random_digits(11), mobile_phone=random_digits(11),
work_phone=random_digits(13), fax=random_digits(9), email=random_email(10),
home_page=random_string("HP", 10), address2=random_string("C", 25),
secondary_phone=random_digits(12), notes=random_string("C", 25), bday=random_date(),
bmonth=random_month(), byear=random_year(), aday=random_date(), amonth=random_month(),
ayear=random_year())
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
|
the-stack_0_25407
|
from __future__ import unicode_literals
from tiny_test_fw import Utility
import os
import ttfw_idf
def get_socket_msgs(i):
msg = 'Socket message S{}'.format(i)
return ['uart_select_example: {} bytes were written to socket: {}'.format(len(msg), msg),
'uart_select_example: {} bytes were received through socket: {}'.format(len(msg), msg)]
def get_uart_msgs(i):
msg = 'UART message U{}'.format(i)
return ['uart_select_example: {} bytes were sent to UART1: {}'.format(len(msg), msg),
'uart_select_example: {} bytes were received through UART1: {}'.format(len(msg), msg)]
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC')
def test_examples_select(env, extra_data):
dut = env.get_dut('select', 'examples/system/select')
dut.start_app()
dut.expect('cpu_start: Starting scheduler', timeout=30)
exp_list = []
for i in range(1, 10):
exp_list += get_socket_msgs(i)
exp_list += get_uart_msgs(i)
Utility.console_log('Expecting:{}{}'.format(os.linesep, os.linesep.join(exp_list)))
dut.expect_all(*exp_list, timeout=60)
if __name__ == '__main__':
test_examples_select()
|
the-stack_0_25410
|
from airflow.decorators import dag, task
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.providers.postgres.operators.postgres import PostgresOperator
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Variable
from datetime import datetime, timedelta
from acona_postgres_tools import acona_truncate_table, acona_data_write, acona_fetch_data, acona_fetch_one
# [END import_module]
# [START default_args]
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
'owner': 'airflow'
}
# [END default_args]
# [START instantiate_dag]
@dag(
default_args=default_args,
start_date=days_ago(2),
tags=['etl', 'humantxt'],
schedule_interval='30 5 * * 0')
def acona_humantxt():
# [END instantiate_dag]
# [START notify]
@task()
def ht_extract(domain):
"""
#### Get data for current date from humantxt API
"""
import json
import requests
import os
import urllib.parse
import pandas as pd
import re
WAREHOUSE_TOKEN = Variable.get("WAREHOUSE_TOKEN")
WAREHOUSE_URL = Variable.get("WAREHOUSE_URL")
# Load urls for domain
sql = "select url from internal.urls where domain_id = {} and status = 't'".format(domain[1])
urls = acona_fetch_data(sql)
#[('https://www.acona.app/about',), ('https://www.acona.app/metrics',), ('https://www.acona.app/legal',), ('https://www.acona.app/info',), ('https://www.acona.app/',)]
date = (datetime.now()).strftime('%Y-%m-%d %H:%M:%S')
#date = '2021-10-22 18:45:01'
for url in urls:
url = url[0]
HEADERS = {
'Content-Type': 'application/json; charset=utf-8'
}
HUMANTXT_URL = 'https://humantxt.acona.app/'
#URL = urllib.parse.quote(url, safe='')
request_url = HUMANTXT_URL + '?url=' + url + '&format=html'
r = requests.get(request_url, headers=HEADERS)
result = False
if r.status_code == 200:
result = r.json()
if result and result.get('error') != 'true':
# title
if result.get('title'):
value = result.get('title')
else:
value = ''
table = 'api.var_ht_title'
data = {'value': [value], 'datetime': [date], 'url': [url]}
dataf = pd.DataFrame(data)
acona_data_write(table, dataf)
# title char count
if result.get('title'):
value = len(result.get('title'))
else:
value = 0
table = 'api.var_ht_title_char_count'
data = {'value': [value], 'datetime': [date], 'url': [url]}
dataf = pd.DataFrame(data)
acona_data_write(table, dataf)
# content
if result.get('content'):
value = result.get('content')
else:
value = 0
table = 'api.var_ht_content_html'
data = {'value': [value], 'datetime': [date], 'url': [url]}
dataf = pd.DataFrame(data)
acona_data_write(table, dataf)
# excerpt
if result.get('excerpt'):
value = result.get('excerpt')
else:
value = 0
table = 'api.var_ht_excerpt'
data = {'value': [value], 'datetime': [date], 'url': [url]}
dataf = pd.DataFrame(data)
acona_data_write(table, dataf)
# word count
if result.get('word_count'):
value = result.get('word_count')
else:
value = 0
table = 'api.var_ht_word_count'
data = {'value': [value], 'datetime': [date], 'url': [url]}
dataf = pd.DataFrame(data)
acona_data_write(table, dataf)
# write import dates
data = {'variable': ['humantxt'], 'date': [date], 'url': [url]}
dataf = pd.DataFrame(data)
acona_data_write('internal.var_calc_dates', dataf)
# [END extract]
# [START main_flow]
# Load domains
sql = "select domain_name, domain_id from internal.domains where status = 't'"
domains = acona_fetch_data(sql)
# Loop over domains
if domains:
for domain in domains:
ht_extract(domain)
# [END main_flow]
# [START dag_invocation]
acona_humantxt = acona_humantxt()
# [END dag_invocation]
|
the-stack_0_25411
|
"""
Widgets for plotting multi-channel signals.
"""
import pyqtgraph
class SignalWidget(pyqtgraph.GraphicsLayoutWidget):
"""
Scrolling oscilloscope-like widget for displaying real-time signals.
Intended for multi-channel viewing, each channel gets its own row in the
widget, and all channels share y-axis zoom.
"""
def __init__(self):
super(SignalWidget, self).__init__()
self.plot_items = []
self.plot_data_items = []
self.n_channels = 0
self.setBackground(None)
def plot(self, data):
"""
Adds a window of data to the widget.
Previous windows are scrolled to the left, and the new data is added to
the end.
Parameters
----------
data : ndarray, shape = (n_channels, n_samples)
Window of data to add to the end of the currently-shown data.
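        Examples
        --------
        A minimal usage sketch (assumes a Qt application is already running;
        the data below is an arbitrary illustration, not part of this module):
        >>> import numpy as np
        >>> widget = SignalWidget()
        >>> widget.plot(np.random.randn(4, 500))  # 4 channels, 500 samples each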
"""
nch, nsamp = data.shape
if nch != self.n_channels:
self.n_channels = nch
self._update_num_channels()
for i, pdi in enumerate(self.plot_data_items):
pdi.setData(data[i])
def _update_num_channels(self):
self.clear()
self.plot_items = []
self.plot_data_items = []
pen = _MultiPen(self.n_channels)
for i in range(self.n_channels):
plot_item = self.addPlot(row=i, col=0)
plot_data_item = plot_item.plot(pen=pen.get_pen(i), antialias=True)
plot_item.showAxis('bottom', False)
plot_item.showGrid(y=True, alpha=0.5)
plot_item.setMouseEnabled(x=False)
plot_item.setMenuEnabled(False)
if self.n_channels > 1:
label = "ch {}".format(i)
plot_item.setLabels(left=label)
if i > 0:
plot_item.setYLink(self.plot_items[0])
self.plot_items.append(plot_item)
self.plot_data_items.append(plot_data_item)
self.plot_items[0].disableAutoRange(pyqtgraph.ViewBox.YAxis)
self.plot_items[0].setYRange(-1, 1)
class _MultiPen(object):
MIN_HUE = 160
HUE_INC = 20
VAL = 200
def __init__(self, n_colors):
self.n_colors = n_colors
self.max_hue = self.MIN_HUE + n_colors*self.HUE_INC
def get_pen(self, index):
return pyqtgraph.intColor(
index, hues=self.n_colors,
minHue=self.MIN_HUE, maxHue=self.max_hue,
minValue=self.VAL, maxValue=self.VAL)
|
the-stack_0_25416
|
"""
3. Word succession (11p)
Write a function sõnade_järgnevus that takes a text as a string argument
and returns a dictionary showing which words follow which words - each
dictionary entry has a word as its key and the set of words that follow it
as its value.
Upper and lower case are not to be distinguished; the resulting dictionary
must contain upper-case words only.
You may assume that
1) the only punctuation marks occurring in the text are the period, comma,
question mark and exclamation mark.
2) punctuation marks always immediately follow the preceding word
3) there is at least one space between any two words.
For example
sõnade_järgnevus("Tere Madis! Tere Tere, kuidas läheb?")
should return
{'TERE' : {'MADIS', 'TERE', 'KUIDAS'},
'MADIS' : {'TERE'},
'KUIDAS': {'LÄHEB'},
'LÄHEB' : set()}
To demonstrate how the function works, the argument text must be read from
the file tekst.txt (encoded in UTF-8).
The dictionary returned by the function must be stored in the variable
sõnade_info, and based on it the words that occur several times in a row in
the text must be printed to the screen (each such word on its own line).
For the example above, TERE should be printed.
"""
from grader import *
from KT2_util import *
def sõnade_järgnevus(tekst):
sonad = [sona.strip(".,?!").upper() for sona in tekst.split()]
tulemus = {}
for sona in sonad:
tulemus[sona] = set()
for i in range(len(sonad)-1):
tulemus[sonad[i]].add(sonad[i+1])
return tulemus
def double_words(contents):
words = sõnade_järgnevus(contents)
return set(word for word in words if word in words[word])
## Function tests
def create_file_make_checker(sample_function, contents):
decorator = create_temporary_file('tekst.txt', contents)
checker = make_checker(sample_function)
return lambda *a, **kw: decorator(checker(*a, **kw))
def function_tests(contents):
function_checker = create_file_make_checker(sõnade_järgnevus, contents)
function_checker("TERE MADIS")
function_checker("Tere tore ole")
function_checker("Tere. tore ole")
function_checker("Tere, tore kasPOLE? Ilm on kOle.")
def description(d):
def inner(f):
setDescription(f, d)
return f
return inner
def variable_tests(contents):
variable_name = "sõnade_info"
function_name = "sõnade_järgnevus"
@test
@description("Sisu {} - Muutuja {} peab olema defineeritud".format(repr(contents), variable_name))
@create_temporary_file('tekst.txt', contents)
def variable_exists(m):
assert hasattr(m, 'module'), "Programmi täitmine ei lõppenud. Failis ei tohiks olla üle input() käski"
assert hasattr(m.module, variable_name), "Peab leiduma funktsioon nimega {name}!".format(name=variable_name, dict=m.module.__dict__)
return getattr(m.module, variable_name)
@test
@description("Sisu {} - Muutuja {} peab olema sõnastik".format(repr(contents), variable_name))
@create_temporary_file('tekst.txt', contents)
def variable_type(m):
variable = variable_exists(m)
assert isinstance(variable, dict), "Muutuja {} peaks olema sõnastik aga oli {}".format(variable_name, type(variable))
@test
@description("Sisu {} - Muutuja {} sisaldab sama mida funktsioon tagastas".format(repr(contents), variable_name))
@create_temporary_file('tekst.txt', contents)
def variable_contents(m):
variable = variable_exists(m)
assert hasattr(m, 'module'), "Programmi täitmine ei lõppenud. Failis ei tohiks olla üle input() käski"
assert hasattr(m.module, function_name), "Peab leiduma funktsioon nimega {name}!".format(name=function_name, dict=m.module.__dict__)
user_function = getattr(m.module, function_name)
expected_value = user_function(contents)
assert variable == expected_value, "Muutuja {} peaks sisaldama {}({})={}, aga sisaldas {}".\
format(variable_name, function_name, contents, expected_value, variable)
def IO_tests(contents):
double = double_words(contents)
@test
@description("Sisu {} - peaks väljastama kõik sõnad hulgast {}".format(repr(contents), double))
@create_temporary_file('tekst.txt', contents)
def io_words(m):
output = m.stdout.read()
words = output.split()
assert all(x in words for x in double), \
"Kõik sõnad hulgast {} peaksid esinema väljundis. Väljundis olevad sõnad: {}".format(double, words)
return words
@test
@description("Sisu {} - peaks väljastama ainult sõnasid hulgast {} ilma kordusteta".format(repr(contents), double))
@create_temporary_file('tekst.txt', contents)
def io_exact(m):
words = io_words(m)
assert len(words) == len(double), "Väljastama peaks sama palju sõnu kui on topelt. Väljastatud sõnad: {}".format(words)
assert set(words) == double, "Väljundis tohiks olla ainult sõnad hulgast {}. Väljastatud sõnade hulk: {}".format(double, set(words))
SAMPLE_TEST = "Tere Madis! Tere Tere, kuidas läheb?"
function_tests(SAMPLE_TEST)
variable_tests(SAMPLE_TEST)
TESTS = [
SAMPLE_TEST,
"LAUSE ILMA VAHEMÄRKIDETA ON TORE TORE ILUS ILUS JA LUND LUND SAJAB",
"lause! väikeste väikeste. tähtedega, pole pole ka ka? paha",
"Vahel pole ühtegi kordust, ei ühtegi kordust."
]
for testcase in TESTS:
IO_tests(testcase)
|
the-stack_0_25418
|
# !/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
# import imp
# import __init__ as booger
import torch
import torch.nn as nn
import torch.nn.functional as F
#
# class ResContextBlock(nn.Module):
# def __init__(self, in_filters, out_filters):
# super(ResContextBlock, self).__init__()
# self.conv1 = nn.Conv2d(in_filters, out_filters, kernel_size=(1, 1), stride=(1, 1))
# self.act1 = nn.LeakyReLU()
#
# self.conv2 = nn.Conv2d(out_filters, out_filters, (3, 3), padding=1)
# self.act2 = nn.LeakyReLU()
# self.bn1 = nn.BatchNorm2d(out_filters)
#
# self.conv3 = nn.Conv2d(out_filters, out_filters, (3, 3), dilation=(2, 2), padding=2)
# self.act3 = nn.LeakyReLU()
# self.bn2 = nn.BatchNorm2d(out_filters)
#
# def forward(self, x):
# shortcut = self.conv1(x)
# shortcut = self.act1(shortcut)
#
# resA = self.conv2(shortcut)
# resA = self.act2(resA)
# resA1 = self.bn1(resA)
#
# resA = self.conv3(resA1)
# resA = self.act3(resA)
# resA2 = self.bn2(resA)
#
# output = shortcut + resA2
# return output
# export
# class TimeDistributed(nn.Module):
# "Applies a module over tdim identically for each step"
#
# def __init__(self, module, low_mem=False, tdim=1):
# super(TimeDistributed, self).__init__()
# self.module = module
# self.low_mem = low_mem
# self.tdim = tdim
#
# def forward(self, *args, **kwargs):
# "input x with shape:(bs,seq_len,channels,width,height)"
# if self.low_mem or self.tdim != 1:
# return self.low_mem_forward(*args)
# else:
# # only support tdim=1
# inp_shape = args[0].shape
# bs, seq_len = inp_shape[0], inp_shape[1]
# out = self.module(*[x.view(bs * seq_len, *x.shape[2:]) for x in args], **kwargs)
# out_shape = out.shape
# return out.view(bs, seq_len, *out_shape[1:])
#
# def low_mem_forward(self, *args, **kwargs):
# "input x with shape:(bs,seq_len,channels,width,height)"
# tlen = args[0].shape[self.tdim]
# args_split = [torch.unbind(x, dim=self.tdim) for x in args]
# out = []
# for i in range(tlen):
# out.append(self.module(*[args[i] for args in args_split]), **kwargs)
# return torch.stack(out, dim=self.tdim)
#
# def __repr__(self):
# return f'TimeDistributed({self.module})'
# import torch.nn as nn
#
#
# class TimeDistributed(nn.Module):
# def __init__(self, module, batch_first=True):
# super(TimeDistributed, self).__init__()
# self.module = module
# self.batch_first = batch_first
#
# def forward(self, x):
#
# if len(x.size()) <= 2:
# return self.module(x)
#
# # Squash samples and timesteps into a single axis
# x_reshape = x.contiguous().view(-1, x.size(-1)) # (samples * timesteps, input_size)
#
# y = self.module(x_reshape)
#
# # We have to reshape Y
# if self.batch_first:
# y = y.contiguous().view(x.size(0), -1, y.size(-1)) # (samples, timesteps, output_size)
# else:
# y = y.view(-1, x.size(1), y.size(-1)) # (timesteps, samples, output_size)
#
# return y
class TimeDistributed(nn.Module):
def __init__(self, layer, time_steps, *args):
super(TimeDistributed, self).__init__()
self.layers = nn.ModuleList([layer for i in range(time_steps)])
def forward(self, x):
batch_size, time_steps, C, H, W = x.size()
output = torch.tensor([], device='cuda:0')
for i in range(time_steps):
output_t = self.layers[i](x[:, i, :, :, :])
output_t = output_t.unsqueeze(1)
output = torch.cat((output, output_t), 1)
# print(output)
return output
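# Illustrative usage sketch (the wrapped layer, shapes and device below are
# assumptions, not part of this module): apply a per-frame Conv2d over a
# (batch, time, channel, height, width) tensor.
#
#   conv = nn.Conv2d(3, 16, kernel_size=3, padding=1).to('cuda:0')
#   td = TimeDistributed(conv, time_steps=5)
#   x = torch.randn(2, 5, 3, 64, 64, device='cuda:0')
#   y = td(x)  # -> shape (2, 5, 16, 64, 64)
#
# Note that nn.ModuleList([layer for i in range(time_steps)]) stores the same
# layer object time_steps times, so every time step shares the same weights.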
|
the-stack_0_25419
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for export_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import absltest
from language.common.utils import export_utils
class ExportUtilsTest(absltest.TestCase):
def test_numeric_timestamp_and_trailing_slashes(self):
temp_dir = self.create_tempdir().full_path
os.makedirs(os.path.join(temp_dir, "export", "best", "2", "my_module"))
os.makedirs(os.path.join(temp_dir, "export", "best", "10", "my_module"))
result = export_utils.tfhub_export_path(temp_dir + "/", "best", "my_module")
self.assertEqual(
result, os.path.join(temp_dir, "export", "best", "10", "my_module"))
def test_cleanup_old_dirs(self):
temp_dir = self.create_tempdir().full_path
for i in range(7, 12):
os.makedirs(os.path.join(temp_dir, "export", "best", str(i), "my_module"))
export_utils.clean_tfhub_exports(temp_dir, "best", exports_to_keep=3)
dirnames = os.listdir(os.path.join(temp_dir, "export", "best"))
self.assertEqual(set(dirnames), {"9", "10", "11"})
if __name__ == "__main__":
absltest.main()
|
the-stack_0_25420
|
import numpy as np
#from matplotlib.path import Path
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist import SubplotHost, \
ParasiteAxesAuxTrans
def curvelinear_test1(fig):
"""
grid for custom transform.
"""
def tr(x, y):
x, y = np.asarray(x), np.asarray(y)
return x, y-x
def inv_tr(x,y):
x, y = np.asarray(x), np.asarray(y)
return x, y+x
grid_helper = GridHelperCurveLinear((tr, inv_tr))
ax1 = Subplot(fig, 1, 2, 1, grid_helper=grid_helper)
    # ax1 will have ticks and gridlines defined by the given
# transform (+ transData of the Axes). Note that the transform of
# the Axes itself (i.e., transData) is not affected by the given
# transform.
fig.add_subplot(ax1)
xx, yy = tr([3, 6], [5.0, 10.])
ax1.plot(xx, yy)
ax1.set_aspect(1.)
ax1.set_xlim(0, 10.)
ax1.set_ylim(0, 10.)
ax1.axis["t"]=ax1.new_floating_axis(0, 3.)
ax1.axis["t2"]=ax1.new_floating_axis(1, 7.)
ax1.grid(True)
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different
    # from mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but may be possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 2, 2, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if 1:
fig = plt.figure(1, figsize=(7, 4))
fig.clf()
curvelinear_test1(fig)
curvelinear_test2(fig)
plt.draw()
plt.show()
|
the-stack_0_25422
|
# -*- coding: utf-8 -*-
import time
from unittest import TestCase
from mock import Mock
from lamvery.actions.logs import LogsAction
def default_args():
args = Mock()
args.conf_file = '.lamvery.yml'
args.follow = False
args.interval = 1
args.start = '-1h'
return args
class LogsActionTestCase(TestCase):
def test_action(self):
c = Mock()
ret = [{'message': 'foo', 'eventId': 'bar', 'timestamp': int(time.time() * 1000)}]
c.get_log_events = Mock(return_value=ret)
action = LogsAction(default_args())
action._get_client = Mock(return_value=c)
action.action()
|
the-stack_0_25423
|
import gym
import numpy as np
import matplotlib.pyplot as plt
from agents import deep_q_network
from common import utils
def main():
env = gym.make("CartPole-v1")
n_states = env.observation_space.shape
n_actions = env.action_space.n
running_win_pct = []
scores = []
eps = []
agent = deep_q_network.Agent(n_states, n_actions)
n_games = 10_000
for i in range(n_games):
state = env.reset()
score = 0
while True:
action = agent.sample(state)
new_state, reward, done, _info = env.step(action)
agent.update(state, action, reward, new_state)
score += reward
state = new_state
if done:
break
scores.append(score)
eps.append(agent.epsilon)
if i % 10 == 0:
win_pct = np.mean(scores[-100:])
running_win_pct.append(win_pct)
print(f"{i}: {win_pct}, {agent.epsilon}")
filename = "cartpole_naive_dqn.png"
utils.plot_learning(scores, eps, filename)
plt.show()
env.close()
if __name__ == "__main__":
main()
|
the-stack_0_25424
|
from gov.nasa.jpl.mgss.mbee.docgen.docbook import DBParagraph
from gov.nasa.jpl.mgss.mbee.docgen.validation import ValidationRule
from gov.nasa.jpl.mgss.mbee.docgen.validation import ValidationSuite
from gov.nasa.jpl.mgss.mbee.docgen.validation import ViolationSeverity
from com.nomagic.uml2.ext.magicdraw.classes.mdkernel import *
targets = scriptInput['DocGenTargets']
# make a validation suite, give it a name
vs = ValidationSuite("TestSuite")
vs.setShowSummary(True)
vs.setShowDetail(True)
# each validation suite can have many rules, make one with name, description,
# and severity
vr = ValidationRule("Rule 1", "Stuff with names", ViolationSeverity.WARNING)
vr2 = ValidationRule("Rule 2", "Stuff that's Packages", ViolationSeverity.ERROR)
vs.addValidationRule(vr)
vs.addValidationRule(vr2)
# each rule can have many violations, give it the element and maybe some comment
for e in targets:
for t in e.getOwnedElement():
if isinstance(t, NamedElement) and t.getName() != "":
vr.addViolation(t, "Has Name")
if isinstance(t, Package):
vr2.addViolation(t, "Is Package")
scriptOutput = {}
scriptOutput["DocGenValidationOutput"] = [vs]
scriptOutput["DocGenOutput"] = [DBParagraph("'Regular' output shows up first.")]
|
the-stack_0_25425
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import pytest
import popart
import popart._internal.ir as _ir
def check_graph_inputs(graph, graph_inputs):
""" Helper function, graph inputs are what we expect. """
assert graph.getInputIds() == graph_inputs, \
f"Expected Graph {graph.getGraphString()} to have inputs {graph_inputs}, got {graph.getInputIds()}"
def check_graph_outputs(graph, graph_outputs):
""" Helper function, graph outputs are what we expect. """
assert graph.getOutputIds() == graph_outputs, \
f"Expected Graph {graph.getGraphString()} to have outputs {graph_outputs}, got {graph.getOutputIds()}"
def test_graph_construction():
""" Test that we can construct a popart._internal.ir.Graph object. """
ir = _ir.Ir()
g1Id = _ir.GraphId("g1")
g1 = _ir.Graph(ir, g1Id)
def test_graph_graph_inputs():
""" Test we can add/remove graph inputs. """
ir = _ir.Ir()
g1Id = _ir.GraphId("g1")
g1 = ir.createGraph(g1Id)
# Check initially graph inputs are empty.
check_graph_inputs(g1, [])
# Check addInput.
g1.addInput("inputA", _ir.TensorInfo(_ir.DataType.FLOAT16, [5, 5]))
check_graph_inputs(g1, ["inputA"])
g1.addInput("inputB", _ir.TensorInfo(_ir.DataType.FLOAT, [65, 5]))
check_graph_inputs(g1, ["inputA", "inputB"])
g1.addInput(1, "input1", _ir.TensorInfo(_ir.DataType.FLOAT, [65, 5]),
False)
check_graph_inputs(g1, ["inputA", "input1", "inputB"])
g1.addInput(1, "input2", _ir.TensorInfo(_ir.DataType.FLOAT, [65, 5]), True)
check_graph_inputs(g1, ["inputA", "input2", "inputB"])
# Check getInputId.
assert g1.getInputId(0) == "inputA"
assert g1.getInputId(1) == "input2"
assert g1.getInputId(2) == "inputB"
# Check getInputIndex
assert g1.getInputIndex("inputA") == 0
assert g1.getInputIndex("input2") == 1
assert g1.getInputIndex("inputB") == 2
with pytest.raises(popart.popart_exception) as excinfo:
g1.getInputIndex("nonExistingTensor")
# Check hasInputId.
assert g1.hasInputId("inputA")
assert not g1.hasInputId("input1")
# Check removeInput.
g1.removeInput(1)
check_graph_inputs(g1, ["inputA", "inputB"])
g1.removeInput("inputA")
check_graph_inputs(g1, ["inputB"])
def test_graph_graph_outputs():
""" Test we can add/remove graph outputs. """
ir = _ir.Ir()
g1Id = _ir.GraphId("g1")
g1 = ir.createGraph(g1Id)
# We add inputs as a way of adding tensors to the graph that we can mark as
# outputs.
g1.addInput("t0", _ir.TensorInfo(_ir.DataType.FLOAT16, [5, 5]))
g1.addInput("t1", _ir.TensorInfo(_ir.DataType.FLOAT16, [5, 5]))
g1.addInput("t2", _ir.TensorInfo(_ir.DataType.FLOAT16, [5, 5]))
    # Check markAsOutput.
check_graph_outputs(g1, [])
g1.markAsOutput("t0")
check_graph_outputs(g1, ["t0"])
g1.markAsOutput(0, "t1", False)
check_graph_outputs(g1, ["t1", "t0"])
g1.markAsOutput(0, "t2", True)
check_graph_outputs(g1, ["t2", "t0"])
# Check getOutputId.
assert g1.getOutputId(0) == "t2"
assert g1.getOutputId(1) == "t0"
# Check getOutputIndex
assert g1.getOutputIndex("t2") == 0
assert g1.getOutputIndex("t0") == 1
with pytest.raises(popart.popart_exception) as excinfo:
g1.getOutputIndex("nonExistingTensor")
# Check hasOutputId.
assert g1.hasOutputId("t0")
assert g1.hasOutputId("t2")
assert not g1.hasOutputId("t1")
    # Check removeOutput.
g1.removeOutput(1)
check_graph_outputs(g1, ["t2"])
g1.removeOutput("t2")
check_graph_outputs(g1, [])
def test_graph_scope_functions():
""" Test we can scope functions. """
ir = _ir.Ir()
g1Id = _ir.GraphId("g1")
g1 = ir.createGraph(g1Id)
# Test addScope
assert _ir.addScope(g1, "tensor1") == "g1/tensor1"
assert _ir.addScope(g1, "foobar") == "g1/foobar"
# Test removeScope
assert _ir.removeScope(g1, "g1/tensor1") == "tensor1"
assert _ir.removeScope(g1, "g1/foobar") == "foobar"
with pytest.raises(popart.popart_exception) as excinfo:
_ir.removeScope(g1, "h1/tensor1")
# Test getScope
assert g1.getScope().str() == "g1"
def test_graph_id_member():
""" Test .id member binding. """
ir = _ir.Ir()
g1Id = _ir.GraphId("g1")
g1 = ir.createGraph(g1Id)
assert g1.id == g1Id
def test_graph_get_graph_string():
""" Test getGraphString binding. """
ir = _ir.Ir()
g1Id = _ir.GraphId("g1")
g1 = ir.createGraph(g1Id)
assert ir.getMainGraph().getGraphString() == "the main graph"
assert g1.getGraphString() == "subgraph 'g1'"
|
the-stack_0_25430
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def download(opt, path, fname, version='1.0'):
fshort = fname[:fname.find('.')] if '.' in fname else fname
dpath = os.path.join(opt['datapath'], 'models', path, fshort)
if not build_data.built(dpath, version):
print('[downloading: ' + dpath + '/' + fname + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
url = 'https://s3.amazonaws.com/fair-data/parlai/_models/convai2/' + fname
build_data.download(url, dpath, fname)
if '.tgz' in fname or '.gz' in fname:
build_data.untar(dpath, fname)
# Mark the data as built.
build_data.mark_done(dpath, version)
|
the-stack_0_25431
|
from query.scripts.script_util import *
print('Normalizing bboxes...')
insts = FaceInstance.objects.filter().select_related('frame__video')
for inst in insts:
video = inst.frame.video
w, h = video.width, video.height
inst.bbox.x1 /= w
inst.bbox.x2 /= w
inst.bbox.y1 /= h
inst.bbox.y2 /= h
print('Saving changes...')
FaceInstance.objects.bulk_update(insts, update_fields=['bbox'], batch_size=100)
print('Done!')
|
the-stack_0_25432
|
# -*- coding: utf-8 -*-
# Copyright 2016 Resonai Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
yabt Extend
~~~~~~~~~~~~~~~~~~~~~~~
:author: Itamar Ostricher
"""
from collections import defaultdict, namedtuple, OrderedDict
from enum import Enum
from functools import partial, wraps
import pkg_resources
from ostrich.utils.collections import listify
from .logging import make_logger
logger = make_logger(__name__)
class Empty(type):
pass
PropType = Enum('PropType', """str
numeric
bool
list
dict
StrList
TargetName
Target
TargetList
File
FileList
untyped
""")
ArgSpec = namedtuple('ArgSpec', 'type default')
def evaluate_arg_spec(arg_spec):
arg_type = PropType.untyped
def_val = Empty
if isinstance(arg_spec, tuple):
if len(arg_spec) == 2:
arg_name, second = arg_spec
if isinstance(second, PropType):
arg_type = second
else:
def_val = second
elif len(arg_spec) == 3:
arg_name, arg_type, def_val = arg_spec
else:
arg_name = arg_spec
# TODO(itamar): better errors than asserts
assert isinstance(arg_name, str)
assert isinstance(arg_type, PropType)
# also check validity of def_val?
return arg_name, ArgSpec(arg_type, def_val)
INJECTED_ARGS = frozenset((
    'build_params', 'deps', 'cachable', 'license', 'attempts',
    'packaging_params', 'policies', 'runtime_params',
))
class Builder:
def __init__(self):
self.sig = None
self.func = None
self.test_func = None
self.cache_json_func = None
self.docstring = None
self.min_positional_args = 1 # the `name`
def register_sig(self, builder_name: str, sig: list, docstring: str,
cachable: bool=True, attempts=1):
"""Register a builder signature & docstring for `builder_name`.
The input for the builder signature is a list of "sig-spec"s
representing the builder function arguments.
Each sig-spec in the list can be:
1. A string. This represents a simple untyped positional argument name,
with no default value.
2. A 1-tuple with one string element. Same as #1.
3. A 2-tuple with ('arg-name', arg_type). This represents a typed
positional argument, if arg_type is an instance of PropType enum.
4. A 2-tuple with ('arg-name', default_value). This represents an
un-typed keyword argument with a default value.
5. A 3-tuple with ('arg-name', arg_type, default_value). This
represents a typed keyword argument with a default value,
if arg_type is an instance of PropType enum.
In addition to the args specified in the `sig` list, there are several
*injected* args:
1. A positional arg `name` of type TargetName is always the first arg.
2. A keyword arg `deps` of type TargetList and default value `None`
(or empty list) is always the first after all builder args.
3. A keyword arg `cachable` of type bool and default value taken from
the signature registration call (`cachable` arg).
4. A keyword arg `license` of type StrList and default value [].
5. A keyword arg `policies` of type StrList and default value [].
6. A keyword arg `packaging_params` of type dict and default value {}
(empty dict).
7. A keyword arg `runtime_params` of type dict and default value {}
(empty dict).
8. A keyword arg `build_params` of type dict and default value {}
(empty dict).
9. A keyword arg `attempts` of type int and default value 1.
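        Example (an illustrative sketch only; builders are normally registered
        through the module-level register_builder_sig helper, and the builder
        name and argument names below are hypothetical, not builders shipped
        with yabt):
            register_builder_sig(
                'FooBuilder',
                sig=['sources',                         # untyped positional arg
                     ('main', PropType.File),           # typed positional arg
                     ('strip', PropType.bool, False)],  # typed kwarg w/ default
                docstring='Build Foo artifacts.')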
"""
if self.sig is not None:
raise KeyError('{} already registered a signature!'
.format(builder_name))
self.sig = OrderedDict(name=ArgSpec(PropType.TargetName, Empty))
self.docstring = docstring
kwargs_section = False
for arg_spec in listify(sig):
arg_name, sig_spec = evaluate_arg_spec(arg_spec)
if arg_name in self.sig or arg_name in INJECTED_ARGS:
raise SyntaxError(
"duplicate argument '{}' in function definition"
.format(arg_name))
self.sig[arg_name] = sig_spec
if sig_spec.default == Empty:
if kwargs_section:
# TODO(itamar): how to give syntax error source annotation?
# (see: http://stackoverflow.com/questions/33717804)
raise SyntaxError(
'non-default argument follows default argument')
self.min_positional_args += 1
else:
kwargs_section = True
self.sig['deps'] = ArgSpec(PropType.TargetList, None)
self.sig['cachable'] = ArgSpec(PropType.bool, cachable)
self.sig['license'] = ArgSpec(PropType.StrList, None)
self.sig['policies'] = ArgSpec(PropType.StrList, None)
self.sig['packaging_params'] = ArgSpec(PropType.dict, None)
self.sig['runtime_params'] = ArgSpec(PropType.dict, None)
self.sig['build_params'] = ArgSpec(PropType.dict, None)
self.sig['attempts'] = ArgSpec(PropType.numeric, 1)
class Plugin:
builders = defaultdict(Builder)
hooks = {
'manipulate_target': {},
}
@classmethod
def load_plugins(cls, unused_conf):
# TODO(itamar): Support config semantics for explicitly enabling /
# disabling builders, and not just picking up everything that's
# installed.
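        # Builders register themselves through setuptools entry points in the
        # 'yabt.builders' group. A hypothetical plugin package's setup.py
        # could expose one like this (illustrative sketch, not a real plugin):
        #
        #     entry_points={
        #         'yabt.builders': [
        #             'FooBuilder = yabt_foo.builder',
        #         ],
        #     },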
for entry_point in pkg_resources.iter_entry_points('yabt.builders'):
entry_point.load()
logger.debug('Loaded builder {0.name} from {0.module_name} '
'(dist {0.dist})', entry_point)
logger.debug('Loaded {} builders', len(cls.builders))
cls.validate()
@classmethod
def get_hooks_for_builder(cls, builder_name: str):
for hook_name, hook_spec in Plugin.hooks.items():
if builder_name in hook_spec:
yield hook_name, hook_spec[builder_name]
@classmethod
def validate(cls):
# TODO(itamar): validate stuff
# 1. builders are functions with good signatures (name as first arg)
# 2. hooks belong to existing builders
pass
@classmethod
def remove_builder(cls, builder_name: str):
"""Remove a registered builder `builder_name`.
No reason to use this except for tests.
"""
cls.builders.pop(builder_name, None)
for hook_spec in cls.hooks.values():
hook_spec.pop(builder_name, None)
def register_builder_sig(
builder_name, sig=None, docstring=None, cachable: bool=True,
attempts=1):
Plugin.builders[builder_name].register_sig(
builder_name, sig, docstring, cachable, attempts)
logger.debug('Registered {} builder signature'.format(builder_name))
def register_build_func(builder_name):
def register_decorator(build_func):
if Plugin.builders[builder_name].func:
raise KeyError('{} already registered a build function!'
.format(builder_name))
Plugin.builders[builder_name].func = build_func
        logger.debug('Registered {0} build function from '
                     '{1.__module__}.{1.__name__}()', builder_name, build_func)
@wraps(build_func)
        def builder_wrapper(*args, **kwargs):
            return build_func(*args, **kwargs)
return builder_wrapper
return register_decorator
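# Illustrative registration sketch (the builder name, function name and its
# parameters below are assumptions for illustration only -- the real
# build-function signature is dictated by yabt's build context, not by this
# example):
#
#   @register_build_func('FooBuilder')
#   def foo_builder(build_context, target):
#       ...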
def register_test_func(builder_name):
def register_decorator(test_func):
if Plugin.builders[builder_name].test_func:
raise KeyError('{} already registered a test function!'
.format(builder_name))
Plugin.builders[builder_name].test_func = test_func
        logger.debug('Registered {0} test function from '
                     '{1.__module__}.{1.__name__}()', builder_name, test_func)
@wraps(test_func)
        def tester_wrapper(*args, **kwargs):
            return test_func(*args, **kwargs)
return tester_wrapper
return register_decorator
def register_cache_json_func(builder_name):
def register_decorator(cache_json_func):
if Plugin.builders[builder_name].cache_json_func:
raise KeyError('{} already registered a cache_json function!'
.format(builder_name))
Plugin.builders[builder_name].cache_json_func = cache_json_func
        logger.debug('Registered {0} cache_json function from '
                     '{1.__module__}.{1.__name__}()', builder_name,
                     cache_json_func)
@wraps(cache_json_func)
        def cache_json_wrapper(*args, **kwargs):
            return cache_json_func(*args, **kwargs)
return cache_json_wrapper
return register_decorator
def _register_hook(hook_name, builder_name):
def register_decorator(hook_func):
assert hook_name in Plugin.hooks
Plugin.hooks[hook_name][builder_name] = hook_func
logger.debug('Registered {0} hook for {1} builder from '
'{2.__module__}.{2.__name__}()',
hook_name, builder_name, hook_func)
@wraps(hook_func)
def hook_wrapper(*args, **kwargs):
return hook_func(*args, **kwargs)
return hook_wrapper
return register_decorator
register_manipulate_target_hook = partial(_register_hook, 'manipulate_target')
|
the-stack_0_25433
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
from contextlib import contextmanager, closing
from enum import Enum
import infra.remote
import infra.net
import infra.path
import ccf.clients
import os
import socket
from loguru import logger as LOG
class NodeNetworkState(Enum):
stopped = 0
started = 1
joined = 2
class NodeStatus(Enum):
PENDING = 0
TRUSTED = 1
RETIRED = 2
def is_addr_local(host, port):
with closing(socket.socket()) as s:
try:
s.bind((host, port or 0))
return True
except OSError:
return False
class Node:
def __init__(self, node_id, host, binary_dir=".", debug=False, perf=False):
self.node_id = node_id
self.binary_dir = binary_dir
self.debug = debug
self.perf = perf
self.remote = None
self.network_state = NodeNetworkState.stopped
self.common_dir = None
hosts, *port = host.split(":")
self.host, *self.pubhost = hosts.split(",")
self.rpc_port = int(port[0]) if port else None
self.node_port = None
if self.host == "localhost":
self.host = infra.net.expand_localhost()
if is_addr_local(self.host, self.rpc_port):
self.remote_impl = infra.remote.LocalRemote
else:
self.remote_impl = infra.remote.SSHRemote
self.pubhost = self.pubhost[0] if self.pubhost else self.host
def __hash__(self):
return self.node_id
def __eq__(self, other):
return self.node_id == other.node_id
def start(
self,
lib_name,
enclave_type,
workspace,
label,
common_dir,
members_info,
**kwargs,
):
self._start(
infra.remote.StartType.new,
lib_name,
enclave_type,
workspace,
label,
common_dir,
members_info=members_info,
**kwargs,
)
self.network_state = NodeNetworkState.joined
def join(
self,
lib_name,
enclave_type,
workspace,
label,
common_dir,
target_rpc_address,
snapshot_dir,
**kwargs,
):
self._start(
infra.remote.StartType.join,
lib_name,
enclave_type,
workspace,
label,
common_dir,
target_rpc_address=target_rpc_address,
snapshot_dir=snapshot_dir,
**kwargs,
)
def recover(self, lib_name, enclave_type, workspace, label, common_dir, **kwargs):
self._start(
infra.remote.StartType.recover,
lib_name,
enclave_type,
workspace,
label,
common_dir,
**kwargs,
)
self.network_state = NodeNetworkState.joined
def _start(
self,
start_type,
lib_name,
enclave_type,
workspace,
label,
common_dir,
target_rpc_address=None,
snapshot_dir=None,
members_info=None,
**kwargs,
):
"""
Creates a CCFRemote instance, sets it up (connects, creates the directory and ships over the files), and
(optionally) starts the node by executing the appropriate command.
If self.debug is set to True, it will not actually start up the node, but will prompt the user to do so manually
Raises exception if failed to prepare or start the node
:param lib_name: the enclave package to load
:param enclave_type: default: release. Choices: 'release', 'debug', 'virtual'
:param workspace: directory where node is started
:param label: label for this node (to differentiate nodes from different test runs)
:return: void
"""
lib_path = infra.path.build_lib_path(lib_name, enclave_type)
self.common_dir = common_dir
self.remote = infra.remote.CCFRemote(
start_type,
lib_path,
str(self.node_id),
self.host,
self.pubhost,
self.node_port,
self.rpc_port,
self.remote_impl,
enclave_type,
workspace,
label,
common_dir,
target_rpc_address,
members_info,
snapshot_dir,
binary_dir=self.binary_dir,
**kwargs,
)
self.remote.setup()
self.network_state = NodeNetworkState.started
if self.debug:
with open("/tmp/vscode-gdb.sh", "a") as f:
f.write(f"if [ $1 -eq {self.remote.local_node_id} ]; then\n")
f.write(f"cd {self.remote.remote.root}\n")
f.write(f"{' '.join(self.remote.remote.cmd)}\n")
f.write("fi\n")
print("")
phost = "localhost" if self.host.startswith("127.") else self.host
print(
"================= Please run the below command on "
+ phost
+ " and press enter to continue ================="
)
print("")
print(self.remote.debug_node_cmd())
print("")
input("Press Enter to continue...")
else:
if self.perf:
self.remote.set_perf()
self.remote.start()
self.remote.get_startup_files(self.common_dir)
self._read_ports()
LOG.info("Node {} started".format(self.node_id))
def _read_ports(self):
node_address_path = os.path.join(self.common_dir, self.remote.node_address_path)
with open(node_address_path, "r") as f:
node_host, node_port = f.read().splitlines()
node_port = int(node_port)
assert (
node_host == self.host
), f"Unexpected change in node address from {self.host} to {node_host}"
if self.node_port is not None:
assert (
node_port == self.node_port
), f"Unexpected change in node port from {self.node_port} to {node_port}"
self.node_port = node_port
rpc_address_path = os.path.join(self.common_dir, self.remote.rpc_address_path)
with open(rpc_address_path, "r") as f:
rpc_host, rpc_port = f.read().splitlines()
rpc_port = int(rpc_port)
assert (
rpc_host == self.host
), f"Unexpected change in RPC address from {self.host} to {rpc_host}"
if self.rpc_port is not None:
assert (
rpc_port == self.rpc_port
), f"Unexpected change in RPC port from {self.rpc_port} to {rpc_port}"
self.rpc_port = rpc_port
def stop(self):
if self.remote and self.network_state is not NodeNetworkState.stopped:
self.network_state = NodeNetworkState.stopped
return self.remote.stop()
return [], []
def is_stopped(self):
return self.network_state == NodeNetworkState.stopped
def is_joined(self):
return self.network_state == NodeNetworkState.joined
def wait_for_node_to_join(self, timeout=3):
"""
This function can be used to check that a node has successfully
joined a network and that it is part of the consensus.
"""
# Until the node has joined, the SSL handshake will fail as the node
# is not yet endorsed by the network certificate
try:
with self.client(connection_timeout=timeout) as nc:
rep = nc.get("/node/commit")
assert (
rep.status_code == 200
), f"An error occured after node {self.node_id} joined the network: {rep.body}"
except ccf.clients.CCFConnectionException as e:
raise TimeoutError(f"Node {self.node_id} failed to join the network") from e
def get_ledger(self):
return self.remote.get_ledger()
def get_snapshots(self):
return self.remote.get_snapshots()
def client(self, identity=None, **kwargs):
akwargs = {
"cert": os.path.join(self.common_dir, f"{identity}_cert.pem")
if identity
else None,
"key": os.path.join(self.common_dir, f"{identity}_privk.pem")
if identity
else None,
"ca": os.path.join(self.common_dir, "networkcert.pem"),
"description": f"[{self.node_id}{'|' + identity if identity is not None else ''}]",
}
akwargs.update(kwargs)
return ccf.clients.client(self.pubhost, self.rpc_port, **akwargs)
def suspend(self):
self.remote.suspend()
LOG.info(f"Node {self.node_id} suspended...")
def resume(self):
self.remote.resume()
LOG.info(f"Node {self.node_id} has resumed from suspension.")
@contextmanager
def node(node_id, host, binary_directory, debug=False, perf=False, pdb=False):
"""
Context manager for Node class.
:param node_id: unique ID of node
:param binary_directory: the directory where CCF's binaries are located
:param host: node's hostname
:param debug: default: False. If set, node will not start (user is prompted to start them manually)
:param perf: default: False. If set, node will run under perf record
:return: a Node instance that can be used to build a CCF network
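    Example (an illustrative sketch; the id, host, directory and the start()
    arguments are placeholder assumptions, not a real configuration):
        with node(0, "localhost", "./build") as n:
            n.start(...)  # or n.join(...) to attach to an existing network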
"""
this_node = Node(
node_id=node_id, host=host, binary_dir=binary_directory, debug=debug, perf=perf
)
try:
yield this_node
except Exception:
if pdb:
import pdb
pdb.set_trace()
else:
raise
finally:
this_node.stop()
|
the-stack_0_25437
|
#!/usr/bin/env python
# FreeRTOS Common IO V0.1.2
# Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# http://aws.amazon.com/freertos
# http://www.FreeRTOS.org
import time
import pigpio
import socket
import threading
GPIO_R = 4
GPIO_W = 23
sample_counts = 50
sample_duration = 0.2
rising_edge = -1
period = -1
high = -1
HOST = ''
PORT = 50007
process_end = False
def callback_function(gpio, level, tick):
"""
    Interrupt callback function triggered by PWM input pulses.
:param gpio: gpio pin num
:param level: gpio level
:param tick: The number of microseconds since boot
:return:
"""
if gpio == GPIO_R:
global rising_edge, high, period
if level == 1:
if rising_edge != -1:
period = pigpio.tickDiff(rising_edge, tick)
rising_edge = tick
else:
if rising_edge != -1:
high = pigpio.tickDiff(rising_edge, tick)
def socket_thread(s):
"""
Thread function to monitor socket.
:param s: Socket handler.
:return:
"""
global process_end
try:
conn, addr = s.accept()
conn.recv(1024)
except Exception as e:
print(e)
# Notify the main thread to end process.
process_end = True
def start_pwm(pi, gpio=23, frequency=1000, duty_cycle=30):
"""
Setup gpio and start to generate pwm.
:param pi: pigpio handler
:param gpio: gpio
:param frequency: pwm frequency
:param duty_cycle: pwm duty cycle
:return: wave id
"""
if duty_cycle > 100:
duty_cycle = 100
pi.set_mode(gpio, pigpio.OUTPUT)
# Calculate period in micro seconds.
period = int(1000000.0 / frequency)
# Calculate micro seconds of falling edge.
falling_edge = int(duty_cycle / 100 * period)
# Setup one cycle of pwm.
pi.wave_add_generic([
pigpio.pulse(0, 1 << gpio, 0),
pigpio.pulse(1 << gpio, 0, falling_edge),
pigpio.pulse(0, 1 << gpio, period - falling_edge),
])
# Start pwm.
wid = pi.wave_create()
pi.wave_send_repeat(wid)
return wid
if __name__ == "__main__":
pi = pigpio.pi()
pi.set_mode(GPIO_R, pigpio.INPUT)
callback_handler = pi.callback(GPIO_R, pigpio.EITHER_EDGE, callback_function)
socket.setdefaulttimeout(10)
# Create socket server.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
# Start to generate pwm output.
wid = start_pwm(pi, GPIO_W)
# Create a thread to monitor socket.
t_s = threading.Thread(target=socket_thread, args=(s,))
t_s.start()
time_out = 100
# Polling pwm input.
while not process_end and time_out > 0:
time.sleep(sample_duration)
# Calculate frequency with period in micro seconds.
frequency = 1000000.0 / period if period > 0 else 0
# Calculate duty cycle.
duty_cycle = 100.0 * high / period if period > 0 else 0
print("{:.5f} {:.5f}".format(frequency, duty_cycle))
# Reset variables for next round.
rising_edge = period = high = -1
time_out -= 1
pi.wave_tx_stop()
pi.wave_delete(wid)
callback_handler.cancel()
pi.stop()
s.close()
|
the-stack_0_25438
|
"""
ARIMA model class.
Author: Chad Fulton
License: BSD-3
"""
from statsmodels.compat.pandas import Appender
import warnings
import numpy as np
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.statespace import sarimax
from statsmodels.tsa.statespace.kalman_filter import MEMORY_CONSERVE
from statsmodels.tsa.statespace.tools import diff
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.arima.estimators.yule_walker import yule_walker
from statsmodels.tsa.arima.estimators.burg import burg
from statsmodels.tsa.arima.estimators.hannan_rissanen import hannan_rissanen
from statsmodels.tsa.arima.estimators.innovations import (
innovations, innovations_mle)
from statsmodels.tsa.arima.estimators.gls import gls as estimate_gls
from statsmodels.tsa.arima.specification import SARIMAXSpecification
class ARIMA(sarimax.SARIMAX):
r"""
Autoregressive Integrated Moving Average (ARIMA) model, and extensions
This model is the basic interface for ARIMA-type models, including those
with exogenous regressors and those with seasonal components. The most
general form of the model is SARIMAX(p, d, q)x(P, D, Q, s). It also allows
all specialized cases, including
- autoregressive models: AR(p)
- moving average models: MA(q)
- mixed autoregressive moving average models: ARMA(p, q)
- integration models: ARIMA(p, d, q)
- seasonal models: SARIMA(P, D, Q, s)
- regression with errors that follow one of the above ARIMA-type models
Parameters
----------
endog : array_like, optional
The observed time-series process :math:`y`.
exog : array_like, optional
Array of exogenous regressors.
order : tuple, optional
The (p,d,q) order of the model for the autoregressive, differences, and
moving average components. d is always an integer, while p and q may
either be integers or lists of integers.
seasonal_order : tuple, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity. Default
is (0, 0, 0, 0). D and s are always integers, while P and Q
may either be integers or lists of positive integers.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend. Can be specified as a
string where 'c' indicates a constant term, 't' indicates a
linear trend in time, and 'ct' includes both. Can also be specified as
an iterable defining a polynomial, as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is 'c' for
models without integration, and no trend for models with integration.
Note that all trend terms are included in the model as exogenous
regressors, which differs from how trends are included in ``SARIMAX``
models. See the Notes section for a precise definition of the
treatment of trend terms.
enforce_stationarity : bool, optional
Whether or not to require the autoregressive parameters to correspond
        to a stationary process.
enforce_invertibility : bool, optional
Whether or not to require the moving average parameters to correspond
to an invertible process.
concentrate_scale : bool, optional
Whether or not to concentrate the scale (variance of the error term)
out of the likelihood. This reduces the number of parameters by one.
This is only applicable when considering estimation by numerical
maximum likelihood.
trend_offset : int, optional
The offset at which to start time trend values. Default is 1, so that
        if `trend='t'` the trend is equal to 1, 2, ..., nobs. Typically this is
        only set when the model is created by extending a previous dataset.
dates : array_like of datetime, optional
If no index is given by `endog` or `exog`, an array-like object of
datetime objects can be provided.
freq : str, optional
If no index is given by `endog` or `exog`, the frequency of the
time-series may be specified here as a Pandas offset or offset string.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'none'.
Notes
-----
This model incorporates both exogenous regressors and trend components
through "regression with ARIMA errors". This differs from the
specification estimated using ``SARIMAX`` which treats the trend
components separately from any included exogenous regressors. The full
specification of the model estimated here is:
.. math::
Y_{t}-\delta_{0}-\delta_{1}t-\ldots-\delta_{k}t^{k}-X_{t}\beta
& =\epsilon_{t} \\
\left(1-L\right)^{d}\left(1-L^{s}\right)^{D}\Phi\left(L\right)
\Phi_{s}\left(L\right)\epsilon_{t}
& =\Theta\left(L\right)\Theta_{s}\left(L\right)\eta_{t}
where :math:`\eta_t \sim WN(0,\sigma^2)` is a white noise process, L
is the lag operator, and :math:`G(L)` are lag polynomials corresponding
to the autoregressive (:math:`\Phi`), seasonal autoregressive
(:math:`\Phi_s`), moving average (:math:`\Theta`), and seasonal moving
average components (:math:`\Theta_s`).
`enforce_stationarity` and `enforce_invertibility` are specified in the
constructor because they affect loglikelihood computations, and so should
not be changed on the fly. This is why they are not instead included as
arguments to the `fit` method.
.. todo:: should concentrate_scale=True by default
Examples
--------
>>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))
>>> res = mod.fit()
>>> print(res.summary())
"""
def __init__(self, endog, exog=None, order=(0, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
enforce_stationarity=True, enforce_invertibility=True,
concentrate_scale=False, trend_offset=1, dates=None,
freq=None, missing='none', validate_specification=True):
# Default for trend
# 'c' if there is no integration and 'n' otherwise
# TODO: if trend='c', then we could alternatively use `demean=True` in
# the estimation methods rather than setting up `exog` and using GLS.
# Not sure if it's worth the trouble though.
integrated = order[1] > 0 or seasonal_order[1] > 0
if trend is None and not integrated:
trend = 'c'
elif trend is None:
trend = 'n'
# Construct the specification
# (don't pass specific values of enforce stationarity/invertibility,
# because we don't actually want to restrict the estimators based on
# this criteria. Instead, we'll just make sure that the parameter
# estimates from those methods satisfy the criteria.)
self._spec_arima = SARIMAXSpecification(
endog, exog=exog, order=order, seasonal_order=seasonal_order,
trend=trend, enforce_stationarity=None, enforce_invertibility=None,
concentrate_scale=concentrate_scale, trend_offset=trend_offset,
dates=dates, freq=freq, missing=missing,
validate_specification=validate_specification)
exog = self._spec_arima._model.data.orig_exog
# Raise an error if we have a constant in an integrated model
has_trend = len(self._spec_arima.trend_terms) > 0
if has_trend:
lowest_trend = np.min(self._spec_arima.trend_terms)
if lowest_trend < order[1] + seasonal_order[1]:
raise ValueError(
'In models with integration (`d > 0`) or seasonal'
' integration (`D > 0`), trend terms of lower order than'
                    ' `d + D` cannot be included (as they would be eliminated due to'
' the differencing operation). For example, a constant'
' cannot be included in an ARIMA(1, 1, 1) model, but'
' including a linear trend, which would have the same'
' effect as fitting a constant to the differenced data,'
' is allowed.')
# Keep the given `exog` by removing the prepended trend variables
input_exog = None
if exog is not None:
if _is_using_pandas(exog, None):
input_exog = exog.iloc[:, self._spec_arima.k_trend:]
else:
input_exog = exog[:, self._spec_arima.k_trend:]
# Initialize the base SARIMAX class
# Note: we don't pass in a trend value to the base class, since ARIMA
# standardizes the trend to always be part of exog, while the base
# SARIMAX class puts it in the transition equation.
super().__init__(
endog, exog, trend=None, order=order,
seasonal_order=seasonal_order,
enforce_stationarity=enforce_stationarity,
enforce_invertibility=enforce_invertibility,
concentrate_scale=concentrate_scale, dates=dates, freq=freq,
missing=missing, validate_specification=validate_specification)
self.trend = trend
# Save the input exog and input exog names, so that we can refer to
# them later (see especially `ARIMAResults.append`)
self._input_exog = input_exog
if exog is not None:
self._input_exog_names = self.exog_names[self._spec_arima.k_trend:]
else:
self._input_exog_names = None
# Override the public attributes for k_exog and k_trend to reflect the
# distinction here (for the purpose of the superclass, these are both
# combined as `k_exog`)
self.k_exog = self._spec_arima.k_exog
self.k_trend = self._spec_arima.k_trend
# Remove some init kwargs that aren't used in this model
unused = ['measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'hamilton_representation']
self._init_keys = [key for key in self._init_keys if key not in unused]
@property
def _res_classes(self):
return {'fit': (ARIMAResults, ARIMAResultsWrapper)}
def fit(self, start_params=None, transformed=True, includes_fixed=False,
method=None, method_kwargs=None, gls=None, gls_kwargs=None,
cov_type=None, cov_kwds=None, return_params=False,
low_memory=False):
"""
Fit (estimate) the parameters of the model.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : bool, optional
Whether or not `start_params` is already transformed. Default is
True.
includes_fixed : bool, optional
If parameters were previously fixed with the `fix_params` method,
this argument describes whether or not `start_params` also includes
the fixed parameters, in addition to the free parameters. Default
is False.
method : str, optional
The method used for estimating the parameters of the model. Valid
options include 'statespace', 'innovations_mle', 'hannan_rissanen',
'burg', 'innovations', and 'yule_walker'. Not all options are
available for every specification (for example 'yule_walker' can
only be used with AR(p) models).
method_kwargs : dict, optional
Arguments to pass to the fit function for the parameter estimator
described by the `method` argument.
gls : bool, optional
Whether or not to use generalized least squares (GLS) to estimate
regression effects. The default is False if `method='statespace'`
and is True otherwise.
gls_kwargs : dict, optional
Arguments to pass to the GLS estimation fit method. Only applicable
if GLS estimation is used (see `gls` argument for details).
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'approx' for the observed information matrix estimator,
calculated using a numerical approximation of the Hessian matrix.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_approx' is the same as 'robust' except that the
intermediate calculations use the 'approx' method.
- 'none' for no covariance matrix calculation.
Default is 'opg' unless memory conservation is used to avoid
computing the loglikelihood values for each observation, in which
case the default is 'oim'.
cov_kwds : dict or None, optional
A dictionary of arguments affecting covariance matrix computation.
**opg, oim, approx, robust, robust_approx**
- 'approx_complex_step' : bool, optional - If True, numerical
approximations are computed using complex-step methods. If False,
numerical approximations are computed using finite difference
methods. Default is True.
- 'approx_centered' : bool, optional - If True, numerical
approximations computed using finite difference methods use a
centered approximation. Default is False.
return_params : bool, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
low_memory : bool, optional
If set to True, techniques are applied to substantially reduce
memory usage. If used, some features of the results object will
not be available (including smoothed results and in-sample
prediction), although out-of-sample forecasting is possible.
Default is False.
Returns
-------
ARIMAResults
Examples
--------
>>> mod = sm.tsa.arima.ARIMA(endog, order=(1, 0, 0))
>>> res = mod.fit()
>>> print(res.summary())
"""
# Determine which method to use
# 1. If method is specified, make sure it is valid
if method is not None:
self._spec_arima.validate_estimator(method)
# 2. Otherwise, use state space
# TODO: may want to consider using innovations (MLE) if possible here,
# (since in some cases it may be faster than state space), but it is
# less tested.
else:
method = 'statespace'
# Can only use fixed parameters with the following methods
methods_with_fixed_params = ['statespace', 'hannan_rissanen']
if self._has_fixed_params and method not in methods_with_fixed_params:
raise ValueError(
"When parameters have been fixed, only the methods "
f"{methods_with_fixed_params} can be used; got '{method}'."
)
# Handle kwargs related to the fit method
if method_kwargs is None:
method_kwargs = {}
required_kwargs = []
if method == 'statespace':
required_kwargs = ['enforce_stationarity', 'enforce_invertibility',
'concentrate_scale']
elif method == 'innovations_mle':
required_kwargs = ['enforce_invertibility']
for name in required_kwargs:
if name in method_kwargs:
raise ValueError('Cannot override model level value for "%s"'
' when method="%s".' % (name, method))
method_kwargs[name] = getattr(self, name)
# Handle kwargs related to GLS estimation
if gls_kwargs is None:
gls_kwargs = {}
# Handle starting parameters
# TODO: maybe should have standard way of computing starting
# parameters in this class?
if start_params is not None:
if method not in ['statespace', 'innovations_mle']:
raise ValueError('Estimation method "%s" does not use starting'
' parameters, but `start_params` argument was'
' given.' % method)
method_kwargs['start_params'] = start_params
method_kwargs['transformed'] = transformed
method_kwargs['includes_fixed'] = includes_fixed
# Perform estimation, depending on whether we have exog or not
p = None
fit_details = None
has_exog = self._spec_arima.exog is not None
if has_exog or method == 'statespace':
# Use GLS if it was explicitly requested (`gls = True`) or if it
# was left at the default (`gls = None`) and the ARMA estimator is
# anything but statespace.
# Note: both GLS and statespace are able to handle models with
# integration, so we don't need to difference endog or exog here.
if has_exog and (gls or (gls is None and method != 'statespace')):
if self._has_fixed_params:
raise NotImplementedError(
'GLS estimation is not yet implemented for the case '
'with fixed parameters.'
)
p, fit_details = estimate_gls(
self.endog, exog=self.exog, order=self.order,
seasonal_order=self.seasonal_order, include_constant=False,
arma_estimator=method, arma_estimator_kwargs=method_kwargs,
**gls_kwargs)
elif method != 'statespace':
raise ValueError('If `exog` is given and GLS is disabled'
' (`gls=False`), then the only valid'
" method is 'statespace'. Got '%s'."
% method)
else:
method_kwargs.setdefault('disp', 0)
res = super().fit(
return_params=return_params, low_memory=low_memory,
cov_type=cov_type, cov_kwds=cov_kwds, **method_kwargs)
if not return_params:
res.fit_details = res.mlefit
else:
# Handle differencing if we have an integrated model
# (these methods do not support handling integration internally,
# so we need to manually do the differencing)
endog = self.endog
order = self._spec_arima.order
seasonal_order = self._spec_arima.seasonal_order
if self._spec_arima.is_integrated:
warnings.warn('Provided `endog` series has been differenced'
' to eliminate integration prior to parameter'
' estimation by method "%s".' % method,
stacklevel=2,)
endog = diff(
endog, k_diff=self._spec_arima.diff,
k_seasonal_diff=self._spec_arima.seasonal_diff,
seasonal_periods=self._spec_arima.seasonal_periods)
if order[1] > 0:
order = (order[0], 0, order[2])
if seasonal_order[1] > 0:
seasonal_order = (seasonal_order[0], 0, seasonal_order[2],
seasonal_order[3])
if self._has_fixed_params:
method_kwargs['fixed_params'] = self._fixed_params.copy()
# Now, estimate parameters
if method == 'yule_walker':
p, fit_details = yule_walker(
endog, ar_order=order[0], demean=False,
**method_kwargs)
elif method == 'burg':
p, fit_details = burg(endog, ar_order=order[0],
demean=False, **method_kwargs)
elif method == 'hannan_rissanen':
p, fit_details = hannan_rissanen(
endog, ar_order=order[0],
ma_order=order[2], demean=False, **method_kwargs)
elif method == 'innovations':
p, fit_details = innovations(
endog, ma_order=order[2], demean=False,
**method_kwargs)
# innovations computes estimates through the given order, so
# we want to take the estimate associated with the given order
p = p[-1]
elif method == 'innovations_mle':
p, fit_details = innovations_mle(
endog, order=order,
seasonal_order=seasonal_order,
demean=False, **method_kwargs)
# In all cases except method='statespace', we now need to extract the
# parameters and, optionally, create a new results object
if p is not None:
# Need to check that fitted parameters satisfy given restrictions
if (self.enforce_stationarity
and self._spec_arima.max_reduced_ar_order > 0
and not p.is_stationary):
raise ValueError('Non-stationary autoregressive parameters'
' found with `enforce_stationarity=True`.'
' Consider setting it to False or using a'
' different estimation method, such as'
' method="statespace".')
if (self.enforce_invertibility
and self._spec_arima.max_reduced_ma_order > 0
and not p.is_invertible):
raise ValueError('Non-invertible moving average parameters'
' found with `enforce_invertibility=True`.'
' Consider setting it to False or using a'
' different estimation method, such as'
' method="statespace".')
# Build the requested results
if return_params:
res = p.params
else:
# Handle memory conservation option
if low_memory:
conserve_memory = self.ssm.conserve_memory
self.ssm.set_conserve_memory(MEMORY_CONSERVE)
# Perform filtering / smoothing
if (self.ssm.memory_no_predicted or self.ssm.memory_no_gain
or self.ssm.memory_no_smoothing):
func = self.filter
else:
func = self.smooth
res = func(p.params, transformed=True, includes_fixed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
# Save any details from the fit method
res.fit_details = fit_details
# Reset memory conservation
if low_memory:
self.ssm.set_conserve_memory(conserve_memory)
return res
@Appender(sarimax.SARIMAXResults.__doc__)
class ARIMAResults(sarimax.SARIMAXResults):
@Appender(sarimax.SARIMAXResults.append.__doc__)
def append(self, endog, exog=None, refit=False, fit_kwargs=None, **kwargs):
# MLEResults.append will concatenate the given `exog` here with
# `data.orig_exog`. However, `data.orig_exog` already has had any
# trend variables prepended to it, while the `exog` given here should
# not. Instead, we need to temporarily replace `orig_exog` and
# `exog_names` with the ones that correspond to those that were input
# by the user.
if exog is not None:
orig_exog = self.model.data.orig_exog
exog_names = self.model.exog_names
self.model.data.orig_exog = self.model._input_exog
self.model.exog_names = self.model._input_exog_names
# Perform the appending procedure
out = super().append(endog, exog=exog, refit=refit,
fit_kwargs=fit_kwargs, **kwargs)
# Now we reverse the temporary change made above
if exog is not None:
self.model.data.orig_exog = orig_exog
self.model.exog_names = exog_names
return out
class ARIMAResultsWrapper(sarimax.SARIMAXResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(
sarimax.SARIMAXResultsWrapper._wrap_attrs, _attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
sarimax.SARIMAXResultsWrapper._wrap_methods, _methods)
wrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults) # noqa:E305
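# Hedged usage sketch (added for illustration, not part of the original module):
# fit an ARIMA(1, 0, 0) to synthetic AR(1) data using a non-default estimator.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _eps = _rng.standard_normal(500)
    _y = np.zeros(500)
    for _t in range(1, 500):
        _y[_t] = 0.6 * _y[_t - 1] + _eps[_t]
    _mod = ARIMA(_y, order=(1, 0, 0))
    _res = _mod.fit(method="hannan_rissanen")
    print(_res.summary())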
|
the-stack_0_25442
|
import requests
from .exceptions import CIVOAPIError
from .networks import Networks
from .utils import filter_list
class Kubernetes:
"""
Kubernetes clusters are a number of instances on the Civo cloud platform running the Kubernetes cloud orchestration platform.
"""
supported_k8s = ['NYC1', 'SVG1']
def __init__(self, headers, api_url, region=None):
self.region = region
self.headers = headers
self._api_url = api_url
self.url = '{}/v2/kubernetes/clusters'.format(self._api_url)
self.kube_version = '{}/v2/kubernetes/versions'.format(self._api_url)
self.marketplace_url = '{}/v2/kubernetes/applications'.format(self._api_url)
def get_url(self, path=None, region=None):
""" Construct the API URL, appending necessary parameters
:param path: additional path components (optional)
:param region: target region (optional)
:return: str
"""
region = region if region else self.region
if region is not None and region not in self.supported_k8s:
raise CIVOAPIError('Kubernetes is not supported in: %s' % region)
path = path if path else ""
query = "?region={region}".format(region=region) if region else ""
return self.url + path + query
def create(self, name: str, num_nodes: int = 3, nodes_size: str = 'g3.small', kubernetes_version: str = None,
tags: str = None, network: str = None, region: str = None) -> dict:
"""
Function to create a cluster of kubernetes
:param name: a name for your cluster, must be unique within your account (required)
:param num_nodes: the number of instances to create (optional, the default at the time of writing is 3)
        :param nodes_size: the size of each node (optional, the default is currently g3.small)
:param kubernetes_version: the version of k3s to install (optional, the default is currently the latest available)
:param tags: a space separated list of tags, to be used freely as required (optional)
:param network: network to be attached to cluster instance, not validated (optional, default: Default)
:param region: the civo region to be used for instance creation, not validated (optional)
:return: dict
"""
payload = {
'name': name,
'num_target_nodes': num_nodes,
'target_nodes_size': nodes_size,
'tags': '',
'applications': '',
'kubernetes_version': ''
}
if tags:
payload['tags'] = tags
if kubernetes_version:
payload['kubernetes_version'] = kubernetes_version
networks_list = Networks(self.headers, self._api_url, region).search()
if not network:
payload['network_id'] = [i['id'] for i in networks_list if i['default']][0]
else:
payload['network_id'] = [i['id'] for i in networks_list if i['name'] == network or i['id'] == network][0]
r = requests.post(self.get_url(region=region), headers=self.headers, params=payload)
return r.json()
def search(self, filter: str = None, region: str = None) -> dict:
"""
        A list of clusters accessible from the account.
:param filter: Filter json object the format is 'id:6224cd2b-d416-4e92-bdbb-db60521c8eb9',
you can filter by any object that is inside the json
:param region: the civo region to be used for instance creation, not validated (optional)
:return: objects dict
"""
payload = {}
r = requests.get(self.get_url(region=region), headers=self.headers, params=payload)
if filter:
data = r.json()
return filter_list(data=data, filter_by=filter)
return r.json()
def retrieving(self, id: str, region: str = None) -> object:
"""
        Function to retrieve a single cluster's details
        :param id: id of the cluster
:param region: the civo region to be used for instance creation, not validated (optional)
:return: object json
"""
r = requests.get(self.get_url(region=region, path='/{}'.format(id)), headers=self.headers)
return r.json()
def update(self, id: str, name: str = None, num_nodes: int = None, applications: str = None, version: str = None,
node_destroy: str = None, region: str = None) -> dict:
"""
Function to update a cluster of kubernetes
:param node_destroy: if you are scaling down by one, you can give a hint on the node's name to be destroyed.
:param version: the version of k3s to upgrade to.
:param applications: a comma separated list of applications to install. Spaces within application names are fine
but shouldn't be either side of the comma.
:param id: id of the cluster
:param name: the cluster's new name
:param num_nodes: how many nodes should the cluster scale to.
:param region: the civo region to be used for instance creation, not validated (optional)
:return: dict
"""
payload = {}
if name:
payload['name'] = name
if num_nodes:
payload['num_target_nodes'] = num_nodes
if version:
payload['version'] = version
if node_destroy:
payload['node_destroy'] = node_destroy
if applications:
payload['applications'] = applications
r = requests.put(self.get_url(region=region, path='/{}'.format(id)), headers=self.headers, params=payload)
return r.json()
def marketplace(self, filter: str = None) -> dict:
"""
        A user can install applications into their cluster from the marketplace using the `update` call above.
:param filter: Filter json object the format is 'id:6224cd2b-d416-4e92-bdbb-db60521c8eb9',
you can filter by any object that is inside the json
:return: objects dict
"""
payload = {}
r = requests.get(self.marketplace_url, headers=self.headers, params=payload)
if filter:
data = r.json()
return filter_list(data=data, filter_by=filter)
return r.json()
def delete(self, id: str, region: str = None) -> dict:
"""
A user can delete a cluster and all underlying nodes.
:param id: id of the cluster
:param region: the civo region to be used for instance creation, not validated (optional)
:return: dict
"""
r = requests.delete(self.get_url(region=region, path='/{}'.format(id)), headers=self.headers)
return r.json()
def recycle(self, id: str, hostname: str, region: str = None) -> dict:
"""
A user can delete and recreate one of the underlying nodes, if it's having a problem.
        :param hostname: the name of the node to recycle.
:param id: id of the cluster
:param region: the civo region to be used for instance creation, not validated (optional)
:return: dict
"""
payload = {'hostname': hostname}
        r = requests.post(self.get_url(region=region, path='/{}/recycle'.format(id)), headers=self.headers, params=payload)
return r.json()
def versions(self, filter: str = None) -> dict:
"""
        A list of Kubernetes versions available to install.
:param filter: Filter json object the format is 'type:stable',
you can filter by any object that is inside the json
:return: objects dict
"""
payload = {}
r = requests.get(self.kube_version, headers=self.headers, params=payload)
if filter:
data = r.json()
return filter_list(data=data, filter_by=filter)
return r.json()
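# Hedged usage sketch (illustrative only; the token, API URL and cluster name
# below are placeholders, not values defined in this module):
if __name__ == '__main__':
    headers = {'Authorization': 'bearer <CIVO_API_TOKEN>'}
    k8s = Kubernetes(headers, 'https://api.civo.com', region='NYC1')
    # Uncomment to issue real API calls with a valid token:
    # print(k8s.create(name='demo-cluster', num_nodes=2))
    # print(k8s.search(filter='name:demo-cluster'))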
|
the-stack_0_25443
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 19:27:57 2019
@author: HCHO
"""
# Rename file extensions: files ending in `symbol` are renamed sequentially.
import os
# `symbol` is the file extension to match, e.g. '.dat'.
def rename(symbol):
i=31
dictory = os.path.dirname(__file__)
for root, dirs, files in os.walk(dictory, topdown=True):
for name in files:
#print(name)
postfix=os.path.splitext(name)
if (postfix[1]==symbol):
print(name)
oldname=name
newname='EditBushouSave'+str(i)+symbol
                os.rename(os.path.join(root, oldname), os.path.join(root, newname))
i+=1
rename('.dat')
|
the-stack_0_25444
|
"""Support to interface with universal remote control devices."""
from __future__ import annotations
from collections.abc import Iterable
from dataclasses import dataclass
from datetime import timedelta
from enum import IntEnum
import functools as ft
import logging
from typing import Any, cast, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity, ToggleEntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_ACTIVITY = "activity"
ATTR_ACTIVITY_LIST = "activity_list"
ATTR_CURRENT_ACTIVITY = "current_activity"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE = "device"
ATTR_NUM_REPEATS = "num_repeats"
ATTR_DELAY_SECS = "delay_secs"
ATTR_HOLD_SECS = "hold_secs"
ATTR_ALTERNATIVE = "alternative"
ATTR_TIMEOUT = "timeout"
DOMAIN = "remote"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
SERVICE_SEND_COMMAND = "send_command"
SERVICE_LEARN_COMMAND = "learn_command"
SERVICE_DELETE_COMMAND = "delete_command"
SERVICE_SYNC = "sync"
DEFAULT_NUM_REPEATS = 1
DEFAULT_DELAY_SECS = 0.4
DEFAULT_HOLD_SECS = 0
class RemoteEntityFeature(IntEnum):
"""Supported features of the remote entity."""
LEARN_COMMAND = 1
DELETE_COMMAND = 2
ACTIVITY = 4
# These SUPPORT_* constants are deprecated as of Home Assistant 2022.5.
# Please use the RemoteEntityFeature enum instead.
SUPPORT_LEARN_COMMAND = 1
SUPPORT_DELETE_COMMAND = 2
SUPPORT_ACTIVITY = 4
REMOTE_SERVICE_ACTIVITY_SCHEMA = make_entity_service_schema(
{vol.Optional(ATTR_ACTIVITY): cv.string}
)
@bind_hass
def is_on(hass: HomeAssistant, entity_id: str) -> bool:
"""Return if the remote is on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for remotes."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_OFF, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_turn_off"
)
component.async_register_entity_service(
SERVICE_TURN_ON, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_turn_on"
)
component.async_register_entity_service(
SERVICE_TOGGLE, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_toggle"
)
component.async_register_entity_service(
SERVICE_SEND_COMMAND,
{
vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DEVICE): cv.string,
vol.Optional(
ATTR_NUM_REPEATS, default=DEFAULT_NUM_REPEATS
): cv.positive_int,
vol.Optional(ATTR_DELAY_SECS): vol.Coerce(float),
vol.Optional(ATTR_HOLD_SECS, default=DEFAULT_HOLD_SECS): vol.Coerce(float),
},
"async_send_command",
)
component.async_register_entity_service(
SERVICE_LEARN_COMMAND,
{
vol.Optional(ATTR_DEVICE): cv.string,
vol.Optional(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_COMMAND_TYPE): cv.string,
vol.Optional(ATTR_ALTERNATIVE): cv.boolean,
vol.Optional(ATTR_TIMEOUT): cv.positive_int,
},
"async_learn_command",
)
component.async_register_entity_service(
SERVICE_DELETE_COMMAND,
{
vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DEVICE): cv.string,
},
"async_delete_command",
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
return await cast(EntityComponent, hass.data[DOMAIN]).async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await cast(EntityComponent, hass.data[DOMAIN]).async_unload_entry(entry)
@dataclass
class RemoteEntityDescription(ToggleEntityDescription):
"""A class that describes remote entities."""
class RemoteEntity(ToggleEntity):
"""Base class for remote entities."""
entity_description: RemoteEntityDescription
_attr_activity_list: list[str] | None = None
_attr_current_activity: str | None = None
_attr_supported_features: int = 0
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._attr_supported_features
@property
def current_activity(self) -> str | None:
"""Active activity."""
return self._attr_current_activity
@property
def activity_list(self) -> list[str] | None:
"""List of available activities."""
return self._attr_activity_list
@final
@property
def state_attributes(self) -> dict[str, Any] | None:
"""Return optional state attributes."""
if not self.supported_features & RemoteEntityFeature.ACTIVITY:
return None
return {
ATTR_ACTIVITY_LIST: self.activity_list,
ATTR_CURRENT_ACTIVITY: self.current_activity,
}
def send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send commands to a device."""
raise NotImplementedError()
async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send commands to a device."""
await self.hass.async_add_executor_job(
ft.partial(self.send_command, command, **kwargs)
)
def learn_command(self, **kwargs: Any) -> None:
"""Learn a command from a device."""
raise NotImplementedError()
async def async_learn_command(self, **kwargs: Any) -> None:
"""Learn a command from a device."""
await self.hass.async_add_executor_job(ft.partial(self.learn_command, **kwargs))
def delete_command(self, **kwargs: Any) -> None:
"""Delete commands from the database."""
raise NotImplementedError()
async def async_delete_command(self, **kwargs: Any) -> None:
"""Delete commands from the database."""
await self.hass.async_add_executor_job(
ft.partial(self.delete_command, **kwargs)
)
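# Hedged illustration (not part of Home Assistant itself): a minimal concrete
# RemoteEntity that records commands instead of driving real hardware. All
# names below are invented for this example.
class DemoRemote(RemoteEntity):
    """Example remote entity that stores the last command list it was asked to send."""
    _attr_name = "Demo remote"
    _attr_is_on = False
    def __init__(self) -> None:
        """Initialize with an empty command history."""
        self._last_command: list[str] | None = None
    def turn_on(self, **kwargs: Any) -> None:
        """Mark the demo remote as on."""
        self._attr_is_on = True
    def turn_off(self, **kwargs: Any) -> None:
        """Mark the demo remote as off."""
        self._attr_is_on = False
    def send_command(self, command: Iterable[str], **kwargs: Any) -> None:
        """Record the commands; a real integration would forward them to a device."""
        self._last_command = list(command)
        _LOGGER.debug("DemoRemote received commands: %s", self._last_command)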
|
the-stack_0_25445
|
"""Runs backward selection on a single CIFAR image for a pre-trained model.
Unlike SIS, only runs backward selection once. In the returned SISResult object,
ignore the SIS and just use backward selection values.
Example usage:
python run_sis_on_adv_robust.py \
--model_checkpoint_dir=./madrynet/models/adv_trained \
--image_idx=10 \
--out_dir=./madrynet/sis_results \
--batch_size=128 \
--sis_threshold=0.99
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import sys
import json
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from sufficient_input_subsets import sis
from madrynet.cifar10_challenge.model import Model
FLAGS = flags.FLAGS
flags.DEFINE_float('sis_threshold', 0, 'Threshold to use for SIS.')
flags.DEFINE_integer('batch_size', 128, 'Batch size for model inference.')
flags.DEFINE_integer('image_idx', None, 'Image index (into CIFAR) test set.')
# flags.DEFINE_integer('gpu', 0, 'GPU (for cuda_visible_devices).')
flags.DEFINE_string('out_dir', None, 'Path to write out file with SIS.')
flags.DEFINE_string(
'model_checkpoint_dir', None, 'Path to model checkpoint directory.')
__TF_SESSION__ = None
def tf_config():
"""Configures TensorFlow and returns corresponding tf.Session object."""
#os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
#os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
return sess
def make_f_cnn(sess, output_tensor, input_tensor, class_idx, batch_size=128):
def f_cnn(batch_of_inputs):
preds = predict(
sess, output_tensor, input_tensor, batch_of_inputs,
batch_size=batch_size)
return preds[:, class_idx]
return f_cnn
def predict(sess, output_tensor, input_tensor, x, batch_size=128):
x = np.array(x)
preds = []
for batch_idx in range(int(np.ceil(x.shape[0] / batch_size))):
x_batch = x[batch_size*batch_idx:batch_size*(batch_idx+1)]
batch_preds = sess.run(
[output_tensor], feed_dict={input_tensor: x_batch})[0]
preds.append(batch_preds)
preds = np.vstack(preds)
assert preds.shape[0] == x.shape[0]
return preds
def sis_result_to_dict(sis_result):
return {
'sis': sis_result.sis.tolist(),
'ordering_over_entire_backselect': sis_result.ordering_over_entire_backselect.tolist(),
'values_over_entire_backselect': sis_result.values_over_entire_backselect.tolist(),
'mask': sis_result.mask.tolist(),
}
def create_output_dict(collection, sis_threshold, model_checkpoint_dir,
image_idx, target_class_idx):
return {
'collection': [sis_result_to_dict(sr) for sr in collection],
'sis_threshold': sis_threshold,
'model_checkpoint_dir': model_checkpoint_dir,
'image_idx': image_idx,
'target_class_idx': target_class_idx,
}
def write_dict_to_json(dict_to_write, filepath):
with open(filepath, 'w') as f:
json.dump(dict_to_write, f)
def main(argv):
del argv
global __TF_SESSION__
__TF_SESSION__ = tf_config() # cuda_visible_devices=str(FLAGS.gpu))
sess = __TF_SESSION__
logging.basicConfig(level=logging.INFO)
sis_threshold = FLAGS.sis_threshold
batch_size = FLAGS.batch_size
model_checkpoint_dir = FLAGS.model_checkpoint_dir
out_dir = FLAGS.out_dir
image_idx = FLAGS.image_idx
logging.info('SIS threshold: %f' % sis_threshold)
logging.info('Batch size: %d' % batch_size)
logging.info('Model checkpoint dir: %s' % model_checkpoint_dir)
logging.info('Out dir: %s' % out_dir)
logging.info('Image idx: %s' % image_idx)
out_path = os.path.join(out_dir, 'test_%d_sis.json' % image_idx)
logging.info('Will write to outpath: %s' % out_path)
# Check if outfile already exists.
if os.path.exists(out_path):
logging.info('Outfile already exists. Exiting.')
return
# Load model.
model = Model(mode='eval')
model_softmax = tf.nn.softmax(model.pre_softmax)
input_tensor = model.x_input
checkpoint = tf.train.latest_checkpoint(model_checkpoint_dir)
# Restore the checkpoint
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
logging.info('Loaded TF model.')
# Load and preprocess CIFAR data.
logging.info('Loading CIFAR data.')
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
X_TRAIN_MEAN = np.array([125.3, 123.0, 113.9]) / 255.
X_TRAIN_STD = np.array([63.0, 62.1, 66.7]) / 255.
x_train = (x_train - X_TRAIN_MEAN) / X_TRAIN_STD
x_test = (x_test - X_TRAIN_MEAN) / X_TRAIN_STD
# Define fully masked input.
fully_masked_input = np.zeros((32, 32, 3), dtype='float32')
# Run SIS.
original_image = x_test[image_idx]
initial_prediction = predict(
sess, model_softmax, input_tensor, np.array([original_image]))[0]
target_class_idx = int(np.argmax(initial_prediction))
logging.info('Target class idx: %d' % target_class_idx)
f_class = make_f_cnn(
sess, model_softmax, input_tensor, target_class_idx,
batch_size=batch_size)
logging.info('Starting to run SIS.')
initial_mask = sis.make_empty_boolean_mask_broadcast_over_axis(
original_image.shape, 2)
sis_result = sis.find_sis(
f_class,
sis_threshold,
original_image,
initial_mask,
fully_masked_input,
)
collection = [sis_result]
logging.info('Done running SIS.')
# Write SIS collection to file.
output_dict = create_output_dict(
collection, sis_threshold, model_checkpoint_dir, image_idx,
target_class_idx)
logging.info('Writing SIS output to: %s' % out_path)
# util.create_directory(out_dir)
write_dict_to_json(output_dict, out_path)
if __name__ == '__main__':
flags.mark_flag_as_required('model_checkpoint_dir')
flags.mark_flag_as_required('out_dir')
flags.mark_flag_as_required('image_idx')
app.run(main)
|
the-stack_0_25448
|
"""
sphinx.util.inventory
~~~~~~~~~~~~~~~~~~~~~
Inventory utility functions for Sphinx.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import zlib
from sphinx.util import logging
if False:
# For type annotation
from typing import Callable, Dict, IO, Iterator, Tuple # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.typing import Inventory # NOQA
BUFSIZE = 16 * 1024
logger = logging.getLogger(__name__)
class InventoryFileReader:
"""A file reader for inventory file.
This reader supports mixture of texts and compressed texts.
"""
def __init__(self, stream):
# type: (IO) -> None
self.stream = stream
self.buffer = b''
self.eof = False
def read_buffer(self):
# type: () -> None
chunk = self.stream.read(BUFSIZE)
if chunk == b'':
self.eof = True
self.buffer += chunk
def readline(self):
# type: () -> str
pos = self.buffer.find(b'\n')
if pos != -1:
line = self.buffer[:pos].decode()
self.buffer = self.buffer[pos + 1:]
elif self.eof:
line = self.buffer.decode()
self.buffer = b''
else:
self.read_buffer()
line = self.readline()
return line
def readlines(self):
# type: () -> Iterator[str]
while not self.eof:
line = self.readline()
if line:
yield line
def read_compressed_chunks(self):
# type: () -> Iterator[bytes]
decompressor = zlib.decompressobj()
while not self.eof:
self.read_buffer()
yield decompressor.decompress(self.buffer)
self.buffer = b''
yield decompressor.flush()
def read_compressed_lines(self):
# type: () -> Iterator[str]
buf = b''
for chunk in self.read_compressed_chunks():
buf += chunk
pos = buf.find(b'\n')
while pos != -1:
yield buf[:pos].decode()
buf = buf[pos + 1:]
pos = buf.find(b'\n')
class InventoryFile:
@classmethod
def load(cls, stream, uri, joinfunc):
# type: (IO, str, Callable) -> Inventory
reader = InventoryFileReader(stream)
line = reader.readline().rstrip()
if line == '# Sphinx inventory version 1':
return cls.load_v1(reader, uri, joinfunc)
elif line == '# Sphinx inventory version 2':
return cls.load_v2(reader, uri, joinfunc)
else:
raise ValueError('invalid inventory header: %s' % line)
@classmethod
def load_v1(cls, stream, uri, join):
# type: (InventoryFileReader, str, Callable) -> Inventory
invdata = {} # type: Inventory
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
for line in stream.readlines():
name, type, location = line.rstrip().split(None, 2)
location = join(uri, location)
# version 1 did not add anchors to the location
if type == 'mod':
type = 'py:module'
location += '#module-' + name
else:
type = 'py:' + type
location += '#' + name
invdata.setdefault(type, {})[name] = (projname, version, location, '-')
return invdata
@classmethod
def load_v2(cls, stream, uri, join):
# type: (InventoryFileReader, str, Callable) -> Inventory
invdata = {} # type: Inventory
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
line = stream.readline()
if 'zlib' not in line:
raise ValueError('invalid inventory header (not compressed): %s' % line)
for line in stream.read_compressed_lines():
# be careful to handle names with embedded spaces correctly
m = re.match(r'(?x)(.+?)\s+(\S*:\S*)\s+(-?\d+)\s+(\S+)\s+(.*)',
line.rstrip())
if not m:
continue
name, type, prio, location, dispname = m.groups()
if type == 'py:module' and type in invdata and \
name in invdata[type]: # due to a bug in 1.1 and below,
# two inventory entries are created
# for Python modules, and the first
# one is correct
continue
if location.endswith('$'):
location = location[:-1] + name
location = join(uri, location)
invdata.setdefault(type, {})[name] = (projname, version,
location, dispname)
return invdata
@classmethod
def dump(cls, filename, env, builder):
# type: (str, BuildEnvironment, Builder) -> None
def escape(string):
# type: (str) -> str
return re.sub("\\s+", " ", string)
with open(os.path.join(filename), 'wb') as f:
# header
f.write(('# Sphinx inventory version 2\n'
'# Project: %s\n'
'# Version: %s\n'
'# The remainder of this file is compressed using zlib.\n' %
(escape(env.config.project),
escape(env.config.version))).encode())
# body
compressor = zlib.compressobj(9)
for domainname, domain in sorted(env.domains.items()):
for name, dispname, typ, docname, anchor, prio in \
sorted(domain.get_objects()):
if anchor.endswith(name):
# this can shorten the inventory by as much as 25%
anchor = anchor[:-len(name)] + '$'
uri = builder.get_target_uri(docname)
if anchor:
uri += '#' + anchor
if dispname == name:
dispname = '-'
entry = ('%s %s:%s %s %s %s\n' %
(name, domainname, typ, prio, uri, dispname))
f.write(compressor.compress(entry.encode()))
f.write(compressor.flush())
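# Hedged usage sketch (illustrative; 'objects.inv' and the base URI are
# placeholders, not values used elsewhere in this module):
if __name__ == '__main__':
    import posixpath
    with open('objects.inv', 'rb') as f:
        inventory = InventoryFile.load(f, 'https://example.org/docs/', posixpath.join)
    for object_type, entries in sorted(inventory.items()):
        print(object_type, len(entries))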
|
the-stack_0_25450
|
from typing import Dict
from sklearn.compose import TransformedTargetRegressor
from sklearn.compose._column_transformer import ColumnTransformer
from sklearn.ensemble import HistGradientBoostingRegressor, RandomForestRegressor
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
def create_estimators(preprocessing: ColumnTransformer) -> Dict[str, GridSearchCV]:
"""Create estimators for model fitting.
ElasticNet, HistGradientBoost and RandomForest regressors are added.
More can be added.
Args:
preprocessing: Preprocessing Pipeline
Returns:
Key-value pairs of estimator name and instantiated estimator
"""
    # ElasticNet (l1_ratio = 1.0 -> Lasso penalty; alpha is the overall strength)
param_grid = {
"regressor__regressor__alpha": (0.001, 0.01, 0.1, 1.0),
"regressor__regressor__l1_ratio": (0.05, 0.2, 0.5, 0.7, 0.9, 1.0),
}
en_pipe = TransformedTargetRegressor(
regressor=Pipeline(
[("preprocessor", preprocessing), ("regressor", ElasticNet())]
),
transformer=StandardScaler(),
)
en_search = GridSearchCV(en_pipe, param_grid=param_grid, cv=5,)
# RandomForest
param_grid = {
"regressor__regressor__n_estimators": [50, 100, 200],
"regressor__regressor__max_depth": [5, 6, 7, 15],
}
rf_pipe = TransformedTargetRegressor(
regressor=Pipeline(
[("preprocessor", preprocessing), ("regressor", RandomForestRegressor())]
),
transformer=StandardScaler(),
)
rf_search = GridSearchCV(rf_pipe, param_grid=param_grid, cv=5,)
# HistGradientBoost
param_grid = {
"regressor__regressor__l2_regularization": [0.0, 0.1, 1.0],
"regressor__regressor__max_depth": [6, 15],
"regressor__regressor__max_iter": [100, 200],
}
hgb_pipe = TransformedTargetRegressor(
regressor=Pipeline(
[
("preprocessor", preprocessing),
("regressor", HistGradientBoostingRegressor()),
]
),
transformer=StandardScaler(),
)
hgb_search = GridSearchCV(hgb_pipe, param_grid=param_grid, cv=5,)
return {
"ElasticNet": en_search,
"RandomForest": rf_search,
"HistGradientBoost": hgb_search,
}
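# Hedged usage sketch (illustrative; the column names below are invented for
# the example and are not part of this module):
if __name__ == "__main__":
    from sklearn.compose import make_column_transformer
    from sklearn.preprocessing import OneHotEncoder
    preprocessing = make_column_transformer(
        (StandardScaler(), ["size"]),
        (OneHotEncoder(handle_unknown="ignore"), ["city"]),
    )
    estimators = create_estimators(preprocessing)
    print(sorted(estimators))  # ['ElasticNet', 'HistGradientBoost', 'RandomForest']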
|
the-stack_0_25451
|
"""Support for interfacing to the Logitech SqueezeBox API."""
import asyncio
import logging
from pysqueezebox import Server
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
ATTR_COMMAND,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_START,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.dt import utcnow
from .__init__ import start_server_discovery
from .const import (
DEFAULT_PORT,
DOMAIN,
ENTRY_PLAYERS,
KNOWN_PLAYERS,
PLAYER_DISCOVERY_UNSUB,
)
SERVICE_CALL_METHOD = "call_method"
SERVICE_CALL_QUERY = "call_query"
SERVICE_SYNC = "sync"
SERVICE_UNSYNC = "unsync"
ATTR_QUERY_RESULT = "query_result"
ATTR_SYNC_GROUP = "sync_group"
_LOGGER = logging.getLogger(__name__)
DISCOVERY_INTERVAL = 60
SUPPORT_SQUEEZEBOX = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_SEEK
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_SHUFFLE_SET
| SUPPORT_CLEAR_PLAYLIST
)
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HOST),
cv.deprecated(CONF_PORT),
cv.deprecated(CONF_PASSWORD),
cv.deprecated(CONF_USERNAME),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
}
),
)
KNOWN_SERVERS = "known_servers"
ATTR_PARAMETERS = "parameters"
ATTR_OTHER_PLAYER = "other_player"
ATTR_TO_PROPERTY = [
ATTR_QUERY_RESULT,
ATTR_SYNC_GROUP,
]
SQUEEZEBOX_MODE = {
"pause": STATE_PAUSED,
"play": STATE_PLAYING,
"stop": STATE_IDLE,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up squeezebox platform from platform entry in configuration.yaml (deprecated)."""
if config:
await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up an LMS Server from a config entry."""
config = config_entry.data
_LOGGER.debug("Reached async_setup_entry for host=%s", config[CONF_HOST])
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config[CONF_HOST]
port = config[CONF_PORT]
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(config_entry.entry_id, {})
known_players = hass.data[DOMAIN].get(KNOWN_PLAYERS)
if known_players is None:
hass.data[DOMAIN][KNOWN_PLAYERS] = known_players = []
entry_players = hass.data[DOMAIN][config_entry.entry_id].setdefault(
ENTRY_PLAYERS, []
)
_LOGGER.debug("Creating LMS object for %s", host)
lms = Server(async_get_clientsession(hass), host, port, username, password)
async def _discovery(now=None):
"""Discover squeezebox players by polling server."""
async def _discovered_player(player):
"""Handle a (re)discovered player."""
entity = next(
(
known
for known in known_players
if known.unique_id == player.player_id
),
None,
)
if entity and not entity.available:
# check if previously unavailable player has connected
await player.async_update()
entity.available = player.connected
if not entity:
_LOGGER.debug("Adding new entity: %s", player)
entity = SqueezeBoxEntity(player)
known_players.append(entity)
entry_players.append(entity)
async_add_entities([entity])
players = await lms.async_get_players()
if players:
for player in players:
hass.async_create_task(_discovered_player(player))
hass.data[DOMAIN][config_entry.entry_id][
PLAYER_DISCOVERY_UNSUB
] = hass.helpers.event.async_call_later(DISCOVERY_INTERVAL, _discovery)
_LOGGER.debug("Adding player discovery job for LMS server: %s", host)
asyncio.create_task(_discovery())
# Register entity services
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_CALL_METHOD,
{
vol.Required(ATTR_COMMAND): cv.string,
vol.Optional(ATTR_PARAMETERS): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
),
},
"async_call_method",
)
platform.async_register_entity_service(
SERVICE_CALL_QUERY,
{
vol.Required(ATTR_COMMAND): cv.string,
vol.Optional(ATTR_PARAMETERS): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
),
},
"async_call_query",
)
platform.async_register_entity_service(
SERVICE_SYNC, {vol.Required(ATTR_OTHER_PLAYER): cv.string}, "async_sync",
)
platform.async_register_entity_service(SERVICE_UNSYNC, None, "async_unsync")
# Start server discovery task if not already running
if hass.is_running:
asyncio.create_task(start_server_discovery(hass))
else:
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, start_server_discovery(hass)
)
return True
class SqueezeBoxEntity(MediaPlayerEntity):
"""
Representation of a SqueezeBox device.
Wraps a pysqueezebox.Player() object.
"""
def __init__(self, player):
"""Initialize the SqueezeBox device."""
self._player = player
self._last_update = None
self._query_result = {}
self._available = True
@property
def device_state_attributes(self):
"""Return device-specific attributes."""
squeezebox_attr = {
attr: getattr(self, attr)
for attr in ATTR_TO_PROPERTY
if getattr(self, attr) is not None
}
return squeezebox_attr
@property
def name(self):
"""Return the name of the device."""
return self._player.name
@property
def unique_id(self):
"""Return a unique ID."""
return self._player.player_id
@property
def available(self):
"""Return True if device connected to LMS server."""
return self._available
@available.setter
def available(self, val):
"""Set available to True or False."""
self._available = bool(val)
@property
def state(self):
"""Return the state of the device."""
if not self.available:
return STATE_UNAVAILABLE
if not self._player.power:
return STATE_OFF
if self._player.mode:
return SQUEEZEBOX_MODE.get(self._player.mode)
return None
async def async_update(self):
"""Update the Player() object."""
# only update available players, newly available players will be rediscovered and marked available
if self._available:
last_media_position = self.media_position
await self._player.async_update()
if self.media_position != last_media_position:
self._last_update = utcnow()
if self._player.connected is False:
_LOGGER.info("Player %s is not available", self.name)
self._available = False
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._player.volume:
return int(float(self._player.volume)) / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player.muting
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._player.url
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._player.duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._player.time
@property
def media_position_updated_at(self):
"""Last time status was updated."""
return self._last_update
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._player.image_url
@property
def media_title(self):
"""Title of current playing media."""
return self._player.title
@property
def media_artist(self):
"""Artist of current playing media."""
return self._player.artist
@property
def media_album_name(self):
"""Album of current playing media."""
return self._player.album
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return self._player.shuffle
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SQUEEZEBOX
@property
def sync_group(self):
"""List players we are synced with."""
player_ids = {
p.unique_id: p.entity_id for p in self.hass.data[DOMAIN][KNOWN_PLAYERS]
}
sync_group = []
for player in self._player.sync_group:
if player in player_ids:
sync_group.append(player_ids[player])
return sync_group
@property
def query_result(self):
"""Return the result from the call_query service."""
return self._query_result
async def async_turn_off(self):
"""Turn off media player."""
await self._player.async_set_power(False)
async def async_volume_up(self):
"""Volume up media player."""
await self._player.async_set_volume("+5")
async def async_volume_down(self):
"""Volume down media player."""
await self._player.async_set_volume("-5")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
volume_percent = str(int(volume * 100))
await self._player.async_set_volume(volume_percent)
async def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
await self._player.async_set_muting(mute)
async def async_media_play_pause(self):
"""Send pause command to media player."""
await self._player.async_toggle_pause()
async def async_media_play(self):
"""Send play command to media player."""
await self._player.async_play()
async def async_media_pause(self):
"""Send pause command to media player."""
await self._player.async_pause()
async def async_media_next_track(self):
"""Send next track command."""
await self._player.async_index("+1")
async def async_media_previous_track(self):
"""Send next track command."""
await self._player.async_index("-1")
async def async_media_seek(self, position):
"""Send seek command."""
await self._player.async_time(position)
async def async_turn_on(self):
"""Turn the media player on."""
await self._player.async_set_power(True)
async def async_play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the current playlist.
"""
cmd = "play"
if kwargs.get(ATTR_MEDIA_ENQUEUE):
cmd = "add"
await self._player.async_load_url(media_id, cmd)
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
shuffle_mode = "song" if shuffle else "none"
await self._player.async_set_shuffle(shuffle_mode)
async def async_clear_playlist(self):
"""Send the media player the command for clear playlist."""
await self._player.async_clear_playlist()
async def async_call_method(self, command, parameters=None):
"""
Call Squeezebox JSON/RPC method.
Additional parameters are added to the command to form the list of
positional parameters (p0, p1..., pN) passed to JSON/RPC server.
"""
all_params = [command]
if parameters:
for parameter in parameters:
all_params.append(parameter)
await self._player.async_query(*all_params)
async def async_call_query(self, command, parameters=None):
"""
Call Squeezebox JSON/RPC method where we care about the result.
Additional parameters are added to the command to form the list of
positional parameters (p0, p1..., pN) passed to JSON/RPC server.
"""
all_params = [command]
if parameters:
for parameter in parameters:
all_params.append(parameter)
self._query_result = await self._player.async_query(*all_params)
_LOGGER.debug("call_query got result %s", self._query_result)
async def async_sync(self, other_player):
"""
Add another Squeezebox player to this player's sync group.
If the other player is a member of a sync group, it will leave the current sync group
without asking.
"""
player_ids = {
p.entity_id: p.unique_id for p in self.hass.data[DOMAIN][KNOWN_PLAYERS]
}
other_player_id = player_ids.get(other_player)
if other_player_id:
await self._player.async_sync(other_player_id)
else:
_LOGGER.info("Could not find player_id for %s. Not syncing.", other_player)
async def async_unsync(self):
"""Unsync this Squeezebox player."""
await self._player.async_unsync()
|
the-stack_0_25453
|
from .idp_oauth2 import Oauth2ClientBase
from fence.config import config
class GoogleOauth2Client(Oauth2ClientBase):
"""
client for interacting with google oauth 2,
as google openid connect is supported under oauth2
https://developers.google.com/api-client-library/python/guide/aaa_oauth
"""
DISCOVERY_URL = "https://accounts.google.com/.well-known/openid-configuration"
def __init__(self, settings, logger, HTTP_PROXY=None):
super(GoogleOauth2Client, self).__init__(
settings,
logger,
scope=settings.get("scope") or "openid email",
idp="Google",
HTTP_PROXY=HTTP_PROXY,
)
def get_auth_url(self):
"""
Get authorization uri from discovery doc
"""
authorization_endpoint = self.get_value_from_discovery_doc(
"authorization_endpoint", "https://accounts.google.com/o/oauth2/v2/auth"
)
uri, _ = self.session.create_authorization_url(
authorization_endpoint, prompt="login"
)
return uri
def get_user_id(self, code):
"""
Get user id
"""
if config.get("MOCK_GOOGLE_AUTH", False):
return {"email": "[email protected]"}
try:
token_endpoint = self.get_value_from_discovery_doc(
"token_endpoint", "https://oauth2.googleapis.com/token"
)
jwks_endpoint = self.get_value_from_discovery_doc(
"jwks_uri", "https://www.googleapis.com/oauth2/v3/certs"
)
claims = self.get_jwt_claims_identity(token_endpoint, jwks_endpoint, code)
if claims["email"] and claims["email_verified"]:
return {"email": claims["email"], "sub": claims.get("sub")}
elif claims["email"]:
return {"error": "Email is not verified"}
else:
return {"error": "Can't get user's Google email!"}
except Exception as e:
self.logger.exception("Can't get user info")
return {"error": "Can't get your Google email: {}".format(e)}
|
the-stack_0_25456
|
class Video():
def __init__(self,
seed,
rank,
type,
videoId,
kind,
channelId,
channelTitle,
description,
liveBroadcastContent,
publishedAt,
title,
viewCount,
likeCount,
dislikeCount,
favoriteCount,
commentCount,
duration,
dimension,
definition,
caption,
licensedContent,
):
self.seed = seed
self.rank = rank
self.type = type
self.videoId = videoId
self.kind = kind
self.channelId = channelId
self.channelTitle = channelTitle
self.description = description
self.liveBroadcastContent = liveBroadcastContent
self.publishedAt = publishedAt
self.title = title
self.viewCount = viewCount
self.likeCount = likeCount
self.dislikeCount = dislikeCount
self.favoriteCount = favoriteCount
self.commentCount = commentCount
self.duration = duration
self.dimension = dimension
self.definition = definition
self.caption = caption
self.licensedContent = licensedContent
|
the-stack_0_25458
|
#!/usr/bin/env python
from functools import reduce
import sys
sys.path.append('..')
from libs.helpers import get_input
pairs = {'(': ')', '[': ']', '{': '}', '<': '>'}
def get_corrupt_chars(lines):
for l in lines:
corrupt, invalid_char = is_corrupt(l)
if corrupt:
yield(invalid_char)
def complete_lines(lines):
for l in lines:
corrupt, stack = is_corrupt(l)
if not corrupt:
stack.reverse()
yield([pairs[c] for c in stack])
def is_corrupt(line):
"""Returns the invalid closing character if the line is corrupt, otherwise
the full stack of unterminated characters is returned."""
stack = []
for c in line:
if c in pairs:
stack.append(c)
elif pairs[stack[-1]] == c:
stack.pop()
else:
return True, c
return False, stack
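# Hypothetical quick checks (not from the original puzzle input); the expected
# values follow from tracing is_corrupt/complete_lines above:
#   >>> is_corrupt("(]")
#   (True, ']')
#   >>> is_corrupt("([{")
#   (False, ['(', '[', '{'])
#   >>> list(complete_lines(["([{"]))
#   [['}', ']', ')']]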
corrupt_scores = {')': 3, ']': 57, '}': 1197, '>': 25137}
def get_corrupt_score(corr_chars):
return sum([corrupt_scores[c] for c in corr_chars])
completion_scores = {')': 1, ']': 2, '}': 3, '>': 4}
def get_completion_score(lines):
scores = []
for l in lines:
score = reduce(lambda x, y: x * 5 + y,
[completion_scores[c] for c in l])
scores.append(score)
scores.sort()
return scores[int(len(scores) / 2)]
if __name__ == '__main__':
lines = get_input('./input.txt')
cc = get_corrupt_chars(lines)
score = get_corrupt_score(cc)
print('score: {}'.format(score))
comp_chars = complete_lines(lines)
score = get_completion_score(comp_chars)
print('score: {}'.format(score))
|
the-stack_0_25461
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.ucb
class CommandInfoChange(object):
"""
Const Class
specifies reasons for sending CommandInfoChangeEvents.
See Also:
`API CommandInfoChange <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1ucb_1_1CommandInfoChange.html>`_
"""
__ooo_ns__: str = 'com.sun.star.ucb'
__ooo_full_ns__: str = 'com.sun.star.ucb.CommandInfoChange'
__ooo_type_name__: str = 'const'
COMMAND_INSERTED = 0
"""
A command was inserted into a XCommandInfo.
"""
COMMAND_REMOVED = 1
"""
A command was removed from a XCommandInfo.
"""
__all__ = ['CommandInfoChange']
|
the-stack_0_25463
|
from binaryninja import BinaryViewType
from modules.CryptoScan import CryptoScan
import sys
import argparse
parser = argparse.ArgumentParser(description='Scan binaries for crypto related contents')
static_parser = parser.add_mutually_exclusive_group(required=False)
static_parser.add_argument('--static', dest='static', action='store_true')
static_parser.add_argument('--no-static', dest='static', action='store_false')
parser.set_defaults(static=True)
signature_parser = parser.add_mutually_exclusive_group(required=False)
signature_parser.add_argument('--signature', dest='signature', action='store_true')
signature_parser.add_argument('--no-signature', dest='signature', action='store_false')
parser.set_defaults(signature=True)
il_parser = parser.add_mutually_exclusive_group(required=False)
il_parser.add_argument('--il', dest='il', action='store_true')
il_parser.add_argument('--no-il', dest='il', action='store_false')
parser.set_defaults(il=True)
parser.add_argument('filenames', nargs='+')
try:
    args = parser.parse_args()
except SystemExit:
    parser.print_help()
    sys.exit(0)
options = {'static': args.static, 'signature': args.signature, 'il': args.il}
if any(option for option in options.values()):
for filename in args.filenames:
bv = BinaryViewType.get_view_of_file(filename)
cs = CryptoScan(bv, options)
cs.start()
|
the-stack_0_25464
|
# -*- coding: utf-8 -*-
import six
from scrapy import signals
from scrapy.exceptions import NotConfigured, CloseSpider
from scrapy.utils.misc import load_object
from .items import RssItem
from .exporters import RssItemExporter
class RssExportPipeline(object):
def __init__(self):
self.files = {}
self.exporters = {}
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
try:
file = open(spider.settings.get('FEED_FILE'), 'wb')
except TypeError:
            raise NotConfigured('FEED_FILE parameter is not a string or does not exist')
except (IOError, OSError) as e:
raise CloseSpider('Cannot open file {}: {}'.format(spider.settings.get('FEED_FILE', None), e))
self.files[spider] = file
feed_title = spider.settings.get('FEED_TITLE')
if not feed_title:
raise NotConfigured('FEED_TITLE parameter does not exist')
feed_link = spider.settings.get('FEED_LINK')
if not feed_link:
raise NotConfigured('FEED_LINK parameter does not exist')
feed_description = spider.settings.get('FEED_DESCRIPTION')
if feed_description is None:
raise NotConfigured('FEED_DESCRIPTION parameter does not exist')
item_cls = spider.settings.get('FEED_ITEM_CLASS', spider.settings.get('FEED_ITEM_CLS', RssItem))
if isinstance(item_cls, six.string_types):
item_cls = load_object(item_cls)
namespaces = spider.settings.get('FEED_NAMESPACES', {})
feed_exporter = spider.settings.get('FEED_EXPORTER', RssItemExporter)
if isinstance(feed_exporter, six.string_types):
feed_exporter = load_object(feed_exporter)
if not issubclass(feed_exporter, RssItemExporter):
raise TypeError("FEED_EXPORTER must be RssItemExporter or its subclass, not '{}'".format(feed_exporter))
self.exporters[spider] = feed_exporter(file, feed_title, feed_link, feed_description,
namespaces=namespaces, item_cls=item_cls)
self.exporters[spider].start_exporting()
def spider_closed(self, spider):
self.exporters[spider].finish_exporting()
file = self.files.pop(spider)
file.close()
def process_item(self, item, spider):
self.exporters[spider].export_item(item)
return item
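# A minimal settings sketch for using this pipeline (hypothetical values; only
# the FEED_* keys below are read in spider_opened, and the pipeline path is an
# assumption about the host project layout):
#
#   ITEM_PIPELINES = {'myproject.pipelines.RssExportPipeline': 500}
#   FEED_FILE = 'feed.rss'
#   FEED_TITLE = 'Example feed'
#   FEED_LINK = 'http://example.com/'
#   FEED_DESCRIPTION = 'Scraped items exported as RSS'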
|
the-stack_0_25467
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class DisconnectPacket(DataPacket):
NID = ProtocolInfo.DISCONNECT_PACKET
hideDisconnectionScreen = False
message = ""
    def canBeSentBeforeLogin(self):
return True
def decodePayload(self):
self.hideDisconnectionScreen = self.getBool()
if not self.hideDisconnectionScreen:
self.message = self.getString()
def encodePayload(self):
self.putBool(self.hideDisconnectionScreen)
if not self.hideDisconnectionScreen:
self.putString(self.message)
|
the-stack_0_25468
|
import os
import sys
import pymake
pth = os.path.join('..', '..')
if pth not in sys.path:
sys.path.append(pth)
def get_distribution_name(versiontexname):
dist = None
fname = versiontexname
with open(fname) as f:
lines = f.readlines()
f.close()
for line in lines:
# \newcommand{\modflowversion}{mf6beta0.9.00}
srchtxt = '\\newcommand{\\modflowversion}'
istart = line.rfind('{') + 1
istop = line.rfind('}')
if 0 < istart < istop:
dist = line[istart: istop]
return dist
return None
# Set up the path to the distribution
fname = os.path.join('..', 'version.tex')
dist = get_distribution_name(fname)
distpth = os.path.join('..', '..', 'distribution', dist)
if not os.path.isdir(distpth):
raise Exception(distpth + ' does not exist. ')
# Open the file
f = open('example_table.tex', 'w')
# Write the latex header
s = r'''
\small
\begin{longtable}{p{3cm} p{1cm} p{3cm} p{2.5cm}p{4cm}}
\caption{List of example problems and simulation characteristics}\tabularnewline
\hline
\hline
\textbf{Name} & \textbf{NPER} & \textbf{Namefile(s)} & \textbf{Dimensions (NLAY, NROW, NCOL), (NLAY, NCPL) or (NODES)} & \textbf{Stress Packages} \\
\hline
\endfirsthead
\hline
\hline
\textbf{Name} & \textbf{NPER} & \textbf{Namefile(s)} & \textbf{Dimensions (NLAY, NROW, NCOL) or (NODES)} & \textbf{Stress Packages} \\
\hline
\endhead
'''
f.write(s)
f.write('\n')
expth = os.path.join(distpth, 'examples')
files = os.listdir(expth)
for exname in files:
# Skip if not a directory
if not os.path.isdir(os.path.join(expth, exname)):
continue
# example name
s = '{} '.format(exname)
# number of models
mfnamefile = os.path.join(expth, exname, 'mfsim.nam')
model_files, outfiles = pymake.autotest.get_mf6_files(mfnamefile)
nmodels = len([w for w in model_files if w.lower().endswith('.nam')])
# s += '& {} '.format(nmodels)
# Number of stress periods
tdis = [w for w in model_files if w.upper().endswith('.TDIS')][0]
nper = pymake.autotest.get_mf6_nper(os.path.join(expth, exname, tdis))
s += '& {} '.format(nper)
# Name files
namefiles = [w for w in model_files if w.lower().endswith('.nam')]
s += '& '
    cellstring = r'\parbox[t]{3cm}{' + ''.join(r' {} \\'.format(nf) for nf in namefiles) + '}'
s += cellstring
# Model shape
dis_files = [w for w in model_files if w.upper().endswith('.DIS')
or w.upper().endswith('.DISU')
or w.upper().endswith('.DISV')]
s += '& '
mshapes = []
for disfile in dis_files:
mshape = pymake.autotest.get_mf6_mshape(os.path.join(expth, exname, disfile))
mshapes.append(mshape)
    cellstring = r'\parbox[t]{3cm}{' + ''.join(r' {} \\'.format(ms) for ms in mshapes) + '}'
s += cellstring
# File types
namefiles = [w for w in model_files if w.lower().endswith('.nam')]
s += '& '
lines = []
for nf in namefiles:
ftypes = pymake.autotest.get_mf6_ftypes(os.path.join(expth, exname, nf),
['CHD6', 'WEL6', 'DRN6', 'RIV6', 'GHB6', 'SFR6', 'RCH6',
'EVT6', 'SFR6', 'UZF6', 'MAW6', 'LAK6', 'MVR6'])
ss = ''
for st in ftypes:
if st[:3] not in ss:
ss += st[:3] + ' '
if len(ss) == 0:
ss = 'none'
lines.append(ss)
    cellstring = r'\parbox[t]{4cm}{' + ''.join(r' {} \\'.format(ls) for ls in lines) + '}'
s += cellstring
# End the table line for this example
s = s.replace('_', r'\_')
s += r'\\'
f.write(s)
f.write('\n')
f.write(r'\hline' + '\n')
s = r'''\hline
\end{longtable}
\label{table:examples}
\normalsize
'''
f.write(s)
f.close()
print('done...')
|
the-stack_0_25469
|
"""
module to obtain info surrounding the 'base-sphere'
Functions:
- get_base_sphere: function to find the base sphere (centroid) of the molecule
- get_levels: finds path through molecule from base sphere
- get_area: finds surface area of the molecule
- rescale_inputs: rescale matrix constructs to have area equal to 4pi
- base_error: error handling to rotate molecule away from having atom over north pole
Exceptions:
- ArithmeticError: raised when molecule has negative surface area (typically bridged bicyclics)
"""
from collections import namedtuple
import numpy as np
from scipy.spatial import distance_matrix
from numpy import linalg as la
from utils import get_chain
def get_base_sphere(centres):
"""
Function which selects the starting atom (base-sphere). This is taken as the atom closest to the centroid
@param centres:
@return: centres, base_sphere
"""
# Find the centroid
centroid = [
np.sum(centres[:, 0]) / len(centres[:, 0]),
np.sum(centres[:, 1]) / len(centres[:, 1]),
np.sum(centres[:, 2]) / len(centres[:, 2]),
]
# Find the index of the minimum Euclidean distance and set this as the base sphere
base_sphere = np.argmin(np.sqrt(np.sum(np.square(centres - centroid), axis=1)))
    # re-centre so the base sphere sits on the origin
c_rel = centres - centres[base_sphere]
centres = c_rel[:]
base = namedtuple("base", ["centres", "base_sphere"])
return base(centres=centres, base_sphere=base_sphere)
def get_levels(adjacency_matrix, no_atoms, base_sphere):
"""
Function to generate matrix of levels starting from base sphere. produce a matrix of integers row = level;
1 = non-terminal at this level, 2 = terminal at this level
@param adjacency_matrix:
@param no_atoms:
@param base_sphere:
@return: level_mat, no_levels
"""
r_sum = adjacency_matrix.sum(axis=1)
    to_do = no_atoms - 1  # how many remaining spheres need to be assigned
assigned, level_mat = np.zeros((1, no_atoms), dtype=int), np.zeros(
(1, no_atoms), dtype=int
)
assigned[0, base_sphere] = 1
level_mat[0, base_sphere] = 1
current_level = 0
while to_do > 0 and current_level < 500:
next_level = np.zeros((1, no_atoms), dtype=int)
for j in range(0, no_atoms):
if level_mat[current_level, j] == 1:
current_sphere = j
for i in range(0, no_atoms):
if (
adjacency_matrix[current_sphere, i] == 1
and r_sum[i] == 1
and assigned[0, i] == 0
):
next_level[0, i] = 2
assigned[0, i] = 1
to_do += -1
if (
adjacency_matrix[current_sphere, i] == 1
and r_sum[i] > 1
and assigned[0, i] == 0
):
next_level[0, i] = 1
assigned[0, i] = 1
to_do += -1
level_mat = np.vstack((level_mat, next_level))
current_level += 1
no_levels = len(level_mat) - 1 # number of levels
levels = namedtuple("levels", ["level_mat", "no_levels"])
return levels(level_mat=level_mat, no_levels=no_levels)
def get_area(adjacency_matrix, centres, no_atoms, radii):
"""
Function to return the surface area of the molecule, and the matrix of lambda values
If the area is negative (usually for bridged bicyclic compounds with >2 intersecting rings) a
    ArithmeticError is raised. As the area is computed as the area of a sphere minus the regions where two spheres
intersect, multiple large spheres intersecting leads to a negative value, and thus the surface of the
molecule cannot be approximated.
@param adjacency_matrix:
@param centres:
@param no_atoms:
@param radii:
@return: area and matrix of lambda values
"""
# matrix of distances between intersecting atoms
distances = adjacency_matrix * distance_matrix(centres, centres)
# matrix of lambdas
lam = np.zeros((no_atoms, no_atoms))
for i in range(0, no_atoms):
for j in range(0, no_atoms):
if adjacency_matrix[i, j] == 1:
lam[i, j] = (radii[i] ** 2 - radii[j] ** 2 + distances[i, j] ** 2) / (
2 * distances[i, j]
)
else:
lam[i, j] = 0
# surface area of the molecule
area = 0
for i in range(0, no_atoms):
sphere_i = 4 * np.pi * radii[i] ** 2
for j in range(0, no_atoms):
if adjacency_matrix[i, j] == 1:
sphere_i = sphere_i - 2 * radii[i] * np.pi * abs(radii[i] - lam[i, j])
area += sphere_i
if area < 0:
raise ArithmeticError("Negative Surface Area, cannot approximate surface")
mol_area = namedtuple("mol_area", ["lam", "area"])
return mol_area(lam=lam, area=area)
def rescale_inputs(area, centres, radii, lam):
"""
Function to rescale all inputs to give total surface area equal to 4pi
@param area:
@param centres:
@param radii:
@param lam:
@return: inputs rescaled to have surface area 4pi
"""
centres_r = centres * np.sqrt(4 * np.pi / area)
radii_r = radii * np.sqrt(4 * np.pi / area)
lam_r = lam * np.sqrt(4 * np.pi / area)
rescaled = namedtuple("rescaled", ["centres_r", "radii_r", "lam_r"])
return rescaled(centres_r=centres_r, radii_r=radii_r, lam_r=lam_r)
def base_error(levels, inputs, base, rescaled):
"""
Function to return the vector of next level spheres and the updated rescaled centres post-error handling
@param levels:
@param inputs:
@param base:
@param rescaled:
@return: updated centres
"""
# unpack tuples
no_levels = levels.no_levels
level_mat = levels.level_mat
no_atoms = inputs.no_atoms
adjacency_matrix = inputs.adjacency_matrix
base_sphere = base.base_sphere
centres_r = rescaled.centres_r
radii_r = rescaled.radii_r
lam_r = rescaled.lam_r
# Fingerprint Matrix that tells you how to navigate through molecule
fingerprint = np.tile(-1, (no_levels + 1, no_atoms))
for i in range(0, no_levels + 1):
for j in range(0, no_atoms):
if level_mat[i][j] > 0:
s_list = get_chain(no_atoms, level_mat, adjacency_matrix, j, i)
for k in range(0, len(s_list)):
fingerprint[k][j] = s_list[k]
# Code to produce vector of next level spheres
sphere_levels_vec = []
next_level = []
for i in range(0, no_atoms):
stop = 0
j = 0
while stop < 1:
if fingerprint[j][i] == i:
stop = 1
else:
j = j + 1
sphere_levels_vec.append(j)
next_spheres = []
for s_n in range(0, no_atoms):
if (
j < no_levels
and fingerprint[j][s_n] == i
and fingerprint[j + 1][s_n] == s_n
):
next_spheres.append(s_n)
next_level.append(next_spheres)
# Error handling code - take the base sphere and rotate so that north pole is in base sphere
fine = 0
cover_sphere = base_sphere
i = 0
while i < len(next_level[base_sphere]) and fine == 0:
check_sphere = next_level[base_sphere][i]
if (
la.norm(
centres_r[check_sphere] - radii_r[base_sphere] * np.array([0, 0, 1])
)
<= radii_r[check_sphere]
):
cover_sphere = check_sphere
fine = 1 # if there is something over the north pole
i += 1
fine_2 = 0
angle_x = 10
while angle_x <= np.pi and fine_2 == 0:
# define matrix to rotate about x and y
rot_mat_x = np.array(
[
[1, 0, 0],
[0, np.cos(angle_x), -np.sin(angle_x)],
[0, np.sin(angle_x), np.cos(angle_x)],
]
)
centres_r = np.matmul(centres_r, rot_mat_x)
unit_cover = (1 / la.norm(centres_r[cover_sphere])) * centres_r[cover_sphere]
plane_point = 0.85 * lam_r[base_sphere][cover_sphere] * unit_cover
v_rand = np.random.rand(3)
v_rand = v_rand / (la.norm(v_rand))
w_rand = np.cross(unit_cover, v_rand)
a_coefficient = la.norm(w_rand) ** 2
b_coefficient = 2 * np.dot(plane_point, w_rand)
c_coefficient = la.norm(plane_point) ** 2 - radii_r[base_sphere] ** 2
mu = (
-b_coefficient
+ np.sqrt(b_coefficient ** 2 - 4 * a_coefficient * c_coefficient)
) / (2 * a_coefficient)
test_point = plane_point + mu * w_rand
fine_2 = 1
for i in range(0, len(next_level[base_sphere])):
check_sphere = next_level[base_sphere][i]
if la.norm(centres_r[check_sphere] - test_point) <= radii_r[check_sphere]:
fine_2 = 0
angle_x = angle_x + 10
error = namedtuple("error", ["sphere_levels_vec", "next_level", "centres_r"])
return error(
sphere_levels_vec=sphere_levels_vec, next_level=next_level, centres_r=centres_r
)
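if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): a toy "molecule"
    # of two overlapping spheres, chaining the helpers defined above. Real
    # inputs (centres, radii, adjacency) would come from a parsed structure.
    toy_centres = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    toy_radii = np.array([0.8, 0.8])
    toy_adjacency = np.array([[0, 1], [1, 0]])
    toy_base = get_base_sphere(toy_centres)
    toy_levels = get_levels(toy_adjacency, no_atoms=2, base_sphere=toy_base.base_sphere)
    toy_area = get_area(toy_adjacency, toy_base.centres, no_atoms=2, radii=toy_radii)
    toy_rescaled = rescale_inputs(toy_area.area, toy_base.centres, toy_radii, toy_area.lam)
    print("levels:", toy_levels.no_levels, "area:", toy_area.area)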
|
the-stack_0_25470
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("pipeline", "0016_auto_20181220_0958"),
]
operations = [
migrations.AddField(
model_name="pipelinetemplate",
name="has_subprocess",
field=models.BooleanField(default=False, verbose_name="\u662f\u5426\u542b\u6709\u5b50\u6d41\u7a0b"),
),
]
|
the-stack_0_25472
|
"""Solving Darcy Flow using ConvNet with mixed residual loss
Flow through Porous Media, 2D
div (K(s) grad u(s)) = 0, s = (s1, s2) in (0, 1) x (0, 1)
Boundary:
u = 1, s1 = 0; u = 0, s1 = 1
u_s2 = 0, s2 in {0, 1}
Optimizer: L-BFGS
Considered nonlinear PDE. (nonlinear corrections to Darcy)
"""
import torch
import torch.nn as nn
import torch.autograd as ag
import torch.nn.functional as F
import torch.optim as optim
from models.codec import Decoder
from utils.image_gradient import SobelFilter
from models.darcy import conv_continuity_constraint as continuity_constraint
from models.darcy import conv_boundary_condition as boundary_condition
from utils.plot import save_stats, plot_prediction_det, plot_prediction_det_animate2
from utils.misc import mkdirs, to_numpy
import numpy as np
import argparse
import h5py
import sys
import time
import os
from pprint import pprint
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def main():
parser = argparse.ArgumentParser(description='CNN to solve PDE')
    parser.add_argument('--exp-dir', type=str, default='./experiments/solver', help='directory to save experiment outputs')
parser.add_argument('--nonlinear', action='store_true', default=False, help='set True for nonlinear PDE')
# data
parser.add_argument('--data-dir', type=str, default="./datasets", help='directory to dataset')
parser.add_argument('--data', type=str, default='grf', choices=['grf', 'channelized', 'warped_grf'], help='data type')
parser.add_argument('--kle', type=int, default=512, help='# kle terms')
parser.add_argument('--imsize', type=int, default=64, help='image size')
parser.add_argument('--idx', type=int, default=8, help='idx of input, please use 0 ~ 999')
parser.add_argument('--alpha1', type=float, default=1.0, help='coefficient for the squared term')
parser.add_argument('--alpha2', type=float, default=1.0, help='coefficient for the cubic term')
# latent size: (nz, sz, sz)
parser.add_argument('--nz', type=int, default=1, help='# feature maps of latent z')
# parser.add_argument('--sz', type=int, default=16, help='feature map size of latent z')
parser.add_argument('--blocks', type=list, default=[8, 6], help='# layers in each dense block of the decoder')
parser.add_argument('--weight-bound', type=float, default=10, help='weight for boundary condition loss')
parser.add_argument('--lr', type=float, default=0.5, help='learning rate')
parser.add_argument('--epochs', type=int, default=500, help='# epochs to train')
parser.add_argument('--test-freq', type=int, default=50, help='every # epoch to test')
parser.add_argument('--ckpt-freq', type=int, default=250, help='every # epoch to save model')
parser.add_argument('--cmap', type=str, default='jet', help='color map')
parser.add_argument('--same-scale', action='store_true', help='true for setting noise to be same scale as output')
parser.add_argument('--animate', action='store_true', help='true to plot animate figures')
parser.add_argument('--cuda', type=int, default=1, help='cuda number')
    parser.add_argument('-v', '--verbose', action='store_true', help='True for verbose output')
args = parser.parse_args()
pprint(vars(args))
device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available() else "cpu")
dataset = f'{args.data}_kle{args.kle}' if args.data == 'grf' else args.data
hyparams = f'{dataset}_idx{args.idx}_dz{args.nz}_blocks{args.blocks}_'\
f'lr{args.lr}_wb{args.weight_bound}_epochs{args.epochs}'
if args.nonlinear:
from utils.fenics import solve_nonlinear_poisson
exp_name = 'conv_mixed_residual_nonlinear'
from models.darcy import conv_constitutive_constraint_nonlinear as constitutive_constraint
hyparams = hyparams + f'_alpha1_{args.alpha1}_alpha2_{args.alpha2}'
else:
exp_name = 'conv_mixed_residual'
from models.darcy import conv_constitutive_constraint as constitutive_constraint
run_dir = args.exp_dir + '/' + exp_name + '/' + hyparams
mkdirs(run_dir)
# load data
assert args.idx < 1000
if args.data == 'grf':
assert args.kle in [512, 128, 1024, 2048]
ntest = 1000 if args.kle == 512 else 1024
hdf5_file = args.data_dir + f'/{args.imsize}x{args.imsize}/kle{args.kle}_lhs{ntest}_test.hdf5'
elif args.data == 'warped_grf':
hdf5_file = args.data_dir + f'/{args.imsize}x{args.imsize}/warped_gp_ng64_n1000.hdf5'
elif args.data == 'channelized':
hdf5_file = args.data_dir + f'/{args.imsize}x{args.imsize}/channel_ng64_n512_test.hdf5'
else:
raise ValueError('No dataset are found for the speficied parameters')
print(f'dataset: {hdf5_file}')
with h5py.File(hdf5_file, 'r') as f:
input_data = f['input'][()]
output_data = f['output'][()]
print(f'input: {input_data.shape}')
print(f'output: {output_data.shape}')
# permeability, (1, 1, 64, 64)
perm_arr = input_data[[args.idx]]
# pressure, flux_hor, flux_ver, (3, 64, 64)
if args.nonlinear:
# solve nonlinear Darcy for perm_arr with FEniCS
output_file = run_dir + '/output_fenics.npy'
if os.path.isfile(output_file):
output_arr = np.load(output_file)
print('Loaded solved output field')
else:
print('Solve nonlinear poisson with FEniCS...')
output_arr = solve_nonlinear_poisson(perm_arr[0, 0], args.alpha1,
args.alpha2, run_dir)
np.save(output_file, output_arr)
else:
output_arr = output_data[args.idx]
print('output shape: ', output_arr.shape)
# model
model = Decoder(args.nz, out_channels=3, blocks=args.blocks).to(device)
print(f'model size: {model.model_size}')
fixed_latent = torch.randn(1, args.nz, 16, 16).to(device) * 0.5
perm_tensor = torch.FloatTensor(perm_arr).to(device)
sobel_filter = SobelFilter(args.imsize, correct=True, device=device)
optimizer = optim.LBFGS(model.parameters(),
lr=args.lr, max_iter=20, history_size=50)
logger = {}
logger['loss'] = []
def train(epoch):
model.train()
def closure():
optimizer.zero_grad()
output = model(fixed_latent)
if args.nonlinear:
energy = constitutive_constraint(perm_tensor, output,
sobel_filter, args.alpha1, args.alpha2) \
+ continuity_constraint(output, sobel_filter)
else:
energy = constitutive_constraint(perm_tensor, output,
sobel_filter) + continuity_constraint(output, sobel_filter)
loss_dirichlet, loss_neumann = boundary_condition(output)
loss_boundary = loss_dirichlet + loss_neumann
loss = energy + loss_boundary * args.weight_bound
loss.backward()
if args.verbose:
print(f'epoch {epoch}: loss {loss.item():6f}, '\
f'energy {energy.item():.6f}, diri {loss_dirichlet.item():.6f}, '\
f'neum {loss_neumann.item():.6f}')
return loss
loss = optimizer.step(closure)
loss_value = loss.item() if not isinstance(loss, float) else loss
logger['loss'].append(loss_value)
print(f'epoch {epoch}: loss {loss_value:.6f}')
if epoch % args.ckpt_freq == 0:
torch.save(model.state_dict(), run_dir + "/model_epoch{}.pth".format(epoch))
def test(epoch):
if epoch % args.epochs == 0 or epoch % args.test_freq == 0:
output = model(fixed_latent)
output = to_numpy(output)
if args.animate:
i_plot = epoch // args.test_freq
plot_prediction_det_animate2(run_dir, output_arr, output[0], epoch, args.idx, i_plot,
plot_fn='imshow', cmap=args.cmap, same_scale=args.same_scale)
else:
plot_prediction_det(run_dir, output_arr, output[0], epoch, args.idx,
plot_fn='imshow', cmap=args.cmap, same_scale=args.same_scale)
np.save(run_dir + f'/epoch{epoch}.npy', output[0])
print('start training...')
dryrun = False
tic = time.time()
for epoch in range(1, args.epochs + 1):
if not dryrun:
train(epoch)
test(epoch)
print(f'Finished optimization for {args.epochs} epochs using {(time.time()-tic)/60:.3f} minutes')
save_stats(run_dir, logger, 'loss')
# save input
plt.imshow(perm_arr[0, 0])
plt.colorbar()
plt.savefig(run_dir + '/input.png')
plt.close()
if __name__ == '__main__':
main()
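# Example invocation (a sketch; the script filename and data location are
# assumptions about the local setup, the flags are the ones defined above):
#   python conv_mixed_residual.py --data grf --kle 512 --idx 8 \
#       --epochs 500 --lr 0.5 --weight-bound 10 --cuda 0 --verbose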
|
the-stack_0_25473
|
'''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
'''
def twoSum(nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if len(nums) < 1:
return
idx_dict = {}
for i in range(len(nums)):
if nums[i] in idx_dict:
return [idx_dict[nums[i]], i]
else:
idx_dict[target - nums[i]] = i
arr = [2,7,11,15]
print(twoSum(arr, 9))
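# expected output: [0, 1]  (nums[0] + nums[1] == 2 + 7 == 9)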
|
the-stack_0_25474
|
import glob
import os
import shutil
import tempfile
import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
from arkouda import io_util, pdarrayIO
class CategoricalTest(ArkoudaTest):
@classmethod
def setUpClass(cls):
super(CategoricalTest, cls).setUpClass()
CategoricalTest.cat_test_base_tmp = "{}/categorical_test".format(os.getcwd())
io_util.get_directory(CategoricalTest.cat_test_base_tmp)
def _getCategorical(self, prefix: str = "string", size: int = 11) -> ak.Categorical:
return ak.Categorical(ak.array(["{} {}".format(prefix, i) for i in range(1, size)]))
def _getRandomizedCategorical(self) -> ak.Categorical:
return ak.Categorical(
ak.array(
[
"string",
"string1",
"non-string",
"non-string2",
"string",
"non-string",
"string3",
"non-string2",
"string",
"non-string",
]
)
)
def testBaseCategorical(self):
cat = self._getCategorical()
self.assertTrue((ak.array([7, 5, 9, 8, 2, 1, 4, 0, 3, 6]) == cat.codes).all())
self.assertTrue((ak.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) == cat.segments).all())
self.assertTrue(
(
ak.array(
[
"string 8",
"string 6",
"string 5",
"string 9",
"string 7",
"string 2",
"string 10",
"string 1",
"string 4",
"string 3",
"N/A",
]
)
== cat.categories
).all()
)
self.assertEqual(10, cat.size)
self.assertEqual("category", cat.objtype)
with self.assertRaises(ValueError):
ak.Categorical(ak.arange(0, 5, 10))
def testCategoricalFromCodesAndCategories(self):
codes = ak.array([7, 5, 9, 8, 2, 1, 4, 0, 3, 6])
categories = ak.unique(
ak.array(
[
"string 8",
"string 6",
"string 5",
"string 9",
"string 7",
"string 2",
"string 10",
"string 1",
"string 4",
"string 3",
"N/A",
]
)
)
cat = ak.Categorical.from_codes(codes, categories)
self.assertTrue((codes == cat.codes).all())
self.assertTrue((categories == cat.categories).all())
def testContains(self):
cat = self._getCategorical()
self.assertTrue(cat.contains("string").all())
def testEndsWith(self):
cat = self._getCategorical()
self.assertTrue(cat.endswith("1").any())
def testStartsWith(self):
cat = self._getCategorical()
self.assertTrue(cat.startswith("string").all())
def testGroup(self):
group = self._getRandomizedCategorical().group()
self.assertTrue((ak.array([2, 5, 9, 6, 1, 3, 7, 0, 4, 8]) == group).all())
def testUnique(self):
cat = self._getRandomizedCategorical()
self.assertTrue(
(
ak.Categorical(
ak.array(["non-string", "string3", "string1", "non-string2", "string"])
).to_ndarray()
== cat.unique().to_ndarray()
).all()
)
def testToNdarray(self):
cat = self._getRandomizedCategorical()
ndcat = np.array(
[
"string",
"string1",
"non-string",
"non-string2",
"string",
"non-string",
"string3",
"non-string2",
"string",
"non-string",
]
)
self.assertTrue((cat.to_ndarray() == ndcat).all())
def testEquality(self):
cat = self._getCategorical()
catDupe = self._getCategorical()
catNonDupe = self._getRandomizedCategorical()
self.assertTrue((cat == catDupe).all())
self.assertTrue((cat != catNonDupe).all())
c1 = ak.Categorical(ak.array(["a", "b", "c", "a", "b"]))
c2 = ak.Categorical(ak.array(["a", "x", "c", "y", "b"]))
res = c1 == c2
ans = ak.array([True, False, True, False, True])
self.assertTrue((res == ans).all())
def testBinop(self):
cat = self._getCategorical()
catDupe = self._getCategorical()
catNonDupe = self._getRandomizedCategorical()
self.assertTrue((cat._binop(catDupe, "==")).all())
self.assertTrue((cat._binop(catNonDupe, "!=")).all())
self.assertTrue(
(
ak.array([True, True, True, True, True, True, True, True, True, True])
== cat._binop(catDupe, "==")
).all()
)
self.assertTrue(
(
ak.array([False, False, False, False, False, False, False, False, False, False])
== cat._binop(catDupe, "!=")
).all()
)
self.assertTrue(
(
ak.array([True, False, False, False, False, False, False, False, False, False])
== cat._binop("string 1", "==")
).all()
)
self.assertTrue(
(
ak.array([True, False, False, False, False, False, False, False, False, False])
== cat._binop(np.str_("string 1"), "==")
).all()
)
self.assertTrue(
(
ak.array([False, True, True, True, True, True, True, True, True, True])
== cat._binop("string 1", "!=")
).all()
)
self.assertTrue(
(
ak.array([False, True, True, True, True, True, True, True, True, True])
== cat._binop(np.str_("string 1"), "!=")
).all()
)
with self.assertRaises(NotImplementedError):
cat._binop("string 1", "===")
with self.assertRaises(TypeError):
cat._binop(1, "==")
def testIn1d(self):
vals = [i % 3 for i in range(10)]
valsTwo = [i % 2 for i in range(10)]
stringsOne = ak.array(["String {}".format(i) for i in vals])
stringsTwo = ak.array(["String {}".format(i) for i in valsTwo])
catOne = ak.Categorical(stringsOne)
catTwo = ak.Categorical(stringsTwo)
answer = ak.array([x < 2 for x in vals])
self.assertTrue((answer == ak.in1d(catOne, catTwo)).all())
self.assertTrue((answer == ak.in1d(catOne, stringsTwo)).all())
with self.assertRaises(TypeError):
ak.in1d(catOne, ak.randint(0, 5, 5))
def testConcatenate(self):
catOne = self._getCategorical("string", 51)
catTwo = self._getCategorical("string-two", 51)
resultCat = catOne.concatenate([catTwo])
self.assertEqual("category", resultCat.objtype)
self.assertIsInstance(resultCat, ak.Categorical)
self.assertEqual(100, resultCat.size)
# Since Categorical.concatenate uses Categorical.from_codes method, confirm
# that both permutation and segments are None
self.assertFalse(resultCat.permutation)
self.assertFalse(resultCat.segments)
resultCat = ak.concatenate([catOne, catOne], ordered=False)
self.assertEqual("category", resultCat.objtype)
self.assertIsInstance(resultCat, ak.Categorical)
self.assertEqual(100, resultCat.size)
# Since Categorical.concatenate uses Categorical.from_codes method, confirm
# that both permutation and segments are None
self.assertFalse(resultCat.permutation)
self.assertFalse(resultCat.segments)
# Concatenate two Categoricals with different categories,
# and test result against original strings
s1 = ak.array(["abc", "de", "abc", "fghi", "de"])
s2 = ak.array(["jkl", "mno", "fghi", "abc", "fghi", "mno"])
c1 = ak.Categorical(s1)
c2 = ak.Categorical(s2)
# Ordered concatenation
s12ord = ak.concatenate([s1, s2], ordered=True)
c12ord = ak.concatenate([c1, c2], ordered=True)
self.assertTrue((ak.Categorical(s12ord) == c12ord).all())
# Unordered (but still deterministic) concatenation
s12unord = ak.concatenate([s1, s2], ordered=False)
c12unord = ak.concatenate([c1, c2], ordered=False)
self.assertTrue((ak.Categorical(s12unord) == c12unord).all())
# Tiny concatenation
# Used to fail when length of array was less than numLocales
# CI uses 2 locales, so try with length-1 arrays
a = ak.Categorical(ak.array(["a"]))
b = ak.Categorical(ak.array(["b"]))
c = ak.concatenate((a, b), ordered=False)
ans = ak.Categorical(ak.array(["a", "b"]))
self.assertTrue((c == ans).all())
def testSaveAndLoadCategorical(self):
"""
Test to save categorical to hdf5 and read it back successfully
"""
num_elems = 51 # _getCategorical starts counting at 1, so the size is really off by one
cat = self._getCategorical(size=num_elems)
with self.assertRaises(ValueError): # Expect error for mode not being append or truncate
cat.save("foo", dataset="bar", mode="not_allowed")
with tempfile.TemporaryDirectory(dir=CategoricalTest.cat_test_base_tmp) as tmp_dirname:
dset_name = "categorical_array" # name of categorical array
# Test the save functionality & confirm via h5py
cat.save(f"{tmp_dirname}/cat-save-test", dataset=dset_name)
import h5py
f = h5py.File(tmp_dirname + "/cat-save-test_LOCALE0000", mode="r")
keys = set(f.keys())
if (
pdarrayIO.ARKOUDA_HDF5_FILE_METADATA_GROUP in keys
): # Ignore the metadata group if it exists
keys.remove(pdarrayIO.ARKOUDA_HDF5_FILE_METADATA_GROUP)
self.assertEqual(len(keys), 5, "Expected 5 keys")
self.assertSetEqual(
set(f"categorical_array.{k}" for k in cat._get_components_dict().keys()), keys
)
f.close()
# Now try to read them back with load_all
x = ak.load_all(path_prefix=f"{tmp_dirname}/cat-save-test")
self.assertTrue(dset_name in x)
cat_from_hdf = x[dset_name]
expected_categories = [f"string {i}" for i in range(1, num_elems)] + ["N/A"]
# Note assertCountEqual asserts a and b have the same elements
# in the same amount regardless of order
self.assertCountEqual(cat_from_hdf.categories.to_ndarray().tolist(),
expected_categories)
# Asserting the optional components and sizes are correct
# for both constructors should be sufficient
self.assertTrue(cat_from_hdf.segments is not None)
self.assertTrue(cat_from_hdf.permutation is not None)
print(f"==> cat_from_hdf.size:{cat_from_hdf.size}")
self.assertTrue(cat_from_hdf.size == num_elems - 1)
def test_unused_categories_logic(self):
"""
Test that Categoricals built from_codes and from slices
that have unused categories behave correctly
"""
s = ak.array([str(i) for i in range(10)])
s12 = s[1:3]
cat = ak.Categorical(s)
cat12 = cat[1:3]
self.assertListEqual(
ak.in1d(s, s12).to_ndarray().tolist(), ak.in1d(cat, cat12).to_ndarray().tolist()
)
self.assertSetEqual(
set(ak.unique(s12).to_ndarray().tolist()), set(ak.unique(cat12).to_ndarray().tolist())
)
cat_from_codes = ak.Categorical.from_codes(ak.array([1, 2]), s)
self.assertListEqual(
ak.in1d(s, s12).to_ndarray().tolist(), ak.in1d(cat, cat_from_codes).to_ndarray().tolist()
)
self.assertSetEqual(
set(ak.unique(s12).to_ndarray().tolist()),
set(ak.unique(cat_from_codes).to_ndarray().tolist()),
)
def testSaveAndLoadCategoricalMulti(self):
"""
Test to build a pseudo dataframe with multiple
categoricals, pdarrays, strings objects and successfully
write/read it from HDF5
"""
c1 = self._getCategorical(prefix="c1", size=51)
c2 = self._getCategorical(prefix="c2", size=52)
pda1 = ak.zeros(51)
strings1 = ak.random_strings_uniform(9, 10, 52)
with tempfile.TemporaryDirectory(dir=CategoricalTest.cat_test_base_tmp) as tmp_dirname:
df = {"cat1": c1, "cat2": c2, "pda1": pda1, "strings1": strings1}
ak.save_all(df, f"{tmp_dirname}/cat-save-test")
x = ak.load_all(path_prefix=f"{tmp_dirname}/cat-save-test")
self.assertTrue(len(x.items()) == 4)
# Note assertCountEqual asserts a and b have the same
# elements in the same amount regardless of order
self.assertCountEqual(
x["cat1"].categories.to_ndarray().tolist(), c1.categories.to_ndarray().tolist()
)
self.assertCountEqual(
x["cat2"].categories.to_ndarray().tolist(), c2.categories.to_ndarray().tolist()
)
self.assertCountEqual(x["pda1"].to_ndarray().tolist(), pda1.to_ndarray().tolist())
self.assertCountEqual(x["strings1"].to_ndarray().tolist(), strings1.to_ndarray().tolist())
def testNA(self):
s = ak.array(["A", "B", "C", "B", "C"])
# NAval present in categories
c = ak.Categorical(s, NAvalue="C")
self.assertListEqual(c.isna().to_ndarray().tolist(), [False, False, True, False, True])
self.assertTrue(c.NAvalue == "C")
# Test that NAval survives registration
c.register("my_categorical")
c2 = ak.Categorical.attach("my_categorical")
self.assertTrue(c2.NAvalue == "C")
# default NAval not present in categories
c = ak.Categorical(s)
self.assertTrue(not c.isna().any())
self.assertTrue(c.NAvalue == "N/A")
def testStandardizeCategories(self):
c1 = ak.Categorical(ak.array(["A", "B", "C"]))
c2 = ak.Categorical(ak.array(["B", "C", "D"]))
c3, c4 = ak.Categorical.standardize_categories([c1, c2])
self.assertTrue((c3.categories == c4.categories).all())
self.assertTrue(not c3.isna().any())
self.assertTrue(not c4.isna().any())
self.assertTrue(c3.categories.size == c1.categories.size + 1)
self.assertTrue(c4.categories.size == c2.categories.size + 1)
def testLookup(self):
keys = ak.array([1, 2, 3])
values = ak.Categorical(ak.array(["A", "B", "C"]))
args = ak.array([3, 2, 1, 0])
ret = ak.lookup(keys, values, args)
expected = ["C", "B", "A", "N/A"]
self.assertListEqual(ret.to_ndarray().tolist(), expected)
def tearDown(self):
super(CategoricalTest, self).tearDown()
for f in glob.glob("{}/*".format(CategoricalTest.cat_test_base_tmp)):
os.remove(f)
@classmethod
def tearDownClass(cls):
super(CategoricalTest, cls).tearDownClass()
shutil.rmtree(CategoricalTest.cat_test_base_tmp)
|
the-stack_0_25477
|
import numpy as np
import os
import cv2
import random
from PIL import Image
import ImageHeader
class Utility(object):
def readGIFFile(self, filename, imgHdr = ImageHeader.ImageHeader()):
frame = Image.open(filename).convert('L')
npImage = np.array(frame)
# below is for direct input into tensorflow
# imgInput = np.true_divide(np.array(frame),255)
frame.close()
return npImage
def readFileLabel(self, dirname):
self.filelist = []
for root, dirs, files in os.walk(dirname):
for filename in files:
self.filelist.append(filename)
return self.filelist
def McCallRule(self, fileList = []):
col = 11
fileMatrix = [[0] * col for x in range(15)]
numOfFile = len(fileList)
mccall70 = []
mccall20 = []
mccall10 = []
for ii in range(165):
jj = ii % 15
kk = ii % 11
fileMatrix[jj][kk] = fileList[ii]
print("file list: ", fileMatrix[jj][kk])
for aa in range(15):
for bb in range(8):
mccall70.append(fileMatrix[aa][bb])
for cc in range(8,10):
mccall20.append((fileMatrix[aa][cc]))
for dd in range(10,11):
mccall10.append(fileMatrix[aa][dd])
return mccall70, mccall20, mccall10
def McCallRuleWrap(self, fileList = []):
col = 11
fileMatrix = [[0] * col for x in range(15)]
numOfFile = len(fileList)
mccall70 = []
mccall20 = []
mccall10 = []
for ii in range(165):
jj = ii % 15
kk = ii % 11
fileMatrix[jj][kk] = fileList[ii]
print("file list: ", fileMatrix[jj][kk])
for aa in range(15):
for bb in range(3, 11):
mccall70.append(fileMatrix[aa][bb])
for ee in range(0, 0):
mccall70.append(fileMatrix[aa][ee])
for cc in range(0, 2):
mccall20.append(fileMatrix[aa][cc])
for ff in range(0, 0):
mccall20.append(fileMatrix[aa][ff])
for dd in range(2, 3):
mccall10.append(fileMatrix[aa][dd])
return mccall70, mccall20, mccall10
def ParetoRule(self, fileList = []):
col = 11
fileMatrix = [[0] * col for x in range(15)]
pareto90 = []
pareto10 = []
for ii in range(165):
jj = ii % 15
kk = ii % 11
fileMatrix[jj][kk] = fileList[ii]
print("file list: ", fileMatrix[jj][kk])
for aa in range(15):
for bb in range(10, 11):
pareto90.append(fileMatrix[aa][bb])
for dd in range(0, 9):
pareto90.append((fileMatrix[aa][dd]))
for cc in range(9, 10):
pareto10.append((fileMatrix[aa][cc]))
return pareto90, pareto10
def RandomImg(self, filelist=[]):
randomImg = []
for ii in range(165):
imgIndex = random.randrange(165)
randomImg.append(filelist[imgIndex])
return randomImg
def SaveImageFile(self, filepath, targetImg):
cv2.imwrite(filepath, targetImg)
def DisplayImage(self, targetList, dirname):
for filename in targetList:
frame = Image.open(dirname +'/'+ filename)
npImage = np.array(frame)
cv2.imshow("Labeled image", npImage)
cv2.waitKey()
cv2.destroyAllWindows()
def SaveConfusionMatrix(self, targetMatrix, filepath):
matrixFile = open(filepath, "w")
matrixFile.writelines(targetMatrix)
matrixFile.close()
def SaveClassification(self, targetReport, filepath):
classFile = open(filepath, "w")
classFile.writelines(targetReport)
classFile.close()
def DisplayWithOverlay(self, targetImage, imgDir, targetOverlay, overlayDir):
frame = Image.open(imgDir+"/"+targetImage).convert('L')
overlay = Image.open(overlayDir+"/"+targetOverlay)
backGround = np.array(frame, np.uint8)
backData = cv2.cvtColor(backGround, cv2.COLOR_GRAY2BGRA)
foreGround = np.array(overlay)
foreData = cv2.cvtColor(foreGround, cv2.COLOR_RGBA2BGRA)
cv2.imshow("foredata", foreData)
backWidth, backHeight = backGround.shape
finalForeData = cv2.resize(foreData, (backWidth, backHeight))
outImage = cv2.add(finalForeData, backData)
cv2.imshow("Image with overlay", outImage)
cv2.waitKey(0)
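if __name__ == "__main__":
    # Minimal sketch (not part of the original file): split 165 dummy filenames
    # with the McCall 70/20/10 rule and report the partition sizes. The filename
    # pattern is an assumption; any list of 165 names works.
    util = Utility()
    names = ["subject{:02d}_{:02d}.gif".format(i % 15, i % 11) for i in range(165)]
    train70, val20, test10 = util.McCallRule(names)
    print(len(train70), len(val20), len(test10))  # 120 30 15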
|
the-stack_0_25479
|
#! /usr/bin/env python3
import time
import requests
from lxml import etree
import json
import traceback
import pandas as pd
import numpy as np
import re
import os
print_urls = False # I set this to true when debugging
# constants for URLs
EMAIL = '[email protected]'
API_KEY_FILE = 'ncbi_api_key.txt'
BASE_URL = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/')
TOOL = 'methnet'
ESEARCH_DB = 'pubmed'
ELINK_DB = 'pmc'
ELINK_LINKNAME = 'pmc_pmc_cites' # give PMCID
# ELINK_LINKNAME = 'pmc_refs_pubmed' # give PMCID
# ELINK_LINKNAME = 'pubmed_pubmed_refs' # give PMID
MAX_N_RESULTS = 100000
# if there's an API key provided, we can make 10 requests/sec, if not, 3
try:
API_KEY = open(API_KEY_FILE, 'r').read().strip()
MAX_REQUESTS_PER_SEC = 11
except Exception:
    API_KEY = ''  # this will go into the URL but will be ignored by the API
MAX_REQUESTS_PER_SEC = 3
def build_esearch_url(field_query):
'''
This function builds a esearch URL witht the given methods query.
'''
url = (f'{BASE_URL}esearch.fcgi?&db={ESEARCH_DB}&retmax={MAX_N_RESULTS}'
f'&api_key={API_KEY}&email={EMAIL}&tool={TOOL}&usehistory=y'
f'&term={field_query}+AND+pubmed+pmc+open+access[filter]')
url = url.replace(' ', '+')
url = url.replace('"', '%22').replace('(', '%28').replace(')', '%28')
if print_urls:
print('ESEARCH URL', url, '\n')
return(url)
def build_efetch_url_from_history(pmcid, esearch_tree):
query_key = esearch_tree.find('QueryKey').text
web_env = esearch_tree.find('WebEnv').text
url = (f'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
f'efetch.fcgi?db=pmc&id={pmcid}'
f'&api_key={API_KEY}&email={EMAIL}&tool={TOOL}'
f'&WebEnv={web_env}&query_key={query_key}')
if print_urls:
print('EFETCH URL', url, '\n')
return url
def build_elink_url(id):
'''
Returns an e-link URL for the ID for a paper.
'''
url = (f'{BASE_URL}elink.fcgi?&dbfrom={ELINK_DB}'
f'&api_key={API_KEY}&email={EMAIL}&tool={TOOL}'
f'&linkname={ELINK_LINKNAME}&id={id}')
if print_urls:
print('ELINK URL', url, '\n')
return url
def get_ids(tree):
'''
Get the list of IDs from an XML tree
'''
ids = []
for id_element in tree.iter('Id'):
ids.append(int(id_element.text))
return ids
def space_searches(n_searches):
'''
Space out the searches in time so that we don't exceed the max. allowed
per second. With the API, it is 10/sec; without it is 3/sec
'''
if n_searches > MAX_REQUESTS_PER_SEC:
time.sleep(MAX_REQUESTS_PER_SEC/100)
def divide_list_into_chunks(my_list, chunk_len):
for i in range(0, len(my_list), chunk_len):
yield my_list[i:i + chunk_len]
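# Quick sanity check of the chunking helper (hypothetical example):
#   >>> list(divide_list_into_chunks(list(range(5)), 2))
#   [[0, 1], [2, 3], [4]]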
def pmids_to_pmcids(pmid_list):
urls = []
all_pmcids = []
# break list of ids into chunks of 200, since that is the limit for the api
ids_in_chunks = list(divide_list_into_chunks(pmid_list, 200))
for chunk in ids_in_chunks:
url = ('https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?'
f'api_key={API_KEY}&tool={TOOL}&email={EMAIL}&ids={chunk}')\
.replace(' ', '').replace('[', '').replace(']', '')
urls.append(url)
if print_urls:
print('PMID-PMCID URL: ', url, '\n')
response = requests.get(url)
tree = etree.XML(response.content)
full_pmcids = tree.xpath("//record/@pmcid")
pmcids = [int(re.match(r"PMC(\d+)", pmcid).group(1)) for pmcid in full_pmcids]
all_pmcids = all_pmcids + pmcids
space_searches(n_searches=len(ids_in_chunks))
return all_pmcids, urls
def get_method_data(pmcids, esearch_tree):
'''
Get the number of times each method is mentioned in each paper.
'''
method_data = np.zeros((len(pmcids), len(list_methods_queries)))
for n_id, pmcid in enumerate(pmcids):
print(f'Getting data for ID {n_id} / {len(pmcids)}', end='\r')
efetch_url = build_efetch_url_from_history(pmcid, esearch_tree)
response = requests.get(efetch_url)
resp_text = response.text.lower()
for n_meth, method in enumerate(list_methods_queries):
method_data[n_id, n_meth] = resp_text.count(method)
return method_data
def get_pmid_from_efetch_result(response):
text = response.text
before = '<article-id pub-id-type="pmid">'
after = '</article-id>'
start = [m.start() for m in re.finditer(before, text)][0] + len(before)
end = [m.start() for m in re.finditer(after, text)][0]
pmid = int(text[start: end])
return pmid
def get_first_instance(tree, term):
items = []
for el in tree.iter(term):
items.append(el.text)
if len(items) > 0:
item = items[0]
else:
item = []
return item
def make_data(field_query, list_methods_queries, data_id='noname',
save_data=True):
'''
    Run the full esearch -> efetch -> elink pipeline for field_query: find open
    access papers matching the query, count how often each methods query appears
    in each full text, and collect citations and metadata into a dataframe.
'''
# make pandas dataframe to store results
columns = (['pmcid', 'pmid', 'month', 'year', 'title', 'journal', 'refs'] +
list_methods_queries +
['esearch_url', 'efetch_url', 'elink_url', 'pm_pmc_cnvrt_urls'])
data = pd.DataFrame(columns=columns)
data = data.set_index('pmcid', drop=True)
datafile = f'../data/pubmed_data__{data_id}.csv'
try:
# ESEARCH ###########################################################
esearch_url = build_esearch_url(field_query)
# get response from URL in an XML tree format
response = requests.get(esearch_url)
esearch_tree = etree.XML(response.content)
# get ids of papers that include the given methods-related keyword
if int(esearch_tree.find('Count').text) > MAX_N_RESULTS:
print(f'Warning: More than the max. number of results allowed per'
                  f' page; only the first {MAX_N_RESULTS} will be considered')
if int(esearch_tree.find('Count').text) > 0: # if there are results
esearch_pmids = get_ids(esearch_tree)
else:
esearch_pmids = []
print('No search results:\'(')
print(f'\n{len(esearch_pmids)} articles found\n')
# convert the pubmed ids from the search results to pmc ids, so that we
# can access the full text with efetch
print('Converting PMIDs to PMCIDs\n')
pmcids, cnvrt_urls = pmids_to_pmcids(esearch_pmids)
print(f'{len(pmcids)} PMCIDs\n')
# add new row in the data dataframe, one for each ID found
print('Adding rows to dataframe\n')
data_method = pd.DataFrame(columns=columns)
data_method['pmcid'] = pmcids
data_method = data_method.set_index('pmcid', drop=True)
data.append(data_method)
# get data for each paper
for n_id, pmcid in enumerate(pmcids):
print(f'Getting data for ID {n_id + 1} / {len(pmcids)}', end='\r')
# EFETCH ##############################################################
# use efetch to get the full text
efetch_url = build_efetch_url_from_history(pmcid, esearch_tree)
response = requests.get(efetch_url)
resp_text = response.text.lower()
eft_tree = etree.XML(response.content)
# get info from the xml
title = get_first_instance(eft_tree, 'article-title')
journal = get_first_instance(eft_tree, 'journal-title')
month = get_first_instance(eft_tree, 'month')
year = get_first_instance(eft_tree, 'year')
pmid = get_pmid_from_efetch_result(response)
# get the counts of the number of times each method was mentioned
for n_meth, method in enumerate(list_methods_queries):
data.loc[pmcid, method] = resp_text.count(method)
# ELINK ##############################################################
# get the ids of papers cited by each paper
elink_url = build_elink_url(pmcid)
response = requests.get(elink_url)
elink_tree = etree.XML(response.content)
elink_ids = get_ids(elink_tree)
data.loc[pmcid, 'pmid'] = pmid
data.loc[pmcid, 'refs'] = elink_ids
data.loc[pmcid, 'month'] = month
data.loc[pmcid, 'year'] = year
data.loc[pmcid, 'title'] = title
data.loc[pmcid, 'journal'] = journal
data.loc[pmcid, 'esearch_url'] = esearch_url
data.loc[pmcid, 'efetch_url'] = efetch_url
data.loc[pmcid, 'elink_url'] = elink_url
data.loc[pmcid, 'pm_pmc_cnvrt_urls'] = cnvrt_urls
space_searches(n_searches=len(pmcids))
print('\n')
# save the data if there's an error so we can debug
except Exception as err:
traceback.print_tb(err.__traceback__)
print(err)
with open(f'../data/response_dumped_by_error__{data_id}.json', 'w') as json_file:
json.dump(response.text, json_file)
if os.path.exists(datafile):
os.remove(datafile)
# save all the data
data.to_csv(datafile)
return data
|
the-stack_0_25480
|
import asyncio
from collections import OrderedDict
import cytoolz
import functools
from hexbytes import HexBytes
import logging
from typing import (
Dict,
List,
Optional,
Set
)
from web3 import Web3
from web3.datastructures import AttributeDict
from web3.utils.contracts import find_matching_event_abi
from web3.utils.events import get_event_data
from web3.utils.filters import construct_event_filter_params
import wings
DEFAULT_WINDOW_SIZE = 100
class ContractEventLogger:
_cel_logger: Optional[logging.Logger] = None
@classmethod
def logger(cls) -> logging.Logger:
if cls._cel_logger is None:
cls._cel_logger = logging.getLogger(__name__)
return cls._cel_logger
def __init__(self,
w3: Web3,
address: str,
contract_abi: List[Dict[str, any]],
block_events_window_size: Optional[int] = DEFAULT_WINDOW_SIZE):
super().__init__()
self._w3: Web3 = w3
self._ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self._block_events_window_size = block_events_window_size
self._address: str = address
self._contract_abi: List[Dict[str, any]] = contract_abi
self._event_abi_map: Dict[str, Dict[str, any]] = {}
self._event_cache: Set[HexBytes] = set()
self._block_events: OrderedDict = OrderedDict()
@property
def address(self) -> str:
return self._address
@property
def contract_abi(self) -> List[Dict[str, any]]:
return self._contract_abi
async def get_new_entries_from_logs(self,
event_name: str,
block_hashes: List[HexBytes]) -> List[AttributeDict]:
event_abi: Dict[str, any] = self._event_abi_map.get(event_name, None)
if event_abi is None:
event_abi = find_matching_event_abi(self._contract_abi, event_name=event_name)
self._event_abi_map[event_name] = event_abi
_, event_filter_params = construct_event_filter_params(event_abi,
contract_address=self._address)
tasks = []
for block_hash in block_hashes:
event_filter_params["blockHash"] = block_hash.hex()
tasks.append(self._get_logs(event_filter_params))
raw_logs = await asyncio.gather(*tasks, return_exceptions=True)
logs: List[any] = list(cytoolz.concat(raw_logs))
new_entries = []
for log in logs:
event_data: AttributeDict = get_event_data(event_abi, log)
event_data_block_number: int = event_data["blockNumber"]
event_data_tx_hash: HexBytes = event_data["transactionHash"]
if event_data_tx_hash not in self._event_cache:
if event_data_block_number not in self._block_events:
self._block_events[event_data_block_number] = [event_data_tx_hash]
else:
self._block_events[event_data_block_number].append(event_data_tx_hash)
self._event_cache.add(event_data_tx_hash)
new_entries.append(event_data)
else:
self.logger().debug(
f"Duplicate event transaction hash found - '{event_data_tx_hash.hex()}'."
)
while len(self._block_events) > self._block_events_window_size:
tx_hashes: List[HexBytes] = self._block_events.popitem(last=False)[1]
for tx_hash in tx_hashes:
self._event_cache.remove(tx_hash)
return new_entries
async def _get_logs(self,
event_filter_params: Dict[str, any],
max_tries: Optional[int] = 30) -> List[Dict[str, any]]:
ev_loop: asyncio.BaseEventLoop = self._ev_loop
count: int = 0
logs = []
while True:
try:
count += 1
if count > max_tries:
self.logger().debug(
f"Error fetching logs from block with filters: '{event_filter_params}'."
)
break
logs = await ev_loop.run_in_executor(
wings.get_executor(),
functools.partial(
self._w3.eth.getLogs,
event_filter_params))
break
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().debug(f"Block not found with filters: '{event_filter_params}'. Retrying...")
await asyncio.sleep(0.5)
return logs
|
the-stack_0_25481
|
def load(h):
return ({'abbr': 'TIROS-N', 'code': 1, 'title': 'TIROS-N'},
{'abbr': 'NOAA-6/HIRS', 'code': 2, 'title': 'NOAA-6/HIRS'},
{'abbr': 'NOAA-7/HIRS', 'code': 3, 'title': 'NOAA-7/HIRS'},
{'abbr': 'NOAA-8/HIRS', 'code': 4, 'title': 'NOAA-8/HIRS'},
{'abbr': 'NOAA-9/HIRS', 'code': 5, 'title': 'NOAA-9/HIRS'},
{'abbr': 'NOAA-10/HIRS', 'code': 6, 'title': 'NOAA-10/HIRS'},
{'abbr': 'NOAA-11/HIRS', 'code': 7, 'title': 'NOAA-11/HIRS'},
{'abbr': 'NOAA-12/HIRS', 'code': 8, 'title': 'NOAA-12/HIRS'},
{'abbr': 'NOAA-14/HIRS', 'code': 9, 'title': 'NOAA-14/HIRS'},
{'abbr': 'NOAA-15/HIRS', 'code': 10, 'title': 'NOAA-15/HIRS'},
{'abbr': 'NOAA-16/HIRS', 'code': 11, 'title': 'NOAA-16/HIRS'},
{'abbr': 'NOAA-17/HIRS', 'code': 12, 'title': 'NOAA-17/HIRS'},
{'abbr': 'NOAA-18/HIRS', 'code': 13, 'title': 'NOAA-18/HIRS'},
{'abbr': 'NOAA-19/HIRS', 'code': 14, 'title': 'NOAA-19/HIRS'},
{'abbr': 'METOP-A/HIRS', 'code': 15, 'title': 'METOP-A/HIRS'},
{'abbr': 'NOAA-15/AMSUA', 'code': 1001, 'title': 'NOAA-15/AMSUA'},
{'abbr': 'NOAA-16/AMSUA', 'code': 1002, 'title': 'NOAA-16/AMSUA'},
{'abbr': 'NOAA-17/AMSUA', 'code': 1003, 'title': 'NOAA-17/AMSUA'},
{'abbr': 'NOAA-18/AMSUA', 'code': 1004, 'title': 'NOAA-18/AMSUA'},
{'abbr': 'NOAA-19/AMSUA', 'code': 1005, 'title': 'NOAA-19/AMSUA'},
{'abbr': 'NOAA-19/AMSUA', 'code': 1006, 'title': 'NOAA-19/AMSUA'},
{'abbr': 'METOP-A/AMSUA', 'code': 1007, 'title': 'METOP-A/AMSUA'},
{'abbr': 'AQUA/AMSUA', 'code': 1008, 'title': 'AQUA/AMSUA'},
{'abbr': 'NOAA-15/AMSUB', 'code': 2001, 'title': 'NOAA-15/AMSUB'},
{'abbr': 'NOAA-16/AMSUB', 'code': 2002, 'title': 'NOAA-16/AMSUB'},
{'abbr': 'NOAA-17/AMSUB', 'code': 2003, 'title': 'NOAA-17/AMSUB'},
{'abbr': 'NOAA-18/AMSUB', 'code': 2004, 'title': 'NOAA-18/AMSUB'},
{'abbr': 'NOAA-18/AMSUB', 'code': 2005, 'title': 'NOAA-18/AMSUB'},
{'abbr': 'NOAA-19/MHS', 'code': 3001, 'title': 'NOAA-19/MHS'},
{'abbr': 'METOP-A/MHS', 'code': 3002, 'title': 'METOP-A/MHS'},
{'abbr': 'GOES-5/IMAGER', 'code': 4001, 'title': 'GOES-5/IMAGER'},
{'abbr': 'GOES-8/IMAGER', 'code': 4002, 'title': 'GOES-8/IMAGER'},
{'abbr': 'GOES-9/IMAGER', 'code': 4003, 'title': 'GOES-9/IMAGER'},
{'abbr': 'GOES-10/IMAGER', 'code': 4004, 'title': 'GOES-10/IMAGER'},
{'abbr': 'GOES-11/IMAGER', 'code': 4005, 'title': 'GOES-11/IMAGER'},
{'abbr': 'GOES-12/IMAGER', 'code': 4006, 'title': 'GOES-12/IMAGER'},
{'abbr': 'METEOSAT-7/MVIRI', 'code': 4007, 'title': 'METEOSAT-7/MVIRI'},
{'abbr': 'METEOSAT-8/SEVIRI', 'code': 4008, 'title': 'METEOSAT-8/SEVIRI'},
{'abbr': 'METEOSAT-9/SEVIRI', 'code': 4009, 'title': 'METEOSAT-9/SEVIRI'},
{'abbr': 'MTSAT-1R/IMAGER', 'code': 4010, 'title': 'MTSAT-1R/IMAGER'},
{'abbr': 'ERS-2/GOME', 'code': 5001, 'title': 'ERS-2/GOME'},
{'abbr': 'METEOSAT-8/SEVIRI', 'code': 5002, 'title': 'METEOSAT-8/SEVIRI'},
{'abbr': 'METEOSAT-9/SEVIRI', 'code': 5003, 'title': 'METEOSAT-9/SEVIRI'},
{'abbr': 'AURA/MLS', 'code': 5004, 'title': 'AURA/MLS'},
{'abbr': 'AURA/OMI', 'code': 5005, 'title': 'AURA/OMI'},
{'abbr': 'NOAA-9/SBUV', 'code': 5006, 'title': 'NOAA-9/SBUV'},
{'abbr': 'NOAA-11/SBUV', 'code': 5007, 'title': 'NOAA-11/SBUV'},
{'abbr': 'NOAA-14/SBUV', 'code': 5008, 'title': 'NOAA-14/SBUV'},
{'abbr': 'NOAA-16/SBUV', 'code': 5009, 'title': 'NOAA-16/SBUV'},
{'abbr': 'NOAA-17/SBUV', 'code': 5010, 'title': 'NOAA-17/SBUV'},
{'abbr': 'NOAA-18/SBUV', 'code': 5011, 'title': 'NOAA-18/SBUV'},
{'abbr': 'NOAA-19/SBUV', 'code': 5012, 'title': 'NOAA-19/SBUV'},
{'abbr': 'METOP-A/GOME-2', 'code': 5013, 'title': 'METOP-A/GOME-2'},
{'abbr': 'ENVISAT/SCIAMACHY', 'code': 5014, 'title': 'ENVISAT/SCIAMACHY'},
{'abbr': 'ENVISAT/GOMOS', 'code': 5015, 'title': 'ENVISAT/GOMOS'},
{'abbr': 'ENVISAT/MIPAS', 'code': 5016, 'title': 'ENVISAT/MIPAS'},
{'abbr': 'Metror-3/TOMS', 'code': 5017, 'title': 'Metror-3/TOMS'},
{'abbr': 'Nimbus-7/TOMS', 'code': 5018, 'title': 'Nimbus-7/TOMS'},
{'abbr': 'ENVISAT/GOMOS', 'code': 6001, 'title': 'ENVISAT/GOMOS'},
{'abbr': 'ENVISAT/MERIS', 'code': 6002, 'title': 'ENVISAT/MERIS'},
{'abbr': 'METOP-A/GRAS', 'code': 7001, 'title': 'METOP-A/GRAS'},
{'abbr': 'CHAMP', 'code': 7002, 'title': 'CHAMP'},
{'abbr': 'GRACE-A', 'code': 7003, 'title': 'GRACE-A'},
{'abbr': 'COSMIC-1', 'code': 7004, 'title': 'COSMIC-1'},
{'abbr': 'COSMIC-2', 'code': 7005, 'title': 'COSMIC-2'},
{'abbr': 'COSMIC-3', 'code': 7006, 'title': 'COSMIC-3'},
{'abbr': 'COSMIC-4', 'code': 7007, 'title': 'COSMIC-4'},
{'abbr': 'COSMIC-5', 'code': 7008, 'title': 'COSMIC-5'},
{'abbr': 'COSMIC-6', 'code': 7009, 'title': 'COSMIC-6'},
{'abbr': 'METEOSAT-2/AMV', 'code': 8001, 'title': 'METEOSAT-2/AMV'},
{'abbr': 'METEOSAT-3/AMV', 'code': 8002, 'title': 'METEOSAT-3/AMV'},
{'abbr': 'METEOSAT-4/AMV', 'code': 8003, 'title': 'METEOSAT-4/AMV'},
{'abbr': 'METEOSAT-5/AMV', 'code': 8014, 'title': 'METEOSAT-5/AMV'},
{'abbr': 'METEOSAT-6/AMV', 'code': 8005, 'title': 'METEOSAT-6/AMV'},
{'abbr': 'METEOSAT-7/AMV', 'code': 8006, 'title': 'METEOSAT-7/AMV'},
{'abbr': 'METEOSAT-8/AMV', 'code': 8007, 'title': 'METEOSAT-8/AMV'},
{'abbr': 'METEOSAT-9/AMV', 'code': 8008, 'title': 'METEOSAT-9/AMV'},
{'abbr': 'GMS-5/AMV', 'code': 8009, 'title': 'GMS-5/AMV'},
{'abbr': 'MTSAT-1R/AMV', 'code': 8010, 'title': 'MTSAT-1R/AMV'},
{'abbr': 'GOES-9/WV', 'code': 8011, 'title': 'GOES-9/WV'},
{'abbr': 'GOES-10/AMV', 'code': 8012, 'title': 'GOES-10/AMV'},
{'abbr': 'GOES-11/AMV', 'code': 8013, 'title': 'GOES-11/AMV'},
{'abbr': 'GOES-12/AMV', 'code': 8014, 'title': 'GOES-12/AMV'},
{'abbr': 'NOAA-15/AVHRR', 'code': 8015, 'title': 'NOAA-15/AVHRR'},
{'abbr': 'NOAA-16/AVHRR', 'code': 8016, 'title': 'NOAA-16/AVHRR'},
{'abbr': 'NOAA-17/AVHRR', 'code': 8017, 'title': 'NOAA-17/AVHRR'},
{'abbr': 'NOAA-18/AVHRR', 'code': 8018, 'title': 'NOAA-18/AVHRR'},
{'abbr': 'NOAA-19/AVHRR', 'code': 8019, 'title': 'NOAA-19/AVHRR'},
{'abbr': 'TERRA/MODIS', 'code': 8020, 'title': 'TERRA/MODIS'},
{'abbr': 'AQUA/MODIS', 'code': 8021, 'title': 'AQUA/MODIS'},
{'abbr': 'FY-2C/IR', 'code': 8022, 'title': 'FY-2C/IR'},
{'abbr': 'ERS/SCATT', 'code': 9001, 'title': 'ERS/SCATT'},
{'abbr': 'ERS/SCATT', 'code': 9002, 'title': 'ERS/SCATT'},
{'abbr': 'ERS-2/SCATT', 'code': 9003, 'title': 'ERS-2/SCATT'},
{'abbr': 'QuickSCAT/SeaWind', 'code': 9004, 'title': 'QuickSCAT/SeaWind'},
{'abbr': 'METOP-A/ASCAT', 'code': 9005, 'title': 'METOP-A/ASCAT'},
{'abbr': 'DSMP-7/SSMI', 'code': 10001, 'title': 'DSMP-7/SSMI'},
{'abbr': 'DSMP-8/SSMI', 'code': 10002, 'title': 'DSMP-8/SSMI'},
{'abbr': 'DSMP-9/SSMI', 'code': 10003, 'title': 'DSMP-9/SSMI'},
{'abbr': 'DSMP-10/SSMI', 'code': 10004, 'title': 'DSMP-10/SSMI'},
{'abbr': 'DSMP-11/SSMI', 'code': 10005, 'title': 'DSMP-11/SSMI'},
{'abbr': 'DSMP-13/SSMI', 'code': 10006, 'title': 'DSMP-13/SSMI'},
{'abbr': 'DSMP-14/SSMI', 'code': 10007, 'title': 'DSMP-14/SSMI'},
{'abbr': 'DSMP-15/SSMI', 'code': 10008, 'title': 'DSMP-15/SSMI'},
{'abbr': 'DSMP-8/SSMI', 'code': 10009, 'title': 'DSMP-8/SSMI'},
{'abbr': 'DSMP-9/SSMI', 'code': 10010, 'title': 'DSMP-9/SSMI'},
{'abbr': 'DSMP-10/SSMI', 'code': 10011, 'title': 'DSMP-10/SSMI'},
{'abbr': 'DSMP-11/SSMI', 'code': 10012, 'title': 'DSMP-11/SSMI'},
{'abbr': 'DSMP-13/SSMI', 'code': 10013, 'title': 'DSMP-13/SSMI'},
{'abbr': 'DSMP-14/SSMI', 'code': 10014, 'title': 'DSMP-14/SSMI'},
{'abbr': 'DSMP-15/SSMI', 'code': 10015, 'title': 'DSMP-15/SSMI'},
{'abbr': 'METOP-A/IASI', 'code': 11001, 'title': 'METOP-A/IASI'},
{'abbr': 'AQUA/AIRS', 'code': 12001, 'title': 'AQUA/AIRS'},
{'abbr': 'DMSP-16/SSMIS', 'code': 13001, 'title': 'DMSP-16/SSMIS'},
{'abbr': 'TRMM/TMI', 'code': 14001, 'title': 'TRMM/TMI'},
{'abbr': 'AQUA/AMSRE', 'code': 15001, 'title': 'AQUA/AMSRE'},
{'abbr': 'Automatic-Land', 'code': 16001, 'title': 'Automatic-Land'},
{'abbr': 'Manual-Land', 'code': 16002, 'title': 'Manual-Land'},
{'abbr': 'Abbreviated-SYNOP', 'code': 16003, 'title': 'Abbreviated-SYNOP'},
{'abbr': 'METAR', 'code': 16004, 'title': 'METAR'},
{'abbr': 'DRIBU', 'code': 16005, 'title': 'DRIBU'},
{'abbr': 'Automatic-SHIP', 'code': 16006, 'title': 'Automatic-SHIP'},
{'abbr': 'Reduced-SHIP', 'code': 16007, 'title': 'Reduced-SHIP'},
{'abbr': 'SHIP', 'code': 16008, 'title': 'SHIP'},
{'abbr': 'Abbreviated-SHIP', 'code': 16009, 'title': 'Abbreviated-SHIP'},
{'abbr': 'DRIBU-BATHY', 'code': 16010, 'title': 'DRIBU-BATHY'},
{'abbr': 'DRIBU-TESAC', 'code': 16011, 'title': 'DRIBU-TESAC'},
{'abbr': 'Ground-Based-GPS', 'code': 16012, 'title': 'Ground-Based-GPS'},
{'abbr': 'Land-PILOT', 'code': 16013, 'title': 'Land-PILOT'},
{'abbr': 'PILOT-SHIP', 'code': 16014, 'title': 'PILOT-SHIP'},
{'abbr': 'American-WindProfilers',
'code': 16015,
'title': 'American-WindProfilers'},
{'abbr': 'American-WindProfilers',
'code': 16016,
'title': 'American-WindProfilers'},
{'abbr': 'European-WindProfilers',
'code': 16017,
'title': 'European-WindProfilers'},
{'abbr': 'Japanese-WindProfilers',
'code': 16018,
'title': 'Japanese-WindProfilers'},
{'abbr': 'TEMP-SHIP', 'code': 16019, 'title': 'TEMP-SHIP'},
{'abbr': 'DROP-Sonde', 'code': 16020, 'title': 'DROP-Sonde'},
{'abbr': 'Mobile-TEMP', 'code': 16021, 'title': 'Mobile-TEMP'},
{'abbr': 'Land-TEMP', 'code': 16022, 'title': 'Land-TEMP'},
{'abbr': 'ROCOB-TEMP', 'code': 16023, 'title': 'ROCOB-TEMP'},
{'abbr': 'SHIP-ROCOB', 'code': 16024, 'title': 'SHIP-ROCOB'},
{'abbr': 'European-WindProfilers',
'code': 16025,
'title': 'European-WindProfilers'},
{'abbr': 'AIREP', 'code': 16026, 'title': 'AIREP'},
{'abbr': 'CODAR', 'code': 16027, 'title': 'CODAR'},
{'abbr': 'COLBA', 'code': 16028, 'title': 'COLBA'},
{'abbr': 'AMDAR', 'code': 16029, 'title': 'AMDAR'},
{'abbr': 'ACARS', 'code': 16030, 'title': 'ACARS'},
{'abbr': 'PAOB', 'code': 16031, 'title': 'PAOB'},
{'abbr': 'PAOB', 'code': 16032, 'title': 'PAOB'},
{'abbr': 'SATOB_Temperature', 'code': 16033, 'title': 'SATOB_Temperature'},
{'abbr': 'SATOB_Wind', 'code': 16034, 'title': 'SATOB_Wind'},
{'abbr': 'SATOB_Temperature', 'code': 16035, 'title': 'SATOB_Temperature'},
{'abbr': 'SATOB_Temperature', 'code': 16036, 'title': 'SATOB_Temperature'},
{'abbr': 'SATEM_500km', 'code': 16037, 'title': 'SATEM_500km'},
{'abbr': 'SATEM_500km', 'code': 16038, 'title': 'SATEM_500km'},
{'abbr': 'SATEM_500km', 'code': 16039, 'title': 'SATEM_500km'},
{'abbr': 'SATEM_500km', 'code': 16040, 'title': 'SATEM_500km'},
{'abbr': 'SATEM_250km', 'code': 16041, 'title': 'SATEM_250km'},
{'abbr': 'SATEM_250km', 'code': 16042, 'title': 'SATEM_250km'},
{'abbr': 'SATEM_250km', 'code': 16043, 'title': 'SATEM_250km'},
{'abbr': 'SATEM_250km', 'code': 16044, 'title': 'SATEM_250km'},
{'abbr': 'Automatic_Land', 'code': 17001, 'title': 'Automatic_Land'},
{'abbr': 'Manual_Land', 'code': 17002, 'title': 'Manual_Land'},
{'abbr': 'Abbreviated_SYNOP', 'code': 17003, 'title': 'Abbreviated_SYNOP'},
{'abbr': 'METAR', 'code': 17004, 'title': 'METAR'},
{'abbr': 'DRIBU', 'code': 17005, 'title': 'DRIBU'},
{'abbr': 'Automatic_SHIP', 'code': 17006, 'title': 'Automatic_SHIP'},
{'abbr': 'Reduced_SHIP', 'code': 17007, 'title': 'Reduced_SHIP'},
{'abbr': 'SHIP', 'code': 17008, 'title': 'SHIP'},
{'abbr': 'Abbreviated-SHIP', 'code': 17009, 'title': 'Abbreviated-SHIP'},
{'abbr': 'DRIBU-BATHY', 'code': 17010, 'title': 'DRIBU-BATHY'},
{'abbr': 'DRIBU-TESAC', 'code': 17011, 'title': 'DRIBU-TESAC'},
{'abbr': 'Ground-Based_GPS', 'code': 17012, 'title': 'Ground-Based_GPS'},
{'abbr': 'Land-PILOT', 'code': 17013, 'title': 'Land-PILOT'},
{'abbr': 'PILOT-SHIP', 'code': 17014, 'title': 'PILOT-SHIP'},
{'abbr': 'American-Wind', 'code': 17015, 'title': 'American-Wind'},
{'abbr': 'American-Wind', 'code': 17016, 'title': 'American-Wind'},
{'abbr': 'European-Wind', 'code': 17017, 'title': 'European-Wind'},
{'abbr': 'Japanese-Wind', 'code': 17018, 'title': 'Japanese-Wind'},
{'abbr': 'TEMP-SHIP', 'code': 17019, 'title': 'TEMP-SHIP'},
{'abbr': 'DROP-Sonde', 'code': 17020, 'title': 'DROP-Sonde'},
{'abbr': 'Mobile-TEMP', 'code': 17021, 'title': 'Mobile-TEMP'},
{'abbr': 'Land-TEMP', 'code': 17022, 'title': 'Land-TEMP'},
{'abbr': 'ROCOB-TEMP', 'code': 17023, 'title': 'ROCOB-TEMP'},
{'abbr': 'SHIP-ROCOB', 'code': 17024, 'title': 'SHIP-ROCOB'})
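# --- Illustrative usage sketch (not part of the original table module) ---
# The tuple returned by load() can be indexed by its numeric codes. The None
# argument is arbitrary since the h parameter is unused; note that a few codes
# repeat, in which case the last matching entry wins in the dict below.
if __name__ == '__main__':
    code_to_abbr = {entry['code']: entry['abbr'] for entry in load(None)}
    print(code_to_abbr[4008])  # METEOSAT-8/SEVIRI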
|
the-stack_0_25482
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import sys
import pytest
from datadog_checks.dev.conditions import CheckCommandOutput, CheckDockerLogs, CheckEndpoints, WaitFor
from datadog_checks.dev.errors import RetryError
from datadog_checks.dev.subprocess import run_command
from .common import not_appveyor
HERE = os.path.dirname(os.path.abspath(__file__))
DOCKER_DIR = os.path.join(HERE, 'docker')
class TestWaitFor:
def test_no_error_no_result_success(self):
assert WaitFor(lambda: None)() is True
def test_no_error_true_result_success(self):
assert WaitFor(lambda: True)() is True
def test_no_error_non_true_result_fail(self):
with pytest.raises(RetryError):
WaitFor(lambda: False, attempts=1)()
def test_error_fail(self):
def f():
raise Exception
with pytest.raises(RetryError):
WaitFor(f, attempts=1)()
class TestCheckCommandOutput:
def test_no_matches(self):
check_command_output = CheckCommandOutput(
'{} -c "import os;print(\'foo\')"'.format(sys.executable), ['bar'], attempts=1
)
with pytest.raises(RetryError):
check_command_output()
def test_matches(self):
check_command_output = CheckCommandOutput(
'{} -c "import os;print(\'foo\')"'.format(sys.executable), ['foo', 'bar']
)
matches = check_command_output()
assert matches == 1
def test_matches_all_fail(self):
check_command_output = CheckCommandOutput(
'{} -c "import os;print(\'foo\')"'.format(sys.executable), ['foo', 'bar'], matches='all', attempts=1
)
with pytest.raises(RetryError):
check_command_output()
def test_matches_all_success(self):
check_command_output = CheckCommandOutput(
'{} -c "import os;print(\'foobar\')"'.format(sys.executable), ['foo', 'bar'], matches='all'
)
matches = check_command_output()
assert matches == 2
class TestCheckDockerLogs:
pytestmark = [pytest.mark.docker, not_appveyor]
def test_no_matches(self):
compose_file = os.path.join(DOCKER_DIR, 'test_default.yaml')
run_command(['docker-compose', '-f', compose_file, 'down'])
check_docker_logs = CheckDockerLogs(compose_file, 'Vault server started', attempts=1)
with pytest.raises(RetryError):
check_docker_logs()
def test_matches(self):
compose_file = os.path.join(DOCKER_DIR, 'test_default.yaml')
check_docker_logs = CheckDockerLogs(compose_file, 'Vault server started')
try:
run_command(['docker-compose', '-f', compose_file, 'up', '-d'], check=True)
check_docker_logs()
finally:
run_command(['docker-compose', '-f', compose_file, 'down'], capture=True)
class TestCheckEndpoints:
def test_fail(self):
check_endpoints = CheckEndpoints('https://google.microsoft', attempts=1)
with pytest.raises(RetryError):
check_endpoints()
def test_success(self):
check_endpoints = CheckEndpoints(['https://google.com', 'https://bing.com'])
check_endpoints()
|
the-stack_0_25483
|
import numpy as np
import threading
class game_engine:
"""
规则:
1. 不带货的情况下 AGV可以在普通道路 货架区 和摆放货物的格子无障碍穿行
2. 不带货的情况下 AGV不可穿墙
3. 不带货的情况下 AGV不可以行驶至其他AGV所在位置
* 若两AGV目标格子相同 则随机有一方可以成功 随机的方式既以随机的顺序计算AGV下一步位置及合法性
* 注意这里要implement两次check 有可能一个AGV1走了,另一个AGV2要到该AGV1之前所在位置
4. 在带货的情况下 除了2和3规定的情况 AGV还不能前往有货的格子
5. AGV 可以在道路中间把货物放下 但不可以把货塞在错误的货架上
"""
def __init__(self, _map, parcel_gen_gap, parcel_gen_seq=None, step_left=None, dl_bound=None, auto_unload=False):
"""
_map is a (x,y,3) numpy array, where the layers are:
1st: walls, spawn points, shelves
2nd: parcels
3rd: players(robos)
step_left: None: no deadline
np.array([]) or []: random deadline given by the engine
np.array([...]) or [...]: a list of given deadline
See the main python file for more information.
"""
        self._lock = threading.Lock()  # guard against unsynchronized (non-atomic) updates
self.auto_unload = auto_unload
self._map = _map.astype(int)
self.should_gen = parcel_gen_gap>0 or (parcel_gen_seq is not None)
self.parcel_gen_gap = parcel_gen_gap
self.parcel_gen_seq = parcel_gen_seq # [[(x11,y11,shelf11)], [], [(x21,y21,shelf21),(x22,y22,shelf22)], ...]
self.success_score = 10
if dl_bound is None:
self.lower_dl = 1 * (self._map.shape[0]+self._map.shape[1])
self.upper_dl = 4 * (self._map.shape[0]+self._map.shape[1])
else:
self.lower_dl, self.upper_dl = dl_bound
if step_left is None:
self.step_left = None
elif len(step_left)==0:
self.step_left = -np.ones([np.max(self._map[:,:,0])], dtype=int)
for i in range(len(self.step_left)):
if i+1 in self._map[:,:,1]:
self.step_left[i] = np.random.randint(self.upper_dl-self.lower_dl+1)+self.lower_dl
else:
self.step_left = step_left
self.score = 0
self.steps = 0
self.n_delivered = 0
self.n_delayed = 0
        # i, j, whether the player carries a parcel. Note: logical (i, j) indices are transposed relative to the (x, y) used for display.
self.players = np.zeros([np.sum(_map[:,:,2]>0),3])
for i in range(_map.shape[0]):
for j in range(_map.shape[1]):
if self._map[i,j,2]>0:
self.players[self._map[i,j,2]-1][0] = i
self.players[self._map[i,j,2]-1][1] = j
def step(self, moves):
"""
moves = [move_0, move_1, ...]
move = (x,y,grab)
The function updates self._map, and returns the reward.
"""
moves_indices = np.array([[i,move[0],move[1],move[2]] for i,move in enumerate(moves)])
moves_indices = np.random.permutation(moves_indices)
candidates = []
for move in moves_indices:
if int(move[0])>=len(self.players): continue
old_pos = (self.players[int(move[0])][:2]).astype(int)
new_pos = (old_pos+move[1:3]).astype(int)
if np.sum(np.abs(old_pos-new_pos))>0:
                # out of bounds
if new_pos[0]<0 or new_pos[1]<0 or new_pos[0]>=self._map.shape[0] or new_pos[1]>=self._map.shape[1]:
continue
                # wall
if self._map[new_pos[0], new_pos[1]][0]==-1:
continue
                # another AGV occupies the target cell
if self._map[new_pos[0], new_pos[1]][2]>0:
candidates.append(move)
continue
                # if the AGV is carrying a parcel
if self.players[int(move[0])][2]!=0:
                    # a parcel lying on the ground (not carried by another AGV)
if self._map[new_pos[0], new_pos[1]][1]>0 and self._map[new_pos[0], new_pos[1]][2]<=0:
continue
self.players[int(move[0])][:2] = new_pos
                self.update_player_map()  # update AGV positions
                # if carrying a parcel, move the parcel along with the AGV
if self.players[int(move[0])][2]!=0:
self._map[:,:,1][new_pos[0], new_pos[1]] = self.players[int(move[0])][2]
self._map[:,:,1][old_pos[0], old_pos[1]] = 0
            # grab / drop
if self.auto_unload:
if self.players[int(move[0])][2]>0 and self._map[new_pos[0], new_pos[1], 0]==self.players[int(move[0])][2]:
self.players[int(move[0])][2] = 0
if move[-1]!=0 and self.players[int(move[0])][2]==0:
self.players[int(move[0])][2] = self._map[:,:,1][old_pos[0], old_pos[1]]
else:
if move[-1]!=0:
if self.players[int(move[0])][2]>0:
self.players[int(move[0])][2] = 0
else:
self.players[int(move[0])][2] = self._map[:,:,1][old_pos[0], old_pos[1]]
candidate_last_length = -1
while(len(candidates)>0 and not(len(candidates)==candidate_last_length)):
            # A better implementation would use a topological sort; otherwise cycles of mutually blocking AGVs cannot be resolved.
new_candidates = []
for move in candidates:
old_pos = (self.players[int(move[0])][:2]).astype(int)
new_pos = (old_pos+move[1:3]).astype(int)
                # another AGV occupies the target cell
if self._map[new_pos[0], new_pos[1]][2]>0:
new_candidates.append(move)
continue
self.players[int(move[0])][:2] = new_pos
                self.update_player_map()  # update AGV positions
                # if carrying a parcel, move the parcel along with the AGV
if self.players[int(move[0])][2]!=0:
self._map[:,:,1][new_pos[0], new_pos[1]] = self.players[int(move[0])][2]
self._map[:,:,1][old_pos[0], old_pos[1]] = 0
                # grab / drop
if self.auto_unload:
if self.players[int(move[0])][2]>0 and self._map[new_pos[0], new_pos[1], 0]==self.players[int(move[0])][2]:
self.players[int(move[0])][2] = 0
if move[-1]!=0 and self.players[int(move[0])][2]==0:
self.players[int(move[0])][2] = self._map[:,:,1][old_pos[0], old_pos[1]]
else:
if move[-1]!=0:
if self.players[int(move[0])][2]>0:
self.players[int(move[0])][2] = 0
else:
self.players[int(move[0])][2] = self._map[:,:,1][old_pos[0], old_pos[1]]
candidate_last_length = len(candidates)
candidates = new_candidates
        delta_score = self.update_score()  # check whether any parcels reached their shelves and update the score accordingly
if self.should_gen:
if self.parcel_gen_seq is None:
if self.parcel_gen_gap<1:
n = int(np.round(np.random.random() * 2 / self.parcel_gen_gap))
self.generate_parcels(n)
elif self.parcel_gen_gap>=1 and np.random.random()<(1/self.parcel_gen_gap):
self.generate_parcels(1)
else:
self.generate_parcels(None)
if self.step_left is not None:
self.step_left-=1
for i in range(len(self.step_left)):
if self.step_left[i]<0: self.step_left[i]=-1
self.steps+=1
return delta_score
def update_player_map(self):
player_map = np.zeros(self._map.shape[:2])
for i,p in enumerate(self.players):
player_map[int(p[0]), int(p[1])] = i+1
self._map[:,:,2] = player_map
def update_score(self):
correct_map = (self._map[:,:,0]==self._map[:,:,1]).astype(int) * (self._map[:,:,1]!=0).astype(int)
for p in self.players:
if p[2]!=0:
correct_map[int(p[0]),int(p[1])]=0
correct_coords = np.argwhere(correct_map==1)
parcel_coords = np.argwhere(self._map[:,:,1]>0)
_s = 0
if self.step_left is not None:
for x,y in parcel_coords:
if self.step_left[self._map[x,y,1]-1]<0:
_s-=1
for x,y in correct_coords:
if self.step_left[self._map[x,y,1]-1]<0:
self.n_delayed+=1
_s+=self.success_score
self.n_delivered+=1
self.step_left[self._map[x,y,1]-1] = -1
else:
for x,y in correct_coords:
_s+=self.success_score
self.n_delivered+=1
with self._lock:
self.score+=_s
self._map[:,:,1] *= (correct_map==0)
return _s
def generate_parcels(self, num):
if self.parcel_gen_seq is None:
for _ in range(num):
shelf_max = np.max(self._map[:,:,0])
spawn_locs = (self._map[:,:,0]==-2).astype(int)
parcel_locs = (self._map[:,:,1]>0).astype(int)
parcel_locs *= spawn_locs
avail_locs = spawn_locs-parcel_locs
if np.sum(avail_locs) == 0:
                    # all parcel spawn points are occupied
break
if np.sum(self._map[:,:,1]>0)>=shelf_max:
                    # it is no longer possible to generate a parcel with a unique destination
break
avail_indices = np.nonzero(avail_locs)
loc_id = np.random.randint(len(avail_indices[0]))
loc = (avail_indices[0][loc_id],avail_indices[1][loc_id])
                # at any given time, no two parcels may share the same destination shelf
p = np.ones(shelf_max)
for i in range(shelf_max):
if (i+1) in self._map[:,:,1]:
p[i]=0
p = p/np.sum(p)
obj = int(np.random.choice(np.arange(shelf_max)+1, p = p))
if self.step_left is not None:
self.step_left[obj-1] = np.random.randint(self.upper_dl-self.lower_dl+1)+self.lower_dl
self._map[loc[0],loc[1],1] = obj
else:
if self.step_left is not None:
for x,y,shelf_index,dl in self.parcel_gen_seq[self.steps]:
if self._map[x,y,1]==0 and shelf_index not in self._map[:,:,1]:
self._map[x,y,1] = shelf_index
self.step_left[shelf_index-1] = dl
else:
for x,y,shelf_index in self.parcel_gen_seq[self.steps]:
if self._map[x,y,1]==0 and shelf_index not in self._map[:,:,1]:
self._map[x,y,1] = shelf_index
def get_score(self):
# score, num_delivered, num_time_out
parcel_coords = np.argwhere(self._map[:,:,1]>0)
n_timeout = 0
if self.step_left is not None:
for x,y in parcel_coords:
if self.step_left[self._map[x,y,1]-1]<0:
n_timeout+=1
return self.steps, self.score, self.n_delivered, self.n_delayed, n_timeout
def get_state(self):
if self.step_left is not None:
return np.copy(self._map), np.copy(self.players), np.copy(self.step_left)
return np.copy(self._map), np.copy(self.players), None
def set_state(self, _map, players, step_left=None):
self._map = _map
self.players = players
self.step_left = step_left
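# --- Illustrative usage sketch (not part of the original module) ---
# The demo map below is an assumption for illustration only: layer 0 holds
# walls (-1), spawn points (-2) and shelf ids; layer 1 holds parcels (by
# destination shelf id); layer 2 holds AGV ids, as documented in __init__.
if __name__ == '__main__':
    demo_map = np.zeros([4, 4, 3], dtype=int)
    demo_map[0, 0, 0] = -2   # parcel spawn point
    demo_map[3, 3, 0] = 1    # shelf expecting parcels with id 1
    demo_map[1, 1, 2] = 1    # AGV number 1 starts at (1, 1)
    engine = game_engine(demo_map, parcel_gen_gap=5)
    # Move AGV 1 by (di=1, dj=0) without grabbing or dropping anything.
    delta = engine.step([(1, 0, 0)])
    print(delta, engine.get_score())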
|
the-stack_0_25486
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.utils import with_metaclass
from abc import ABCMeta, abstractproperty
from itertools import product
import numpy as np
from six import string_types
from skbio.util import classproperty, overrides
from skbio.util._misc import MiniRegistry
from ._sequence import Sequence
class IUPACSequence(with_metaclass(ABCMeta, Sequence)):
"""Store biological sequence data conforming to the IUPAC character set.
This is an abstract base class (ABC) that cannot be instantiated.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
nondegenerate_chars
degenerate_chars
degenerate_map
Raises
------
ValueError
If sequence characters are not in the IUPAC character set [1]_.
See Also
--------
NucleotideSequence
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
"""
# ASCII is built such that the difference between uppercase and lowercase
# is the 6th bit.
_ascii_invert_case_bit_offset = 32
_number_of_extended_ascii_codes = 256
_ascii_lowercase_boundary = 90
__validation_mask = None
__degenerate_codes = None
__nondegenerate_codes = None
__gap_codes = None
@classproperty
def _validation_mask(cls):
# TODO These masks could be defined (as literals) on each concrete
# object. For now, memoize!
if cls.__validation_mask is None:
cls.__validation_mask = np.invert(np.bincount(
np.fromstring(''.join(cls.alphabet), dtype=np.uint8),
minlength=cls._number_of_extended_ascii_codes).astype(bool))
return cls.__validation_mask
@classproperty
def _degenerate_codes(cls):
if cls.__degenerate_codes is None:
degens = cls.degenerate_chars
cls.__degenerate_codes = np.asarray([ord(d) for d in degens])
return cls.__degenerate_codes
@classproperty
def _nondegenerate_codes(cls):
if cls.__nondegenerate_codes is None:
nondegens = cls.nondegenerate_chars
cls.__nondegenerate_codes = np.asarray([ord(d) for d in nondegens])
return cls.__nondegenerate_codes
@classproperty
def _gap_codes(cls):
if cls.__gap_codes is None:
gaps = cls.gap_chars
cls.__gap_codes = np.asarray([ord(g) for g in gaps])
return cls.__gap_codes
@classproperty
def alphabet(cls):
"""Return valid IUPAC characters.
This includes gap, non-degenerate, and degenerate characters.
Returns
-------
set
Valid IUPAC characters.
"""
return cls.degenerate_chars | cls.nondegenerate_chars | cls.gap_chars
@classproperty
def gap_chars(cls):
"""Return characters defined as gaps.
Returns
-------
set
Characters defined as gaps.
"""
return set('-.')
@classproperty
def degenerate_chars(cls):
"""Return degenerate IUPAC characters.
Returns
-------
set
Degenerate IUPAC characters.
"""
return set(cls.degenerate_map)
@abstractproperty
@classproperty
def nondegenerate_chars(cls):
"""Return non-degenerate IUPAC characters.
Returns
-------
set
Non-degenerate IUPAC characters.
"""
return set() # pragma: no cover
@abstractproperty
@classproperty
def degenerate_map(cls):
"""Return mapping of degenerate to non-degenerate characters.
Returns
-------
dict (set)
Mapping of each degenerate IUPAC character to the set of
non-degenerate IUPAC characters it represents.
"""
return set() # pragma: no cover
@property
def _motifs(self):
return _motifs
@overrides(Sequence)
def __init__(self, sequence, metadata=None, positional_metadata=None,
validate=True, lowercase=False):
super(IUPACSequence, self).__init__(
sequence, metadata, positional_metadata)
if lowercase is False:
pass
elif lowercase is True or isinstance(lowercase, string_types):
lowercase_mask = self._bytes > self._ascii_lowercase_boundary
self._convert_to_uppercase(lowercase_mask)
# If it isn't True, it must be a string_type
if not (lowercase is True):
self.positional_metadata[lowercase] = lowercase_mask
else:
raise TypeError("lowercase keyword argument expected a bool or "
"string, but got %s" % type(lowercase))
if validate:
self._validate()
def _convert_to_uppercase(self, lowercase):
if np.any(lowercase):
with self._byte_ownership():
self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
def lowercase(self, lowercase):
"""Return a case-sensitive string representation of the sequence.
Parameters
----------
lowercase: str or boolean vector
If lowercase is a boolean vector, it is used to set sequence
characters to lowercase in the output string. True values in the
boolean vector correspond to lowercase characters. If lowercase
is a str, it is treated like a key into the positional metadata,
pointing to a column which must be a boolean vector.
That boolean vector is then used as described previously.
Returns
-------
str
String representation of sequence with specified characters set to
lowercase.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACGT')
>>> s.lowercase([True, True, False, False])
'acGT'
>>> s = DNA('ACGT',
... positional_metadata={'exons': [True, False, False, True]})
>>> s.lowercase('exons')
'aCGt'
Constructor automatically populates a column in positional metadata
when the lowercase keyword argument is provided:
>>> s = DNA('ACgt', lowercase='introns')
>>> s.lowercase('introns')
'ACgt'
>>> s = DNA('ACGT', lowercase='introns')
>>> s.lowercase('introns')
'ACGT'
"""
index = self._munge_to_index_array(lowercase)
outbytes = self._bytes.copy()
outbytes[index] ^= self._ascii_invert_case_bit_offset
return str(outbytes.tostring().decode('ascii'))
def _validate(self):
# This is the fastest way that we have found to identify the
# presence or absence of certain characters (numbers).
# It works by multiplying a mask where the numbers which are
# permitted have a zero at their index, and all others have a one.
        # The result is a vector which will propagate counts of invalid
# numbers and remove counts of valid numbers, so that we need only
# see if the array is empty to determine validity.
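        # Toy illustration (hypothetical two-letter alphabet, not part of the
        # original implementation): with allowed bytes b'AC',
        #   mask = np.invert(np.bincount(np.frombuffer(b'AC', np.uint8),
        #                                minlength=256).astype(bool))
        #   np.any(np.bincount(np.frombuffer(b'ACAX', np.uint8),
        #                      minlength=256) * mask)
        # evaluates to True because 'X' (byte 88) lies outside the alphabet.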
invalid_characters = np.bincount(
self._bytes, minlength=self._number_of_extended_ascii_codes
) * self._validation_mask
if np.any(invalid_characters):
bad = list(np.where(
invalid_characters > 0)[0].astype(np.uint8).view('|S1'))
raise ValueError(
"Invalid character%s in sequence: %r. Valid IUPAC characters: "
"%r" % ('s' if len(bad) > 1 else '',
[str(b.tostring().decode("ascii")) for b in bad] if
len(bad) > 1 else bad[0],
list(self.alphabet)))
def gaps(self):
"""Find positions containing gaps in the biological sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a gap character is present
at that position in the biological sequence.
See Also
--------
has_gaps
Examples
--------
>>> from skbio import DNA
>>> s = DNA('AC-G-')
>>> s.gaps()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._gap_codes)
def has_gaps(self):
"""Determine if the sequence contains one or more gap characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of gap
characters in the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_gaps()
False
>>> t = DNA('A.CAC--GACGTT')
>>> t.has_gaps()
True
"""
# TODO use count, there aren't that many gap chars
return bool(self.gaps().any())
def degenerates(self):
"""Find positions containing degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a degenerate character is
present at that position in the biological sequence.
See Also
--------
has_degenerates
nondegenerates
has_nondegenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.degenerates()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._degenerate_codes)
def has_degenerates(self):
"""Determine if sequence contains one or more degenerate characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of degenerate
characters in the biological sequence.
See Also
--------
degenerates
nondegenerates
has_nondegenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACAC-GACGTT')
>>> s.has_degenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_degenerates()
True
"""
# TODO use bincount!
return bool(self.degenerates().any())
def nondegenerates(self):
"""Find positions containing non-degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a non-degenerate character
is present at that position in the biological sequence.
See Also
--------
has_nondegenerates
degenerates
        has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.nondegenerates()
array([ True, True, False, True, False], dtype=bool)
"""
return np.in1d(self._bytes, self._nondegenerate_codes)
def has_nondegenerates(self):
"""Determine if sequence contains one or more non-degenerate characters
Returns
-------
bool
Indicates whether there are one or more occurrences of
non-degenerate characters in the biological sequence.
See Also
--------
nondegenerates
degenerates
has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('NWNNNNNN')
>>> s.has_nondegenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_nondegenerates()
True
"""
return bool(self.nondegenerates().any())
def degap(self):
"""Return a new sequence with gap characters removed.
Returns
-------
IUPACSequence
A new sequence with all gap characters removed.
See Also
--------
gap_chars
Notes
-----
The type and metadata of the result will be the same as the
biological sequence. If positional metadata is present, it will be
filtered in the same manner as the sequence characters and included in
the resulting degapped sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('GGTC-C--ATT-C.',
... positional_metadata={'quality':range(14)})
>>> t = s.degap()
>>> t # doctest: +NORMALIZE_WHITESPACE
DNA('GGTCCATTC', length=9, has_metadata=False,
has_positional_metadata=True)
"""
return self[np.invert(self.gaps())]
def expand_degenerates(self):
"""Yield all possible non-degenerate versions of the sequence.
Yields
------
IUPACSequence
Non-degenerate version of the sequence.
See Also
--------
degenerate_map
Notes
-----
There is no guaranteed ordering to the non-degenerate sequences that
are yielded.
Each non-degenerate sequence will have the same type, metadata,
and positional metadata as the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TRG')
>>> seq_generator = seq.expand_degenerates()
>>> for s in sorted(seq_generator, key=str):
... s
DNA('TAG', length=3, has_metadata=False, has_positional_metadata=False)
DNA('TGG', length=3, has_metadata=False, has_positional_metadata=False)
"""
degen_chars = self.degenerate_map
nonexpansion_chars = self.nondegenerate_chars.union(self.gap_chars)
expansions = []
for char in self:
char = str(char)
if char in nonexpansion_chars:
expansions.append(char)
else:
expansions.append(degen_chars[char])
result = product(*expansions)
return (self._to(sequence=''.join(nondegen_seq)) for nondegen_seq in
result)
def find_motifs(self, motif_type, min_length=1, ignore=None):
"""Search the biological sequence for motifs.
Options for `motif_type`:
Parameters
----------
motif_type : str
Type of motif to find.
min_length : int, optional
Only motifs at least as long as `min_length` will be returned.
ignore : 1D array_like (bool), optional
Boolean vector indicating positions to ignore when matching.
Yields
------
slice
Location of the motif in the biological sequence.
Raises
------
ValueError
If an unknown `motif_type` is specified.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACGGGGAGGCGGAG')
>>> for motif_slice in s.find_motifs('purine-run', min_length=2):
... motif_slice
... str(s[motif_slice])
slice(2, 9, None)
'GGGGAGG'
slice(10, 14, None)
'GGAG'
Gap characters can disrupt motifs:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run'):
... motif_slice
slice(0, 2, None)
slice(3, 5, None)
Gaps can be ignored by passing the gap boolean vector to `ignore`:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run', ignore=s.gaps()):
... motif_slice
slice(0, 5, None)
"""
if motif_type not in self._motifs:
raise ValueError("Not a known motif (%r) for this sequence (%s)." %
(motif_type, self.__class__.__name__))
return self._motifs[motif_type](self, min_length, ignore)
@overrides(Sequence)
def _constructor(self, **kwargs):
return self.__class__(validate=False, lowercase=False, **kwargs)
_motifs = MiniRegistry()
# Leave this at the bottom
_motifs.interpolate(IUPACSequence, "find_motifs")
|
the-stack_0_25490
|
#!/usr/bin/env python
import ConfigParser
import os, sys
config_file = "../share/blenderfarm/blenderfarm.config"
def getConfig():
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), config_file)
if not os.path.isfile(config_path):
sys.stderr.write("\ncan't find configuration file '%s'\n\n" % config_path)
sys.exit()
Config = ConfigParser.ConfigParser()
Config.read(config_path)
return Config
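# Example usage (illustrative; the section and option names below are
# assumptions, not taken from the original configuration file):
#
#   config = getConfig()
#   host = config.get('server', 'host')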
|
the-stack_0_25493
|
#!/usr/bin/env python
import rospy
from race.msg import drive_param
from geometry_msgs.msg import PoseStamped
import math
import numpy as np
from numpy import linalg as LA
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import csv
import os
#############
# CONSTANTS #
#############
LOOKAHEAD_DISTANCE = 0.7 # meters
#VELOCITY = 1.0 # m/s
VELOCITY = 0.5
###########
# GLOBALS #
###########
# Import waypoints.csv into a list (path_points)
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../waypoints/fifthlab1.csv')
with open(filename) as f:
path_points = [tuple(line) for line in csv.reader(f)]
# Turn path_points into a list of floats to eliminate the need for casts in the code below.
path_points = [(float(point[0]), float(point[1]), float(point[2])) for point in path_points]
# Publisher for 'drive_parameters' (speed and steering angle)
pub = rospy.Publisher('drive_parameters', drive_param, queue_size=1)
#############
# FUNCTIONS #
#############
# Computes the Euclidean distance between two 2D points p1 and p2.
def dist(p1, p2):
return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
# Input data is PoseStamped message from topic /pf/viz/inferred_pose.
# Runs pure pursuit and publishes velocity and steering angle.
def callback(data):
min = 5000000
    # Note: The numbered steps below are taken from R. Craig Coulter's paper on pure pursuit.
# 1. Determine the current location of the vehicle (we are subscribed to vesc/odom)
# Hint: Read up on PoseStamped message type in ROS to determine how to extract x, y, and yaw.
x_cord = (data.pose.position.x)
y_cord = (data.pose.position.y)
yaw = euler_from_quaternion([data.pose.orientation.x, data.pose.orientation.y, data.pose.orientation.z, data.pose.orientation.w])
print("car x:", x_cord)
print("car y:", y_cord)
print("car yaw:", yaw[2])
#wrong?
if (yaw[2] <= 0.785 and yaw[2] >= -0.785):
direction = "A"
elif (yaw[2] <= 2.355 and yaw[2] >= 0.785):
direction = "D"
elif (yaw[2] >= -2.355 and yaw[2] <= -0.785):
direction = "B"
else:
direction = "C"
print("direction", direction)
# 2. Find the path point closest to the vehicle that is >= 1 lookahead distance from vehicle's current location.
for point in path_points:
if (direction == "A" and point[0] > x_cord):
if (dist((point), (x_cord, y_cord)) < min and dist((point), (x_cord, y_cord)) >= LOOKAHEAD_DISTANCE):
min = dist((point), (x_cord, y_cord))
closest_point = point
if (direction == "B" and point[1] < y_cord):
if (dist((point), (x_cord, y_cord)) < min and dist((point), (x_cord, y_cord)) >= LOOKAHEAD_DISTANCE):
min = dist((point), (x_cord, y_cord))
closest_point = point
if (direction == "C" and point[0] < x_cord):
if (dist((point), (x_cord, y_cord)) < min and dist((point), (x_cord, y_cord)) >= LOOKAHEAD_DISTANCE):
min = dist((point), (x_cord, y_cord))
closest_point = point
if (direction == "D" and point[1] > y_cord):
if (dist((point), (x_cord, y_cord)) < min and dist((point), (x_cord, y_cord)) >= LOOKAHEAD_DISTANCE):
min = dist((point), (x_cord, y_cord))
closest_point = point
# 3. Transform the goal point to vehicle coordinates.
goal_point = closest_point
print("goal_point:", goal_point)
# 4. Calculate the curvature = 1/r = 2x/l^2
# The curvature is transformed into steering wheel angle by the vehicle on board controller.
# Hint: You may need to flip to negative because for the VESC a right steering angle has a negative value.
if (direction == "A"):
angle = -(2 * ( y_cord - goal_point[1])) / (LOOKAHEAD_DISTANCE**2)
elif(direction == "C"):
angle = (2 * ( y_cord - goal_point[1])) / (LOOKAHEAD_DISTANCE**2)
elif(direction == "D"):
angle = (2 * ( x_cord - goal_point[0])) / (LOOKAHEAD_DISTANCE**2)
else:
angle = -(2 * ( x_cord - goal_point[0] )) / (LOOKAHEAD_DISTANCE**2)
angle = np.clip(angle, -0.2967, 0.2967) # 0.2967 radians = 17 degrees because car can only turn 24 degrees max
print("angle", angle)
print("******************************************")
msg = drive_param()
msg.velocity = VELOCITY
msg.angle = angle
pub.publish(msg)
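# --- Illustrative sketch (not used by the node above) ---
# Step 4 computes curvature = 2x/l^2 from the goal point offset. A standalone
# helper (hypothetical name) showing the same arithmetic:
def curvature_from_offset(lateral_offset, lookahead=LOOKAHEAD_DISTANCE):
    # e.g. curvature_from_offset(0.2) ~= 0.816 for the 0.7 m lookahead above
    return 2.0 * lateral_offset / (lookahead ** 2)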
if __name__ == '__main__':
rospy.init_node('pure_pursuit')
rospy.Subscriber('/pf/viz/inferred_pose', PoseStamped, callback, queue_size=1)
rospy.spin()
|
the-stack_0_25494
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import itertools
import re
from oslo_log import log as logging
from oslo_utils import strutils
import six
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import objects
from nova import quota
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
_STATE_MAP = {
vm_states.ACTIVE: {
'default': 'ACTIVE',
task_states.REBOOTING: 'REBOOT',
task_states.REBOOT_PENDING: 'REBOOT',
task_states.REBOOT_STARTED: 'REBOOT',
task_states.REBOOTING_HARD: 'HARD_REBOOT',
task_states.REBOOT_PENDING_HARD: 'HARD_REBOOT',
task_states.REBOOT_STARTED_HARD: 'HARD_REBOOT',
task_states.UPDATING_PASSWORD: 'PASSWORD',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
task_states.MIGRATING: 'MIGRATING',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
},
vm_states.BUILDING: {
'default': 'BUILD',
},
vm_states.STOPPED: {
'default': 'SHUTOFF',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
},
vm_states.RESIZED: {
'default': 'VERIFY_RESIZE',
# Note(maoy): the OS API spec 1.1 doesn't have CONFIRMING_RESIZE
# state so we comment that out for future reference only.
# task_states.RESIZE_CONFIRMING: 'CONFIRMING_RESIZE',
task_states.RESIZE_REVERTING: 'REVERT_RESIZE',
},
vm_states.PAUSED: {
'default': 'PAUSED',
task_states.MIGRATING: 'MIGRATING',
},
vm_states.SUSPENDED: {
'default': 'SUSPENDED',
},
vm_states.RESCUED: {
'default': 'RESCUE',
},
vm_states.ERROR: {
'default': 'ERROR',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
},
vm_states.DELETED: {
'default': 'DELETED',
},
vm_states.SOFT_DELETED: {
'default': 'SOFT_DELETED',
},
vm_states.SHELVED: {
'default': 'SHELVED',
},
vm_states.SHELVED_OFFLOADED: {
'default': 'SHELVED_OFFLOADED',
},
}
def status_from_state(vm_state, task_state='default'):
"""Given vm_state and task_state, return a status string."""
task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN'))
status = task_map.get(task_state, task_map['default'])
if status == "UNKNOWN":
LOG.error(_LE("status is UNKNOWN from vm_state=%(vm_state)s "
"task_state=%(task_state)s. Bad upgrade or db "
"corrupted?"),
{'vm_state': vm_state, 'task_state': task_state})
return status
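# Example (illustrative): status_from_state(vm_states.ACTIVE) returns 'ACTIVE',
# while status_from_state(vm_states.ACTIVE, task_states.REBOOTING) maps to
# 'REBOOT' via _STATE_MAP above.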
def task_and_vm_state_from_status(statuses):
"""Map the server's multiple status strings to list of vm states and
list of task states.
"""
vm_states = set()
task_states = set()
lower_statuses = [status.lower() for status in statuses]
for state, task_map in six.iteritems(_STATE_MAP):
for task_state, mapped_state in six.iteritems(task_map):
status_string = mapped_state
if status_string.lower() in lower_statuses:
vm_states.add(state)
task_states.add(task_state)
# Add sort to avoid different order on set in Python 3
return sorted(vm_states), sorted(task_states)
def get_sort_params(input_params, default_key='created_at',
default_dir='desc'):
"""Retrieves sort keys/directions parameters.
Processes the parameters to create a list of sort keys and sort directions
that correspond to the 'sort_key' and 'sort_dir' parameter values. These
sorting parameters can be specified multiple times in order to generate
the list of sort keys and directions.
The input parameters are not modified.
:param input_params: webob.multidict of request parameters (from
nova.wsgi.Request.params)
:param default_key: default sort key value, added to the list if no
'sort_key' parameters are supplied
:param default_dir: default sort dir value, added to the list if no
'sort_dir' parameters are supplied
:returns: list of sort keys, list of sort dirs
"""
params = input_params.copy()
sort_keys = []
sort_dirs = []
while 'sort_key' in params:
sort_keys.append(params.pop('sort_key').strip())
while 'sort_dir' in params:
sort_dirs.append(params.pop('sort_dir').strip())
if len(sort_keys) == 0 and default_key:
sort_keys.append(default_key)
if len(sort_dirs) == 0 and default_dir:
sort_dirs.append(default_dir)
return sort_keys, sort_dirs
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_int_param(request, 'limit')
if 'page_size' in request.GET:
params['page_size'] = _get_int_param(request, 'page_size')
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
if 'offset' in request.GET:
params['offset'] = _get_int_param(request, 'offset')
return params
def _get_int_param(request, param):
"""Extract integer param from request or fail."""
try:
int_param = utils.validate_integer(request.GET[param], param,
min_value=0)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return int_param
def _get_marker_param(request):
"""Extract marker id from request or fail."""
return request.GET['marker']
def limited(items, request):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
"""
params = get_pagination_params(request)
offset = params.get('offset', 0)
limit = CONF.osapi_max_limit
limit = min(limit, params.get('limit') or limit)
return items[offset:(offset + limit)]
def get_limit_and_marker(request):
"""Get limited parameter from request."""
params = get_pagination_params(request)
limit = CONF.osapi_max_limit
limit = min(limit, params.get('limit', limit))
marker = params.get('marker', None)
return limit, marker
def get_id_from_href(href):
"""Return the id or uuid portion of a url.
Given: 'http://www.foo.com/bar/123?q=4'
Returns: '123'
Given: 'http://www.foo.com/bar/abc123?q=4'
Returns: 'abc123'
"""
return urlparse.urlsplit("%s" % href).path.split('/')[-1]
def remove_trailing_version_from_href(href):
"""Removes the api version from the href.
Given: 'http://www.nova.com/compute/v1.1'
Returns: 'http://www.nova.com/compute'
Given: 'http://www.nova.com/v1.1'
Returns: 'http://www.nova.com'
"""
parsed_url = urlparse.urlsplit(href)
url_parts = parsed_url.path.rsplit('/', 1)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if not expression.match(url_parts.pop()):
LOG.debug('href %s does not contain version', href)
raise ValueError(_('href %s does not contain version') % href)
new_path = url_join(*url_parts)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urlparse.urlunsplit(parsed_url)
def check_img_metadata_properties_quota(context, metadata):
if not metadata:
return
try:
QUOTAS.limit_check(context, metadata_items=len(metadata))
except exception.OverQuota:
expl = _("Image metadata limit exceeded")
raise webob.exc.HTTPForbidden(explanation=expl)
def get_networks_for_instance_from_nw_info(nw_info):
networks = collections.OrderedDict()
for vif in nw_info:
ips = vif.fixed_ips()
floaters = vif.floating_ips()
label = vif['network']['label']
if label not in networks:
networks[label] = {'ips': [], 'floating_ips': []}
for ip in itertools.chain(ips, floaters):
ip['mac_address'] = vif['address']
networks[label]['ips'].extend(ips)
networks[label]['floating_ips'].extend(floaters)
return networks
def get_networks_for_instance(context, instance):
"""Returns a prepared nw_info list for passing into the view builders
We end up with a data structure like::
{'public': {'ips': [{'address': '10.0.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '2001::1',
'version': 6,
'mac_address': 'aa:aa:aa:aa:aa:aa'}],
'floating_ips': [{'address': '172.16.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '172.16.2.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'}]},
...}
"""
nw_info = compute_utils.get_nw_info_for_instance(instance)
return get_networks_for_instance_from_nw_info(nw_info)
def raise_http_conflict_for_instance_invalid_state(exc, action, server_id):
"""Raises a webob.exc.HTTPConflict instance containing a message
appropriate to return via the API based on the original
InstanceInvalidState exception.
"""
attr = exc.kwargs.get('attr')
state = exc.kwargs.get('state')
if attr is not None and state is not None:
msg = _("Cannot '%(action)s' instance %(server_id)s while it is in "
"%(attr)s %(state)s") % {'action': action, 'attr': attr,
'state': state,
'server_id': server_id}
else:
# At least give some meaningful message
msg = _("Instance %(server_id)s is in an invalid state for "
"'%(action)s'") % {'action': action, 'server_id': server_id}
raise webob.exc.HTTPConflict(explanation=msg)
def check_snapshots_enabled(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.allow_instance_snapshots:
LOG.warning(_LW('Rejecting snapshot request, snapshots currently'
' disabled'))
msg = _("Instance snapshots are not permitted at this time.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return f(*args, **kwargs)
return inner
def url_join(*parts):
"""Convenience method for joining parts of a URL
Any leading and trailing '/' characters are removed, and the parts joined
together with '/' as a separator. If last element of 'parts' is an empty
string, the returned URL will have a trailing slash.
"""
parts = parts or [""]
clean_parts = [part.strip("/") for part in parts if part]
if not parts[-1]:
# Empty last element should add a trailing slash
clean_parts.append("")
return "/".join(clean_parts)
class ViewBuilder(object):
"""Model API responses as dictionaries."""
def _get_project_id(self, request):
"""Get project id from request url if present or empty string
otherwise
"""
project_id = request.environ["nova.context"].project_id
if project_id in request.url:
return project_id
return ''
def _get_links(self, request, identifier, collection_name):
return [{
"rel": "self",
"href": self._get_href_link(request, identifier, collection_name),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request,
identifier,
collection_name),
}]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_compute_link_prefix(request.application_url)
url = url_join(prefix,
self._get_project_id(request),
collection_name)
return "%s?%s" % (url, urlparse.urlencode(params))
def _get_href_link(self, request, identifier, collection_name):
"""Return an href string pointing to this object."""
prefix = self._update_compute_link_prefix(request.application_url)
return url_join(prefix,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier, collection_name):
"""Create a URL that refers to a specific resource."""
base_url = remove_trailing_version_from_href(request.application_url)
base_url = self._update_compute_link_prefix(base_url)
return url_join(base_url,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_collection_links(self,
request,
items,
collection_name,
id_key="uuid"):
"""Retrieve 'next' link, if applicable. This is included if:
1) 'limit' param is specified and equals the number of items.
2) 'limit' param is specified but it exceeds CONF.osapi_max_limit,
in this case the number of items is CONF.osapi_max_limit.
3) 'limit' param is NOT specified but the number of items is
CONF.osapi_max_limit.
"""
links = []
max_items = min(
int(request.params.get("limit", CONF.osapi_max_limit)),
CONF.osapi_max_limit)
if max_items and max_items == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
elif 'id' in last_item:
last_item_id = last_item["id"]
else:
last_item_id = last_item["flavorid"]
links.append({
"rel": "next",
"href": self._get_next_link(request,
last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urlparse.urlsplit(orig_url))
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
url_parts[2] = prefix_parts[2] + url_parts[2]
return urlparse.urlunsplit(url_parts).rstrip('/')
def _update_glance_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_glance_link_prefix)
def _update_compute_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_compute_link_prefix)
def get_instance(compute_api, context, instance_id, expected_attrs=None):
"""Fetch an instance from the compute API, handling error checking."""
try:
return compute_api.get(context, instance_id,
expected_attrs=expected_attrs)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
def normalize_name(name):
    # NOTE(alex_xu): This method is used by the v2.1 legacy v2 compat mode.
    # In the legacy v2 API, some APIs strip spaces and some do not. The v2.1
    # API disallows leading/trailing spaces, so for v2 compatibility and
    # consistency we accept leading/trailing spaces here and strip them in
    # legacy v2 compat mode. Although some legacy v2 APIs did not strip
    # spaces, relying on leading/trailing spaces to distinguish instances is
    # a pointless use case.
return name.strip()
def raise_feature_not_supported(msg=None):
if msg is None:
msg = _("The requested functionality is not supported.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
def get_flavor(context, flavor_id):
try:
return objects.Flavor.get_by_flavor_id(context, flavor_id)
except exception.FlavorNotFound as error:
raise exc.HTTPNotFound(explanation=error.format_message())
def check_cells_enabled(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if not CONF.cells.enable:
raise_feature_not_supported()
return function(*args, **kwargs)
return inner
def is_all_tenants(search_opts):
"""Checks to see if the all_tenants flag is in search_opts
:param dict search_opts: The search options for a request
:returns: boolean indicating if all_tenants are being requested or not
"""
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
all_tenants = strutils.bool_from_string(all_tenants, True)
except ValueError as err:
raise exception.InvalidInput(six.text_type(err))
else:
# The empty string is considered enabling all_tenants
all_tenants = 'all_tenants' in search_opts
return all_tenants
|
the-stack_0_25498
|
"""SCons.Tool.SCCS.py
Tool-specific initialization for SCCS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/SCCS.py 3603 2008/10/10 05:46:45 scons"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
SCCS to an Environment."""
def SCCSFactory(env=env):
""" """
act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR')
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'SCCS', SCCSFactory)
env.SCCS = SCCSFactory
env['SCCS'] = 'sccs'
env['SCCSFLAGS'] = SCons.Util.CLVar('')
env['SCCSGETFLAGS'] = SCons.Util.CLVar('')
env['SCCSCOM'] = '$SCCS $SCCSFLAGS get $SCCSGETFLAGS $TARGET'
def exists(env):
return env.Detect('sccs')
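# Hedged usage sketch (not part of the tool itself): with classic SCons of
# this era, the SCCS() factory registered by generate() is typically wired
# up from an SConstruct via env.SourceCode(); the directory argument '.' is
# a placeholder, and this assumes the SCCS tool is among the loaded tools.
#
#   env = Environment()
#   env.SourceCode('.', env.SCCS())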
|
the-stack_0_25501
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test transaction download behavior
"""
from test_framework.messages import (
CInv,
CTransaction,
FromHex,
MSG_TX,
MSG_TYPE_MASK,
MSG_WTX,
msg_inv,
msg_notfound,
)
from test_framework.p2p import (
P2PInterface,
p2p_lock,
)
from test_framework.test_framework import XEPTestFramework
from test_framework.util import (
assert_equal,
)
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
import time
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.tx_getdata_count = 0
def on_getdata(self, message):
for i in message.inv:
if i.type & MSG_TYPE_MASK == MSG_TX or i.type & MSG_TYPE_MASK == MSG_WTX:
self.tx_getdata_count += 1
# Constants from net_processing
GETDATA_TX_INTERVAL = 60 # seconds
INBOUND_PEER_TX_DELAY = 2 # seconds
TXID_RELAY_DELAY = 2 # seconds
OVERLOADED_PEER_DELAY = 2 # seconds
MAX_GETDATA_IN_FLIGHT = 100
MAX_PEER_TX_ANNOUNCEMENTS = 5000
# Python test constants
NUM_INBOUND = 10
MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY
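# With the constants above this works out to 60 + 2 + 2 = 64 seconds, i.e. the
# worst-case wait before an inbound peer is asked for a transaction it announced.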
class TxDownloadTest(XEPTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 2
def test_tx_requests(self):
self.log.info("Test that we request transactions from all our peers, eventually")
txid = 0xdeadbeef
self.log.info("Announce the txid from each incoming peer to node 0")
msg = msg_inv([CInv(t=MSG_WTX, h=txid)])
for p in self.nodes[0].p2ps:
p.send_and_ping(msg)
outstanding_peer_index = [i for i in range(len(self.nodes[0].p2ps))]
def getdata_found(peer_index):
p = self.nodes[0].p2ps[peer_index]
with p2p_lock:
return p.last_message.get("getdata") and p.last_message["getdata"].inv[-1].hash == txid
node_0_mocktime = int(time.time())
while outstanding_peer_index:
node_0_mocktime += MAX_GETDATA_INBOUND_WAIT
self.nodes[0].setmocktime(node_0_mocktime)
self.wait_until(lambda: any(getdata_found(i) for i in outstanding_peer_index))
for i in outstanding_peer_index:
if getdata_found(i):
outstanding_peer_index.remove(i)
self.nodes[0].setmocktime(0)
self.log.info("All outstanding peers received a getdata")
def test_inv_block(self):
self.log.info("Generate a transaction on node 0")
tx = self.nodes[0].createrawtransaction(
inputs=[{ # coinbase
"txid": self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0],
"vout": 0
}],
outputs={ADDRESS_BCRT1_UNSPENDABLE: 50 - 0.00025},
)
tx = self.nodes[0].signrawtransactionwithkey(
hexstring=tx,
privkeys=[self.nodes[0].get_deterministic_priv_key().key],
)['hex']
ctx = FromHex(CTransaction(), tx)
txid = int(ctx.rehash(), 16)
self.log.info(
"Announce the transaction to all nodes from all {} incoming peers, but never send it".format(NUM_INBOUND))
msg = msg_inv([CInv(t=MSG_TX, h=txid)])
for p in self.peers:
p.send_and_ping(msg)
self.log.info("Put the tx in node 0's mempool")
self.nodes[0].sendrawtransaction(tx)
# Since node 1 is connected outbound to an honest peer (node 0), it
# should get the tx within a timeout. (Assuming that node 0
# announced the tx within the timeout)
# The timeout is the sum of
# * the worst case until the tx is first requested from an inbound
# peer, plus
# * the first time it is re-requested from the outbound peer, plus
# * 2 seconds to avoid races
assert self.nodes[1].getpeerinfo()[0]['inbound'] == False
timeout = 2 + INBOUND_PEER_TX_DELAY + GETDATA_TX_INTERVAL
self.log.info("Tx should be received at node 1 after {} seconds".format(timeout))
self.sync_mempools(timeout=timeout)
def test_in_flight_max(self):
self.log.info("Test that we don't load peers with more than {} transaction requests immediately".format(MAX_GETDATA_IN_FLIGHT))
txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)]
p = self.nodes[0].p2ps[0]
with p2p_lock:
p.tx_getdata_count = 0
mock_time = int(time.time() + 1)
self.nodes[0].setmocktime(mock_time)
for i in range(MAX_GETDATA_IN_FLIGHT):
p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
p.sync_with_ping()
mock_time += INBOUND_PEER_TX_DELAY
self.nodes[0].setmocktime(mock_time)
p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT)
for i in range(MAX_GETDATA_IN_FLIGHT, len(txids)):
p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
p.sync_with_ping()
self.log.info("No more than {} requests should be seen within {} seconds after announcement".format(MAX_GETDATA_IN_FLIGHT, INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1))
self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1)
p.sync_with_ping()
with p2p_lock:
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)
self.log.info("If we wait {} seconds after announcement, we should eventually get more requests".format(INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY))
self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY)
p.wait_until(lambda: p.tx_getdata_count == len(txids))
def test_expiry_fallback(self):
self.log.info('Check that expiry will select another peer for download')
WTXID = 0xffaa
peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
for p in [peer1, peer2]:
p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
# One of the peers is asked for the tx
peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
with p2p_lock:
peer_expiry, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
assert_equal(peer_fallback.tx_getdata_count, 0)
self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1) # Wait for request to peer_expiry to expire
peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
with p2p_lock:
assert_equal(peer_fallback.tx_getdata_count, 1)
self.restart_node(0) # reset mocktime
def test_disconnect_fallback(self):
self.log.info('Check that disconnect will select another peer for download')
WTXID = 0xffbb
peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
for p in [peer1, peer2]:
p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
# One of the peers is asked for the tx
peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
with p2p_lock:
peer_disconnect, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
assert_equal(peer_fallback.tx_getdata_count, 0)
peer_disconnect.peer_disconnect()
peer_disconnect.wait_for_disconnect()
peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
with p2p_lock:
assert_equal(peer_fallback.tx_getdata_count, 1)
def test_notfound_fallback(self):
self.log.info('Check that notfounds will select another peer for download immediately')
WTXID = 0xffdd
peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
for p in [peer1, peer2]:
p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
# One of the peers is asked for the tx
peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
with p2p_lock:
peer_notfound, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
assert_equal(peer_fallback.tx_getdata_count, 0)
peer_notfound.send_and_ping(msg_notfound(vec=[CInv(MSG_WTX, WTXID)])) # Send notfound, so that fallback peer is selected
peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
with p2p_lock:
assert_equal(peer_fallback.tx_getdata_count, 1)
def test_preferred_inv(self):
self.log.info('Check that invs from preferred peers are downloaded immediately')
self.restart_node(0, extra_args=['[email protected]'])
peer = self.nodes[0].add_p2p_connection(TestP2PConn())
peer.send_message(msg_inv([CInv(t=MSG_WTX, h=0xff00ff00)]))
peer.wait_until(lambda: peer.tx_getdata_count >= 1, timeout=1)
with p2p_lock:
assert_equal(peer.tx_getdata_count, 1)
def test_large_inv_batch(self):
self.log.info('Test how large inv batches are handled with relay permission')
self.restart_node(0, extra_args=['[email protected]'])
peer = self.nodes[0].add_p2p_connection(TestP2PConn())
peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS + 1)
self.log.info('Test how large inv batches are handled without relay permission')
self.restart_node(0)
peer = self.nodes[0].add_p2p_connection(TestP2PConn())
peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS)
peer.sync_with_ping()
with p2p_lock:
assert_equal(peer.tx_getdata_count, MAX_PEER_TX_ANNOUNCEMENTS)
def test_spurious_notfound(self):
self.log.info('Check that spurious notfound is ignored')
self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
def run_test(self):
# Run tests without mocktime that only need one peer-connection first, to avoid restarting the nodes
self.test_expiry_fallback()
self.test_disconnect_fallback()
self.test_notfound_fallback()
self.test_preferred_inv()
self.test_large_inv_batch()
self.test_spurious_notfound()
# Run each test against new xepd instances, as setting mocktimes has long-term effects on when
# the next trickle relay event happens.
for test in [self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]:
self.stop_nodes()
self.start_nodes()
self.connect_nodes(1, 0)
# Setup the p2p connections
self.peers = []
for node in self.nodes:
for _ in range(NUM_INBOUND):
self.peers.append(node.add_p2p_connection(TestP2PConn()))
self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND))
test()
if __name__ == '__main__':
TxDownloadTest().main()
|
the-stack_0_25503
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
import os
from collections import defaultdict
import numpy as np
from scipy.stats import ttest_rel
###
### VARIABLES (update as necessary)
###
dataset = 'uvigomed'
data_to_analyse_path = '../{}/results/2019-07-15/'.format(dataset)
###
### CODE
###
# To keep tabs on training set sizes and classifiers when performing a paired t-test later
train_sizes = {}
classifiers = []
# To store means and standard deviations keyed by classifier type then keyed by training set size then micro/macro
mean_dict = {}
stdev_dict = {}
# To store all samples for paired t-test keyed by micro/macro then classifier type then training set size
sample_dict = {}
sample_dict['micro'] = {}
sample_dict['macro'] = {}
def calculate_mean_stdev(train_size_to_results, f1_type):
for train_size, results in train_size_to_results.items():
mean_dict[classifier_balance][train_size][f1_type] = np.mean(results)
stdev_dict[classifier_balance][train_size][f1_type] = np.std(results, ddof=1)
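# Hedged sketch of what calculate_mean_stdev stores for one training size
# (the result values below are made up for illustration):
#
#   import numpy as np
#   results = [0.81, 0.83, 0.82]
#   np.mean(results)            # ~0.82, stored in mean_dict[...][train_size][f1_type]
#   np.std(results, ddof=1)     # ~0.01, sample (Bessel-corrected) standard deviation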
result_files = os.listdir(data_to_analyse_path)
for result_file in result_files:
classifier_balance = result_file.split('.')[0]
classifier_balance_split = classifier_balance.split('_')
# We've encountered another classifier
classifiers.append(classifier_balance)
# Initialise the mean and stdev dictionaries for this classifier
mean_dict[classifier_balance] = defaultdict(lambda: {})
stdev_dict[classifier_balance] = defaultdict(lambda: {})
# First part of the name before the underscore is the classifier name: 'nb', 'svc' or 'kb'.
classifier = classifier_balance_split[0]
# Second part of the filename is proportional or balanced
dataset_balance = classifier_balance_split[1]
# Dictionary containing list of results for each training set size
train_size_to_micro_results = defaultdict(list)
train_size_to_macro_results = defaultdict(list)
# Get the results
with open(data_to_analyse_path + result_file, newline='') as csvfile:
result_reader = csv.reader(csvfile)
for row in result_reader:
train_size = int(row[0])
micro_f1 = float(row[1])
macro_f1 = float(row[2])
train_size_to_micro_results[train_size].append(micro_f1)
train_size_to_macro_results[train_size].append(macro_f1)
train_sizes[train_size] = None
# Store the samples for the t-test later
sample_dict['micro'][classifier_balance] = train_size_to_micro_results
sample_dict['macro'][classifier_balance] = train_size_to_macro_results
# Calculate means and standard deviations at each training set size
calculate_mean_stdev(train_size_to_micro_results, 'micro')
calculate_mean_stdev(train_size_to_macro_results, 'macro')
# Print out results in a format that can be pasted into a Jupyter notebook and a LaTeX report
def print_results():
for classifier, results in mean_dict.items():
train_sizes = '['
micro_results = '['
macro_results = '['
micro_results_std = '['
macro_results_std = '['
comma = False
for train_size, f1_results in results.items():
if comma:
train_sizes += ', '
micro_results += ', '
macro_results += ', '
micro_results_std += ', '
macro_results_std += ', '
train_sizes += '{}'.format(train_size)
micro_results += '{:.6f}'.format(f1_results['micro'])
macro_results += '{:.6f}'.format(f1_results['macro'])
micro_results_std += '{:.6f}'.format(stdev_dict[classifier][train_size]['micro'])
macro_results_std += '{:.6f}'.format(stdev_dict[classifier][train_size]['macro'])
comma = True
train_sizes += ']'
micro_results += ']'
macro_results += ']'
micro_results_std += ']'
macro_results_std += ']'
print('---------------------------')
print('Results for {}'.format(classifier))
print('')
print('train_sizes = {}'.format(train_sizes))
print('{}_micro = {}'.format(classifier, micro_results))
print('{}_macro = {}'.format(classifier, macro_results))
print('{}_micro_std = {}'.format(classifier, micro_results_std))
print('{}_macro_std = {}'.format(classifier, macro_results_std))
print('')
print_results()
# Perform a paired t-test between classifiers
for f1_type in ['micro', 'macro']:
for train_size in train_sizes.keys():
with open('results/{}_{}_{}.csv'.format(dataset, f1_type, train_size), 'w', newline='') as csvfile:
result_writer = csv.writer(csvfile)
smallest_p_value = np.inf
largest_p_value = 0
# Get the p-value between all classifiers
for i in range(len(classifiers)):
for j in range(i+1, len(classifiers)):
# Proportional training sets are sometimes larger than the maximum balanced training set size
# In this case we cannot calculate a p value for the pairwise comparison
if (len(sample_dict[f1_type][classifiers[i]][train_size]) != 0 and
len(sample_dict[f1_type][classifiers[j]][train_size]) != 0):
_, p = ttest_rel(sample_dict[f1_type][classifiers[i]][train_size][:30],
sample_dict[f1_type][classifiers[j]][train_size][:30])
# Write out the result
result_writer.writerow([classifiers[i], classifiers[j], p])
if p < smallest_p_value:
smallest_p_value = p
if p > largest_p_value:
largest_p_value = p
# Write out some debug
print('---------------------------')
print('Results for F1: {}, training set size {}'.format(f1_type, train_size))
print('Smallest p value: {}, largest p value: {}'.format(smallest_p_value, largest_p_value))
print('')
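# Hedged sketch (sample values are made up): ttest_rel returns a
# (statistic, p-value) pair for two paired samples, which is what the loop
# above writes out for each classifier pair.
#
#   from scipy.stats import ttest_rel
#   stat, p = ttest_rel([0.81, 0.83, 0.82], [0.79, 0.80, 0.78])
#   # a small p (e.g. < 0.05) suggests the paired results differ significantly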
|
the-stack_0_25504
|
import json
import logging
import os
import time
from oauthlib.oauth2 import TokenExpiredError, WebApplicationClient, BackendApplicationClient
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError, RequestException, ProxyError
from requests.exceptions import SSLError, Timeout, ConnectionError
# Dynamic loading of module Retry by requests.packages
# noinspection PyUnresolvedReferences
from requests.packages.urllib3.util.retry import Retry
from requests_oauthlib import OAuth2Session
from stringcase import pascalcase, camelcase, snakecase
from tzlocal import get_localzone
from pytz import UnknownTimeZoneError, UTC, timezone as get_timezone
from .utils import ME_RESOURCE, BaseTokenBackend, FileSystemTokenBackend, Token
log = logging.getLogger(__name__)
O365_API_VERSION = 'v2.0'
GRAPH_API_VERSION = 'v1.0'
OAUTH_REDIRECT_URL = 'https://login.microsoftonline.com/common/oauth2/nativeclient' # version <= 1.1.3. : 'https://outlook.office365.com/owa/'
RETRIES_STATUS_LIST = (
429, # Status code for TooManyRequests
500, 502, 503, 504 # Server errors
)
RETRIES_BACKOFF_FACTOR = 0.5
DEFAULT_SCOPES = {
# wrap any scope in a 1 element tuple to avoid prefixing
'basic': [('offline_access',), 'User.Read'],
'mailbox': ['Mail.Read'],
'mailbox_shared': ['Mail.Read.Shared'],
'message_send': ['Mail.Send'],
'message_send_shared': ['Mail.Send.Shared'],
'message_all': ['Mail.ReadWrite', 'Mail.Send'],
'message_all_shared': ['Mail.ReadWrite.Shared', 'Mail.Send.Shared'],
'address_book': ['Contacts.Read'],
'address_book_shared': ['Contacts.Read.Shared'],
'address_book_all': ['Contacts.ReadWrite'],
'address_book_all_shared': ['Contacts.ReadWrite.Shared'],
'calendar': ['Calendars.Read'],
'calendar_shared': ['Calendars.Read.Shared'],
'calendar_all': ['Calendars.ReadWrite'],
'calendar_shared_all': ['Calendars.ReadWrite.Shared'],
'users': ['User.ReadBasic.All'],
'onedrive': ['Files.Read.All'],
'onedrive_all': ['Files.ReadWrite.All'],
'sharepoint': ['Sites.Read.All'],
'sharepoint_dl': ['Sites.ReadWrite.All'],
'settings_all': ['MailboxSettings.ReadWrite'],
}
class Protocol:
""" Base class for all protocols """
# Override these in subclass
_protocol_url = 'not_defined' # Main url to request.
_oauth_scope_prefix = '' # Prefix for scopes
_oauth_scopes = {} # Dictionary of {scopes_name: [scope1, scope2]}
def __init__(self, *, protocol_url=None, api_version=None,
default_resource=None,
casing_function=None, protocol_scope_prefix=None,
timezone=None, **kwargs):
""" Create a new protocol object
:param str protocol_url: the base url used to communicate with the
server
:param str api_version: the api version
:param str default_resource: the default resource to use when there is
nothing explicitly specified during the requests
:param function casing_function: the casing transform function to be
used on api keywords (camelcase / pascalcase)
:param str protocol_scope_prefix: prefix url for scopes
:param pytz.UTC or str timezone: preferred timezone, defaults to the
system timezone
:raises ValueError: if protocol_url or api_version are not supplied
"""
if protocol_url is None or api_version is None:
raise ValueError(
'Must provide valid protocol_url and api_version values')
self.protocol_url = protocol_url or self._protocol_url
self.protocol_scope_prefix = protocol_scope_prefix or ''
self.api_version = api_version
self.service_url = '{}{}/'.format(protocol_url, api_version)
self.default_resource = default_resource or ME_RESOURCE
self.use_default_casing = True if casing_function is None else False
self.casing_function = casing_function or camelcase
if timezone and isinstance(timezone, str):
timezone = get_timezone(timezone)
try:
self.timezone = timezone or get_localzone() # pytz timezone
except UnknownTimeZoneError as e:
log.info('Timezone not provided and the local timezone could not be found. Default to UTC.')
self.timezone = UTC # pytz.timezone('UTC')
self.max_top_value = 500 # Max $top parameter value
# define any keyword that can be different in this protocol
# for example, attachments Odata type differs between Outlook
# rest api and graph: (graph = #microsoft.graph.fileAttachment and
# outlook = #Microsoft.OutlookServices.FileAttachment')
self.keyword_data_store = {}
def get_service_keyword(self, keyword):
""" Returns the data set to the key in the internal data-key dict
:param str keyword: key to get value for
:return: value of the keyword
"""
return self.keyword_data_store.get(keyword, None)
def convert_case(self, key):
""" Returns a key converted with this protocol casing method
Converts case to send/read from the cloud
When using Microsoft Graph API, the keywords of the API use
lowerCamelCase Casing
When using Office 365 API, the keywords of the API use PascalCase Casing
Default case in this API is lowerCamelCase
:param str key: a dictionary key to convert
:return: key after case conversion
:rtype: str
"""
return key if self.use_default_casing else self.casing_function(key)
@staticmethod
def to_api_case(key):
""" Converts key to snake_case
:param str key: key to convert into snake_case
:return: key after case conversion
:rtype: str
"""
return snakecase(key)
def get_scopes_for(self, user_provided_scopes):
""" Returns a list of scopes needed for each of the
scope_helpers provided, by adding the prefix to them if required
:param user_provided_scopes: a list of scopes or scope helpers
:type user_provided_scopes: list or tuple or str
:return: scopes with url prefix added
:rtype: list
:raises ValueError: if unexpected datatype of scopes are passed
"""
if user_provided_scopes is None:
# return all available scopes
user_provided_scopes = [app_part for app_part in self._oauth_scopes]
elif isinstance(user_provided_scopes, str):
user_provided_scopes = [user_provided_scopes]
if not isinstance(user_provided_scopes, (list, tuple)):
raise ValueError(
"'user_provided_scopes' must be a list or a tuple of strings")
scopes = set()
for app_part in user_provided_scopes:
for scope in self._oauth_scopes.get(app_part, [(app_part,)]):
scopes.add(self.prefix_scope(scope))
return list(scopes)
def prefix_scope(self, scope):
""" Inserts the protocol scope prefix if required"""
if self.protocol_scope_prefix:
if isinstance(scope, tuple):
return scope[0]
elif scope.startswith(self.protocol_scope_prefix):
return scope
else:
return '{}{}'.format(self.protocol_scope_prefix, scope)
else:
if isinstance(scope, tuple):
return scope[0]
else:
return scope
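# Hedged usage sketch (illustrative only): how a concrete protocol expands a
# scope helper from DEFAULT_SCOPES, applying its scope prefix. The order of
# the returned list may vary because scopes are collected in a set.
#
#   protocol = MSGraphProtocol()   # defined below
#   protocol.get_scopes_for(['basic'])
#   # -> ['offline_access', 'https://graph.microsoft.com/User.Read']
#   #    (tuple-wrapped scopes such as ('offline_access',) skip the prefix)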
class MSGraphProtocol(Protocol):
""" A Microsoft Graph Protocol Implementation
https://docs.microsoft.com/en-us/outlook/rest/compare-graph-outlook
"""
_protocol_url = 'https://graph.microsoft.com/'
_oauth_scope_prefix = 'https://graph.microsoft.com/'
_oauth_scopes = DEFAULT_SCOPES
def __init__(self, api_version='v1.0', default_resource=None,
**kwargs):
""" Create a new Microsoft Graph protocol object
_protocol_url = 'https://graph.microsoft.com/'
_oauth_scope_prefix = 'https://graph.microsoft.com/'
:param str api_version: api version to use
:param str default_resource: the default resource to use when there is
nothing explicitly specified during the requests
"""
super().__init__(protocol_url=self._protocol_url,
api_version=api_version,
default_resource=default_resource,
casing_function=camelcase,
protocol_scope_prefix=self._oauth_scope_prefix,
**kwargs)
self.keyword_data_store['message_type'] = 'microsoft.graph.message'
self.keyword_data_store['event_message_type'] = 'microsoft.graph.eventMessage'
self.keyword_data_store[
'file_attachment_type'] = '#microsoft.graph.fileAttachment'
self.keyword_data_store[
'item_attachment_type'] = '#microsoft.graph.itemAttachment'
self.max_top_value = 999 # Max $top parameter value
class MSOffice365Protocol(Protocol):
""" A Microsoft Office 365 Protocol Implementation
https://docs.microsoft.com/en-us/outlook/rest/compare-graph-outlook
"""
_protocol_url = 'https://outlook.office.com/api/'
_oauth_scope_prefix = 'https://outlook.office.com/'
_oauth_scopes = DEFAULT_SCOPES
def __init__(self, api_version='v2.0', default_resource=None,
**kwargs):
""" Create a new Office 365 protocol object
_protocol_url = 'https://outlook.office.com/api/'
_oauth_scope_prefix = 'https://outlook.office.com/'
:param str api_version: api version to use
:param str default_resource: the default resource to use when there is
nothing explicitly specified during the requests
"""
super().__init__(protocol_url=self._protocol_url,
api_version=api_version,
default_resource=default_resource,
casing_function=pascalcase,
protocol_scope_prefix=self._oauth_scope_prefix,
**kwargs)
self.keyword_data_store[
'message_type'] = 'Microsoft.OutlookServices.Message'
self.keyword_data_store[
'event_message_type'] = 'Microsoft.OutlookServices.EventMessage'
self.keyword_data_store[
'file_attachment_type'] = '#Microsoft.OutlookServices.' \
'FileAttachment'
self.keyword_data_store[
'item_attachment_type'] = '#Microsoft.OutlookServices.' \
'ItemAttachment'
self.max_top_value = 999 # Max $top parameter value
class MSBusinessCentral365Protocol(Protocol):
""" A Microsoft Business Central Protocol Implementation
https://docs.microsoft.com/en-us/dynamics-nav/api-reference/v1.0/endpoints-apis-for-dynamics
"""
_protocol_url = 'https://api.businesscentral.dynamics.com/'
_oauth_scope_prefix = 'https://api.businesscentral.dynamics.com/'
_oauth_scopes = DEFAULT_SCOPES
_protocol_scope_prefix = 'https://api.businesscentral.dynamics.com/'
def __init__(self, api_version='v1.0', default_resource=None, environment=None,
**kwargs):
""" Create a new Microsoft Graph protocol object
_protocol_url = 'https://api.businesscentral.dynamics.com/'
_oauth_scope_prefix = 'https://api.businesscentral.dynamics.com/'
:param str api_version: api version to use
:param str default_resource: the default resource to use when there is
nothing explicitly specified during the requests
"""
if environment:
_version = "2.0"
_environment = "/"+environment
else:
_version = "1.0"
_environment = ''
self._protocol_url = "{}v{}{}/api/".format(self._protocol_url, _version, _environment)
super().__init__(protocol_url=self._protocol_url,
api_version=api_version,
default_resource=default_resource,
casing_function=camelcase,
protocol_scope_prefix=self._protocol_scope_prefix,
**kwargs)
self.keyword_data_store['message_type'] = 'microsoft.graph.message'
self.keyword_data_store['event_message_type'] = 'microsoft.graph.eventMessage'
self.keyword_data_store[
'file_attachment_type'] = '#microsoft.graph.fileAttachment'
self.keyword_data_store[
'item_attachment_type'] = '#microsoft.graph.itemAttachment'
self.max_top_value = 999 # Max $top parameter value
class Connection:
""" Handles all communication (requests) between the app and the server """
_allowed_methods = ['get', 'post', 'put', 'patch', 'delete']
def __init__(self, credentials, *, scopes=None,
proxy_server=None, proxy_port=8080, proxy_username=None,
proxy_password=None, requests_delay=200, raise_http_errors=True,
request_retries=3, token_backend=None,
tenant_id='common',
auth_flow_type='authorization',
timeout=None, json_encoder=None,
verify_ssl=True, **kwargs):
""" Creates an API connection object
:param tuple credentials: a tuple of (client_id, client_secret)
Generate client_id and client_secret in https://apps.dev.microsoft.com
:param list[str] scopes: list of scopes to request access to
:param str proxy_server: the proxy server
:param int proxy_port: the proxy port, defaults to 8080
:param str proxy_username: the proxy username
:param str proxy_password: the proxy password
:param int requests_delay: number of milliseconds to wait between api
calls.
The Api will respond with 429 Too many requests if more than
17 requests are made per second. Defaults to 200 milliseconds
just in case more than 1 connection is making requests
across multiple processes.
:param bool raise_http_errors: If True Http 4xx and 5xx status codes
will raise as exceptions
:param int request_retries: number of retries done when the server
responds with 5xx error codes.
:param BaseTokenBackend token_backend: the token backend used to get
and store tokens
:param str tenant_id: use this specific tenant id, defaults to common
:param str auth_flow_type: the auth method flow style used: Options:
- 'authorization': 2 step web style grant flow using an authentication url
- 'public': 2 step web style grant flow using an authentication url for public apps where
client secret cannot be secured
- 'credentials': also called client credentials grant flow using only the client id and secret
:param float or tuple timeout: How long to wait for the server to send
data before giving up, as a float, or a tuple (connect timeout, read timeout)
:param JSONEncoder json_encoder: The JSONEncoder to use during JSON serialization of the request.
:param bool verify_ssl: set the verify flag on the requests library
:param dict kwargs: any extra params passed to Connection
:raises ValueError: if credentials is not tuple of
(client_id, client_secret)
"""
if auth_flow_type == 'public': # allow client id only for public flow
if not isinstance(credentials, tuple) or len(credentials) != 1 or (not credentials[0]):
raise ValueError('Provide client id only for public flow credentials')
else:
if not isinstance(credentials, tuple) or len(credentials) != 2 or (not credentials[0] and not credentials[1]):
raise ValueError('Provide valid auth credentials')
self._auth_flow_type = auth_flow_type # 'authorization' or 'credentials' or 'public'
if auth_flow_type == 'credentials' and tenant_id == 'common':
raise ValueError('When using the "credentials" auth_flow the "tenant_id" must be set')
self.tenant_id = tenant_id
self.auth = credentials
self.scopes = scopes
self.store_token = True
token_backend = token_backend or FileSystemTokenBackend(**kwargs)
if not isinstance(token_backend, BaseTokenBackend):
raise ValueError('"token_backend" must be an instance of a subclass of BaseTokenBackend')
self.token_backend = token_backend
self.session = None # requests Oauth2Session object
self.proxy = {}
self.set_proxy(proxy_server, proxy_port, proxy_username, proxy_password)
self.requests_delay = requests_delay or 0
self._previous_request_at = None # store previous request time
self.raise_http_errors = raise_http_errors
self.request_retries = request_retries
self.timeout = timeout
self.json_encoder = json_encoder
self.verify_ssl = verify_ssl
self.naive_session = None # lazy loaded: holds a requests Session object
self._oauth2_authorize_url = 'https://login.microsoftonline.com/' \
'{}/oauth2/v2.0/authorize'.format(tenant_id)
self._oauth2_token_url = 'https://login.microsoftonline.com/' \
'{}/oauth2/v2.0/token'.format(tenant_id)
self.oauth_redirect_url = 'https://login.microsoftonline.com/common/oauth2/nativeclient'
@property
def auth_flow_type(self):
return self._auth_flow_type
def set_proxy(self, proxy_server, proxy_port, proxy_username,
proxy_password):
""" Sets a proxy on the Session
:param str proxy_server: the proxy server
:param int proxy_port: the proxy port, defaults to 8080
:param str proxy_username: the proxy username
:param str proxy_password: the proxy password
"""
if proxy_server and proxy_port:
if proxy_username and proxy_password:
self.proxy = {
"http": "http://{}:{}@{}:{}".format(proxy_username,
proxy_password,
proxy_server,
proxy_port),
"https": "https://{}:{}@{}:{}".format(proxy_username,
proxy_password,
proxy_server,
proxy_port),
}
else:
self.proxy = {
"http": "http://{}:{}".format(proxy_server, proxy_port),
"https": "https://{}:{}".format(proxy_server, proxy_port),
}
def get_authorization_url(self, requested_scopes=None,
redirect_uri=None, **kwargs):
""" Initializes the oauth authorization flow, getting the
authorization url that the user must approve.
:param list[str] requested_scopes: list of scopes to request access for
:param str redirect_uri: redirect url configured in registered app
:param kwargs: allow to pass unused params in conjunction with Connection
:return: authorization url
:rtype: str
"""
redirect_uri = redirect_uri or self.oauth_redirect_url
scopes = requested_scopes or self.scopes
if not scopes:
raise ValueError('Must provide at least one scope')
self.session = oauth = self.get_session(redirect_uri=redirect_uri,
scopes=scopes)
# TODO: access_type='offline' has no effect according to documentation
# This is done through scope 'offline_access'.
auth_url, state = oauth.authorization_url(
url=self._oauth2_authorize_url, access_type='offline')
return auth_url, state
def request_token(self, authorization_url, *,
state=None,
redirect_uri=None,
requested_scopes=None,
store_token=True,
**kwargs):
""" Authenticates for the specified url and gets the token, save the
token for future based if requested
:param str or None authorization_url: url given by the authorization flow
:param str state: session-state identifier for web-flows
:param str redirect_uri: callback url for web-flows
:param lst requested_scopes: a list of scopes to be requested.
Only used when auth_flow_type is 'credentials'
:param bool store_token: whether or not to store the token,
so you don't have to keep opening the auth link and
authenticating every time
:param kwargs: allow to pass unused params in conjunction with Connection
:return: Success/Failure
:rtype: bool
"""
redirect_uri = redirect_uri or self.oauth_redirect_url
# Allow token scope to not match requested scope.
# (Other auth libraries allow this, but Requests-OAuthlib
# raises exception on scope mismatch by default.)
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
os.environ['OAUTHLIB_IGNORE_SCOPE_CHANGE'] = '1'
scopes = requested_scopes or self.scopes
if self.session is None:
if self.auth_flow_type in ('authorization', 'public'):
self.session = self.get_session(state=state,
redirect_uri=redirect_uri)
elif self.auth_flow_type == 'credentials':
self.session = self.get_session(scopes=scopes)
else:
raise ValueError('"auth_flow_type" must be "authorization", "public" or "credentials"')
try:
if self.auth_flow_type == 'authorization':
self.token_backend.token = Token(self.session.fetch_token(
token_url=self._oauth2_token_url,
authorization_response=authorization_url,
include_client_id=True,
client_secret=self.auth[1]))
elif self.auth_flow_type == 'public':
self.token_backend.token = Token(self.session.fetch_token(
token_url=self._oauth2_token_url,
authorization_response=authorization_url,
include_client_id=True))
elif self.auth_flow_type == 'credentials':
self.token_backend.token = Token(self.session.fetch_token(
token_url=self._oauth2_token_url,
include_client_id=True,
client_secret=self.auth[1],
scope=scopes))
except Exception as e:
log.error('Unable to fetch auth token. Error: {}'.format(str(e)))
return False
if store_token:
self.token_backend.save_token()
return True
def get_session(self, *, state=None,
redirect_uri=None,
load_token=False,
scopes=None):
""" Create a requests Session object
:param str state: session-state identifier to rebuild OAuth session (CSRF protection)
:param str redirect_uri: callback URL specified in previous requests
:param list(str) scopes: list of scopes we require access to
:param bool load_token: load and ensure token is present
:return: A ready to use requests session, or a rebuilt in-flow session
:rtype: OAuth2Session
"""
redirect_uri = redirect_uri or self.oauth_redirect_url
client_id = self.auth[0]
if self.auth_flow_type in ('authorization', 'public'):
oauth_client = WebApplicationClient(client_id=client_id)
elif self.auth_flow_type == 'credentials':
oauth_client = BackendApplicationClient(client_id=client_id)
else:
raise ValueError('"auth_flow_type" must be "authorization", "credentials" or "public"')
requested_scopes = scopes or self.scopes
if load_token:
# gets a fresh token from the store
token = self.token_backend.get_token()
if token is None:
raise RuntimeError('No auth token found. Authentication Flow needed')
oauth_client.token = token
if self.auth_flow_type in ('authorization', 'public'):
requested_scopes = None # the scopes are already in the token (not the case for the backend/credentials flow)
session = OAuth2Session(client_id=client_id,
client=oauth_client,
token=token,
scope=requested_scopes)
else:
session = OAuth2Session(client_id=client_id,
client=oauth_client,
state=state,
redirect_uri=redirect_uri,
scope=requested_scopes)
session.verify = self.verify_ssl
session.proxies = self.proxy
if self.request_retries:
retry = Retry(total=self.request_retries, read=self.request_retries,
connect=self.request_retries,
backoff_factor=RETRIES_BACKOFF_FACTOR,
status_forcelist=RETRIES_STATUS_LIST)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
def get_naive_session(self):
""" Creates and returns a naive session """
naive_session = Session() # requests Session object
naive_session.proxies = self.proxy
naive_session.verify = self.verify_ssl
if self.request_retries:
retry = Retry(total=self.request_retries, read=self.request_retries,
connect=self.request_retries,
backoff_factor=RETRIES_BACKOFF_FACTOR,
status_forcelist=RETRIES_STATUS_LIST)
adapter = HTTPAdapter(max_retries=retry)
naive_session.mount('http://', adapter)
naive_session.mount('https://', adapter)
return naive_session
def refresh_token(self):
"""
Refresh the OAuth authorization token.
This will be called automatically when the access token
expires; however, you can also call this method manually to
refresh the access token using the stored refresh token.
:return bool: Success / Failure
"""
if self.session is None:
self.session = self.get_session(load_token=True)
token = self.token_backend.token
if not token:
raise RuntimeError('Token not found.')
if token.is_long_lived or self.auth_flow_type == 'credentials':
log.info('Refreshing token')
if self.auth_flow_type == 'authorization':
client_id, client_secret = self.auth
self.token_backend.token = Token(
self.session.refresh_token(
self._oauth2_token_url,
client_id=client_id,
client_secret=client_secret)
)
elif self.auth_flow_type == 'public':
client_id = self.auth[0]
self.token_backend.token = Token(
self.session.refresh_token(
self._oauth2_token_url,
client_id=client_id)
)
elif self.auth_flow_type == 'credentials':
if self.request_token(None, store_token=False) is False:
log.error('Refresh for Client Credentials Grant Flow failed.')
return False
log.info('New oauth token fetched by refresh method')
else:
log.error('You cannot refresh an access token that has no "refresh_token" available. '
'Include the "offline_access" scope when authenticating to get a "refresh_token"')
return False
if self.store_token:
self.token_backend.save_token()
return True
def _check_delay(self):
""" Checks if a delay is needed between requests and sleeps if True """
if self._previous_request_at:
dif = round(time.time() - self._previous_request_at,
2) * 1000 # difference in milliseconds
if dif < self.requests_delay:
sleep_for = (self.requests_delay - dif)
log.info('Sleeping for {} milliseconds'.format(sleep_for))
time.sleep(sleep_for / 1000) # sleep needs seconds
self._previous_request_at = time.time()
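# Illustrative numbers (not from the source): with requests_delay=200 and a
# previous request 0.12 s ago, dif is 120 ms, so _check_delay sleeps for
# (200 - 120) / 1000 = 0.08 seconds before the next request is sent.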
def _internal_request(self, request_obj, url, method, **kwargs):
""" Internal handling of requests. Handles Exceptions.
:param request_obj: a requests session.
:param str url: url to send request to
:param str method: type of request (get/put/post/patch/delete)
:param kwargs: extra params to send to the request api
:return: Response of the request
:rtype: requests.Response
"""
method = method.lower()
if method not in self._allowed_methods:
raise ValueError('Method must be one of the allowed ones')
if method == 'get':
kwargs.setdefault('allow_redirects', True)
elif method in ['post', 'put', 'patch']:
if 'headers' not in kwargs:
kwargs['headers'] = {}
if kwargs.get('headers') is not None and kwargs['headers'].get(
'Content-type') is None:
kwargs['headers']['Content-type'] = 'application/json'
if 'data' in kwargs and kwargs['data'] is not None and kwargs['headers'].get(
'Content-type') == 'application/json':
kwargs['data'] = json.dumps(kwargs['data'], cls=self.json_encoder) # convert to json
if self.timeout is not None:
kwargs['timeout'] = self.timeout
request_done = False
token_refreshed = False
while not request_done:
self._check_delay() # sleeps if needed
try:
log.info('Requesting ({}) URL: {}'.format(method.upper(), url))
log.info('Request parameters: {}'.format(kwargs))
# auto_retry will occur inside this function call if enabled
response = request_obj.request(method, url, **kwargs)
response.raise_for_status() # raise 4XX and 5XX error codes.
log.info('Received response ({}) from URL {}'.format(
response.status_code, response.url))
request_done = True
return response
except TokenExpiredError as e:
# Token has expired, try to refresh the token and try again on the next loop
log.info('Oauth Token is expired')
if self.token_backend.token.is_long_lived is False and self.auth_flow_type == 'authorization':
raise e
if token_refreshed:
# The token was already refreshed but TokenExpiredError was raised again
raise RuntimeError('Token Refresh Operation not working')
should_rt = self.token_backend.should_refresh_token(self)
if should_rt is True:
# The backend has checked that we can refresh the token
if self.refresh_token() is False:
raise RuntimeError('Token Refresh Operation not working')
token_refreshed = True
elif should_rt is False:
# the token was refreshed by another instance and updated into
# this instance, so: update the session token and
# go back to the loop and try the request again.
request_obj.token = self.token_backend.token
else:
# the refresh was performed by the token backend.
token_refreshed = True
except (ConnectionError, ProxyError, SSLError, Timeout) as e:
# We couldn't connect to the target url, raise error
log.debug('Connection Error calling: {}.{}'
''.format(url, ('Using proxy: {}'.format(self.proxy)
if self.proxy else '')))
raise e # re-raise exception
except HTTPError as e:
# Server response with 4XX or 5XX error status codes
# try to extract the error message:
try:
error = response.json()
error_message = error.get('error', {}).get('message', '')
except ValueError:
error_message = ''
status_code = int(e.response.status_code / 100)
if status_code == 4:
# Client Error
# Logged as error. Could be a library error or Api changes
log.error('Client Error: {} | Error Message: {}'.format(str(e), error_message))
else:
# Server Error
log.debug('Server Error: {}'.format(str(e)))
if self.raise_http_errors:
if error_message:
raise HTTPError('{} | Error Message: {}'.format(e.args[0], error_message), response=response) from None
else:
raise e
else:
return e.response
except RequestException as e:
# catch any other exception raised by requests
log.debug('Request Exception: {}'.format(str(e)))
raise e
def naive_request(self, url, method, **kwargs):
""" Makes a request to url using an without oauth authorization
session, but through a normal session
:param str url: url to send request to
:param str method: type of request (get/put/post/patch/delete)
:param kwargs: extra params to send to the request api
:return: Response of the request
:rtype: requests.Response
"""
if self.naive_session is None:
# lazy creation of a naive session
self.naive_session = self.get_naive_session()
return self._internal_request(self.naive_session, url, method, **kwargs)
def oauth_request(self, url, method, **kwargs):
""" Makes a request to url using an oauth session
:param str url: url to send request to
:param str method: type of request (get/put/post/patch/delete)
:param kwargs: extra params to send to the request api
:return: Response of the request
:rtype: requests.Response
"""
# oauth authentication
if self.session is None:
self.session = self.get_session(load_token=True)
return self._internal_request(self.session, url, method, **kwargs)
def get(self, url, params=None, **kwargs):
""" Shorthand for self.oauth_request(url, 'get')
:param str url: url to send get oauth request to
:param dict params: request parameter to get the service data
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
"""
return self.oauth_request(url, 'get', params=params, **kwargs)
def post(self, url, data=None, **kwargs):
""" Shorthand for self.oauth_request(url, 'post')
:param str url: url to send post oauth request to
:param dict data: post data to update the service
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
"""
return self.oauth_request(url, 'post', data=data, **kwargs)
def put(self, url, data=None, **kwargs):
""" Shorthand for self.oauth_request(url, 'put')
:param str url: url to send put oauth request to
:param dict data: put data to update the service
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
"""
return self.oauth_request(url, 'put', data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
""" Shorthand for self.oauth_request(url, 'patch')
:param str url: url to send patch oauth request to
:param dict data: patch data to update the service
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
"""
return self.oauth_request(url, 'patch', data=data, **kwargs)
def delete(self, url, **kwargs):
""" Shorthand for self.request(url, 'delete')
:param str url: url to send delete oauth request to
:param kwargs: extra params to send to request api
:return: Response of the request
:rtype: requests.Response
"""
return self.oauth_request(url, 'delete', **kwargs)
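# Hedged usage sketch (client id/secret and URL are placeholders): once a
# token has been stored by the authentication flow, the shorthand helpers
# above can be used directly.
#
#   con = Connection(('my-client-id', 'my-client-secret'))
#   response = con.get('https://graph.microsoft.com/v1.0/me')
#   print(response.status_code)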
def __del__(self):
"""
Clear the session by closing it
This should be called manually by the user "del account.con"
There is no guarantee that this method will be called by the garbage collection
But this is not an issue because these connections will be closed automatically.
"""
if self.session:
self.session.close()
def oauth_authentication_flow(client_id, client_secret, scopes=None,
protocol=None, **kwargs):
""" A helper method to perform the OAuth2 authentication flow.
Authenticate and get the oauth token
:param str client_id: the client_id
:param str client_secret: the client_secret
:param list[str] scopes: a list of protocol user scopes to be converted
by the protocol or raw scopes
:param Protocol protocol: the protocol to be used.
Defaults to MSGraphProtocol
:param kwargs: other configuration to be passed to the Connection instance,
connection.get_authorization_url or connection.request_token
:return: Success or Failure
:rtype: bool
"""
credentials = (client_id, client_secret)
protocol = protocol or MSGraphProtocol()
con = Connection(credentials, scopes=protocol.get_scopes_for(scopes),
**kwargs)
consent_url, _ = con.get_authorization_url(**kwargs)
print('Visit the following url to give consent:')
print(consent_url)
token_url = input('Paste the authenticated url here:\n')
if token_url:
result = con.request_token(token_url, **kwargs) # no need to pass state as the session is the same
if result:
print('Authentication Flow Completed. Oauth Access Token Stored. '
'You can now use the API.')
else:
print('Something went wrong. Please try again.')
return bool(result)
else:
print('Authentication Flow aborted.')
return False
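# Hedged usage sketch (credentials are placeholders, not real values):
#
#   oauth_authentication_flow('my-client-id', 'my-client-secret',
#                             scopes=['basic', 'message_all'])
#
# This prints a consent url, waits for the redirected url to be pasted back,
# and stores the resulting token through the default token backend.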
|
the-stack_0_25505
|
# -*- coding: utf-8 -*-
"""Define the CloudEndure API wrapper related logic.
Attributes:
API_VERSION (str): The CloudEndure API version to be used.
AUTH_TTL (str): The authentication token expiration in seconds. Defaults to: 3600.
HOST (str): The CloudEndure API URI. Defaults to: https://console.cloudendure.com
logger (logging.Logger): The default logger for the module.
"""
from __future__ import annotations
import datetime
import json
import logging
import os
from typing import Any, Dict, List
from webbrowser import open_new_tab
import requests
from requests.models import Response
from requests.sessions import Session
from .config import CloudEndureConfig
from .exceptions import CloudEndureException
HOST: str = os.environ.get("CLOUDENDURE_HOST", "https://console.cloudendure.com")
API_VERSION: str = os.environ.get("CLOUDENDURE_API_VERSION", "latest").lower()
AUTH_TTL: datetime.timedelta = datetime.timedelta(
seconds=int(os.environ.get("CLOUDENDURE_AUTH_TTL", "3600"))
) # Default to 60 minutes.
METHOD_TYPES: List[str] = ["get", "post", "patch", "delete", "put"]
logger: logging.Logger = logging.getLogger(__name__)
class CloudEndureAPI:
"""Define the CloudEndure API base.
Attributes:
api_endpoint (str): The CloudEndure API endpoint to be used for API calls.
credentials (dict): The mapping of CloudEndure credentials.
session (requests.Session): The requests Session to be used throughout the lifecycle
of this API interaction.
"""
TOP_LEVEL: List[str] = ["projects", "blueprints"]
def __init__(self, config: CloudEndureConfig, *args, **kwargs) -> None:
"""Initialize the CloudEndure API client.
Attributes:
time_now (datetime): The datetime now in UTC.
"""
time_now: datetime.datetime = datetime.datetime.utcnow()
self.api_endpoint: str = f"{HOST}/api/{API_VERSION}"
self.config: CloudEndureConfig = config
self.projects: List[str] = []
self.session: Session = requests.Session()
_xsrf_token: str = self.config.active_config.get("token", "")
self.session.headers: Dict[str, str] = {
"Content-Type": "application/json",
"Accept": "text/plain",
}
self.timestamps: Dict[str, Any] = {
"created": time_now,
"updated": time_now,
"last_call": time_now,
}
if _xsrf_token:
self.session.headers.update({"X-XSRF-TOKEN": _xsrf_token})
def login(self, username: str = "", password: str = "", token: str = "") -> bool:
"""Login to the CloudEndure API console.
Args:
username (str): The CloudEndure username to be used.
Defaults to the environment specific default.
password (str): The CloudEndure password to be used.
Defaults to the environment specific default.
token (str): The CloudEndure token to be used. This argument takes precedence.
If provided, username and password will not be used.
Defaults to the environment specific default.
Attributes:
endpoint (str): The CloudEndure API endpoint to be used.
_username (str): The CloudEndure API username.
_password (str): The CloudEndure API password.
_token (str): The CloudEndure API token.
_auth (dict): The CloudEndure API username/password dictionary map.
response (requests.Response): The CloudEndure API login request response object.
_xsrf_token (str): The XSRF token to be used for subsequent API requests.
TODO:
* Verify default XSRF-Token TTL and check validity before performing
subsequent authentication requests.
"""
_username: str = self.config.active_config["username"] or username
_password: str = self.config.active_config["password"] or password
_token: str = self.config.active_config["user_api_token"] or token
_auth: Dict[str, str] = {}
if _token:
_auth["userApiToken"] = _token
elif _username and _password:
_auth = {"username": _username, "password": _password}
else:
print("You must configure your authentication credentials!")
return False
# Attempt to login to the CloudEndure API via a POST request.
response: requests.Response = self.api_call(
"login", "post", data=json.dumps(_auth)
)
# Check whether or not the request was successful.
if response.status_code not in [200, 307]:
if response.status_code == 401:
print(
"\nBad CloudEndure Credentials! Check your username/password and try again!\n"
)
elif response.status_code == 402:
print(
"\nNo CloudEndure License! Please configure your account and try again!\n"
)
elif response.status_code == 429:
print(
"\nCloudEndure authentication failure limit reached! Please try again later!\n"
)
return False
# Grab the XSRF token received from the response, as stored in cookies.
# _xsrf_token: str = str(response.cookies["XSRF-TOKEN"])
_xsrf_token: str = str(response.cookies.get("XSRF-TOKEN", ""))
if not _xsrf_token:
raise CloudEndureException("Failed to fetch a token from CloudEndure!")
# Strip the XSRF token of wrapping double-quotes from the cookie.
if _xsrf_token.startswith('"') and _xsrf_token.endswith('"'):
_xsrf_token: str = _xsrf_token[1:-1]
# Set the XSRF token data on the CloudEndureAPI object.
time_now: datetime.datetime = datetime.datetime.utcnow()
self.config.update_token(_xsrf_token)
self.session.headers.update({"X-XSRF-TOKEN": _xsrf_token})
self.timestamps["last_call"] = time_now
return True
@staticmethod
def get_endpoint(
path: str,
api_version: str = "latest",
host: str = "https://console.cloudendure.com",
) -> str:
"""Build the endpoint path.
Returns:
str: The CloudEndure API endpoint to be used.
"""
return f"{host}/api/{api_version}/{path}"
def api_call(
self, path: str, method: str = "get", data: Dict[str, Any] = None
) -> Response:
"""Handle CloudEndure API calls based on the defined parameters.
Args:
path (str): The path to be used to perform the call.
Keyword Args:
method (str): The API method call to be performed, e.g. one of get, post, patch, delete, put.
data (dict): The data dictionary to be used to perform the request.
Returns:
requests.models.Response: The CloudEndure API response.
"""
method: str = method.lower() # Ensure the provided method is lowercase.
if data is None:
data: Dict[str, Any] = {}
if method not in METHOD_TYPES:
print("Please specify a valid method type! Must be one of: ", METHOD_TYPES)
return Response()
if method not in ["get", "delete"] and data is None:
print(
"Paramater mismatch! If calling anything other than get or delete provide data!"
)
return Response()
# Attempt to call the CloudEndure API.
try:
ce_call = getattr(self.session, method)
_path = self.get_endpoint(path)
return ce_call(_path, data=data)
except Exception as e:
print(f"Exception encountered in CloudEndure API call: ({e})")
return Response()
def check_creds(self, login: bool = True) -> Dict[str, str]:
"""Check the credential TTL."""
threshold: datetime.datetime = datetime.datetime.utcnow() - AUTH_TTL
if threshold < self.config.active_config.get("last_updated", 0):
if login:
is_valid: bool = self.login()
if is_valid:
return {"status": "updated"}
return {"status": "expired"}
return {"status": "valid"}
def post_endpoint(self, path: str = "") -> Response:
"""Create a POST request against the specified path."""
response: requests.Response = self.session.post(f"{self.api_endpoint}/{path}")
return response
def get_projects(self, current_project: str = "") -> List[Any]:
"""Get the CloudEndure projects associated with the authenticated account."""
self.login()
response: requests.Response = self.session.get(f"{self.api_endpoint}/projects")
data: Dict[str, Any] = response.json()
status_code: int = response.status_code
if status_code not in [200]:
raise CloudEndureException()
projects: List[Any] = data["items"]
self.projects: List[Any] = projects
if current_project:
return list(
filter(lambda project: project["name"] == current_project, projects)
)
return projects
@classmethod
def docs(cls) -> str:
"""Open the CloudEndure API documentation page."""
docs_url: str = os.environ.get(
"CLOUDENDURE_API_DOCS", "https://console.cloudendure.com/api_doc/apis.html"
)
open_new_tab(docs_url)
return docs_url
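# Hedged usage sketch (the CloudEndureConfig setup and project name are
# placeholders, not values defined by this module):
#
#   config = CloudEndureConfig()
#   api = CloudEndureAPI(config)
#   if api.login():
#       projects = api.get_projects(current_project="my-project")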
|
the-stack_0_25506
|
from amuse.support.interface import InCodeComponentImplementation
from amuse.test.amusetest import TestWithMPI
from amuse.test.amusetest import TestCase
import numpy
import sys
import os
import time
import pickle
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.rfi import python_code
from amuse.rfi.core import (
PythonCodeInterface,
LegacyFunctionSpecification,
legacy_function,
)
from amuse.rfi.async_request import AsyncRequestsPool
from amuse.rfi.async_request import ASyncRequestSequence
from amuse.rfi.tools.create_python_worker import CreateAPythonWorker
from amuse.support import exceptions
class ForTestingInterface(PythonCodeInterface):
def __init__(self, **options):
PythonCodeInterface.__init__(self, implementation_factory=ForTestingImplementation, **options)
@legacy_function
def get_mass():
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('mass', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
function.id = 10
return function
@legacy_function
def set_mass():
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('mass', dtype='float64', direction=function.IN, description="The new mass of the particle")
function.result_type = 'int32'
function.can_handle_array = True
function.id = 11
return function
@legacy_function
def new_particle():
function = LegacyFunctionSpecification()
function.addParameter('mass', dtype='float64', direction=function.IN, description="The new mass of the particle")
function.addParameter('other', dtype='int32', direction=function.IN)
function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def delete_particle():
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def echo_int():
function = LegacyFunctionSpecification()
function.addParameter('int_in', dtype='int32', direction=function.IN)
function.addParameter('int_out', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
function.id = 12
return function
@legacy_function
def echo_double():
function = LegacyFunctionSpecification()
function.addParameter('double_in', dtype='float64', direction=function.IN)
function.addParameter('double_out', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
function.id = 13
return function
@legacy_function
def echo_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.addParameter('string_out', dtype='string', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
function.id = 14
return function
@legacy_function
def echo_strings():
function = LegacyFunctionSpecification()
function.addParameter('string_inout1', dtype='string', direction=function.INOUT)
function.addParameter('string_inout2', dtype='string', direction=function.INOUT)
function.result_type = 'int32'
function.can_handle_array = True
function.id = 15
return function
@legacy_function
def echo_bool():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='bool', direction=function.IN)
function.addParameter('string_out', dtype='bool', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
function.id = 16
return function
@legacy_function
def sleep():
function = LegacyFunctionSpecification()
function.addParameter('number_of_seconds', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.can_handle_array = False
return function
@legacy_function
def sum_doubles():
function = LegacyFunctionSpecification()
function.addParameter('double_in1', dtype='float64', direction=function.IN)
function.addParameter('double_in2', dtype='float64', direction=function.IN, default=1.0)
function.addParameter('double_out', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def multiply_ints():
function = LegacyFunctionSpecification()
function.addParameter('int_in1', dtype='int32', direction=function.IN)
function.addParameter('int_in2', dtype='int32', direction=function.IN)
function.addParameter('int_out', dtype='int32', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def print_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def print_error_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def get_position():
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float64', direction=function.OUT)
function.addParameter('y', dtype='float64', direction=function.OUT)
function.addParameter('z', dtype='float64', direction=function.OUT)
function.addParameter('length', 'int32', function.LENGTH)
function.result_type = 'int32'
function.must_handle_array = True
return function
@legacy_function
def set_position():
function = LegacyFunctionSpecification()
function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)
function.addParameter('x', dtype='float64', direction=function.IN)
function.addParameter('y', dtype='float64', direction=function.IN)
function.addParameter('z', dtype='float64', direction=function.IN)
function.addParameter('length', 'int32', function.LENGTH)
function.result_type = 'int32'
function.must_handle_array = True
return function
@legacy_function
def copy_over_interface():
function = LegacyFunctionSpecification()
function.addParameter('comm_identifier', dtype='int32', direction=function.IN)
function.addParameter('encoded_interface', dtype='string', direction=function.IN)
function.result_type = 'int32'
function.can_handle_array = False
return function
@legacy_function
def deep_echo_string():
function = LegacyFunctionSpecification()
function.addParameter('string_in', dtype='string', direction=function.IN)
function.addParameter('string_out', dtype='string', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
return function
@legacy_function
def return_control():
function = LegacyFunctionSpecification()
function.result_type = 'int32'
function.can_handle_array = False
return function
@legacy_function
def echo_quantity():
function = LegacyFunctionSpecification()
function.addParameter('quantity_in', dtype='float64', direction=function.IN)
function.addParameter('quantity_out', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.can_handle_array = True
function.has_units = True
function.id = 23
return function
@legacy_function
def echo_quantities():
function = LegacyFunctionSpecification()
function.addParameter('quantity_in', dtype='float64', direction=function.IN)
function.addParameter('quantity_out', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.must_handle_array = True
function.has_units = True
function.id = 23
return function
@legacy_function
def echo_quantities_error():
function = LegacyFunctionSpecification()
function.addParameter('quantity_in', dtype='float64', direction=function.IN)
function.addParameter('quantity_out', dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.must_handle_array = True
function.has_units = True
function.id = 25
return function
basic_python_exe = """#!{executable}
import sys
import os
from subprocess import call
if __name__ == '__main__':
command = sys.argv[1:]
dirname=os.path.dirname(__file__)
dirname=os.path.abspath(dirname)
logfile=os.path.join(dirname, 'pythonexe.log')
with open(logfile, 'a') as stream:
stream.write('start {{0}}\\n'.format(command[0]))
stream.flush()
command.insert(0, sys.executable)
returncode = call(command, close_fds=False)
with open(logfile, 'a') as stream:
stream.write('end {{0}} {{1}}\\n'.format(command[0], returncode))
stream.flush()
sys.exit(returncode)
"""
class ForTestingImplementation(object):
def __init__(self):
self.masses = [0.0] * 100
self._particle_data = numpy.reshape(numpy.arange(300.0), (-1, 3))
self.maxindex = 0
def new_particle(self, mass, other, index_of_the_particle):
try:
self.masses[self.maxindex] = mass
index_of_the_particle.value = self.maxindex
self.maxindex += 1
return 0
        except Exception:
return -1
def get_mass(self, index_of_the_particle, mass):
try:
mass.value = self.masses[index_of_the_particle]
return 0
        except Exception:
return -1
def set_mass(self, index_of_the_particle, mass):
try:
self.masses[index_of_the_particle] = mass
return 0
        except Exception:
return -1
def echo_int(self, int_in, int_out):
int_out.value = int_in
return 0
def echo_double(self, double_in, double_out):
double_out.value = double_in
return 0
def echo_string(self, string_in, string_out):
string_out.value = string_in
return 0
def echo_bool(self, in_, out_):
out_.value = in_
return 0
def print_string(self, string_in):
print(string_in)
return 0
def print_error_string(self, string_in):
print(string_in, file=sys.stderr)
return 0
def echo_strings(self, string_inout1, string_inout2):
string_inout1.value = string_inout1.value[::-1]
string_inout2.value = string_inout2.value[::-1]
return 0
def sleep(self, number_of_seconds):
import time
time.sleep(number_of_seconds)
return 0
def sum_doubles(self, double_in1, double_in2, double_out):
double_out.value = double_in1 + double_in2
return 0
def multiply_ints(self, int_in1, int_in2, int_out):
int_out.value = int_in1 * int_in2
return 0
def get_position(self, index_of_the_particle, x, y, z, length):
try:
x.value = self._particle_data[index_of_the_particle, 0]
y.value = self._particle_data[index_of_the_particle, 1]
z.value = self._particle_data[index_of_the_particle, 2]
return 0
        except Exception:
return -1
def set_position(self, index_of_the_particle, x, y, z, length):
try:
self._particle_data[index_of_the_particle, 0] = x
self._particle_data[index_of_the_particle, 1] = y
self._particle_data[index_of_the_particle, 2] = z
return 0
        except Exception:
return -1
def copy_over_interface(self, comm_identifier, encoded_interface):
self._other = pickle.loads(encoded_interface.encode("latin-1"))
self._other.channel.intercomm = self._interface.communicators[comm_identifier]
return 0
def deep_echo_string(self, string_in, string_out):
result, errorcode = self._other.echo_string(string_in)
string_out.value = result[::-1]
return errorcode
def return_control(self):
self._other.internal__activate_communicator(0)
return 0
def echo_quantity(self, quantity_in, quantity_out):
quantity_out.value = quantity_in * (10 | (1.0/units.s))
return 0
def echo_quantities(self, quantity_in, quantity_out):
quantity_out.value = quantity_in * (10 | (1.0/units.s))
return 0
def echo_quantities_error(self, quantity_in, quantity_out):
raise Exception("an unexpected event")
return 0
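# Object-oriented wrapper around the raw interface: binds units to the low-level
# calls and exposes the stored masses as an in-memory particle set.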
class ForTesting(InCodeComponentImplementation):
def __init__(self, **options):
InCodeComponentImplementation.__init__(self, ForTestingInterface(**options), **options)
def define_methods(self, object):
object.add_method("sleep", (units.s,), (object.ERROR_CODE,))
object.add_method("new_particle", (units.kg, object.LINK("particles")), (object.INDEX, object.ERROR_CODE,))
def define_particle_sets(self, object):
object.define_set('particles', 'index_of_the_particle')
object.set_new('particles', 'new_particle')
object.set_delete('particles', 'delete_particle')
object.add_setter('particles', 'set_mass')
object.add_getter('particles', 'get_mass')
class TestCreatePythonWorker(TestCase):
def test1(self):
x = CreateAPythonWorker()
x.implementation_factory = ForTestingImplementation
x.channel_type = 'mpi'
x.interface_class = ForTestingInterface
script_string = x.new_executable_script_string()
self.assertTrue(script_string.find('syspath = (') > 0)
self.assertTrue(script_string.find('ForTestingInterface') > 0)
self.assertTrue(script_string.find('ForTestingImplementation') > 0)
self.assertTrue(script_string.find('test_python_implementation') > 0)
self.assertTrue(script_string.find('PythonImplementation(instance, ForTestingInterface)') > 0)
try:
st = compile(script_string, 'test.py', 'exec')
except SyntaxError as ex:
self.fail("Compilation error {0}".format(ex))
def test2(self):
x = CreateAPythonWorker()
x.implementation_factory = ForTestingImplementation
x.channel_type = 'sockets'
x.interface_class = ForTestingInterface
script_string = x.new_executable_script_string()
self.assertTrue(script_string.find('syspath = (') > 0)
self.assertTrue(script_string.find('ForTestingInterface') > 0)
self.assertTrue(script_string.find('ForTestingImplementation') > 0)
self.assertTrue(script_string.find('test_python_implementation') > 0)
self.assertTrue(script_string.find('PythonImplementation(instance, ForTestingInterface)') > 0)
self.assertTrue(script_string.find('start_socket') > 0)
try:
st = compile(script_string, 'test.py', 'exec')
except SyntaxError as ex:
self.fail("Compilation error {0}".format(ex))
class TestInterface(TestWithMPI):
def ForTesting(self, **options):
options["worker_dir"] = self.get_path_to_results()
return ForTesting(**options)
def ForTestingInterface(self, **options):
options["worker_dir"] = self.get_path_to_results()
return ForTestingInterface(**options)
def test02(self):
implementation = ForTestingImplementation()
x = python_code.PythonImplementation(implementation, ForTestingInterface)
input_message = python_code.ClientSideMPIMessage(0, 10, 1)
input_message.ints = [1]
output_message = python_code.ClientSideMPIMessage(0, 10, 1)
x.handle_message(input_message, output_message)
self.assertEqual(len(output_message.ints), 1)
self.assertEqual(len(output_message.doubles), 1)
self.assertEqual(output_message.ints[0], 0)
self.assertEqual(output_message.doubles[0], 0.0)
def test03(self):
implementation = ForTestingImplementation()
x = python_code.PythonImplementation(implementation, ForTestingInterface)
input_message = python_code.ClientSideMPIMessage(0, 11, 1)
input_message.ints = [1]
input_message.doubles = [12.0]
output_message = python_code.ClientSideMPIMessage(0, 10, 1)
x.handle_message(input_message, output_message)
self.assertEqual(len(output_message.ints), 1)
self.assertEqual(len(output_message.doubles), 0)
self.assertEqual(output_message.ints[0], 0)
self.assertEqual(implementation.masses[1], 12.0)
def test04(self):
implementation = ForTestingImplementation()
x = python_code.PythonImplementation(implementation, ForTestingInterface)
input_message = python_code.ClientSideMPIMessage(0, 11, 4)
input_message.ints = [1, 2, 3, 4]
input_message.doubles = [12.0, 13.0, 14.0, 15.0]
output_message = python_code.ClientSideMPIMessage(0, 10, 4)
x.handle_message(input_message, output_message)
self.assertEqual(len(output_message.ints), 4)
self.assertEqual(len(output_message.doubles), 0)
self.assertEqual(output_message.ints[0], 0)
self.assertEqual(output_message.ints[3], 0)
self.assertEqual(implementation.masses[1], 12.0)
self.assertEqual(implementation.masses[2], 13.0)
self.assertEqual(implementation.masses[3], 14.0)
self.assertEqual(implementation.masses[4], 15.0)
def test05(self):
x = self.ForTestingInterface()
error = x.set_mass(1, 10.0)
self.assertEqual(error, 0)
answer, error = x.get_mass(1)
self.assertEqual(error, 0)
self.assertEqual(answer, 10.0)
x.stop()
def test06(self):
x = self.ForTestingInterface()
errors = x.set_mass([1, 2], [10.0, 11.0])
self.assertEqual(errors[0], 0)
self.assertEqual(errors[1], 0)
answer, errors = x.get_mass([1, 2])
self.assertEqual(errors[0], 0)
self.assertEqual(answer[0], 10.0)
self.assertEqual(answer[1], 11.0)
        x.stop()
def test07(self):
x = self.ForTestingInterface()
int_out, error = x.echo_int(20)
self.assertEqual(error, 0)
self.assertEqual(int_out, 20)
        x.stop()
def test08(self):
implementation = ForTestingImplementation()
x = python_code.PythonImplementation(implementation, ForTestingInterface)
input_message = python_code.ClientSideMPIMessage(0, 12, 1)
input_message.ints = [20]
output_message = python_code.ClientSideMPIMessage(0, 10, 1)
x.handle_message(input_message, output_message)
self.assertEqual(len(output_message.ints), 2)
self.assertEqual(output_message.ints[0], 0)
self.assertEqual(output_message.ints[1], 20)
def test09(self):
x = self.ForTestingInterface()
string_out, error = x.echo_string("1234567")
self.assertEqual(error, 0)
self.assertEqual(string_out, "1234567")
x.stop()
def test10(self):
x = self.ForTestingInterface()
string_out, error = x.echo_string(["aaaaa", "bbbb"])
self.assertEqual(error[0], 0)
self.assertEqual(len(string_out), 2)
self.assertEqual(string_out[0], "aaaaa")
self.assertEqual(string_out[1], "bbbb")
x.stop()
def test11(self):
x = self.ForTestingInterface()
string_out, error = x.echo_string(["", "bbbb"])
self.assertEqual(error[0], 0)
self.assertEqual(len(string_out), 2)
self.assertEqual(string_out[0], "")
self.assertEqual(string_out[1], "bbbb")
x.stop()
def test12(self):
x = self.ForTestingInterface()
str1_out, str2_out, error = x.echo_strings("abc", "def")
self.assertEqual(error, 0)
self.assertEqual(str1_out, "cba")
self.assertEqual(str2_out, "fed")
x.stop()
def test13(self):
x = self.ForTestingInterface()
str1_out, str2_out, error = x.echo_strings(["abc", "def"], ["ghi", "jkl"])
self.assertEqual(error[0], 0)
self.assertEqual(error[1], 0)
self.assertEqual(str1_out[0], "cba")
self.assertEqual(str1_out[1], "fed")
self.assertEqual(str2_out[0], "ihg")
self.assertEqual(str2_out[1], "lkj")
x.stop()
def test14(self):
x = self.ForTestingInterface()
result = x.sleep(2)
self.assertEqual(result, 0)
request = x.sleep.asynchronous(0.01)
request.wait()
result = request.result()
self.assertEqual(result, 0)
x.stop()
def test15(self):
x = self.ForTestingInterface()
y = self.ForTestingInterface()
request1 = x.sleep.asynchronous(0.5)
self.assertFalse(request1.is_result_available())
request2 = y.sleep.asynchronous(1.5)
self.assertFalse(request2.is_result_available())
request2.wait()
self.assertTrue(request1.is_result_available())
self.assertTrue(request2.is_result_available())
self.assertEqual(request1.result(), 0)
self.assertEqual(request2.result(), 0)
y.stop()
x.stop()
def test16(self):
x = self.ForTestingInterface()
request1 = x.sleep.asynchronous(0.4)
x.sleep(0.01)
        self.assertTrue(request1.is_result_available())
request1.wait()
self.assertTrue(request1.is_result_available())
x.sleep(0.01)
self.assertTrue(request1.is_result_available())
x.stop()
def test17(self):
x = self.ForTesting()
self.assertTrue(x.sleep.is_async_supported)
request = x.sleep.asynchronous(0.2 | units.s)
request.wait()
result = request.result()
self.assertEqual(result, [])
x.stop()
def test18(self):
print("Testing the splitting of very long MPI messages into blocks")
x = self.ForTesting(max_message_length=10)
N = 100
doubles = x.echo_double([1.0*i for i in range(N)])
self.assertTrue(list(doubles) == [1.0*i for i in range(N)])
sums = x.sum_doubles([1.0*i for i in range(N)], [1.0*i for i in range(N)])
self.assertTrue(list(sums) == [2.0*i for i in range(N)])
products = x.multiply_ints(list(range(N)), list(range(N)))
self.assertTrue(list(products) == [i*i for i in range(N)])
N = 101
doubles = x.echo_double([1.0*i for i in range(N)])
self.assertTrue(list(doubles) == [1.0*i for i in range(N)])
sums = x.sum_doubles([1.0*i for i in range(N)], [1.0*i for i in range(N)])
self.assertTrue(list(sums) == [2.0*i for i in range(N)])
products = x.multiply_ints(list(range(N)), list(range(N)))
self.assertTrue(list(products) == [i*i for i in range(N)])
x.stop()
def test19(self):
print("Testing the splitting of very long MPI messages into blocks II: strings")
x = self.ForTesting(max_message_length=10)
N = 100
strings1, strings2 = x.echo_strings(['REDRUM' for i in range(N)], ['stressed' for i in range(N)])
self.assertTrue(list(strings1) == ['MURDER' for i in range(N)])
self.assertTrue(list(strings2) == ['desserts' for i in range(N)])
N = 101
strings1, strings2 = x.echo_strings(['REDRUM' for i in range(N)], ['stressed' for i in range(N)])
self.assertTrue(list(strings1) == ['MURDER' for i in range(N)])
self.assertTrue(list(strings2) == ['desserts' for i in range(N)])
x.stop()
def xtest20(self):
#
# TURNED OFF support for redirection,
# by default output is redirected to /dev/null
# if you need file, use the support from your mpi implementation
#
if os.path.exists("pout.000"):
os.remove("pout.000")
if os.path.exists("perr.000"):
os.remove("perr.000")
x = self.ForTesting(redirect_stderr_file='perr', redirect_stdout_file='pout', redirection="file")
x.print_string("abc")
x.print_error_string("exex")
x.stop()
time.sleep(0.2)
self.assertTrue(os.path.exists("pout.000"))
with open("pout.000", "r") as f:
content = f.read()
self.assertEqual(content.strip(), "abc")
self.assertTrue(os.path.exists("perr.000"))
with open("perr.000", "r") as f:
content = f.read()
self.assertEqual(content.strip(), "exex")
x = self.ForTesting(redirect_stderr_file='pout', redirect_stdout_file='pout', redirection="file")
x.print_string("def")
x.print_error_string("exex")
x.stop()
time.sleep(0.2)
self.assertTrue(os.path.exists("pout.000"))
with open("pout.000", "r") as f:
content = f.read()
self.assertEqual(content.strip(), "abc\ndef\nexex")
def test21(self):
print("Testing must_handle_array for Python codes")
instance = self.ForTestingInterface()
x, y, z, err = instance.get_position(list(range(100)))
self.assertEqual(err, 0)
self.assertEqual(x, numpy.arange(0.0, 300.0, 3.0))
self.assertEqual(y, numpy.arange(1.0, 300.0, 3.0))
self.assertEqual(z, numpy.arange(2.0, 300.0, 3.0))
x, y, z, err = instance.get_position(list(range(101)))
self.assertEqual(err, -1)
self.assertEqual(list(instance.get_position(1).values()), [3.0, 4.0, 5.0, 0])
err = instance.set_position(list(range(100)), numpy.arange(100.0), numpy.arange(100.0, 200.0), numpy.arange(200.0, 300.0))
self.assertEqual(err, 0)
err = instance.set_position(list(range(101)), numpy.arange(101.0), numpy.arange(101.0, 202.0), numpy.arange(202.0, 303.0))
self.assertEqual(err, -1)
err = instance.set_position(0, -1.0, -2.0, -3.0)
x, y, z, err = instance.get_position(list(range(100)))
self.assertEqual(err, 0)
self.assertEqual(x, numpy.concatenate(([-1.0], numpy.arange(1.0, 100.0))))
self.assertEqual(y, numpy.concatenate(([-2.0], numpy.arange(101.0, 200.0))))
self.assertEqual(z, numpy.concatenate(([-3.0], numpy.arange(201.0, 300.0))))
instance.stop()
def test22(self):
pool = AsyncRequestsPool()
x = self.ForTestingInterface()
y = self.ForTestingInterface()
request1 = x.sleep.asynchronous(0.5)
request2 = y.sleep.asynchronous(1.5)
finished_requests = []
def handle_result(request, index):
self.assertTrue(request.is_result_available())
finished_requests.append(index)
pool.add_request(request1, handle_result, [1])
pool.add_request(request2, handle_result, [2])
pool.wait()
self.assertEqual(len(finished_requests), 1)
self.assertEqual(len(pool), 1)
pool.wait()
self.assertEqual(len(finished_requests), 2)
self.assertEqual(len(pool), 0)
self.assertTrue(request1.is_result_available())
self.assertTrue(request2.is_result_available())
self.assertEqual(request1.result(), 0)
self.assertEqual(request2.result(), 0)
y.stop()
x.stop()
def test22b(self):
pool = AsyncRequestsPool()
x = self.ForTestingInterface()
y = self.ForTestingInterface()
request1 = x.sleep.asynchronous(0.2)
request2 = y.sleep.asynchronous(0.2)
finished_requests = []
def handle_result(request, index):
self.assertTrue(request.is_result_available())
finished_requests.append(index)
pool.add_request(request1, handle_result, [1])
pool.add_request(request2, handle_result, [2])
time.sleep(1.0)
pool.wait()
pool.wait()
self.assertEqual(len(finished_requests), 2)
self.assertEqual(len(pool), 0)
self.assertTrue(request1.is_result_available())
self.assertTrue(request2.is_result_available())
self.assertEqual(request1.result(), 0)
self.assertEqual(request2.result(), 0)
pool.wait()
self.assertEqual(len(pool), 0)
y.stop()
x.stop()
def test23(self):
path = self.get_path_to_results()
exe = os.path.join(path, "pythonexe")
log = os.path.join(path, "pythonexe.log")
if os.path.exists(exe):
os.remove(exe)
if os.path.exists(log):
os.remove(log)
string = basic_python_exe.format(executable=sys.executable)
with open(exe, 'w') as f:
f.write(string)
os.chmod(exe, 0o777)
instance = self.ForTestingInterface(
use_python_interpreter=True,
python_interpreter=exe
)
x, y, z, err = instance.get_position(list(range(100)))
self.assertEqual(err, 0)
self.assertEqual(x, numpy.arange(0.0, 300.0, 3.0))
self.assertEqual(y, numpy.arange(1.0, 300.0, 3.0))
self.assertEqual(z, numpy.arange(2.0, 300.0, 3.0))
instance.stop()
time.sleep(0.3)
self.assertTrue(os.path.exists(log))
with open(log, 'r') as f:
loglines = f.read().splitlines()
self.assertEqual(len(loglines), 2)
self.assertTrue(loglines[0].startswith('start '))
self.assertTrue(loglines[1].startswith('end '))
def test24(self):
# same as test23 but now with redirection is none
path = self.get_path_to_results()
exe = os.path.join(path, "pythonexe")
log = os.path.join(path, "pythonexe.log")
if os.path.exists(exe):
os.remove(exe)
if os.path.exists(log):
os.remove(log)
string = basic_python_exe.format(executable=sys.executable)
with open(exe, 'w') as f:
f.write(string)
os.chmod(exe, 0o777)
instance = self.ForTestingInterface(
use_python_interpreter=True,
python_interpreter=exe,
redirection="none"
)
x, y, z, err = instance.get_position(list(range(100)))
self.assertEqual(err, 0)
self.assertEqual(x, numpy.arange(0.0, 300.0, 3.0))
self.assertEqual(y, numpy.arange(1.0, 300.0, 3.0))
self.assertEqual(z, numpy.arange(2.0, 300.0, 3.0))
instance.stop()
time.sleep(0.3)
self.assertTrue(os.path.exists(log))
with open(log, 'r') as f:
loglines = f.read().splitlines()
self.assertEqual(len(loglines), 2)
self.assertTrue(loglines[0].startswith('start '))
self.assertTrue(loglines[1].startswith('end '))
def test25(self):
self.check_for_mpi()
instance = self.ForTestingInterface(polling_interval_in_milliseconds=100)
(output1, error1) = instance.internal__get_message_polling_interval()
instance.stop()
self.assertEqual(error1, 0)
self.assertEqual(output1, 100000)
    def test25b(self):  # variant of test25 that only asserts when polling is supported
instance = self.ForTestingInterface(polling_interval_in_milliseconds=100)
if instance.channel.is_polling_supported():
(output1, error1) = instance.internal__get_message_polling_interval()
self.assertEqual(error1, 0)
self.assertEqual(output1, 100000)
instance.stop()
def test26(self):
self.check_for_mpi()
instance1 = self.ForTestingInterface()
instance2 = self.ForTestingInterface()
portname, error = instance1.internal__open_port()
self.assertTrue(len(portname) > 0)
self.assertEqual(error, 0)
request1 = instance1.internal__accept_on_port.asynchronous(portname)
request2 = instance2.internal__connect_to_port.asynchronous(portname)
request1.wait()
request2.wait()
port_id1, error1 = request1.result()
port_id2, error2 = request2.result()
self.assertTrue(port_id1 >= 0)
self.assertTrue(port_id2 >= 0)
self.assertEqual(error1, 0)
self.assertEqual(error2, 0)
def test27(self):
self.check_for_mpi()
instance1 = self.ForTestingInterface(redirection="none")
instance2 = self.ForTestingInterface(redirection="none")
encoded_interface = pickle.dumps(instance1, 0)
decoded_interface = pickle.loads(encoded_interface)
#pickle.loads(pickle.dumps(instance1,0))
portname, error = instance2.internal__open_port()
request1 = instance2.internal__accept_on_port.asynchronous(portname)
request2 = instance1.internal__connect_to_port.asynchronous(portname)
request1.wait()
request2.wait()
port_id1, error1 = request1.result()
port_id2, error2 = request2.result()
instance2.copy_over_interface(port_id2, pickle.dumps(instance1, 0).decode('latin-1'))
instance1.internal__activate_communicator(port_id1)
result, errorcode = instance2.deep_echo_string("hello")
self.assertEqual(errorcode, 0)
self.assertEqual(result, "olleh")
result, errorcode = instance2.deep_echo_string("world")
self.assertEqual(errorcode, 0)
self.assertEqual(result, "dlrow")
instance2.return_control()
result, errorcode = instance1.echo_string("world")
self.assertEqual(errorcode, 0)
self.assertEqual(result, "world")
def test28(self):
x = self.ForTestingInterface()
def next_request(index):
if index < 3:
return x.sleep.asynchronous(0.1)
else:
return None
sequence = ASyncRequestSequence(next_request)
self.assertFalse(sequence.is_finished)
sequence.wait()
self.assertTrue(sequence.is_finished)
result = sequence.result()
self.assertEqual(len(result), 3)
x.stop()
def test29(self):
pool = AsyncRequestsPool()
x = self.ForTestingInterface()
y = self.ForTestingInterface()
sequenced_requests_indices = []
def next_request(index):
if index < 4:
sequenced_requests_indices.append(index)
return x.sleep.asynchronous(0.5)
else:
return None
request1 = ASyncRequestSequence(next_request)
request2 = y.sleep.asynchronous(1.0)
finished_requests = []
def handle_result(request, index):
self.assertTrue(request.is_result_available())
self.assertTrue(request.is_finished)
finished_requests.append(index)
pool.add_request(request1, handle_result, [1])
pool.add_request(request2, handle_result, [2])
pool.wait()
self.assertEqual(len(finished_requests), 1)
self.assertEqual(len(pool), 1)
self.assertEqual(finished_requests, [2])
self.assertTrue(len(sequenced_requests_indices) > 0)
pool.wait()
self.assertEqual(len(finished_requests), 2)
self.assertEqual(len(pool), 0)
x.sleep(0.1)
self.assertEqual(sequenced_requests_indices, [0, 1, 2, 3])
self.assertTrue(request1.is_result_available())
self.assertTrue(request2.is_result_available())
self.assertEqual(request1.result(), [0, 0, 0, 0])
self.assertEqual(request2.result(), 0)
y.stop()
x.stop()
def test30(self):
instance = self.ForTesting()
input = [1.0, 2.0, 3.0]
output = instance.sum_doubles(input, 5)
self.assertAlmostRelativeEquals(output, [6.0, 7.0, 8.0])
output = instance.sum_doubles(5, input)
self.assertAlmostRelativeEquals(output, [6.0, 7.0, 8.0])
def test31(self):
x = self.ForTesting()
p = datamodel.Particles(5)
p.mass = [1, 2, 3, 4, 5] | units.kg
p.other = None
for pi in p:
x.particles.add_particle(pi)
self.assertAlmostRelativeEquals(x.particles.mass, [1, 2, 3, 4, 5])
x.stop()
def test32(self):
x = self.ForTestingInterface()
quantity_out, error = x.echo_quantity(20.0 | units.m)
self.assertEqual(error, 0)
self.assertEqual(quantity_out, 200 | (units.m/units.s))
quantity_out, error = x.echo_quantity(30)
self.assertEqual(error, 0)
self.assertEqual(quantity_out, 300 | (1.0/units.s))
x.stop()
def test33(self):
x = self.ForTestingInterface()
quantity_out, error = x.echo_quantity([20, 30, 40] | units.m)
self.assertEqual(error, 0)
self.assertEqual(quantity_out, [200, 300, 400] | (units.m/units.s))
x.stop()
def test34(self):
x = self.ForTestingInterface()
#self.assertException(x.echo_quantities_error, [20, 30, 40] | units.m)
quantity_out, error = x.echo_quantities([20, 30, 40] | units.m)
self.assertEqual(error, 0)
self.assertEqual(quantity_out, [200, 300, 400] | (units.m/units.s))
x.stop()
def test35(self):
x = self.ForTesting(max_message_length=10)
N = 10
doubles = x.echo_double([1.0*i for i in range(N)])
self.assertTrue(list(doubles) == [1.0*i for i in range(N)])
sums = x.sum_doubles([3.0*i for i in range(N)])
self.assertTrue(list(sums) == [3.0*i + 1 for i in range(N)])
N = 11
doubles = x.echo_double([1.0*i for i in range(N)])
self.assertTrue(list(doubles) == [1.0*i for i in range(N)])
sums = x.sum_doubles([3.0*i for i in range(N)])
self.assertTrue(list(sums) == [3.0*i + 1 for i in range(N)])
x.stop()
def test36(self):
x = self.ForTestingInterface()
self.assertRaises(exceptions.CodeException, x.echo_quantities_error, ([20, 30, 40] | units.m), expected_message="Exception when calling function 'echo_quantities_error', of code 'ForTestingInterface', exception was 'Error in code: an unexpected event'")
x.stop()
def test37(self):
x = self.ForTestingInterface()
request = x.echo_quantity.asynchronous([20, 30, 40] | units.m)
quantity_out, error = request.result()
self.assertEqual(error, 0)
self.assertEqual(quantity_out, [200, 300, 400] | (units.m/units.s))
x.stop()
def test40(self):
x = self.ForTesting()
out = x.echo_bool([True, False, True])
self.assertEqual(out, [True, False, True])
x.stop()
|
the-stack_0_25508
|
import os
from poison_detection import ActivationDefence
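# Wrapper holding per-sample activations for the activation-clustering poison
# defence; the commented-out code documents how they were originally extracted
# from the classifier's penultimate layer.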
class Activations:
def __init__(self, model, para):
# self.poison = model.get_train_poison()
# defence = ActivationDefence(model.classifier, data.x_train, data.y_train,
# data_path=os.path.join(data.data_path, 'train'), batch_size=data.batch_size)
# self.activations = self._get_activations(defence)
self.activations = model
self.para = para
def _get_activations(self, defences):
nb_layers = len(defences.classifier.layer_names)
activations_by_layers = []
'''
for i in range(nb_layers):
activations_by_layers.append(
defences.classifier.get_activations(defences.x_train, layer=i, data_path=defences.data_path,
batch_size=defences.batch_size))
'''
activations_by_layers.append(
defences.classifier.get_activations(defences.x_train, layer=nb_layers - 2, data_path=defences.data_path,
batch_size=defences.batch_size))
nb_layers = 1
activations = [[] for i in range(len(defences.x_train))]
for i in range(nb_layers):
for j in range(len(defences.x_train)):
activations[j].append(activations_by_layers[i][j])
# print(len(activations[0]))
return activations
def restore_data(self, data):
data = data(self.para)
        data.load_data()
data.restore_train_backdoor(self.poison)
# self.shuffle_activations(data.shuffled_indices)
data.gen_test_backdoor()
return data
def shuffle_activations(self, shuffled_index):
self.activations = [self.activations[i] for i in shuffled_index]
|
the-stack_0_25510
|
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
import time
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import iden_coeffs, _conc_mos
from pyscf.pbc import tools
from pyscf.pbc.df.df_jk import zdotNN, zdotCN, zdotNC, KPT_DIFF_TOL
from pyscf.pbc.df.fft_ao2mo import _format_kpts
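# Assemble 4-index electron repulsion integrals in the AO basis from the 3-index
# density-fitting tensor (L|pq), branching on the k-point combination so that the
# available permutation symmetry (s4 at gamma, s1 otherwise) is exploited.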
def get_eri(mydf, kpts=None, compact=True):
if mydf._cderi is None:
mydf.build()
cell = mydf.cell
kptijkl = _format_kpts(kpts)
kpti, kptj, kptk, kptl = kptijkl
nao = cell.nao_nr()
nao_pair = nao * (nao+1) // 2
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0]-nao**4*8/1e6)
####################
# gamma point, the integral is real and with s4 symmetry
if abs(kptijkl).sum() < KPT_DIFF_TOL:
eriR = numpy.zeros((nao_pair,nao_pair))
for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, True):
lib.ddot(LpqR.T, LpqR, 1, eriR, 1)
LpqR = LpqI = None
if not compact:
eriR = ao2mo.restore(1, eriR, nao).reshape(nao**2,-1)
return eriR
elif (abs(kpti-kptk).sum() < KPT_DIFF_TOL) and (abs(kptj-kptl).sum() < KPT_DIFF_TOL):
eriR = numpy.zeros((nao*nao,nao*nao))
eriI = numpy.zeros((nao*nao,nao*nao))
for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
zdotNN(LpqR.T, LpqI.T, LpqR, LpqI, 1, eriR, eriI, 1)
LpqR = LpqI = None
return eriR + eriI*1j
####################
# (kpt) i == j == k == l != 0
#
# (kpt) i == l && j == k && i != j && j != k =>
# both vbar and ovlp are zero. It corresponds to the exchange integral.
#
# complex integrals, N^4 elements
elif (abs(kpti-kptl).sum() < KPT_DIFF_TOL) and (abs(kptj-kptk).sum() < KPT_DIFF_TOL):
eriR = numpy.zeros((nao*nao,nao*nao))
eriI = numpy.zeros((nao*nao,nao*nao))
for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
zdotNC(LpqR.T, LpqI.T, LpqR, LpqI, 1, eriR, eriI, 1)
LpqR = LpqI = None
# transpose(0,1,3,2) because
# j == k && i == l =>
# (L|ij).transpose(0,2,1).conj() = (L^*|ji) = (L^*|kl) => (M|kl)
eri = lib.transpose((eriR+eriI*1j).reshape(-1,nao,nao), axes=(0,2,1))
return eri.reshape(nao**2,-1)
####################
# aosym = s1, complex integrals
#
# kpti == kptj => kptl == kptk
# If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave
# vector symmetry. k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.
# So kptl/b - kptk/b must be -1 < k/b < 1.
#
else:
eriR = numpy.zeros((nao*nao,nao*nao))
eriI = numpy.zeros((nao*nao,nao*nao))
for (LpqR, LpqI), (LrsR, LrsI) in \
lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False),
mydf.sr_loop(kptijkl[2:], max_memory, False)):
zdotNN(LpqR.T, LpqI.T, LrsR, LrsI, 1, eriR, eriI, 1)
LpqR = LpqI = LrsR = LrsI = None
return eriR + eriI*1j
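# Same density-fitted contraction as get_eri, but the (L|pq) blocks are transformed
# to the supplied MO coefficients on the fly, so only MO-basis integrals are stored.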
def general(mydf, mo_coeffs, kpts=None, compact=True):
if mydf._cderi is None:
mydf.build()
cell = mydf.cell
kptijkl = _format_kpts(kpts)
kpti, kptj, kptk, kptl = kptijkl
if isinstance(mo_coeffs, numpy.ndarray) and mo_coeffs.ndim == 2:
mo_coeffs = (mo_coeffs,) * 4
all_real = not any(numpy.iscomplexobj(mo) for mo in mo_coeffs)
max_memory = max(2000, (mydf.max_memory - lib.current_memory()[0]) * .5)
####################
# gamma point, the integral is real and with s4 symmetry
if abs(kptijkl).sum() < KPT_DIFF_TOL and all_real:
ijmosym, nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1], compact)
klmosym, nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3], compact)
eri_mo = numpy.zeros((nij_pair,nkl_pair))
sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
ijR = klR = None
for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, True):
ijR, klR = _dtrans(LpqR, ijR, ijmosym, moij, ijslice,
LpqR, klR, klmosym, mokl, klslice, sym)
lib.ddot(ijR.T, klR, 1, eri_mo, 1)
LpqR = LpqI = None
return eri_mo
elif (abs(kpti-kptk).sum() < KPT_DIFF_TOL) and (abs(kptj-kptl).sum() < KPT_DIFF_TOL):
mo_coeffs = _mo_as_complex(mo_coeffs)
nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]
        eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex128)
sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
zij = zkl = None
for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
buf = LpqR+LpqI*1j
zij, zkl = _ztrans(buf, zij, moij, ijslice,
buf, zkl, mokl, klslice, sym)
lib.dot(zij.T, zkl, 1, eri_mo, 1)
LpqR = LpqI = buf = None
return eri_mo
####################
# (kpt) i == j == k == l != 0
# (kpt) i == l && j == k && i != j && j != k =>
#
elif (abs(kpti-kptl).sum() < KPT_DIFF_TOL) and (abs(kptj-kptk).sum() < KPT_DIFF_TOL):
mo_coeffs = _mo_as_complex(mo_coeffs)
nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
nlk_pair, molk, lkslice = _conc_mos(mo_coeffs[3], mo_coeffs[2])[1:]
        eri_mo = numpy.zeros((nij_pair,nlk_pair), dtype=numpy.complex128)
sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[3]) and
iden_coeffs(mo_coeffs[1], mo_coeffs[2]))
zij = zlk = None
for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
buf = LpqR+LpqI*1j
zij, zlk = _ztrans(buf, zij, moij, ijslice,
buf, zlk, molk, lkslice, sym)
lib.dot(zij.T, zlk.conj(), 1, eri_mo, 1)
LpqR = LpqI = buf = None
nmok = mo_coeffs[2].shape[1]
nmol = mo_coeffs[3].shape[1]
eri_mo = lib.transpose(eri_mo.reshape(-1,nmol,nmok), axes=(0,2,1))
return eri_mo.reshape(nij_pair,nlk_pair)
####################
# aosym = s1, complex integrals
#
# If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave
# vector symmetry. k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.
# So kptl/b - kptk/b must be -1 < k/b < 1. => kptl == kptk
#
else:
mo_coeffs = _mo_as_complex(mo_coeffs)
nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]
        eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex128)
zij = zkl = None
for (LpqR, LpqI), (LrsR, LrsI) in \
lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False),
mydf.sr_loop(kptijkl[2:], max_memory, False)):
zij, zkl = _ztrans(LpqR+LpqI*1j, zij, moij, ijslice,
LrsR+LrsI*1j, zkl, mokl, klslice, False)
lib.dot(zij.T, zkl, 1, eri_mo, 1)
LpqR = LpqI = LrsR = LrsI = None
return eri_mo
def _mo_as_complex(mo_coeffs):
mos = []
for c in mo_coeffs:
if c.dtype == numpy.float64:
mos.append(c+0j)
else:
mos.append(c)
return mos
def _dtrans(Lpq, Lij, ijmosym, moij, ijslice,
Lrs, Lkl, klmosym, mokl, klslice, sym):
Lij = _ao2mo.nr_e2(Lpq, moij, ijslice, aosym='s2', mosym=ijmosym, out=Lij)
if sym:
Lkl = Lij
else:
Lkl = _ao2mo.nr_e2(Lrs, mokl, klslice, aosym='s2', mosym=klmosym, out=Lkl)
return Lij, Lkl
def _ztrans(Lpq, zij, moij, ijslice, Lrs, zkl, mokl, klslice, sym):
tao = []
ao_loc = None
zij = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=zij)
if sym:
zkl = zij
else:
zkl = _ao2mo.r_e2(Lrs, mokl, klslice, tao, ao_loc, out=zkl)
return zij, zkl
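# Minimal usage sketch (illustrative only; the cell setup below is an assumption,
# not part of this module):
#
#   from pyscf.pbc import gto, df
#   cell = gto.Cell()
#   cell.atom = 'He 0 0 0'
#   cell.basis = 'gth-szv'
#   cell.a = numpy.eye(3) * 4.0
#   cell.build()
#   mydf = df.DF(cell)
#   eri = get_eri(mydf)        # gamma-point ERIs with s4 symmetry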
|
the-stack_0_25511
|
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')  # select the backend before pyplot is imported
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
labels = ["cpp","pure","selectcase","bit","cctype"]
funcs = ['is_control','is_printable','is_white','is_blank','is_graphical',
'is_punctuation','is_alphanum','is_alpha','is_upper','is_lower',
'is_digit','is_hex_digit']
def main():
rdict = {}
for r in labels:
fname = "./results/results_{}.txt".format(r)
res = np.loadtxt(fname)
res[:,1:] *= 1.e-9
rdict[r] = res
print(rdict["cpp"])
print(rdict["cpp"])
x = 1.5*np.arange(len(funcs))
width = 0.2
fig, ax = plt.subplots(figsize=(10,5))
    ax.bar(x-2*width, rdict["cpp"][4,1:], width, label="C++")
    ax.bar(x-width, rdict["pure"][4,1:], width, label="fortran_ascii_pure")
    ax.bar(x, rdict["selectcase"][4,1:], width, label="fortran_ascii_selectcase")
    ax.bar(x+width, rdict["bit"][4,1:], width, label="fortran_ascii_bit")
    ax.bar(x+2*width, rdict["cctype"][4,1:], width, label="fortran_ascii_cctype")
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Billion Characters per Second')
ax.set_xticks(x)
ax.set_xticklabels(funcs,rotation=45)
ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0.)
fig.tight_layout()
fig.savefig("speed_vs_method.png",dpi=300)
cmap = plt.cm.cool
norm = mcolors.Normalize(vmin=1, vmax=6)
fig2, ax2 = plt.subplots(figsize=(8,5))
    ax2.bar(x-5*width/2, rdict["pure"][0,1:], width, color=cmap(norm(1)), label="1000")
    ax2.bar(x-3*width/2, rdict["pure"][1,1:], width, color=cmap(norm(2)), label="10000")
    ax2.bar(x-width/2, rdict["pure"][2,1:], width, color=cmap(norm(3)), label="100000")
    ax2.bar(x+width/2, rdict["pure"][3,1:], width, color=cmap(norm(4)), label="1000000")
    ax2.bar(x+3*width/2, rdict["pure"][4,1:], width, color=cmap(norm(5)), label="10000000")
    ax2.bar(x+5*width/2, rdict["pure"][5,1:], width, color=cmap(norm(6)), label="100000000")
# Add some text for labels, title and custom x-axis tick labels, etc.
ax2.set_ylabel('Billion Characters per Second')
ax2.set_xticks(x)
ax2.set_xticklabels(funcs,rotation=45)
ax2.set_title("fortran_ascii_pure")
ax2.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0.)
fig2.tight_layout()
fig2.savefig("speed_vs_nchar.png",dpi=300)
plt.show()
if __name__ == '__main__':
main()
|
the-stack_0_25512
|
import pexpect
from subprocess import check_output
from typing import Optional
from hwtBuildsystem.common.cmdResult import TclCmdResult
from hwtBuildsystem.common.executor import ToolExecutor
from hwtBuildsystem.yosys.api.project import YosysProject
from hwtBuildsystem.yosys.config import YosysConfig
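# Drives an interactive yosys session through pexpect: commands are written to the
# prompt one at a time and the captured output is wrapped in TclCmdResult objects.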
class YosysExecutor(ToolExecutor):
def __init__(self, execFile=None,
timeout=6 * 60 * 60,
logComunication=False,
workerCnt:Optional[int]=None):
super(YosysExecutor, self).__init__(workerCnt)
if execFile is None:
execFile = YosysConfig.getExec()
self.execFile = execFile
self.proc = None
self.timeout = timeout
self.logComunication = logComunication
self.encoding = 'ASCII'
def getVersion(self):
return check_output([self.execFile, '-V']).decode()
def __enter__(self) -> 'YosysExecutor':
cmd = []
self.proc = pexpect.spawn(self.execFile, cmd)
self.firstCmd = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
p = self.proc
if p.isalive():
p.sendline('exit')
p.expect("exit", timeout=self.timeout) # block while cmd ends
if p.isalive():
p.terminate()
def exeCmd(self, cmd) -> TclCmdResult:
p = self.proc
if self.firstCmd:
p.expect("yosys>", timeout=self.timeout) # block while command line init
self.firstCmd = False
if self.logComunication:
print(cmd)
p.sendline(cmd)
# @attention: there is timing issue in reading from tty next command returns corrupted line
p.readline() # read cmd from tty
# p.expect(cmd, timeout=self.timeout)
try:
p.expect("yosys>", timeout=self.timeout) # block while cmd ends
except pexpect.EOF:
pass
t = p.before.decode(self.encoding)
if self.logComunication:
print(t, end="")
res = TclCmdResult.fromStdoutStr(cmd, t)
res.raiseOnErrors()
return res
def project(self, root, name) -> YosysProject:
return YosysProject(self, root, name)
if __name__ == "__main__":
with YosysExecutor(logComunication=True) as q:
print(q.getVersion())
h = q.exeCmd('help')
print(h.resultText)
err = q.exeCmd("xyz")
print('finished')
|
the-stack_0_25514
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class OctaveStatistics(OctavePackage, SourceforgePackage):
"""Additional statistics functions for Octave."""
homepage = "https://octave.sourceforge.io/statistics/"
sourceforge_mirror_path = "octave/statistics-1.4.2.tar.gz"
version('1.4.2', sha256='7976814f837508e70367548bfb0a6d30aa9e447d4e3a66914d069efb07876247')
depends_on('octave-io')
extends('[email protected]:')
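    # Installed like any other Spack package, e.g.: spack install octave-statistics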
|