| column | dtype | range / values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 1-1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | lengths 3-239 |
| max_stars_repo_name | string | lengths 5-130 |
| max_stars_repo_head_hexsha | string | lengths 40-78 |
| max_stars_repo_licenses | sequence | lengths 1-10 |
| max_stars_count | int64 | 1-191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24, nullable |
| max_issues_repo_path | string | lengths 3-239 |
| max_issues_repo_name | string | lengths 5-130 |
| max_issues_repo_head_hexsha | string | lengths 40-78 |
| max_issues_repo_licenses | sequence | lengths 1-10 |
| max_issues_count | int64 | 1-67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24, nullable |
| max_forks_repo_path | string | lengths 3-239 |
| max_forks_repo_name | string | lengths 5-130 |
| max_forks_repo_head_hexsha | string | lengths 40-78 |
| max_forks_repo_licenses | sequence | lengths 1-10 |
| max_forks_count | int64 | 1-105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24, nullable |
| content | string | lengths 1-1.03M |
| avg_line_length | float64 | 1-958k |
| max_line_length | int64 | 1-1.03M |
| alphanum_fraction | float64 | 0-1 |
793e7fffbdd5c89651bd754571e1af6c08b7b976 | 820 | py | Python | add-binary/solution.py | LYZhelloworld/Leetcode | 1ef3c8d3a75a20755e7474427224ed8757f97932 | [
"MIT"
] | null | null | null | add-binary/solution.py | LYZhelloworld/Leetcode | 1ef3c8d3a75a20755e7474427224ed8757f97932 | [
"MIT"
] | null | null | null | add-binary/solution.py | LYZhelloworld/Leetcode | 1ef3c8d3a75a20755e7474427224ed8757f97932 | [
"MIT"
] | null | null | null |
class Solution:
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
x = [i == '1' for i in a[::-1]]
y = [i == '1' for i in b[::-1]]
r = []
carry = False
if len(x) > len(y):
y += [False] * (len(x) - len(y))
else:
x += [False] * (len(y) - len(x))
for d in range(len(x)):
s, carry = self.full_adder(x[d], y[d], carry)
r += [s]
if carry:
r += [True]
r.reverse()
return ''.join(['1' if i else '0' for i in r])
def half_adder(self, a, b):
return a ^ b, a & b
def full_adder(self, a, b, cin):
s1, c1 = self.half_adder(a, b)
s2, c2 = self.half_adder(s1, cin)
return s2, c1 | c2
| 24.117647 | 57 | 0.4 |
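A minimal usage sketch for the solution in the record above; the test values are illustrative, and the class is simply the one defined inline in the content field.
solution = Solution()
# 3 + 1 = 4 -> "100"; the pair of half-adders above produces the carry chain.
assert solution.addBinary("11", "1") == "100"
# 10 + 11 = 21 -> "10101"
assert solution.addBinary("1010", "1011") == "10101"
print(solution.addBinary("0", "0"))  # "0"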
793e81ebd98b987ef5fea1e69b37fa789775b1f4 | 7,321 | py | Python | dp_multiq/csmooth.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | dp_multiq/csmooth.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | dp_multiq/csmooth.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CDP smooth sensitivity method for computing differentially private quantiles.
The smooth sensitivity method is described in
"Smooth Sensitivity and Sampling in Private Data Analysis" by Nissim,
Raskhodnikova, and Smith
(https://cs-people.bu.edu/ads22/pubs/NRS07/NRS07-full-draft-v1.pdf). Details for
the CDP noise distribution appear in Section 3.1 of "Average-Case Averages:
Private Algorithms for Smooth Sensitivity and Mean Estimation" by Bun and
Steinke (NeurIPS 2019). Details for optimizing t, s, and sigma appear in
Section 3.1.1 of the same paper.
"""
import numpy as np
from dp_multiq import base
from dp_multiq import smooth_utils
def compute_triples(eps, ts):
"""Returns triples of form (t, log(s), sigma) for hyperparameter optimization.
Args:
eps: Privacy parameter epsilon.
ts: Array of possible smooth sensitivity parameters.
"""
triples = np.empty([len(ts), 3])
for t_idx in range(len(ts)):
t = ts[t_idx]
triples[t_idx, 0] = t
sigma = opt_sigma(eps, t)
triples[t_idx, 2] = sigma
triples[t_idx, 1] = -1.5 * (sigma**2) + np.log(eps - (t / sigma))
return triples
def opt_sigma(eps, t):
"""Returns optimal sigma as detailed in Section 3.1.1 of Bun and Steinke.
Args:
eps: Privacy parameter epsilon.
t: Smooth sensitivity parameter.
"""
return np.real(np.roots([5 * eps / t, -5, 0, -1])[0])
def lln(sigma):
"""Returns a sample from the Laplace Log-Normal distribution.
Args:
sigma: Sigma parameter for the Laplace Log-Normal distribution.
"""
return np.random.laplace() * np.exp(sigma * np.random.normal())
def csmooth(sorted_data, data_low, data_high, qs, divided_eps, ts):
"""Returns eps^2/2-CDP quantile estimates for qs.
Args:
sorted_data: Array of data points sorted in increasing order.
data_low: Lower limit for any differentially private quantile output value.
data_high: Upper limit for any differentially private quantile output value.
qs: Increasing array of quantiles in [0,1].
divided_eps: Privacy parameter epsilon. Assumes eps has already been divided
so that the overall desired privacy guarantee is achieved.
ts: Array of smooth sensitivity parameters, one for each q in qs.
"""
sorted_data = np.clip(sorted_data, data_low, data_high)
o = np.empty(len(qs))
triples = compute_triples(divided_eps, ts)
for i in range(len(qs)):
t, log_s, sigma = triples[i]
true_quantile_idx = base.quantile_index(len(sorted_data), qs[i])
true_quantile_value = sorted_data[true_quantile_idx]
laplace_log_normal_noise = lln(sigma)
log_sensitivity = smooth_utils.compute_log_smooth_sensitivity(
sorted_data, data_low, data_high, true_quantile_idx, t)
noise = np.sign(laplace_log_normal_noise) * np.exp(
log_sensitivity + np.log(np.abs(laplace_log_normal_noise)) - log_s)
o[i] = true_quantile_value + noise
o = np.clip(o, data_low, data_high)
return np.sort(o)
def log_choose_triple_idx(triples, eps, log_sensitivities):
"""Returns triple (t, log_s, sigma) that minimizes noisy statistic variance.
Args:
triples: Array with entries of form (t, log_s, sigma).
eps: Privacy parameter epsilon.
log_sensitivities: Log(t smooth sensitivity) for each t in triples.
"""
variances = np.empty(len(triples))
for triple_idx in range(len(triples)):
numerator = 2 * (np.exp(2 * log_sensitivities[triple_idx]))
denominator = np.exp(-5 * (triples[triple_idx][2]**2)) * (
(eps - (triples[triple_idx][0] / triples[triple_idx][2]))**2)
variances[triple_idx] = numerator / denominator
return np.argmin(variances)
def csmooth_tune_and_return_ts(sorted_data, data_low, data_high, qs,
divided_eps, log_t_low, log_t_high, num_t):
"""Returns ts minimizing variance for data and each q under ~eps^2/2-CDP.
Args:
sorted_data: Array of data points sorted in increasing order.
data_low: Lower limit for any differentially private quantile output value.
data_high: Upper limit for any differentially private quantile output value.
qs: Increasing array of quantiles in [0,1].
divided_eps: Privacy parameter epsilon. Assumes eps has already been divided
so that the overall desired privacy guarantee is achieved.
log_t_low: Tuning range for t has lower bound 10^(log_t_low).
log_t_high: Tuning range for t has upper bound 10^(log_t_high).
num_t: Number of logarithmically spaced t used to populate tuning range.
"""
sorted_data = np.clip(sorted_data, data_low, data_high)
triples = compute_triples(divided_eps,
np.logspace(log_t_low, log_t_high, num_t))
num_qs = len(qs)
ts = np.empty(num_qs)
for i in range(num_qs):
true_quantile_idx = base.quantile_index(len(sorted_data), qs[i])
log_sensitivities = np.zeros(len(triples))
for triple_idx in range(len(triples)):
t = triples[triple_idx, 0]
log_sensitivities[
triple_idx] = smooth_utils.compute_log_smooth_sensitivity(
sorted_data, data_low, data_high, true_quantile_idx, t)
ts[i] = triples[log_choose_triple_idx(triples, divided_eps,
log_sensitivities)][0]
return ts
def csmooth_tune_t_experiment(eps, num_samples, num_trials, num_quantiles_range,
data_low, data_high, log_t_low, log_t_high,
num_t):
"""Returns 2-D array of ts, tuned for each (num_quantiles, quantile) pair.
Args:
eps: Privacy parameter epsilon.
num_samples: Number of standard Gaussian samples to draw for each trial.
num_trials: Number of trials to average.
num_quantiles_range: Array of number of quantiles to estimate.
data_low: Lower bound for data, used by CSmooth.
data_high: Upper bound for data, used by CSmooth.
log_t_low: Tuning range for t has lower bound 10^(log_t_low).
log_t_high: Tuning range for t has upper bound 10^(log_t_high).
num_t: Number of logarithmically spaced t used to populate tuning range.
"""
ts = [np.zeros(num_quantiles) for num_quantiles in num_quantiles_range]
num_quantiles_idx = 0
for num_quantiles_idx in range(len(num_quantiles_range)):
num_quantiles = num_quantiles_range[num_quantiles_idx]
divided_eps = eps / np.sqrt(num_quantiles)
for _ in range(num_trials):
sorted_data = base.gen_gaussian(num_samples, 0, 1)
qs = np.linspace(0, 1, num_quantiles + 2)[1:-1]
ts[num_quantiles_idx] += csmooth_tune_and_return_ts(
sorted_data, data_low, data_high, qs, divided_eps, log_t_low,
log_t_high, num_t) / num_trials
print("Finished num_quantiles: {}".format(num_quantiles))
return ts
| 41.129213 | 80 | 0.714656 |
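A hedged usage sketch for the module above: it assumes the dp_multiq package (including base and smooth_utils) is importable, and it only wires together the functions whose signatures appear in the record; the synthetic data, clipping bounds, and epsilon split are illustrative.
import numpy as np
from dp_multiq import csmooth  # assumes the package layout shown in the record

rng = np.random.default_rng(0)
sorted_data = np.sort(rng.normal(size=1000))   # synthetic standard-Gaussian sample
qs = np.array([0.25, 0.5, 0.75])               # quantiles to release
divided_eps = 1.0 / np.sqrt(len(qs))           # naive split of an overall eps = 1

# Tune the smooth-sensitivity parameter t per quantile, then compute the
# eps^2/2-CDP quantile estimates with the tuned values.
ts = csmooth.csmooth_tune_and_return_ts(
    sorted_data, -5.0, 5.0, qs, divided_eps,
    log_t_low=-2, log_t_high=0, num_t=10)
estimates = csmooth.csmooth(sorted_data, -5.0, 5.0, qs, divided_eps, ts)
print(estimates)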
793e8273f88b0f2b7ce110aa9b57d8e53036e64a | 11,313 | py | Python | code/python/Publisher/v3/fds/sdk/Publisher/model/account_directories_root.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/Publisher/v3/fds/sdk/Publisher/model/account_directories_root.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/Publisher/v3/fds/sdk/Publisher/model/account_directories_root.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null |
"""
Publisher API
Allow clients to fetch Publisher Analytics through APIs. # noqa: E501
The version of the OpenAPI document: 3
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.Publisher.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.Publisher.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.Publisher.model.account_directories import AccountDirectories
globals()['AccountDirectories'] = AccountDirectories
class AccountDirectoriesRoot(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (AccountDirectories,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AccountDirectoriesRoot - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (AccountDirectories): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AccountDirectoriesRoot - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (AccountDirectories): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.015209 | 121 | 0.574472 |
793e8409b2a4d12488dff56ea216a81b3efed7f2 | 125 | py | Python | 6_command/no_command.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 6_command/no_command.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 6_command/no_command.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null |
from command import Command
class NoCommand(Command):
def excuse(self):
pass
def undo(self):
pass
| 12.5 | 27 | 0.616 |
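A small illustrative sketch of the null-object role this class plays in the Command pattern; the RemoteSlot invoker below is hypothetical (it is not part of the record), and the method name excuse is kept exactly as spelled above, although the canonical Command interface usually calls it execute.
class RemoteSlot:
    """Hypothetical invoker whose slot defaults to NoCommand instead of None."""

    def __init__(self):
        self.command = NoCommand()   # null object: pressing an empty slot is a no-op

    def set_command(self, command):
        self.command = command

    def press(self):
        self.command.excuse()        # no None check needed anywhere


slot = RemoteSlot()
slot.press()   # silently does nothing rather than raising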
793e845f73958f35d2ea7faa1847f8a8ad73e2ad | 11,688 | py | Python | aiida/tools/graph/graph_traversers.py | HaoZeke/aiida-core | 1a4cada67fe36353326dcebfe888ebc01a6c5b7b | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/tools/graph/graph_traversers.py | HaoZeke/aiida-core | 1a4cada67fe36353326dcebfe888ebc01a6c5b7b | [
"MIT",
"BSD-3-Clause"
] | 2 | 2019-03-06T11:23:42.000Z | 2020-03-09T09:34:07.000Z | aiida/tools/graph/graph_traversers.py | lorisercole/aiida-core | 84c2098318bf234641219e55795726f99dc25a16 | [
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for functions to traverse AiiDA graphs."""
from numpy import inf
from aiida.common.links import GraphTraversalRules, LinkType
def get_nodes_delete(starting_pks, get_links=False, **kwargs):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified authorized
links and directions for deletion.
:type starting_pks: list or tuple or set
:param starting_pks: Contains the (valid) pks of the starting nodes.
:param bool get_links:
Pass True to also return the links between all nodes (found + initial).
:param bool create_forward: will traverse CREATE links in the forward direction.
:param bool call_calc_forward: will traverse CALL_CALC links in the forward direction.
:param bool call_work_forward: will traverse CALL_WORK links in the forward direction.
"""
traverse_links = validate_traversal_rules(GraphTraversalRules.DELETE, **kwargs)
traverse_output = traverse_graph(
starting_pks,
get_links=get_links,
links_forward=traverse_links['forward'],
links_backward=traverse_links['backward']
)
function_output = {
'nodes': traverse_output['nodes'],
'links': traverse_output['links'],
'rules': traverse_links['rules_applied']
}
return function_output
def get_nodes_export(starting_pks, get_links=False, **kwargs):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified authorized
links and directions for export. This will also return the links and
the traversal rules parsed.
:type starting_pks: list or tuple or set
:param starting_pks: Contains the (valid) pks of the starting nodes.
:param bool get_links:
Pass True to also return the links between all nodes (found + initial).
:param bool input_calc_forward: will traverse INPUT_CALC links in the forward direction.
:param bool create_backward: will traverse CREATE links in the backward direction.
:param bool return_backward: will traverse RETURN links in the backward direction.
:param bool input_work_forward: will traverse INPUT_WORK links in the forward direction.
:param bool call_calc_backward: will traverse CALL_CALC links in the backward direction.
:param bool call_work_backward: will traverse CALL_WORK links in the backward direction.
"""
traverse_links = validate_traversal_rules(GraphTraversalRules.EXPORT, **kwargs)
traverse_output = traverse_graph(
starting_pks,
get_links=get_links,
links_forward=traverse_links['forward'],
links_backward=traverse_links['backward']
)
function_output = {
'nodes': traverse_output['nodes'],
'links': traverse_output['links'],
'rules': traverse_links['rules_applied']
}
return function_output
def validate_traversal_rules(ruleset=GraphTraversalRules.DEFAULT, **kwargs):
"""
Validates the keywords with a ruleset template and returns a parsed dictionary
ready to be used.
:type ruleset: :py:class:`aiida.common.links.GraphTraversalRules`
:param ruleset: Ruleset template used to validate the set of rules.
:param bool input_calc_forward: will traverse INPUT_CALC links in the forward direction.
:param bool input_calc_backward: will traverse INPUT_CALC links in the backward direction.
:param bool create_forward: will traverse CREATE links in the forward direction.
:param bool create_backward: will traverse CREATE links in the backward direction.
:param bool return_forward: will traverse RETURN links in the forward direction.
:param bool return_backward: will traverse RETURN links in the backward direction.
:param bool input_work_forward: will traverse INPUT_WORK links in the forward direction.
:param bool input_work_backward: will traverse INPUT_WORK links in the backward direction.
:param bool call_calc_forward: will traverse CALL_CALC links in the forward direction.
:param bool call_calc_backward: will traverse CALL_CALC links in the backward direction.
:param bool call_work_forward: will traverse CALL_WORK links in the forward direction.
:param bool call_work_backward: will traverse CALL_WORK links in the backward direction.
"""
from aiida.common import exceptions
if not isinstance(ruleset, GraphTraversalRules):
raise TypeError(
'ruleset input must be of type aiida.common.links.GraphTraversalRules\ninstead, it is: {}'.format(
type(ruleset)
)
)
rules_applied = {}
links_forward = []
links_backward = []
for name, rule in ruleset.value.items():
follow = rule.default
if name in kwargs:
if not rule.toggleable:
raise ValueError('input rule {} is not toggleable for ruleset {}'.format(name, ruleset))
follow = kwargs.pop(name)
if not isinstance(follow, bool):
raise ValueError('the value of rule {} must be boolean, but it is: {}'.format(name, follow))
if follow:
if rule.direction == 'forward':
links_forward.append(rule.link_type)
elif rule.direction == 'backward':
links_backward.append(rule.link_type)
else:
raise exceptions.InternalError(
'unrecognized direction `{}` for graph traversal rule'.format(rule.direction)
)
rules_applied[name] = follow
if kwargs:
error_message = 'unrecognized keywords: {}'.format(', '.join(kwargs.keys()))
raise exceptions.ValidationError(error_message)
valid_output = {
'rules_applied': rules_applied,
'forward': links_forward,
'backward': links_backward,
}
return valid_output
def traverse_graph(starting_pks, max_iterations=None, get_links=False, links_forward=(), links_backward=()):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified links.
Optionally, it may also return the links that connect these nodes.
:type starting_pks: list or tuple or set
:param starting_pks: Contains the (valid) pks of the starting nodes.
:type max_iterations: int or None
:param max_iterations:
The number of iterations to apply the set of rules (a value of 'None' will
iterate until no new nodes are added).
:param bool get_links:
Pass True to also return the links between all nodes (found + initial).
:type links_forward: aiida.common.links.LinkType
:param links_forward:
List with all the links that should be traversed in the forward direction.
:type links_backward: aiida.common.links.LinkType
:param links_backward:
List with all the links that should be traversed in the backward direction.
"""
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
from aiida import orm
from aiida.tools.graph.age_entities import Basket
from aiida.tools.graph.age_rules import UpdateRule, RuleSequence, RuleSaveWalkers, RuleSetWalkers
from aiida.common import exceptions
if max_iterations is None:
max_iterations = inf
elif not (isinstance(max_iterations, int) or max_iterations is inf):
raise TypeError('Max_iterations has to be an integer or infinity')
linktype_list = []
for linktype in links_forward:
if not isinstance(linktype, LinkType):
raise TypeError('links_forward should contain links, but one of them is: {}'.format(type(linktype)))
linktype_list.append(linktype.value)
filters_forwards = {'type': {'in': linktype_list}}
linktype_list = []
for linktype in links_backward:
if not isinstance(linktype, LinkType):
raise TypeError('links_backward should contain links, but one of them is: {}'.format(type(linktype)))
linktype_list.append(linktype.value)
filters_backwards = {'type': {'in': linktype_list}}
if not isinstance(starting_pks, (list, set, tuple)):
raise TypeError('starting_pks must be of type list, set or tuple\ninstead, it is {}'.format(type(starting_pks)))
if not starting_pks:
if get_links:
output = {'nodes': set(), 'links': set()}
else:
output = {'nodes': set(), 'links': None}
return output
if any([not isinstance(pk, int) for pk in starting_pks]):
raise TypeError('one of the starting_pks is not of type int:\n {}'.format(starting_pks))
operational_set = set(starting_pks)
query_nodes = orm.QueryBuilder()
query_nodes.append(orm.Node, project=['id'], filters={'id': {'in': operational_set}})
existing_pks = set(query_nodes.all(flat=True))
missing_pks = operational_set.difference(existing_pks)
if missing_pks:
raise exceptions.NotExistent(
'The following pks are not in the database and must be pruned before this call: {}'.format(missing_pks)
)
rules = []
basket = Basket(nodes=operational_set)
# When max_iterations is finite, the order of traversal may affect the result
    # (it's not the same to first go backwards and then forwards as vice-versa)
# In order to make it order-independent, the result of the first operation needs
# to be stashed and the second operation must be performed only on the nodes
    # that were already in the set at the beginning of the iteration: this way, both
# rules are applied on the same set of nodes and the order doesn't matter.
    # The way to do this is to save and set the walkers at the right moments only
# when both forwards and backwards rules are present.
if links_forward and links_backward:
stash = basket.get_template()
rules += [RuleSaveWalkers(stash)]
if links_forward:
query_outgoing = orm.QueryBuilder()
query_outgoing.append(orm.Node, tag='sources')
query_outgoing.append(orm.Node, edge_filters=filters_forwards, with_incoming='sources')
rule_outgoing = UpdateRule(query_outgoing, max_iterations=1, track_edges=get_links)
rules += [rule_outgoing]
if links_forward and links_backward:
rules += [RuleSetWalkers(stash)]
if links_backward:
query_incoming = orm.QueryBuilder()
query_incoming.append(orm.Node, tag='sources')
query_incoming.append(orm.Node, edge_filters=filters_backwards, with_outgoing='sources')
rule_incoming = UpdateRule(query_incoming, max_iterations=1, track_edges=get_links)
rules += [rule_incoming]
rulesequence = RuleSequence(rules, max_iterations=max_iterations)
results = rulesequence.run(basket)
output = {}
output['nodes'] = results.nodes.keyset
output['links'] = None
if get_links:
output['links'] = results['nodes_nodes'].keyset
return output
| 42.194946 | 120 | 0.683864 |
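A hedged usage sketch for the traversal helpers above. It assumes a configured AiiDA profile can be loaded and that the example pks exist in its database; the pk values and rule toggles are illustrative, while the function signatures and returned keys match the record.
from aiida import load_profile
from aiida.common.links import LinkType
from aiida.tools.graph.graph_traversers import get_nodes_delete, traverse_graph

load_profile()  # assumes a default AiiDA profile is configured

# Nodes that a delete starting from pks 10 and 11 would touch, also
# following CREATE links forward and returning the traversed links.
result = get_nodes_delete([10, 11], get_links=True, create_forward=True)
print(result["nodes"], result["rules"])

# Lower-level traversal: INPUT_CALC forward and CREATE backward only.
out = traverse_graph(
    [10, 11],
    links_forward=[LinkType.INPUT_CALC],
    links_backward=[LinkType.CREATE],
)
print(out["nodes"])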
793e84e38db8b2f0f59fe0fbf6ac97860583e373 | 3,245 | py | Python | predict.py | jhonatantirado/CheXNet-Keras | 264fd4ba889fe8d9f5dee48b4ba3f7c0018aa393 | [
"MIT"
] | null | null | null | predict.py | jhonatantirado/CheXNet-Keras | 264fd4ba889fe8d9f5dee48b4ba3f7c0018aa393 | [
"MIT"
] | null | null | null | predict.py | jhonatantirado/CheXNet-Keras | 264fd4ba889fe8d9f5dee48b4ba3f7c0018aa393 | [
"MIT"
] | null | null | null |
from keras.models import load_model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
import os
from configparser import ConfigParser
from models.keras import ModelFactory
import tensorflow as tf
def load_image(img_path, show=False):
img = image.load_img(img_path, target_size=(224, 224))
img_tensor = image.img_to_array(img) # (height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
img_tensor /= 255. # imshow expects values in the range [0, 1]
if show:
plt.imshow(img_tensor[0])
plt.axis('off')
plt.show()
return img_tensor
if __name__ == "__main__":
# parser config
config_file = "./config.ini"
cp = ConfigParser()
cp.read(config_file)
# default config
output_dir = cp["DEFAULT"].get("output_dir")
base_model_name = cp["DEFAULT"].get("base_model_name")
class_names = cp["DEFAULT"].get("class_names").split(",")
image_source_dir = cp["DEFAULT"].get("image_source_dir")
# train config
image_dimension = cp["TRAIN"].getint("image_dimension")
# test config
batch_size = cp["TEST"].getint("batch_size")
test_steps = cp["TEST"].get("test_steps")
use_best_weights = cp["TEST"].getboolean("use_best_weights")
# parse weights file path
output_weights_name = cp["TRAIN"].get("output_weights_name")
weights_path = os.path.join(output_dir, output_weights_name)
best_weights_path = os.path.join(output_dir, f"best_{output_weights_name}")
print("** load model **")
if use_best_weights:
print("** use best weights **")
model_weights_path = best_weights_path
else:
print("** use last weights **")
model_weights_path = weights_path
model_factory = ModelFactory()
model = model_factory.get_model(
class_names,
model_name=base_model_name,
use_base_weights=False,
weights_path=model_weights_path)
# image path
img_path_001 = 'starter_images/00001698_000.PNG'
img_path_002 = 'starter_images/00003728_000.PNG'
img_path_003 = 'starter_images/00005318_000.PNG'
# load a single image
new_image_001 = load_image(img_path_001)
new_image_002 = load_image(img_path_002)
new_image_003 = load_image(img_path_003)
# check prediction
pred_001 = model.predict(new_image_001)
pred_002 = model.predict(new_image_002)
pred_003 = model.predict(new_image_003)
print (pred_001)
print (pred_002)
print (pred_003)
result_001 = tf.argmax(pred_001, 1)
result_002 = tf.argmax(pred_002, 1)
result_003 = tf.argmax(pred_003, 1)
predicted_class_001 = tf.keras.backend.eval(result_001)
predicted_class_002 = tf.keras.backend.eval(result_002)
predicted_class_003 = tf.keras.backend.eval(result_003)
print (predicted_class_001)
print (predicted_class_002)
print (predicted_class_003)
print (class_names[predicted_class_001[0]])
print (class_names[predicted_class_002[0]])
print (class_names[predicted_class_003[0]])
| 33.112245 | 183 | 0.692142 |
793e86d829c9f48688ff289a9a6698a47d8a31e1 | 216 | py | Python | Python/Bank/cliente.py | GabrielRenan/Projects-to-Learn | 38bd7e1dfa6ff9ef5ae0e7e5bacaaf23147d71c0 | [
"MIT"
] | null | null | null | Python/Bank/cliente.py | GabrielRenan/Projects-to-Learn | 38bd7e1dfa6ff9ef5ae0e7e5bacaaf23147d71c0 | [
"MIT"
] | null | null | null | Python/Bank/cliente.py | GabrielRenan/Projects-to-Learn | 38bd7e1dfa6ff9ef5ae0e7e5bacaaf23147d71c0 | [
"MIT"
] | null | null | null |
class Cliente:
def __init__(self,nome):
self.__nome = nome
@property
def nome(self):
return self.__nome.title()
@nome.setter
def nome(self, nome):
        self.__nome = nome
| 18 | 34 | 0.574074 |
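A tiny usage sketch for the class above; the sample names are illustrative.
cliente = Cliente('gabriel renan')
print(cliente.nome)    # 'Gabriel Renan' -- the property title-cases on read
cliente.nome = 'maria'
print(cliente.nome)    # 'Maria'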
793e870b1b532063020eb34c42baea04b0444ddd | 369 | py | Python | wagtail/wagtailforms/urls.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | 1 | 2019-11-06T10:51:42.000Z | 2019-11-06T10:51:42.000Z | wagtail/wagtailforms/urls.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailforms/urls.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | 2 | 2017-08-08T01:39:02.000Z | 2018-05-06T06:16:10.000Z |
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from wagtail.wagtailforms import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^submissions/(\d+)/$', views.list_submissions, name='list_submissions'),
url(r'^submissions/(\d+)/delete/$', views.delete_submissions, name='delete_submissions')
]
| 30.75 | 92 | 0.731707 |
793e87565395cde7d73093136d453cee31661aa7 | 14,048 | py | Python | docs/conf.py | zhengknight/tensorpack | 726747313fb2f189dd195d32087897b16a23be0a | [
"Apache-2.0"
] | 1 | 2019-05-07T15:23:33.000Z | 2019-05-07T15:23:33.000Z | docs/conf.py | zhengknight/tensorpack | 726747313fb2f189dd195d32087897b16a23be0a | [
"Apache-2.0"
] | null | null | null | docs/conf.py | zhengknight/tensorpack | 726747313fb2f189dd195d32087897b16a23be0a | [
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# flake8: noqa
# tensorpack documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 27 01:41:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re
import mock
import inspect
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
os.environ['DOC_BUILDING'] = '1'
ON_RTD = (os.environ.get('READTHEDOCS') == 'True')
MOCK_MODULES = ['tabulate', 'h5py',
'cv2', 'zmq', 'lmdb',
'sklearn', 'sklearn.datasets',
'scipy', 'scipy.misc', 'scipy.io',
'tornado', 'tornado.concurrent',
'horovod', 'horovod.tensorflow',
'pyarrow',
'subprocess32', 'functools32']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock(name=mod_name)
sys.modules['cv2'].__version__ = '3.2.1' # fake version
import tensorpack
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
#'sphinx.ext.autosectionlabel',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
if ON_RTD:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# to support markdown
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tensorpack'
copyright = u'2015 - 2018, Yuxin Wu, et al.'
author = u'Yuxin Wu, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = tensorpack.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# 'tensorpack.' prefix was removed by js
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tensorpack.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# avoid li fonts being larger
# TODO but li indices fonts are still larger
html_compact_lists = False
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tensorpackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tensorpack.tex', u'tensorpack documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorpack', u'tensorpack documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tensorpack', u'tensorpack documentation',
author, 'tensorpack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
suppress_warnings = ['image.nonlocal_uri']
#autodoc_member_order = 'bysource'
def process_signature(app, what, name, obj, options, signature,
return_annotation):
if signature:
# replace Mock function names
signature = re.sub('<Mock name=\'([^\']+)\'.*>', '\g<1>', signature)
signature = re.sub('tensorflow', 'tf', signature)
# add scope name to layer signatures:
if hasattr(obj, 'use_scope') and hasattr(obj, 'symbolic_function'):
if obj.use_scope:
signature = signature[0] + 'scope_name, ' + signature[1:]
elif obj.use_scope is None:
signature = signature[0] + '[scope_name,] ' + signature[1:]
# signature: arg list
return signature, return_annotation
_DEPRECATED_NAMES = set([
# deprecated stuff:
'TryResumeTraining',
'QueueInputTrainer',
'SimplePredictBuilder',
'LMDBDataPoint',
'TFRecordData',
'dump_dataflow_to_lmdb',
'dump_dataflow_to_tfrecord',
# renamed stuff:
'DumpTensor',
'DumpParamAsImage',
'StagingInputWrapper',
'PeriodicRunHooks',
'get_nr_gpu',
# deprecated or renamed symbolic code
'ImageSample',
'Deconv2D',
'get_scalar_var', 'psnr',
'prediction_incorrect', 'huber_loss',
# internal only
'apply_default_prefetch',
'average_grads',
'aggregate_grads',
'allreduce_grads',
'PrefetchOnGPUs',
])
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, '__HIDE_SPHINX_DOC__', False):
return True
if name == '__init__':
if obj.__doc__ and skip:
# include_init_with_doc doesn't work well for decorated init
# https://github.com/sphinx-doc/sphinx/issues/4258
return False
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
if name in ['get_data', 'size', 'reset_state']:
# skip these methods with empty docstring
if not obj.__doc__ and inspect.isfunction(obj):
# https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3
cls = getattr(inspect.getmodule(obj),
obj.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if issubclass(cls, tensorpack.DataFlow):
return True
return None
def url_resolver(url):
if '.html' not in url:
return "https://github.com/tensorpack/tensorpack/blob/master/" + url
else:
if ON_RTD:
return "http://tensorpack.readthedocs.io/" + url
else:
return '/' + url
def setup(app):
from recommonmark.transform import AutoStructify
app.connect('autodoc-process-signature', process_signature)
app.connect('autodoc-skip-member', autodoc_skip_member)
app.add_config_value(
'recommonmark_config',
{'url_resolver': url_resolver,
'auto_toc_tree_section': 'Contents',
'enable_math': True,
'enable_inline_math': True,
'enable_eval_rst': True
}, True)
app.add_transform(AutoStructify)
| 32.518519 | 113 | 0.692981 |
793e876da4c6cb416ff291ce78dc2293f7f6312a | 1,123 | py | Python | satflow/run.py | lewtun/satflow | 6a675e4fa921b4dd023361b55cc2a5fa25b8f8ed | [
"MIT"
] | null | null | null | satflow/run.py | lewtun/satflow | 6a675e4fa921b4dd023361b55cc2a5fa25b8f8ed | [
"MIT"
] | null | null | null | satflow/run.py | lewtun/satflow | 6a675e4fa921b4dd023361b55cc2a5fa25b8f8ed | [
"MIT"
] | null | null | null |
import os
os.environ["HYDRA_FULL_ERROR"] = "1"
import dotenv
import hydra
from omegaconf import DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
@hydra.main(config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
# Imports should be nested inside @hydra.main to optimize tab completion
# Read more here: https://github.com/facebookresearch/hydra/issues/934
from satflow.core import utils
from satflow.experiments.pl_train import train
# A couple of optional utilities:
# - disabling python warnings
# - easier access to debug mode
# - forcing debug friendly configuration
# - forcing multi-gpu friendly configuration
# You can safely get rid of this line if you don't want those
utils.extras(config)
#
# Pretty print config using Rich library
if config.get("print_config"):
utils.print_config(config, resolve=True)
# Train model
return train(config)
if __name__ == "__main__":
main()
| 27.390244 | 76 | 0.723063 |
793e87e43756e35447b43ebef01209bf3cb56a58 | 946 | py | Python | ssepaperless/Organizer/urls.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | ssepaperless/Organizer/urls.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | ssepaperless/Organizer/urls.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^(?P<department_id>[0-9]+)/$', views.index2, name='index2'),
url(r'^(?P<department_id>[0-9]+)/Advisor/(?P<advisor_id>[0-9]+)/$' , views.advisorinfo, name= 'advisorinfo'),
url(r'^(?P<department_id>[0-9]+)/Advisor/(?P<advisor_id>[0-9]+)/Students/$', views.detail, name='detail'),
url(r'^(?P<department_id>[0-9]+)/Advisor/(?P<advisor_id>[0-9]+)/Degrees/$', views.advisordegree, name='advisordegree'),
# ex: /polls/5/results/
url(r'^(?P<department_id>[0-9]+)/Degree/(?P<degree_id>[0-9]+)/$', views.degree, name='degree'),
url(r'^(?P<department_id>[0-9]+)/Certificate/(?P<certificate_id>[0-9]+)/$', views.certificate, name='certificate'),
# ex: /polls/5/vote/
url(r'^Degree/(?P<degree_id>[0-9]+)/Courses/(?P<degree_core_course_structure_id>[0-9]+)/$', views.coursedegree, name='coursedegree'),
]
| 59.125 | 137 | 0.634249 |
793e888b5ae5bf4a2f4ef1efef04be3249969d52 | 359 | py | Python | cloudstore/apps/api/migrations/0004_auto_20200823_2216.py | JonasUJ/cloudstore | 33dd9eb2a92c75d4f2034c07dc1c6f1d6d8d845d | [
"MIT"
] | null | null | null | cloudstore/apps/api/migrations/0004_auto_20200823_2216.py | JonasUJ/cloudstore | 33dd9eb2a92c75d4f2034c07dc1c6f1d6d8d845d | [
"MIT"
] | null | null | null | cloudstore/apps/api/migrations/0004_auto_20200823_2216.py | JonasUJ/cloudstore | 33dd9eb2a92c75d4f2034c07dc1c6f1d6d8d845d | [
"MIT"
] | 1 | 2020-10-08T19:53:21.000Z | 2020-10-08T19:53:21.000Z |
# Generated by Django 3.0.8 on 2020-08-23 20:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20200802_1500'),
]
operations = [
migrations.RenameField(
model_name='folder',
old_name='parent',
new_name='folder',
),
]
| 18.894737 | 47 | 0.579387 |
793e89591d849b89e64dc0b66300d2432fb4e950 | 632 | py | Python | nyc_data/manage.py | nyccto-rapicastillo/nyc-ppe | e6d5ba45cf2815f7659298103d3b5bc7210ed8cf | [
"MIT"
] | 3 | 2020-04-16T03:24:17.000Z | 2020-09-11T22:12:31.000Z | nyc_data/manage.py | nyccto-rapicastillo/nyc-ppe | e6d5ba45cf2815f7659298103d3b5bc7210ed8cf | [
"MIT"
] | 47 | 2020-04-10T20:02:09.000Z | 2021-09-08T02:05:09.000Z | nyc_data/manage.py | nyccto-rapicastillo/nyc-ppe | e6d5ba45cf2815f7659298103d3b5bc7210ed8cf | [
"MIT"
] | 1 | 2020-04-22T19:10:24.000Z | 2020-04-22T19:10:24.000Z |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nyc_data.settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| 28.727273 | 76 | 0.683544 |
793e89e1fd4d571e4f2b0c95fc34b2eff7c1130f | 978 | py | Python | tabpy/tabpy_server/handlers/service_info_handler.py | phantomcosmonaut/TabPy | 43cce449cdcb5c99202d68f1a6af4d355d3e3734 | [
"MIT"
] | null | null | null | tabpy/tabpy_server/handlers/service_info_handler.py | phantomcosmonaut/TabPy | 43cce449cdcb5c99202d68f1a6af4d355d3e3734 | [
"MIT"
] | null | null | null | tabpy/tabpy_server/handlers/service_info_handler.py | phantomcosmonaut/TabPy | 43cce449cdcb5c99202d68f1a6af4d355d3e3734 | [
"MIT"
] | null | null | null |
import json
from tabpy.tabpy_server.app.SettingsParameters import SettingsParameters
from tabpy.tabpy_server.handlers.management_handler import ManagementHandler
class ServiceInfoHandler(ManagementHandler):
def initialize(self, app):
super(ServiceInfoHandler, self).initialize(app)
def get(self):
# do not check for authentication - this method
# is the only way for client to collect info about
# supported API versions and required features
self._add_CORS_header()
info = {}
info["description"] = self.tabpy_state.get_description()
info["creation_time"] = self.tabpy_state.creation_time
info["state_path"] = self.settings[SettingsParameters.StateFilePath]
info["server_version"] = self.settings[SettingsParameters.ServerVersion]
info["name"] = self.tabpy_state.name
info["versions"] = self.settings[SettingsParameters.ApiVersions]
self.finish(json.dumps(info))
| 42.521739 | 80 | 0.721881 |
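A hedged client-side sketch of what the handler above serves. The host, port, and /info route are assumptions about how a TabPy deployment mounts this handler (the routing table is not part of this record); the JSON keys mirror exactly what get() writes.
import requests  # hypothetical client, not part of the record

resp = requests.get("http://localhost:9004/info")  # port and route are assumptions
info = resp.json()
for key in ("description", "creation_time", "state_path",
            "server_version", "name", "versions"):
    print(key, "->", info.get(key))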
793e8b1020aa202f8bae68561a80bba9afe4b12a | 63,065 | py | Python | mlflow/tracking/fluent.py | devlibx/mlflowx | 291c51161ec26450b1e79c8e4a32af960da79591 | [
"Apache-2.0"
] | 1 | 2021-12-13T20:52:08.000Z | 2021-12-13T20:52:08.000Z | mlflow/tracking/fluent.py | devlibx/mlflowx | 291c51161ec26450b1e79c8e4a32af960da79591 | [
"Apache-2.0"
] | 9 | 2021-08-04T06:41:49.000Z | 2022-01-10T10:10:52.000Z | mlflow/tracking/fluent.py | devlibx/mlflowx | 291c51161ec26450b1e79c8e4a32af960da79591 | [
"Apache-2.0"
] | 1 | 2021-03-01T10:09:32.000Z | 2021-03-01T10:09:32.000Z |
"""
Internal module implementing the fluent API, allowing management of an active
MLflow run. This module is exposed to users at the top-level :py:mod:`mlflow` module.
"""
import os
import atexit
import time
import logging
import inspect
from packaging.version import Version
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
from mlflow.entities import Experiment, Run, RunInfo, RunStatus, Param, RunTag, Metric, ViewType
from mlflow.entities.lifecycle_stage import LifecycleStage
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from mlflow.tracking.client import MlflowClient
from mlflow.tracking import artifact_utils, _get_store
from mlflow.tracking.context import registry as context_registry
from mlflow.store.tracking import SEARCH_MAX_RESULTS_DEFAULT
from mlflow.utils import env
from mlflow.utils.autologging_utils import (
is_testing,
autologging_integration,
AUTOLOGGING_INTEGRATIONS,
autologging_is_disabled,
)
from mlflow.utils.databricks_utils import is_in_databricks_notebook, get_notebook_id
from mlflow.utils.import_hooks import register_post_import_hook
from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID, MLFLOW_RUN_NAME
from mlflow.utils.validation import _validate_run_id
if TYPE_CHECKING:
import pandas # pylint: disable=unused-import
import matplotlib # pylint: disable=unused-import
import plotly # pylint: disable=unused-import
import numpy # pylint: disable=unused-import
import PIL # pylint: disable=unused-import
_EXPERIMENT_ID_ENV_VAR = "MLFLOW_EXPERIMENT_ID"
_EXPERIMENT_NAME_ENV_VAR = "MLFLOW_EXPERIMENT_NAME"
_RUN_ID_ENV_VAR = "MLFLOW_RUN_ID"
_active_run_stack = []
_active_experiment_id = None
SEARCH_MAX_RESULTS_PANDAS = 100000
NUM_RUNS_PER_PAGE_PANDAS = 10000
_logger = logging.getLogger(__name__)
def set_experiment(experiment_name: str = None, experiment_id: str = None) -> None:
"""
Set the given experiment as the active experiment. The experiment must either be specified by
name via `experiment_name` or by ID via `experiment_id`. The experiment name and ID cannot
both be specified.
:param experiment_name: Case sensitive name of the experiment to be activated. If an experiment
                            with this name does not exist, a new experiment with this name is
created.
:param experiment_id: ID of the experiment to be activated. If an experiment with this ID
does not exist, an exception is thrown.
:return: An instance of :py:class:`mlflow.entities.Experiment` representing the new active
experiment.
.. code-block:: python
:caption: Example
import mlflow
# Set an experiment name, which must be unique and case sensitive.
mlflow.set_experiment("Social NLP Experiments")
# Get Experiment Details
experiment = mlflow.get_experiment_by_name("Social NLP Experiments")
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Experiment_id: 1
Artifact Location: file:///.../mlruns/1
Tags: {}
Lifecycle_stage: active
"""
if (experiment_name is not None and experiment_id is not None) or (
experiment_name is None and experiment_id is None
):
raise MlflowException(
message="Must specify exactly one of: `experiment_id` or `experiment_name`.",
error_code=INVALID_PARAMETER_VALUE,
)
client = MlflowClient()
if experiment_id is None:
experiment = client.get_experiment_by_name(experiment_name)
if not experiment:
_logger.info(
"Experiment with name '%s' does not exist. Creating a new experiment.",
experiment_name,
)
# NB: If two simultaneous threads or processes attempt to set the same experiment
# simultaneously, a race condition may be encountered here wherein experiment creation
# fails
experiment_id = client.create_experiment(experiment_name)
experiment = client.get_experiment(experiment_id)
else:
experiment = client.get_experiment(experiment_id)
if experiment is None:
raise MlflowException(
message=f"Experiment with ID '{experiment_id}' does not exist.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
if experiment.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException(
message=(
"Cannot set a deleted experiment '%s' as the active experiment."
" You can restore the experiment, or permanently delete the "
" experiment to create a new one." % experiment.name
),
error_code=INVALID_PARAMETER_VALUE,
)
global _active_experiment_id
_active_experiment_id = experiment.experiment_id
return experiment
class ActiveRun(Run): # pylint: disable=W0223
"""Wrapper around :py:class:`mlflow.entities.Run` to enable using Python ``with`` syntax."""
def __init__(self, run):
Run.__init__(self, run.info, run.data)
def __enter__(self):
return self
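    # On exit, the run is marked FINISHED after a clean block and FAILED if an exception
    # escaped; returning ``exc_type is None`` means exceptions are never suppressed.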
def __exit__(self, exc_type, exc_val, exc_tb):
status = RunStatus.FINISHED if exc_type is None else RunStatus.FAILED
end_run(RunStatus.to_string(status))
return exc_type is None
def start_run(
run_id: str = None,
experiment_id: Optional[str] = None,
run_name: Optional[str] = None,
nested: bool = False,
tags: Optional[Dict[str, Any]] = None,
) -> ActiveRun:
"""
Start a new MLflow run, setting it as the active run under which metrics and parameters
will be logged. The return value can be used as a context manager within a ``with`` block;
otherwise, you must call ``end_run()`` to terminate the current run.
If you pass a ``run_id`` or the ``MLFLOW_RUN_ID`` environment variable is set,
``start_run`` attempts to resume a run with the specified run ID and
other parameters are ignored. ``run_id`` takes precedence over ``MLFLOW_RUN_ID``.
If resuming an existing run, the run status is set to ``RunStatus.RUNNING``.
MLflow sets a variety of default tags on the run, as defined in
:ref:`MLflow system tags <system_tags>`.
:param run_id: If specified, get the run with the specified UUID and log parameters
and metrics under that run. The run's end time is unset and its status
is set to running, but the run's other attributes (``source_version``,
``source_type``, etc.) are not changed.
:param experiment_id: ID of the experiment under which to create the current run (applicable
only when ``run_id`` is not specified). If ``experiment_id`` argument
is unspecified, will look for valid experiment in the following order:
activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_NAME``
environment variable, ``MLFLOW_EXPERIMENT_ID`` environment variable,
or the default experiment as defined by the tracking server.
:param run_name: Name of new run (stored as a ``mlflow.runName`` tag).
Used only when ``run_id`` is unspecified.
:param nested: Controls whether run is nested in parent run. ``True`` creates a nested run.
:param tags: An optional dictionary of string keys and values to set as tags on the run.
If a run is being resumed, these tags are set on the resumed run. If a new run is
being created, these tags are set on the new run.
:return: :py:class:`mlflow.ActiveRun` object that acts as a context manager wrapping
the run's state.
.. code-block:: python
:caption: Example
import mlflow
# Create nested runs
with mlflow.start_run(run_name='PARENT_RUN') as parent_run:
mlflow.log_param("parent", "yes")
with mlflow.start_run(run_name='CHILD_RUN', nested=True) as child_run:
mlflow.log_param("child", "yes")
print("parent run_id: {}".format(parent_run.info.run_id))
print("child run_id : {}".format(child_run.info.run_id))
print("--")
# Search all child runs with a parent id
query = "tags.mlflow.parentRunId = '{}'".format(parent_run.info.run_id)
results = mlflow.search_runs(filter_string=query)
print(results[["run_id", "params.child", "tags.mlflow.runName"]])
.. code-block:: text
:caption: Output
parent run_id: 5ec0e7ae18f54c2694ffb48c2fccf25c
child run_id : 78b3b0d264b44cd29e8dc389749bb4be
--
run_id params.child tags.mlflow.runName
0 78b3b0d264b44cd29e8dc389749bb4be yes CHILD_RUN
"""
global _active_run_stack
# back compat for int experiment_id
experiment_id = str(experiment_id) if isinstance(experiment_id, int) else experiment_id
if len(_active_run_stack) > 0 and not nested:
raise Exception(
(
"Run with UUID {} is already active. To start a new run, first end the "
+ "current run with mlflow.end_run(). To start a nested "
+ "run, call start_run with nested=True"
).format(_active_run_stack[0].info.run_id)
)
client = MlflowClient()
if run_id:
existing_run_id = run_id
elif _RUN_ID_ENV_VAR in os.environ:
existing_run_id = os.environ[_RUN_ID_ENV_VAR]
del os.environ[_RUN_ID_ENV_VAR]
else:
existing_run_id = None
if existing_run_id:
_validate_run_id(existing_run_id)
active_run_obj = client.get_run(existing_run_id)
# Check to see if experiment_id from environment matches experiment_id from set_experiment()
if (
_active_experiment_id is not None
and _active_experiment_id != active_run_obj.info.experiment_id
):
raise MlflowException(
"Cannot start run with ID {} because active run ID "
"does not match environment run ID. Make sure --experiment-name "
"or --experiment-id matches experiment set with "
"set_experiment(), or just use command-line "
"arguments".format(existing_run_id)
)
# Check to see if current run isn't deleted
if active_run_obj.info.lifecycle_stage == LifecycleStage.DELETED:
raise MlflowException(
"Cannot start run with ID {} because it is in the "
"deleted state.".format(existing_run_id)
)
# Use previous end_time because a value is required for update_run_info
end_time = active_run_obj.info.end_time
_get_store().update_run_info(
existing_run_id, run_status=RunStatus.RUNNING, end_time=end_time
)
if tags:
client.log_batch(
run_id=existing_run_id,
tags=[RunTag(key, str(value)) for key, value in tags.items()],
)
active_run_obj = client.get_run(existing_run_id)
else:
if len(_active_run_stack) > 0:
parent_run_id = _active_run_stack[-1].info.run_id
else:
parent_run_id = None
exp_id_for_run = experiment_id if experiment_id is not None else _get_experiment_id()
user_specified_tags = tags or {}
if parent_run_id is not None:
user_specified_tags[MLFLOW_PARENT_RUN_ID] = parent_run_id
if run_name is not None:
user_specified_tags[MLFLOW_RUN_NAME] = run_name
tags = context_registry.resolve_tags(user_specified_tags)
active_run_obj = client.create_run(experiment_id=exp_id_for_run, tags=tags)
_active_run_stack.append(ActiveRun(active_run_obj))
return _active_run_stack[-1]
def end_run(status: str = RunStatus.to_string(RunStatus.FINISHED)) -> None:
"""End an active MLflow run (if there is one).
.. code-block:: python
:caption: Example
import mlflow
# Start run and get status
mlflow.start_run()
run = mlflow.active_run()
print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))
# End run and get status
mlflow.end_run()
run = mlflow.get_run(run.info.run_id)
print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))
print("--")
# Check for any active runs
print("Active run: {}".format(mlflow.active_run()))
.. code-block:: text
:caption: Output
run_id: b47ee4563368419880b44ad8535f6371; status: RUNNING
run_id: b47ee4563368419880b44ad8535f6371; status: FINISHED
--
Active run: None
"""
global _active_run_stack
if len(_active_run_stack) > 0:
# Clear out the global existing run environment variable as well.
env.unset_variable(_RUN_ID_ENV_VAR)
run = _active_run_stack.pop()
MlflowClient().set_terminated(run.info.run_id, status)
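# Ensure that any run still active when the interpreter exits is marked as terminated.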
atexit.register(end_run)
def active_run() -> Optional[ActiveRun]:
"""Get the currently active ``Run``, or None if no such run exists.
**Note**: You cannot access currently-active run attributes
(parameters, metrics, etc.) through the run returned by ``mlflow.active_run``. In order
to access such attributes, use the :py:class:`mlflow.tracking.MlflowClient` as follows:
.. code-block:: python
:caption: Example
import mlflow
mlflow.start_run()
run = mlflow.active_run()
print("Active run_id: {}".format(run.info.run_id))
mlflow.end_run()
.. code-block:: text
:caption: Output
Active run_id: 6f252757005748708cd3aad75d1ff462
"""
return _active_run_stack[-1] if len(_active_run_stack) > 0 else None
def get_run(run_id: str) -> Run:
"""
Fetch the run from backend store. The resulting :py:class:`Run <mlflow.entities.Run>`
contains a collection of run metadata -- :py:class:`RunInfo <mlflow.entities.RunInfo>`,
as well as a collection of run parameters, tags, and metrics --
:py:class:`RunData <mlflow.entities.RunData>`. In the case where multiple metrics with the
same key are logged for the run, the :py:class:`RunData <mlflow.entities.RunData>` contains
the most recently logged value at the largest step for each metric.
:param run_id: Unique identifier for the run.
:return: A single :py:class:`mlflow.entities.Run` object, if the run exists. Otherwise,
raises an exception.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run() as run:
mlflow.log_param("p", 0)
run_id = run.info.run_id
print("run_id: {}; lifecycle_stage: {}".format(run_id,
mlflow.get_run(run_id).info.lifecycle_stage))
.. code-block:: text
:caption: Output
run_id: 7472befefc754e388e8e922824a0cca5; lifecycle_stage: active
"""
return MlflowClient().get_run(run_id)
def log_param(key: str, value: Any) -> None:
"""
Log a parameter under the current run. If no run is active, this method will create
a new active run.
:param key: Parameter name (string). This string may only contain alphanumerics,
underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Parameter value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.log_param("learning_rate", 0.01)
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_param(run_id, key, value)
def set_tag(key: str, value: Any) -> None:
"""
Set a tag under the current run. If no run is active, this method will create a
new active run.
:param key: Tag name (string). This string may only contain alphanumerics, underscores
(_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Tag value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.set_tag("release.version", "2.2.0")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().set_tag(run_id, key, value)
def delete_tag(key: str) -> None:
"""
Delete a tag from a run. This is irreversible. If no run is active, this method
will create a new active run.
:param key: Name of the tag
.. code-block:: python
:caption: Example
import mlflow
tags = {"engineering": "ML Platform",
"engineering_remote": "ML Platform"}
with mlflow.start_run() as run:
mlflow.set_tags(tags)
with mlflow.start_run(run_id=run.info.run_id):
mlflow.delete_tag("engineering_remote")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().delete_tag(run_id, key)
def log_metric(key: str, value: float, step: Optional[int] = None) -> None:
"""
Log a metric under the current run. If no run is active, this method will create
a new active run.
:param key: Metric name (string). This string may only contain alphanumerics, underscores (_),
dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Metric value (float). Note that some special values such as +/- Infinity may be
replaced by other values depending on the store. For example, the
SQLAlchemy store replaces +/- Infinity with max / min float values.
All backend stores will support values up to length 5000, but some
may support larger values.
:param step: Metric step (int). Defaults to zero if unspecified.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.log_metric("mse", 2500.00)
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_metric(run_id, key, value, int(time.time() * 1000), step or 0)
def log_metrics(metrics: Dict[str, float], step: Optional[int] = None) -> None:
"""
Log multiple metrics for the current run. If no run is active, this method will create a new
active run.
:param metrics: Dictionary of metric_name: String -> value: Float. Note that some special
values such as +/- Infinity may be replaced by other values depending on
the store. For example, sql based store may replace +/- Infinity with
max / min float values.
:param step: A single integer step at which to log the specified
Metrics. If unspecified, each metric is logged at step zero.
:returns: None
.. code-block:: python
:caption: Example
import mlflow
metrics = {"mse": 2500.00, "rmse": 50.00}
# Log a batch of metrics
with mlflow.start_run():
mlflow.log_metrics(metrics)
"""
run_id = _get_or_start_run().info.run_id
timestamp = int(time.time() * 1000)
metrics_arr = [Metric(key, value, timestamp, step or 0) for key, value in metrics.items()]
MlflowClient().log_batch(run_id=run_id, metrics=metrics_arr, params=[], tags=[])
def log_params(params: Dict[str, Any]) -> None:
"""
Log a batch of params for the current run. If no run is active, this method will create a
new active run.
:param params: Dictionary of param_name: String -> value: (String, but will be string-ified if
not)
:returns: None
.. code-block:: python
:caption: Example
import mlflow
params = {"learning_rate": 0.01, "n_estimators": 10}
# Log a batch of parameters
with mlflow.start_run():
mlflow.log_params(params)
"""
run_id = _get_or_start_run().info.run_id
params_arr = [Param(key, str(value)) for key, value in params.items()]
MlflowClient().log_batch(run_id=run_id, metrics=[], params=params_arr, tags=[])
def set_tags(tags: Dict[str, Any]) -> None:
"""
Log a batch of tags for the current run. If no run is active, this method will create a
new active run.
:param tags: Dictionary of tag_name: String -> value: (String, but will be string-ified if
not)
:returns: None
.. code-block:: python
:caption: Example
import mlflow
tags = {"engineering": "ML Platform",
"release.candidate": "RC1",
"release.version": "2.2.0"}
# Set a batch of tags
with mlflow.start_run():
mlflow.set_tags(tags)
"""
run_id = _get_or_start_run().info.run_id
tags_arr = [RunTag(key, str(value)) for key, value in tags.items()]
MlflowClient().log_batch(run_id=run_id, metrics=[], params=[], tags=tags_arr)
def log_artifact(local_path: str, artifact_path: Optional[str] = None) -> None:
"""
Log a local file or directory as an artifact of the currently active run. If no run is
active, this method will create a new active run.
:param local_path: Path to the file to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
.. code-block:: python
:caption: Example
import mlflow
# Create a features.txt artifact file
features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
f.write(features)
# With artifact_path=None write features.txt under
# root artifact_uri/artifacts directory
with mlflow.start_run():
mlflow.log_artifact("features.txt")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_artifact(run_id, local_path, artifact_path)
def log_artifacts(local_dir: str, artifact_path: Optional[str] = None) -> None:
"""
Log all the contents of a local directory as artifacts of the run. If no run is active,
this method will create a new active run.
:param local_dir: Path to the directory of files to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
.. code-block:: python
:caption: Example
        import json
        import os
import mlflow
# Create some files to preserve as artifacts
features = "rooms, zipcode, median_price, school_rating, transport"
data = {"state": "TX", "Available": 25, "Type": "Detached"}
# Create couple of artifact files under the directory "data"
os.makedirs("data", exist_ok=True)
with open("data/data.json", 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
with open("data/features.txt", 'w') as f:
f.write(features)
# Write all files in "data" to root artifact_uri/states
with mlflow.start_run():
mlflow.log_artifacts("data", artifact_path="states")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_artifacts(run_id, local_dir, artifact_path)
def log_text(text: str, artifact_file: str) -> None:
"""
Log text as an artifact.
:param text: String containing text to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the text is saved (e.g. "dir/file.txt").
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
# Log text to a file under the run's root artifact directory
mlflow.log_text("text1", "file1.txt")
# Log text in a subdirectory of the run's root artifact directory
mlflow.log_text("text2", "dir/file2.txt")
# Log HTML text
mlflow.log_text("<h1>header</h1>", "index.html")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_text(run_id, text, artifact_file)
def log_dict(dictionary: Any, artifact_file: str) -> None:
"""
Log a JSON/YAML-serializable object (e.g. `dict`) as an artifact. The serialization
format (JSON or YAML) is automatically inferred from the extension of `artifact_file`.
If the file extension doesn't exist or match any of [".json", ".yml", ".yaml"],
JSON format is used.
:param dictionary: Dictionary to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the dictionary is saved (e.g. "dir/data.json").
.. code-block:: python
:caption: Example
import mlflow
dictionary = {"k": "v"}
with mlflow.start_run():
# Log a dictionary as a JSON file under the run's root artifact directory
mlflow.log_dict(dictionary, "data.json")
# Log a dictionary as a YAML file in a subdirectory of the run's root artifact directory
mlflow.log_dict(dictionary, "dir/data.yml")
# If the file extension doesn't exist or match any of [".json", ".yaml", ".yml"],
# JSON format is used.
mlflow.log_dict(dictionary, "data")
mlflow.log_dict(dictionary, "data.txt")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_dict(run_id, dictionary, artifact_file)
def log_figure(
figure: Union["matplotlib.figure.Figure", "plotly.graph_objects.Figure"], artifact_file: str
) -> None:
"""
Log a figure as an artifact. The following figure objects are supported:
- `matplotlib.figure.Figure`_
- `plotly.graph_objects.Figure`_
.. _matplotlib.figure.Figure:
https://matplotlib.org/api/_as_gen/matplotlib.figure.Figure.html
.. _plotly.graph_objects.Figure:
https://plotly.com/python-api-reference/generated/plotly.graph_objects.Figure.html
:param figure: Figure to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the figure is saved (e.g. "dir/file.png").
.. code-block:: python
:caption: Matplotlib Example
import mlflow
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1], [2, 3])
with mlflow.start_run():
mlflow.log_figure(fig, "figure.png")
.. code-block:: python
:caption: Plotly Example
import mlflow
from plotly import graph_objects as go
fig = go.Figure(go.Scatter(x=[0, 1], y=[2, 3]))
with mlflow.start_run():
mlflow.log_figure(fig, "figure.html")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_figure(run_id, figure, artifact_file)
def log_image(image: Union["numpy.ndarray", "PIL.Image.Image"], artifact_file: str) -> None:
"""
Log an image as an artifact. The following image objects are supported:
- `numpy.ndarray`_
- `PIL.Image.Image`_
.. _numpy.ndarray:
https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html
.. _PIL.Image.Image:
https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image
Numpy array support
- data type (( ) represents a valid value range):
- bool
- integer (0 ~ 255)
- unsigned integer (0 ~ 255)
- float (0.0 ~ 1.0)
.. warning::
- Out-of-range integer values will be **clipped** to [0, 255].
- Out-of-range float values will be **clipped** to [0, 1].
- shape (H: height, W: width):
- H x W (Grayscale)
- H x W x 1 (Grayscale)
- H x W x 3 (an RGB channel order is assumed)
- H x W x 4 (an RGBA channel order is assumed)
:param image: Image to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the image is saved (e.g. "dir/image.png").
.. code-block:: python
:caption: Numpy Example
import mlflow
import numpy as np
image = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
with mlflow.start_run():
mlflow.log_image(image, "image.png")
.. code-block:: python
:caption: Pillow Example
import mlflow
from PIL import Image
image = Image.new("RGB", (100, 100))
with mlflow.start_run():
mlflow.log_image(image, "image.png")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_image(run_id, image, artifact_file)
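# Internal helper (note the leading underscore): records a logged MLflow model against the
# current run via the tracking client; not part of the public fluent API.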
def _record_logged_model(mlflow_model):
run_id = _get_or_start_run().info.run_id
MlflowClient()._record_logged_model(run_id, mlflow_model)
def get_experiment(experiment_id: str) -> Experiment:
"""
Retrieve an experiment by experiment_id from the backend store
:param experiment_id: The string-ified experiment ID returned from ``create_experiment``.
:return: :py:class:`mlflow.entities.Experiment`
.. code-block:: python
:caption: Example
import mlflow
experiment = mlflow.get_experiment("0")
print("Name: {}".format(experiment.name))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: Default
Artifact Location: file:///.../mlruns/0
Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().get_experiment(experiment_id)
def get_experiment_by_name(name: str) -> Optional[Experiment]:
"""
Retrieve an experiment by experiment name from the backend store
    :param name: The case sensitive experiment name.
:return: An instance of :py:class:`mlflow.entities.Experiment`
if an experiment with the specified name exists, otherwise None.
.. code-block:: python
:caption: Example
import mlflow
# Case sensitive name
experiment = mlflow.get_experiment_by_name("Default")
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Experiment_id: 0
Artifact Location: file:///.../mlruns/0
Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().get_experiment_by_name(name)
def list_experiments(
view_type: int = ViewType.ACTIVE_ONLY,
max_results: Optional[int] = None,
) -> List[Experiment]:
"""
    :param view_type: Qualify the type of experiments to return: one of
                      ``ViewType.ACTIVE_ONLY`` (the default), ``ViewType.DELETED_ONLY``,
                      or ``ViewType.ALL``.
:param max_results: If passed, specifies the maximum number of experiments desired. If not
passed, all experiments will be returned.
:return: A list of :py:class:`Experiment <mlflow.entities.Experiment>` objects.
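    A minimal usage sketch (illustrative only; the experiments returned, and their names and
    IDs, depend on what exists on your tracking server):
    .. code-block:: python
        :caption: Example
        import mlflow
        from mlflow.entities import ViewType
        # Print the ID and name of up to five active (non-deleted) experiments
        for exp in mlflow.list_experiments(view_type=ViewType.ACTIVE_ONLY, max_results=5):
            print("{}: {}".format(exp.experiment_id, exp.name))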
"""
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().list_experiments(
view_type=view_type,
max_results=number_to_get,
page_token=next_page_token,
)
return _paginate(pagination_wrapper_func, SEARCH_MAX_RESULTS_DEFAULT, max_results)
def create_experiment(
name: str,
artifact_location: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
) -> str:
"""
Create an experiment.
:param name: The experiment name, which must be unique and is case sensitive
:param artifact_location: The location to store run artifacts.
If not provided, the server picks an appropriate default.
:param tags: An optional dictionary of string keys and values to set as
tags on the experiment.
:return: String ID of the created experiment.
.. code-block:: python
:caption: Example
import mlflow
# Create an experiment name, which must be unique and case sensitive
experiment_id = mlflow.create_experiment("Social NLP Experiments")
experiment = mlflow.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: Social NLP Experiments
Experiment_id: 1
Artifact Location: file:///.../mlruns/1
        Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().create_experiment(name, artifact_location, tags)
def delete_experiment(experiment_id: str) -> None:
"""
Delete an experiment from the backend store.
    :param experiment_id: The string-ified experiment ID returned from ``create_experiment``.
.. code-block:: python
:caption: Example
import mlflow
experiment_id = mlflow.create_experiment("New Experiment")
mlflow.delete_experiment(experiment_id)
# Examine the deleted experiment details.
experiment = mlflow.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: New Experiment
Artifact Location: file:///.../mlruns/2
Lifecycle_stage: deleted
"""
MlflowClient().delete_experiment(experiment_id)
def delete_run(run_id: str) -> None:
"""
Deletes a run with the given ID.
:param run_id: Unique identifier for the run to delete.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run() as run:
mlflow.log_param("p", 0)
run_id = run.info.run_id
mlflow.delete_run(run_id)
print("run_id: {}; lifecycle_stage: {}".format(run_id,
mlflow.get_run(run_id).info.lifecycle_stage))
.. code-block:: text
:caption: Output
run_id: 45f4af3e6fd349e58579b27fcb0b8277; lifecycle_stage: deleted
"""
MlflowClient().delete_run(run_id)
def get_artifact_uri(artifact_path: Optional[str] = None) -> str:
"""
Get the absolute URI of the specified artifact in the currently active run.
If `path` is not specified, the artifact root URI of the currently active
run will be returned; calls to ``log_artifact`` and ``log_artifacts`` write
artifact(s) to subdirectories of the artifact root URI.
If no run is active, this method will create a new active run.
:param artifact_path: The run-relative artifact path for which to obtain an absolute URI.
For example, "path/to/artifact". If unspecified, the artifact root URI
for the currently active run will be returned.
    :return: An *absolute* URI referring to the specified artifact or the currently active run's
artifact root. For example, if an artifact path is provided and the currently active
run uses an S3-backed store, this may be a uri of the form
``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. If an artifact path
is not provided and the currently active run uses an S3-backed store, this may be a
URI of the form ``s3://<bucket_name>/path/to/artifact/root``.
.. code-block:: python
:caption: Example
import mlflow
features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
f.write(features)
# Log the artifact in a directory "features" under the root artifact_uri/features
with mlflow.start_run():
mlflow.log_artifact("features.txt", artifact_path="features")
# Fetch the artifact uri root directory
artifact_uri = mlflow.get_artifact_uri()
print("Artifact uri: {}".format(artifact_uri))
# Fetch a specific artifact uri
artifact_uri = mlflow.get_artifact_uri(artifact_path="features/features.txt")
print("Artifact uri: {}".format(artifact_uri))
.. code-block:: text
:caption: Output
Artifact uri: file:///.../0/a46a80f1c9644bd8f4e5dd5553fffce/artifacts
Artifact uri: file:///.../0/a46a80f1c9644bd8f4e5dd5553fffce/artifacts/features/features.txt
"""
return artifact_utils.get_artifact_uri(
run_id=_get_or_start_run().info.run_id, artifact_path=artifact_path
)
def search_runs(
experiment_ids: Optional[List[str]] = None,
filter_string: str = "",
run_view_type: int = ViewType.ACTIVE_ONLY,
max_results: int = SEARCH_MAX_RESULTS_PANDAS,
order_by: Optional[List[str]] = None,
output_format: str = "pandas",
) -> Union[List[Run], "pandas.DataFrame"]:
"""
Get a pandas DataFrame of runs that fit the search criteria.
:param experiment_ids: List of experiment IDs. None will default to the active experiment.
:param filter_string: Filter query string, defaults to searching all runs.
:param run_view_type: one of enum values ``ACTIVE_ONLY``, ``DELETED_ONLY``, or ``ALL`` runs
defined in :py:class:`mlflow.entities.ViewType`.
:param max_results: The maximum number of runs to put in the dataframe. Default is 100,000
to avoid causing out-of-memory issues on the user's machine.
:param order_by: List of columns to order by (e.g., "metrics.rmse"). The ``order_by`` column
can contain an optional ``DESC`` or ``ASC`` value. The default is ``ASC``.
The default ordering is to sort by ``start_time DESC``, then ``run_id``.
:param output_format: The output format to be returned. If ``pandas``, a ``pandas.DataFrame``
is returned and, if ``list``, a list of :py:class:`mlflow.entities.Run`
is returned.
:return: If output_format is ``list``: a list of :py:class:`mlflow.entities.Run`. If
output_format is ``pandas``: ``pandas.DataFrame`` of runs, where each metric,
parameter, and tag is expanded into its own column named metrics.*, params.*, or
tags.* respectively. For runs that don't have a particular metric, parameter, or tag,
             the value for the corresponding column is (NumPy) ``NaN``, ``None``, or ``None``
respectively.
.. code-block:: python
:caption: Example
import mlflow
# Create an experiment and log two runs under it
experiment_id = mlflow.create_experiment("Social NLP Experiments")
with mlflow.start_run(experiment_id=experiment_id):
mlflow.log_metric("m", 1.55)
mlflow.set_tag("s.release", "1.1.0-RC")
with mlflow.start_run(experiment_id=experiment_id):
mlflow.log_metric("m", 2.50)
mlflow.set_tag("s.release", "1.2.0-GA")
# Search all runs in experiment_id
df = mlflow.search_runs([experiment_id], order_by=["metrics.m DESC"])
print(df[["metrics.m", "tags.s.release", "run_id"]])
print("--")
# Search the experiment_id using a filter_string with tag
# that has a case insensitive pattern
filter_string = "tags.s.release ILIKE '%rc%'"
df = mlflow.search_runs([experiment_id], filter_string=filter_string)
print(df[["metrics.m", "tags.s.release", "run_id"]])
.. code-block:: text
:caption: Output
metrics.m tags.s.release run_id
0 2.50 1.2.0-GA 147eed886ab44633902cc8e19b2267e2
1 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4
--
metrics.m tags.s.release run_id
0 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4
"""
if not experiment_ids:
experiment_ids = _get_experiment_id()
# Using an internal function as the linter doesn't like assigning a lambda, and inlining the
# full thing is a mess
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().search_runs(
experiment_ids,
filter_string,
run_view_type,
number_to_get,
order_by,
next_page_token,
)
runs = _paginate(pagination_wrapper_func, NUM_RUNS_PER_PAGE_PANDAS, max_results)
if output_format == "list":
return runs # List[mlflow.entities.run.Run]
elif output_format == "pandas":
import numpy as np
import pandas as pd
info = {
"run_id": [],
"experiment_id": [],
"status": [],
"artifact_uri": [],
"start_time": [],
"end_time": [],
}
params, metrics, tags = ({}, {}, {})
PARAM_NULL, METRIC_NULL, TAG_NULL = (None, np.nan, None)
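        # Build one column per metric/param/tag key. For each run, append the value when the
        # key is present; the first time a new key appears, backfill the corresponding null
        # for all previously processed runs so every column has exactly one entry per run.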
for i, run in enumerate(runs):
info["run_id"].append(run.info.run_id)
info["experiment_id"].append(run.info.experiment_id)
info["status"].append(run.info.status)
info["artifact_uri"].append(run.info.artifact_uri)
info["start_time"].append(pd.to_datetime(run.info.start_time, unit="ms", utc=True))
info["end_time"].append(pd.to_datetime(run.info.end_time, unit="ms", utc=True))
# Params
param_keys = set(params.keys())
for key in param_keys:
if key in run.data.params:
params[key].append(run.data.params[key])
else:
params[key].append(PARAM_NULL)
new_params = set(run.data.params.keys()) - param_keys
for p in new_params:
params[p] = [PARAM_NULL] * i # Fill in null values for all previous runs
params[p].append(run.data.params[p])
# Metrics
metric_keys = set(metrics.keys())
for key in metric_keys:
if key in run.data.metrics:
metrics[key].append(run.data.metrics[key])
else:
metrics[key].append(METRIC_NULL)
new_metrics = set(run.data.metrics.keys()) - metric_keys
for m in new_metrics:
metrics[m] = [METRIC_NULL] * i
metrics[m].append(run.data.metrics[m])
# Tags
tag_keys = set(tags.keys())
for key in tag_keys:
if key in run.data.tags:
tags[key].append(run.data.tags[key])
else:
tags[key].append(TAG_NULL)
new_tags = set(run.data.tags.keys()) - tag_keys
for t in new_tags:
tags[t] = [TAG_NULL] * i
tags[t].append(run.data.tags[t])
data = {}
data.update(info)
for key in metrics:
data["metrics." + key] = metrics[key]
for key in params:
data["params." + key] = params[key]
for key in tags:
data["tags." + key] = tags[key]
return pd.DataFrame(data)
else:
raise ValueError(
"Unsupported output format: %s. Supported string values are 'pandas' or 'list'"
% output_format
)
def list_run_infos(
experiment_id: str,
run_view_type: int = ViewType.ACTIVE_ONLY,
max_results: int = SEARCH_MAX_RESULTS_DEFAULT,
order_by: Optional[List[str]] = None,
) -> List[RunInfo]:
"""
Return run information for runs which belong to the experiment_id.
    :param experiment_id: The ID of the experiment to search
:param run_view_type: ACTIVE_ONLY, DELETED_ONLY, or ALL runs
:param max_results: Maximum number of results desired.
:param order_by: List of order_by clauses. Currently supported values are
                     ``metric.key``, ``parameter.key``, ``tag.key``, ``attribute.key``.
For example, ``order_by=["tag.release ASC", "metric.click_rate DESC"]``.
:return: A list of :py:class:`RunInfo <mlflow.entities.RunInfo>` objects that satisfy the
search expressions.
.. code-block:: python
:caption: Example
import mlflow
from mlflow.entities import ViewType
# Create two runs
with mlflow.start_run() as run1:
mlflow.log_param("p", 0)
with mlflow.start_run() as run2:
mlflow.log_param("p", 1)
# Delete the last run
mlflow.delete_run(run2.info.run_id)
def print_run_infos(run_infos):
for r in run_infos:
print("- run_id: {}, lifecycle_stage: {}".format(r.run_id, r.lifecycle_stage))
print("Active runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.ACTIVE_ONLY))
print("Deleted runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.DELETED_ONLY))
print("All runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.ALL))
.. code-block:: text
:caption: Output
Active runs:
- run_id: 4937823b730640d5bed9e3e5057a2b34, lifecycle_stage: active
Deleted runs:
- run_id: b13f1badbed842cf9975c023d23da300, lifecycle_stage: deleted
All runs:
- run_id: b13f1badbed842cf9975c023d23da300, lifecycle_stage: deleted
- run_id: 4937823b730640d5bed9e3e5057a2b34, lifecycle_stage: active
"""
# Using an internal function as the linter doesn't like assigning a lambda, and inlining the
# full thing is a mess
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().list_run_infos(
experiment_id, run_view_type, number_to_get, order_by, next_page_token
)
return _paginate(pagination_wrapper_func, SEARCH_MAX_RESULTS_DEFAULT, max_results)
def _paginate(paginated_fn, max_results_per_page, max_results=None):
"""
Intended to be a general use pagination utility.
    :param paginated_fn: A function that takes the number of results to retrieve per page and a
                         pagination token, and returns a PagedList of objects.
    :param max_results_per_page: The maximum number of results to retrieve per page.
    :param max_results: The maximum number of results to retrieve overall. If unspecified,
                        all results will be retrieved.
    :return: Returns a list of entities, as determined by the paginated_fn parameter, with no more
             entities than specified by max_results
    :rtype: list[object]
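    A minimal sketch of how this helper is used within this module; ``fetch_page`` is a
    hypothetical name that only illustrates the ``(number_to_get, next_page_token)`` signature
    expected of ``paginated_fn``:
    .. code-block:: python
        def fetch_page(number_to_get, next_page_token):
            # Must return a PagedList, which may expose a ``token`` attribute for the next page
            return MlflowClient().list_experiments(
                max_results=number_to_get, page_token=next_page_token
            )
        experiments = _paginate(fetch_page, max_results_per_page=100, max_results=250)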
"""
all_results = []
next_page_token = None
returns_all = max_results is None
while returns_all or len(all_results) < max_results:
num_to_get = max_results_per_page if returns_all else max_results - len(all_results)
if num_to_get < max_results_per_page:
page_results = paginated_fn(num_to_get, next_page_token)
else:
page_results = paginated_fn(max_results_per_page, next_page_token)
all_results.extend(page_results)
if hasattr(page_results, "token") and page_results.token:
next_page_token = page_results.token
else:
break
return all_results
def _get_or_start_run():
if len(_active_run_stack) > 0:
return _active_run_stack[-1]
return start_run()
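# Resolve an experiment ID from the environment: MLFLOW_EXPERIMENT_NAME takes precedence over
# MLFLOW_EXPERIMENT_ID, and a name that does not match an existing experiment yields None.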
def _get_experiment_id_from_env():
experiment_name = env.get_env(_EXPERIMENT_NAME_ENV_VAR)
if experiment_name is not None:
exp = MlflowClient().get_experiment_by_name(experiment_name)
return exp.experiment_id if exp else None
return env.get_env(_EXPERIMENT_ID_ENV_VAR)
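# Experiment ID resolution order for new runs: the experiment activated via set_experiment(),
# then the environment variables above, then the Databricks notebook ID when running in a
# notebook, and finally the deprecated default experiment "0".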
def _get_experiment_id():
# TODO: Replace with None for 1.0, leaving for 0.9.1 release backcompat with existing servers
deprecated_default_exp_id = "0"
return (
_active_experiment_id
or _get_experiment_id_from_env()
or (is_in_databricks_notebook() and get_notebook_id())
) or deprecated_default_exp_id
@autologging_integration("mlflow")
def autolog(
log_input_examples: bool = False,
log_model_signatures: bool = True,
log_models: bool = True,
disable: bool = False,
exclusive: bool = False,
disable_for_unsupported_versions: bool = False,
silent: bool = False,
# pylint: disable=unused-argument
) -> None:
"""
Enables (or disables) and configures autologging for all supported integrations.
The parameters are passed to any autologging integrations that support them.
See the :ref:`tracking docs <automatic-logging>` for a list of supported autologging
integrations.
Note that framework-specific configurations set at any point will take precedence over
any configurations set by this function. For example:
.. code-block:: python
mlflow.autolog(log_models=False, exclusive=True)
import sklearn
would enable autologging for `sklearn` with `log_models=False` and `exclusive=True`,
but
.. code-block:: python
mlflow.autolog(log_models=False, exclusive=True)
import sklearn
mlflow.sklearn.autolog(log_models=True)
would enable autologging for `sklearn` with `log_models=True` and `exclusive=False`,
the latter resulting from the default value for `exclusive` in `mlflow.sklearn.autolog`;
other framework autolog functions (e.g. `mlflow.tensorflow.autolog`) would use the
configurations set by `mlflow.autolog` (in this instance, `log_models=False`, `exclusive=True`),
until they are explicitly called by the user.
:param log_input_examples: If ``True``, input examples from training datasets are collected and
logged along with model artifacts during training. If ``False``,
input examples are not logged.
Note: Input examples are MLflow model attributes
and are only collected if ``log_models`` is also ``True``.
:param log_model_signatures: If ``True``,
:py:class:`ModelSignatures <mlflow.models.ModelSignature>`
describing model inputs and outputs are collected and logged along
with model artifacts during training. If ``False``, signatures are
not logged. Note: Model signatures are MLflow model attributes
and are only collected if ``log_models`` is also ``True``.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
Input examples and model signatures, which are attributes of MLflow models,
are also omitted when ``log_models`` is ``False``.
:param disable: If ``True``, disables all supported autologging integrations. If ``False``,
enables all supported autologging integrations.
:param exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
If ``False``, autologged content is logged to the active fluent run,
which may be user-created.
:param disable_for_unsupported_versions: If ``True``, disable autologging for versions of
all integration libraries that have not been tested against this version
of the MLflow client or are incompatible.
:param silent: If ``True``, suppress all event logs and warnings from MLflow during autologging
setup and training execution. If ``False``, show all events and warnings during
autologging setup and training execution.
.. code-block:: python
:caption: Example
import numpy as np
import mlflow.sklearn
from mlflow.tracking import MlflowClient
from sklearn.linear_model import LinearRegression
def print_auto_logged_info(r):
tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, "model")]
print("run_id: {}".format(r.info.run_id))
print("artifacts: {}".format(artifacts))
print("params: {}".format(r.data.params))
print("metrics: {}".format(r.data.metrics))
print("tags: {}".format(tags))
# prepare training data
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
# Auto log all the parameters, metrics, and artifacts
mlflow.autolog()
model = LinearRegression()
with mlflow.start_run() as run:
model.fit(X, y)
# fetch the auto logged parameters and metrics for ended run
print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))
.. code-block:: text
:caption: Output
run_id: fd10a17d028c47399a55ab8741721ef7
artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/model.pkl']
params: {'copy_X': 'True',
'normalize': 'False',
'fit_intercept': 'True',
'n_jobs': 'None'}
metrics: {'training_score': 1.0,
'training_rmse': 4.440892098500626e-16,
'training_r2_score': 1.0,
'training_mae': 2.220446049250313e-16,
'training_mse': 1.9721522630525295e-31}
tags: {'estimator_class': 'sklearn.linear_model._base.LinearRegression',
'estimator_name': 'LinearRegression'}
"""
from mlflow import (
tensorflow,
keras,
gluon,
xgboost,
lightgbm,
pyspark,
statsmodels,
spark,
sklearn,
fastai,
pytorch,
)
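    # Capture autolog()'s local names (including its keyword arguments) so that the relevant
    # subset can be forwarded to each integration's autolog function below.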
locals_copy = locals().items()
# Mapping of library module name to specific autolog function
    # e.g., mxnet.gluon is the actual library and mlflow.gluon.autolog is our autolog function for it
LIBRARY_TO_AUTOLOG_FN = {
"tensorflow": tensorflow.autolog,
"keras": keras.autolog,
"mxnet.gluon": gluon.autolog,
"xgboost": xgboost.autolog,
"lightgbm": lightgbm.autolog,
"statsmodels": statsmodels.autolog,
"sklearn": sklearn.autolog,
"fastai": fastai.autolog,
"pyspark": spark.autolog,
"pyspark.ml": pyspark.ml.autolog,
# TODO: Broaden this beyond pytorch_lightning as we add autologging support for more
# Pytorch frameworks under mlflow.pytorch.autolog
"pytorch_lightning": pytorch.autolog,
}
CONF_KEY_IS_GLOBALLY_CONFIGURED = "globally_configured"
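    # Select the subset of `mlflow.autolog` arguments that a given integration's autolog
    # function actually accepts, determined by inspecting that function's signature.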
def get_autologging_params(autolog_fn):
try:
needed_params = list(inspect.signature(autolog_fn).parameters.keys())
return {k: v for k, v in locals_copy if k in needed_params}
except Exception:
return {}
def setup_autologging(module):
        autologging_params = {}  # pre-defined so the except-branch below cannot hit a NameError
        try:
autolog_fn = LIBRARY_TO_AUTOLOG_FN[module.__name__]
# Only call integration's autolog function with `mlflow.autolog` configs
# if the integration's autolog function has not already been called by the user.
# Logic is as follows:
# - if a previous_config exists, that means either `mlflow.autolog` or
# `mlflow.integration.autolog` was called.
# - if the config contains `CONF_KEY_IS_GLOBALLY_CONFIGURED`, the configuration
# was set by `mlflow.autolog`, and so we can safely call `autolog_fn` with
# `autologging_params`.
# - if the config doesn't contain this key, the configuration was set by an
# `mlflow.integration.autolog` call, so we should not call `autolog_fn` with
# new configs.
prev_config = AUTOLOGGING_INTEGRATIONS.get(autolog_fn.integration_name)
if prev_config and not prev_config.get(CONF_KEY_IS_GLOBALLY_CONFIGURED, False):
return
autologging_params = get_autologging_params(autolog_fn)
autolog_fn(**autologging_params)
AUTOLOGGING_INTEGRATIONS[autolog_fn.integration_name][
CONF_KEY_IS_GLOBALLY_CONFIGURED
] = True
if not autologging_is_disabled(
autolog_fn.integration_name
) and not autologging_params.get("silent", False):
_logger.info("Autologging successfully enabled for %s.", module.__name__)
except Exception as e:
if is_testing():
# Raise unexpected exceptions in test mode in order to detect
# errors within dependent autologging integrations
raise
elif not autologging_params.get("silent", False):
_logger.warning(
"Exception raised while enabling autologging for %s: %s",
module.__name__,
str(e),
)
# for each autolog library (except pyspark), register a post-import hook.
# this way, we do not send any errors to the user until we know they are using the library.
# the post-import hook also retroactively activates for previously-imported libraries.
for module in list(
set(LIBRARY_TO_AUTOLOG_FN.keys()) - set(["tensorflow", "keras", "pyspark", "pyspark.ml"])
):
register_post_import_hook(setup_autologging, module, overwrite=True)
FULLY_IMPORTED_KERAS = False
TF_AUTOLOG_SETUP_CALLED = False
def conditionally_set_up_keras_autologging(keras_module):
nonlocal FULLY_IMPORTED_KERAS, TF_AUTOLOG_SETUP_CALLED
FULLY_IMPORTED_KERAS = True
if Version(keras_module.__version__) >= Version("2.6.0"):
# NB: Keras unconditionally depends on TensorFlow beginning with Version 2.6.0, and
# many classes defined in the `keras` module are aliases of classes in the `tf.keras`
# module. Accordingly, TensorFlow autologging serves as a replacement for Keras
# autologging in Keras >= 2.6.0
try:
import tensorflow
setup_autologging(tensorflow)
TF_AUTOLOG_SETUP_CALLED = True
except Exception as e:
_logger.debug(
"Failed to set up TensorFlow autologging for tf.keras models upon"
" Keras library import: %s",
str(e),
)
raise
else:
setup_autologging(keras_module)
register_post_import_hook(conditionally_set_up_keras_autologging, "keras", overwrite=True)
def set_up_tensorflow_autologging(tensorflow_module):
import sys
nonlocal FULLY_IMPORTED_KERAS, TF_AUTOLOG_SETUP_CALLED
if "keras" in sys.modules and not FULLY_IMPORTED_KERAS:
# In Keras >= 2.6.0, importing Keras imports the TensorFlow library, which can
# trigger this autologging import hook for TensorFlow before the entire Keras import
# procedure is completed. Attempting to set up autologging before the Keras import
# procedure has completed will result in a failure due to the unavailability of
# certain modules. In this case, we terminate the TensorFlow autologging import hook
# and rely on the Keras autologging import hook to successfully set up TensorFlow
# autologging for tf.keras models once the Keras import procedure has completed
return
# By design, in Keras >= 2.6.0, Keras needs to enable tensorflow autologging so that
# tf.keras models always use tensorflow autologging, rather than vanilla keras autologging.
# As a result, Keras autologging must call `mlflow.tensorflow.autolog()` in Keras >= 2.6.0.
# Accordingly, we insert this check to ensure that importing tensorflow, which may import
# keras, does not enable tensorflow autologging twice.
if not TF_AUTOLOG_SETUP_CALLED:
setup_autologging(tensorflow_module)
register_post_import_hook(set_up_tensorflow_autologging, "tensorflow", overwrite=True)
# for pyspark, we activate autologging immediately, without waiting for a module import.
# this is because on Databricks a SparkSession already exists and the user can directly
# interact with it, and this activity should be logged.
try:
import pyspark as pyspark_module
import pyspark.ml as pyspark_ml_module
setup_autologging(pyspark_module)
setup_autologging(pyspark_ml_module)
except ImportError as ie:
# if pyspark isn't installed, a user could potentially install it in the middle
# of their session so we want to enable autologging once they do
if "pyspark" in str(ie):
register_post_import_hook(setup_autologging, "pyspark", overwrite=True)
register_post_import_hook(setup_autologging, "pyspark.ml", overwrite=True)
except Exception as e:
if is_testing():
# Raise unexpected exceptions in test mode in order to detect
# errors within dependent autologging integrations
raise
else:
_logger.warning("Exception raised while enabling autologging for spark: %s", str(e))
| 39.049536 | 100 | 0.639864 |
793e8b7da49659a2fbffd8ae180d597ce3814940 | 20,365 | py | Python | pandas/tests/series/test_rank.py | sofiane87/pandas | 0de99558b497c5611cbe5d35d504763bd7692275 | ["BSD-3-Clause"] | 2 | 2019-11-13T18:20:29.000Z | 2020-04-18T02:58:39.000Z | pandas/tests/series/methods/test_rank.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | ["BSD-3-Clause"] | null | null | null | pandas/tests/series/methods/test_rank.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | ["BSD-3-Clause"] | 2 | 2019-12-21T21:17:43.000Z | 2019-12-26T10:34:36.000Z | from itertools import chain, product
import numpy as np
import pytest
from pandas._libs.algos import Infinity, NegInfinity
from pandas._libs.tslib import iNaT
import pandas.util._test_decorators as td
from pandas import NaT, Series, Timestamp, date_range
from pandas.api.types import CategoricalDtype
import pandas.util.testing as tm
class TestSeriesRank:
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
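    # Expected ranks of `s` under each tie-breaking method (NaNs keep NaN ranks by default).
    # For example, the two 1s occupy sorted positions 1 and 2: "average" gives both 1.5,
    # "min" gives 1, "max" gives 2, "first" assigns 1 and 2 in order of appearance, and
    # "dense" gives 1 without leaving a gap before the next distinct value.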
results = {
"average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
"min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
"max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
"first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
"dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
}
def test_rank(self, datetime_series):
pytest.importorskip("scipy.stats.special")
rankdata = pytest.importorskip("scipy.stats.rankdata")
datetime_series[::2] = np.nan
datetime_series[:10][::3] = 4.0
ranks = datetime_series.rank()
oranks = datetime_series.astype("O").rank()
tm.assert_series_equal(ranks, oranks)
mask = np.isnan(datetime_series)
filled = datetime_series.fillna(np.inf)
# rankdata returns a ndarray
exp = Series(rankdata(filled), index=filled.index, name="ts")
exp[mask] = np.nan
tm.assert_series_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
tm.assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
rng = date_range("1/1/1990", periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.iloc[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
tm.assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
exp = Series([2, 1, 3, 5, 4, 6.0])
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
# GH 5968
iseries = Series(["3 day", "1 day 10m", "-2 day", NaT], dtype="m8[ns]")
exp = Series([3, 2, 1, np.nan])
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
values = np.array(
[-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40],
dtype="float64",
)
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype="float64")
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
def test_rank_categorical(self):
# GH issue #15420 rank incorrectly orders ordered categories
# Test ascending/descending ranking for ordered categoricals
exp = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
exp_desc = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0])
ordered = Series(
["first", "second", "third", "fourth", "fifth", "sixth"]
).astype(
CategoricalDtype(
categories=["first", "second", "third", "fourth", "fifth", "sixth"],
ordered=True,
)
)
tm.assert_series_equal(ordered.rank(), exp)
tm.assert_series_equal(ordered.rank(ascending=False), exp_desc)
# Unordered categoricals should be ranked as objects
unordered = Series(
["first", "second", "third", "fourth", "fifth", "sixth"]
).astype(
CategoricalDtype(
categories=["first", "second", "third", "fourth", "fifth", "sixth"],
ordered=False,
)
)
exp_unordered = Series([2.0, 4.0, 6.0, 3.0, 1.0, 5.0])
res = unordered.rank()
tm.assert_series_equal(res, exp_unordered)
unordered1 = Series([1, 2, 3, 4, 5, 6]).astype(
CategoricalDtype([1, 2, 3, 4, 5, 6], False)
)
exp_unordered1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
res1 = unordered1.rank()
tm.assert_series_equal(res1, exp_unordered1)
# Test na_option for rank data
na_ser = Series(
["first", "second", "third", "fourth", "fifth", "sixth", np.NaN]
).astype(
CategoricalDtype(
["first", "second", "third", "fourth", "fifth", "sixth", "seventh"],
True,
)
)
exp_top = Series([2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.0])
exp_bot = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
exp_keep = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, np.NaN])
tm.assert_series_equal(na_ser.rank(na_option="top"), exp_top)
tm.assert_series_equal(na_ser.rank(na_option="bottom"), exp_bot)
tm.assert_series_equal(na_ser.rank(na_option="keep"), exp_keep)
# Test na_option for rank data with ascending False
exp_top = Series([7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0])
exp_bot = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 7.0])
exp_keep = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, np.NaN])
tm.assert_series_equal(na_ser.rank(na_option="top", ascending=False), exp_top)
tm.assert_series_equal(
na_ser.rank(na_option="bottom", ascending=False), exp_bot
)
tm.assert_series_equal(na_ser.rank(na_option="keep", ascending=False), exp_keep)
# Test invalid values for na_option
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
na_ser.rank(na_option="bad", ascending=False)
# invalid type
with pytest.raises(ValueError, match=msg):
na_ser.rank(na_option=True, ascending=False)
# Test with pct=True
na_ser = Series(["first", "second", "third", "fourth", np.NaN]).astype(
CategoricalDtype(["first", "second", "third", "fourth"], True)
)
exp_top = Series([0.4, 0.6, 0.8, 1.0, 0.2])
exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.0])
exp_keep = Series([0.25, 0.5, 0.75, 1.0, np.NaN])
tm.assert_series_equal(na_ser.rank(na_option="top", pct=True), exp_top)
tm.assert_series_equal(na_ser.rank(na_option="bottom", pct=True), exp_bot)
tm.assert_series_equal(na_ser.rank(na_option="keep", pct=True), exp_keep)
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method="average")
msg = (
"No axis named average for object type"
" <class 'pandas.core.series.Series'>"
)
with pytest.raises(ValueError, match=msg):
s.rank("average")
@pytest.mark.parametrize(
"contents,dtype",
[
(
[
-np.inf,
-50,
-1,
-1e-20,
-1e-25,
-1e-50,
0,
1e-40,
1e-20,
1e-10,
2,
40,
np.inf,
],
"float64",
),
(
[
-np.inf,
-50,
-1,
-1e-20,
-1e-25,
-1e-45,
0,
1e-40,
1e-20,
1e-10,
2,
40,
np.inf,
],
"float32",
),
([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"),
pytest.param(
[
np.iinfo(np.int64).min,
-100,
0,
1,
9999,
100000,
1e10,
np.iinfo(np.int64).max,
],
"int64",
marks=pytest.mark.xfail(
reason="iNaT is equivalent to minimum value of dtype"
"int64 pending issue GH#16674"
),
),
([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"),
],
)
def test_rank_inf(self, contents, dtype):
dtype_na_map = {
"float64": np.nan,
"float32": np.nan,
"int64": iNaT,
"object": None,
}
# Insert nans at random positions if underlying dtype has missing
        # value. Then adjust the expected order by adding nans accordingly.
        # This is for testing whether the rank calculation is affected
        # when values are intertwined with nan values.
values = np.array(contents, dtype=dtype)
exp_order = np.array(range(len(values)), dtype="float64") + 1.0
if dtype in dtype_na_map:
na_value = dtype_na_map[dtype]
nan_indices = np.random.choice(range(len(values)), 5)
values = np.insert(values, nan_indices, na_value)
exp_order = np.insert(exp_order, nan_indices, np.nan)
# shuffle the testing array and expected results in the same way
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(exp_order[random_order], dtype="float64")
iranks = iseries.rank()
tm.assert_series_equal(iranks, exp)
def test_rank_tie_methods(self):
s = self.s
def _check(s, expected, method="average"):
result = s.rank(method=method)
tm.assert_series_equal(result, Series(expected))
dtypes = [None, object]
disabled = {(object, "first")}
results = self.results
for method, dtype in product(results, dtypes):
if (dtype, method) in disabled:
continue
series = s if dtype is None else s.astype(dtype)
_check(series, results[method], method=method)
@td.skip_if_no_scipy
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"])
@pytest.mark.parametrize("na_option", ["top", "bottom", "keep"])
def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending):
dtypes = [
("object", None, Infinity(), NegInfinity()),
("float64", np.nan, np.inf, -np.inf),
]
chunk = 3
disabled = {("object", "first")}
def _check(s, method, na_option, ascending):
exp_ranks = {
"average": ([2, 2, 2], [5, 5, 5], [8, 8, 8]),
"min": ([1, 1, 1], [4, 4, 4], [7, 7, 7]),
"max": ([3, 3, 3], [6, 6, 6], [9, 9, 9]),
"first": ([1, 2, 3], [4, 5, 6], [7, 8, 9]),
"dense": ([1, 1, 1], [2, 2, 2], [3, 3, 3]),
}
ranks = exp_ranks[method]
if na_option == "top":
order = [ranks[1], ranks[0], ranks[2]]
elif na_option == "bottom":
order = [ranks[0], ranks[2], ranks[1]]
else:
order = [ranks[0], [np.nan] * chunk, ranks[1]]
expected = order if ascending else order[::-1]
expected = list(chain.from_iterable(expected))
result = s.rank(method=method, na_option=na_option, ascending=ascending)
tm.assert_series_equal(result, Series(expected, dtype="float64"))
for dtype, na_value, pos_inf, neg_inf in dtypes:
in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk
iseries = Series(in_arr, dtype=dtype)
if (dtype, method) in disabled:
continue
_check(iseries, method, na_option, ascending)
def test_rank_desc_mix_nans_infs(self):
# GH 19538
# check descending ranking when mix nans and infs
iseries = Series([1, np.nan, np.inf, -np.inf, 25])
result = iseries.rank(ascending=False)
exp = Series([3, np.nan, 1, 4, 2], dtype="float64")
tm.assert_series_equal(result, exp)
def test_rank_methods_series(self):
pytest.importorskip("scipy.stats.special")
rankdata = pytest.importorskip("scipy.stats.rankdata")
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
np.random.shuffle(xs)
index = [chr(ord("a") + i) for i in range(len(xs))]
for vals in [xs, xs + 1e6, xs * 1e-6]:
ts = Series(vals, index=index)
for m in ["average", "min", "max", "first", "dense"]:
result = ts.rank(method=m)
sprank = rankdata(vals, m if m != "first" else "ordinal")
expected = Series(sprank, index=index).astype("float64")
tm.assert_series_equal(result, expected)
def test_rank_dense_method(self):
dtypes = ["O", "f8", "i8"]
in_out = [
([1], [1]),
([2], [1]),
([0], [1]),
([2, 2], [1, 1]),
([1, 2, 3], [1, 2, 3]),
([4, 2, 1], [3, 2, 1]),
([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),
([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5]),
]
for ser, exp in in_out:
for dtype in dtypes:
s = Series(ser).astype(dtype)
result = s.rank(method="dense")
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
def test_rank_descending(self):
dtypes = ["O", "f8", "i8"]
for dtype, method in product(dtypes, self.results):
if "i" in dtype:
s = self.s.dropna()
else:
s = self.s.astype(dtype)
res = s.rank(ascending=False)
expected = (s.max() - s).rank()
tm.assert_series_equal(res, expected)
if method == "first" and dtype == "O":
continue
expected = (s.max() - s).rank(method=method)
res2 = s.rank(method=method, ascending=False)
tm.assert_series_equal(res2, expected)
def test_rank_int(self):
s = self.s.dropna().astype("i8")
for method, res in self.results.items():
result = s.rank(method=method)
expected = Series(res).dropna()
expected.index = result.index
tm.assert_series_equal(result, expected)
def test_rank_object_bug(self):
# GH 13445
# smoke tests
Series([np.nan] * 32).astype(object).rank(ascending=True)
Series([np.nan] * 32).astype(object).rank(ascending=False)
def test_rank_modify_inplace(self):
# GH 18521
# Check rank does not mutate series
s = Series([Timestamp("2017-01-05 10:20:27.569000"), NaT])
expected = s.copy()
s.rank()
result = s
tm.assert_series_equal(result, expected)
# GH15630, pct should be on 100% basis when method='dense'
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0, 1.0]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 2, 2.0 / 2, 2.0 / 2]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.0 / 3, 1.0 / 3, 3.0 / 3, 3.0 / 3, 2.0 / 3]),
([1, 1, 3, 3, 5, 5], [1.0 / 3, 1.0 / 3, 2.0 / 3, 2.0 / 3, 3.0 / 3, 3.0 / 3]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_dense_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="dense", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
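# Illustrative sketch (editor addition, not one of the original pandas tests):
# per GH15630 above, with method="dense" and pct=True the dense rank of each
# value is divided by the number of *distinct* values, so the largest value
# always maps to 1.0.
def _dense_pct_example():
    # [1, 2, 2] has dense ranks [1, 2, 2] and two distinct values,
    # hence percentiles [0.5, 1.0, 1.0].
    s = Series([1, 2, 2])
    result = s.rank(method="dense", pct=True)
    expected = Series([0.5, 1.0, 1.0])
    tm.assert_series_equal(result, expected)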
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0 / 2, 1.0 / 2]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 2.0 / 3, 2.0 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.0 / 5, 1.0 / 5, 4.0 / 5, 4.0 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [1.0 / 6, 1.0 / 6, 3.0 / 6, 3.0 / 6, 5.0 / 6, 5.0 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_min_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="min", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0, 1.0]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 3.0 / 3, 3.0 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [2.0 / 5, 2.0 / 5, 5.0 / 5, 5.0 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [2.0 / 6, 2.0 / 6, 4.0 / 6, 4.0 / 6, 6.0 / 6, 6.0 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_max_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="max", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.5 / 2, 1.5 / 2]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 2.5 / 3, 2.5 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.5 / 5, 1.5 / 5, 4.5 / 5, 4.5 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [1.5 / 6, 1.5 / 6, 3.5 / 6, 3.5 / 6, 5.5 / 6, 5.5 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_average_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="average", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8"])
@pytest.mark.parametrize(
"ser, exp",
[
([1], [1.0]),
([1, 2], [1.0 / 2, 2.0 / 2]),
([2, 2], [1.0 / 2, 2.0 / 2.0]),
([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([1, 2, 2], [1.0 / 3, 2.0 / 3, 3.0 / 3]),
([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),
([1, 1, 5, 5, 3], [1.0 / 5, 2.0 / 5, 4.0 / 5, 5.0 / 5, 3.0 / 5]),
([1, 1, 3, 3, 5, 5], [1.0 / 6, 2.0 / 6, 3.0 / 6, 4.0 / 6, 5.0 / 6, 6.0 / 6]),
([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),
],
)
def test_rank_first_pct(dtype, ser, exp):
s = Series(ser).astype(dtype)
result = s.rank(method="first", pct=True)
expected = Series(exp).astype(result.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.single
@pytest.mark.high_memory
def test_pct_max_many_rows():
# GH 18271
s = Series(np.arange(2 ** 24 + 1))
result = s.rank(pct=True).max()
assert result == 1
| 35.917108 | 88 | 0.497569 |
793e8be004569a9671c3ba4ae8fa239d5feadcb1 | 35,336 | py | Python | homeassistant/config_entries.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | ["Apache-2.0"] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/config_entries.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | ["Apache-2.0"] | 7 | 2016-04-09T20:56:30.000Z | 2016-04-19T21:28:46.000Z | homeassistant/config_entries.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | ["Apache-2.0"] | 2 | 2019-07-05T17:46:08.000Z | 2021-04-25T21:21:02.000Z | """Manage config entries in Home Assistant."""
import asyncio
import functools
import logging
from typing import Any, Callable, Dict, List, Optional, Set, Union, cast
import uuid
import weakref
import attr
from homeassistant import data_entry_flow, loader
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
from homeassistant.helpers import entity_registry
from homeassistant.helpers.event import Event
from homeassistant.setup import async_process_deps_reqs, async_setup_component
from homeassistant.util.decorator import Registry
_LOGGER = logging.getLogger(__name__)
_UNDEF: dict = {}
SOURCE_DISCOVERY = "discovery"
SOURCE_IMPORT = "import"
SOURCE_SSDP = "ssdp"
SOURCE_USER = "user"
SOURCE_ZEROCONF = "zeroconf"
# If a user wants to hide a discovery from the UI they can "Ignore" it. The config_entries/ignore_flow
# websocket command creates a config entry with this source and, while that entry exists, normal
# discoveries with the same unique id are ignored.
SOURCE_IGNORE = "ignore"
# This is used when a user uses the "Stop Ignoring" button in the UI (the
# config_entries/ignore_flow websocket command). It's triggered after the "ignore" config entry has
# been removed and unloaded.
SOURCE_UNIGNORE = "unignore"
HANDLERS = Registry()
STORAGE_KEY = "core.config_entries"
STORAGE_VERSION = 1
# Deprecated since 0.73
PATH_CONFIG = ".config_entries.json"
SAVE_DELAY = 1
# The config entry has been set up successfully
ENTRY_STATE_LOADED = "loaded"
# There was an error while trying to set up this config entry
ENTRY_STATE_SETUP_ERROR = "setup_error"
# There was an error while trying to migrate the config entry to a new version
ENTRY_STATE_MIGRATION_ERROR = "migration_error"
# The config entry was not ready to be set up yet, but might be later
ENTRY_STATE_SETUP_RETRY = "setup_retry"
# The config entry has not been loaded
ENTRY_STATE_NOT_LOADED = "not_loaded"
# An error occurred when trying to unload the entry
ENTRY_STATE_FAILED_UNLOAD = "failed_unload"
UNRECOVERABLE_STATES = (ENTRY_STATE_MIGRATION_ERROR, ENTRY_STATE_FAILED_UNLOAD)
DISCOVERY_NOTIFICATION_ID = "config_entry_discovery"
DISCOVERY_SOURCES = (
SOURCE_SSDP,
SOURCE_ZEROCONF,
SOURCE_DISCOVERY,
SOURCE_IMPORT,
SOURCE_UNIGNORE,
)
EVENT_FLOW_DISCOVERED = "config_entry_discovered"
CONN_CLASS_CLOUD_PUSH = "cloud_push"
CONN_CLASS_CLOUD_POLL = "cloud_poll"
CONN_CLASS_LOCAL_PUSH = "local_push"
CONN_CLASS_LOCAL_POLL = "local_poll"
CONN_CLASS_ASSUMED = "assumed"
CONN_CLASS_UNKNOWN = "unknown"
class ConfigError(HomeAssistantError):
"""Error while configuring an account."""
class UnknownEntry(ConfigError):
"""Unknown entry specified."""
class OperationNotAllowed(ConfigError):
"""Raised when a config entry operation is not allowed."""
class ConfigEntry:
"""Hold a configuration entry."""
__slots__ = (
"entry_id",
"version",
"domain",
"title",
"data",
"options",
"unique_id",
"system_options",
"source",
"connection_class",
"state",
"_setup_lock",
"update_listeners",
"_async_cancel_retry_setup",
)
def __init__(
self,
version: int,
domain: str,
title: str,
data: dict,
source: str,
connection_class: str,
system_options: dict,
options: Optional[dict] = None,
unique_id: Optional[str] = None,
entry_id: Optional[str] = None,
state: str = ENTRY_STATE_NOT_LOADED,
) -> None:
"""Initialize a config entry."""
# Unique id of the config entry
self.entry_id = entry_id or uuid.uuid4().hex
# Version of the configuration.
self.version = version
# Domain the configuration belongs to
self.domain = domain
# Title of the configuration
self.title = title
# Config data
self.data = data
# Entry options
self.options = options or {}
# Entry system options
self.system_options = SystemOptions(**system_options)
# Source of the configuration (user, discovery, cloud)
self.source = source
# Connection class
self.connection_class = connection_class
# State of the entry (LOADED, NOT_LOADED)
self.state = state
# Unique ID of this entry.
self.unique_id = unique_id
# Listeners to call on update
self.update_listeners: List = []
# Function to cancel a scheduled retry
self._async_cancel_retry_setup: Optional[Callable[[], Any]] = None
async def async_setup(
self,
hass: HomeAssistant,
*,
integration: Optional[loader.Integration] = None,
tries: int = 0,
) -> None:
"""Set up an entry."""
if self.source == SOURCE_IGNORE:
return
if integration is None:
integration = await loader.async_get_integration(hass, self.domain)
try:
component = integration.get_component()
except ImportError as err:
_LOGGER.error(
"Error importing integration %s to set up %s config entry: %s",
integration.domain,
self.domain,
err,
)
if self.domain == integration.domain:
self.state = ENTRY_STATE_SETUP_ERROR
return
if self.domain == integration.domain:
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error importing platform config_flow from integration %s to set up %s config entry: %s",
integration.domain,
self.domain,
err,
)
self.state = ENTRY_STATE_SETUP_ERROR
return
# Perform migration
if not await self.async_migrate(hass):
self.state = ENTRY_STATE_MIGRATION_ERROR
return
try:
result = await component.async_setup_entry( # type: ignore
hass, self
)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_setup_entry did not return boolean", integration.domain
)
result = False
except ConfigEntryNotReady:
self.state = ENTRY_STATE_SETUP_RETRY
wait_time = 2 ** min(tries, 4) * 5
tries += 1
_LOGGER.warning(
"Config entry for %s not ready yet. Retrying in %d seconds.",
self.domain,
wait_time,
)
async def setup_again(now: Any) -> None:
"""Run setup again."""
self._async_cancel_retry_setup = None
await self.async_setup(hass, integration=integration, tries=tries)
self._async_cancel_retry_setup = hass.helpers.event.async_call_later(
wait_time, setup_again
)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
result = False
# Only store setup result as state if it was not forwarded.
if self.domain != integration.domain:
return
if result:
self.state = ENTRY_STATE_LOADED
else:
self.state = ENTRY_STATE_SETUP_ERROR
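    # Retry backoff sketch (illustrative comment): with
    # wait_time = 2 ** min(tries, 4) * 5, consecutive ConfigEntryNotReady
    # retries are scheduled roughly as
    #   tries:      0   1    2    3    4    5  ...
    #   wait_time:  5s  10s  20s  40s  80s  80s (capped)
    # i.e. exponential backoff capped at 80 seconds between attempts.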
async def async_unload(
self, hass: HomeAssistant, *, integration: Optional[loader.Integration] = None
) -> bool:
"""Unload an entry.
        Return True if the unload was possible and successful.
"""
if self.source == SOURCE_IGNORE:
self.state = ENTRY_STATE_NOT_LOADED
return True
if integration is None:
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
if integration.domain == self.domain:
if self.state in UNRECOVERABLE_STATES:
return False
if self.state != ENTRY_STATE_LOADED:
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
self.state = ENTRY_STATE_NOT_LOADED
return True
supports_unload = hasattr(component, "async_unload_entry")
if not supports_unload:
if integration.domain == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
try:
result = await component.async_unload_entry( # type: ignore
hass, self
)
assert isinstance(result, bool)
# Only adjust state if we unloaded the component
if result and integration.domain == self.domain:
self.state = ENTRY_STATE_NOT_LOADED
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error unloading entry %s for %s", self.title, integration.domain
)
if integration.domain == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
async def async_remove(self, hass: HomeAssistant) -> None:
"""Invoke remove callback on component."""
if self.source == SOURCE_IGNORE:
return
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
if not hasattr(component, "async_remove_entry"):
return
try:
await component.async_remove_entry( # type: ignore
hass, self
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error calling entry remove callback %s for %s",
self.title,
integration.domain,
)
async def async_migrate(self, hass: HomeAssistant) -> bool:
"""Migrate an entry.
Returns True if config entry is up-to-date or has been migrated.
"""
handler = HANDLERS.get(self.domain)
if handler is None:
_LOGGER.error(
"Flow handler not found for entry %s for %s", self.title, self.domain
)
return False
# Handler may be a partial
while isinstance(handler, functools.partial):
handler = handler.func
if self.version == handler.VERSION:
return True
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
supports_migrate = hasattr(component, "async_migrate_entry")
if not supports_migrate:
_LOGGER.error(
"Migration handler not found for entry %s for %s",
self.title,
self.domain,
)
return False
try:
result = await component.async_migrate_entry( # type: ignore
hass, self
)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_migrate_entry did not return boolean", self.domain
)
return False
if result:
# pylint: disable=protected-access
hass.config_entries._async_schedule_save()
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error migrating entry %s for %s", self.title, self.domain
)
return False
def add_update_listener(self, listener: Callable) -> Callable:
"""Listen for when entry is updated.
Listener: Callback function(hass, entry)
Returns function to unlisten.
"""
weak_listener = weakref.ref(listener)
self.update_listeners.append(weak_listener)
return lambda: self.update_listeners.remove(weak_listener)
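    # Usage sketch (illustrative comment; the integration code below is
    # hypothetical): because only a weak reference to the listener is kept,
    # integrations typically pass a module-level coroutine function.
    #
    #     async def _async_update_listener(hass, entry):
    #         await hass.config_entries.async_reload(entry.entry_id)
    #
    #     async def async_setup_entry(hass, entry):
    #         unsub = entry.add_update_listener(_async_update_listener)
    #         ...
    #         return True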
def as_dict(self) -> Dict[str, Any]:
"""Return dictionary version of this entry."""
return {
"entry_id": self.entry_id,
"version": self.version,
"domain": self.domain,
"title": self.title,
"data": self.data,
"options": self.options,
"system_options": self.system_options.as_dict(),
"source": self.source,
"connection_class": self.connection_class,
"unique_id": self.unique_id,
}
class ConfigEntriesFlowManager(data_entry_flow.FlowManager):
"""Manage all the config entry flows that are in progress."""
def __init__(
self, hass: HomeAssistant, config_entries: "ConfigEntries", hass_config: dict
):
"""Initialize the config entry flow manager."""
super().__init__(hass)
self.config_entries = config_entries
self._hass_config = hass_config
async def async_finish_flow(
self, flow: data_entry_flow.FlowHandler, result: Dict[str, Any]
) -> Dict[str, Any]:
"""Finish a config flow and add an entry."""
flow = cast(ConfigFlow, flow)
# Remove notification if no other discovery config entries in progress
if not any(
ent["context"]["source"] in DISCOVERY_SOURCES
for ent in self.hass.config_entries.flow.async_progress()
if ent["flow_id"] != flow.flow_id
):
self.hass.components.persistent_notification.async_dismiss(
DISCOVERY_NOTIFICATION_ID
)
if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
return result
# Check if config entry exists with unique ID. Unload it.
existing_entry = None
if flow.unique_id is not None:
# Abort all flows in progress with same unique ID.
for progress_flow in self.async_progress():
if (
progress_flow["handler"] == flow.handler
and progress_flow["flow_id"] != flow.flow_id
and progress_flow["context"].get("unique_id") == flow.unique_id
):
self.async_abort(progress_flow["flow_id"])
# Find existing entry.
for check_entry in self.config_entries.async_entries(result["handler"]):
if check_entry.unique_id == flow.unique_id:
existing_entry = check_entry
break
# Unload the entry before setting up the new one.
# We will remove it only after the other one is set up,
# so that device customizations are not getting lost.
if (
existing_entry is not None
and existing_entry.state not in UNRECOVERABLE_STATES
):
await self.config_entries.async_unload(existing_entry.entry_id)
entry = ConfigEntry(
version=result["version"],
domain=result["handler"],
title=result["title"],
data=result["data"],
options={},
system_options={},
source=flow.context["source"],
connection_class=flow.CONNECTION_CLASS,
unique_id=flow.unique_id,
)
await self.config_entries.async_add(entry)
if existing_entry is not None:
await self.config_entries.async_remove(existing_entry.entry_id)
result["result"] = entry
return result
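    # Flow-of-control sketch (illustrative comment): when a finished flow has a
    # unique_id matching an existing entry, the manager aborts other in-progress
    # flows with that unique_id, unloads the existing entry, adds and sets up
    # the new one, and only then removes the old entry so device and entity
    # customizations carry over.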
async def async_create_flow(
self, handler_key: Any, *, context: Optional[Dict] = None, data: Any = None
) -> "ConfigFlow":
"""Create a flow for specified handler.
Handler key is the domain of the component that we want to set up.
"""
try:
integration = await loader.async_get_integration(self.hass, handler_key)
except loader.IntegrationNotFound:
_LOGGER.error("Cannot find integration %s", handler_key)
raise data_entry_flow.UnknownHandler
# Make sure requirements and dependencies of component are resolved
await async_process_deps_reqs(self.hass, self._hass_config, integration)
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error occurred loading config flow for integration %s: %s",
handler_key,
err,
)
raise data_entry_flow.UnknownHandler
handler = HANDLERS.get(handler_key)
if handler is None:
raise data_entry_flow.UnknownHandler
if not context or "source" not in context:
raise KeyError("Context not set or doesn't have a source set")
flow = cast(ConfigFlow, handler())
flow.init_step = context["source"]
return flow
async def async_post_init(
self, flow: data_entry_flow.FlowHandler, result: dict
) -> None:
"""After a flow is initialised trigger new flow notifications."""
source = flow.context["source"]
# Create notification.
if source in DISCOVERY_SOURCES:
self.hass.bus.async_fire(EVENT_FLOW_DISCOVERED)
self.hass.components.persistent_notification.async_create(
title="New devices discovered",
message=(
"We have discovered new devices on your network. "
"[Check it out](/config/integrations)"
),
notification_id=DISCOVERY_NOTIFICATION_ID,
)
class ConfigEntries:
"""Manage the configuration entries.
An instance of this object is available via `hass.config_entries`.
"""
def __init__(self, hass: HomeAssistant, hass_config: dict) -> None:
"""Initialize the entry manager."""
self.hass = hass
self.flow = ConfigEntriesFlowManager(hass, self, hass_config)
self.options = OptionsFlowManager(hass)
self._hass_config = hass_config
self._entries: List[ConfigEntry] = []
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
EntityRegistryDisabledHandler(hass).async_setup()
@callback
def async_domains(self) -> List[str]:
"""Return domains for which we have entries."""
seen: Set[str] = set()
result = []
for entry in self._entries:
if entry.domain not in seen:
seen.add(entry.domain)
result.append(entry.domain)
return result
@callback
def async_get_entry(self, entry_id: str) -> Optional[ConfigEntry]:
"""Return entry with matching entry_id."""
for entry in self._entries:
if entry_id == entry.entry_id:
return entry
return None
@callback
def async_entries(self, domain: Optional[str] = None) -> List[ConfigEntry]:
"""Return all entries or entries for a specific domain."""
if domain is None:
return list(self._entries)
return [entry for entry in self._entries if entry.domain == domain]
async def async_add(self, entry: ConfigEntry) -> None:
"""Add and setup an entry."""
self._entries.append(entry)
await self.async_setup(entry.entry_id)
self._async_schedule_save()
async def async_remove(self, entry_id: str) -> Dict[str, Any]:
"""Remove an entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
unload_success = entry.state != ENTRY_STATE_FAILED_UNLOAD
else:
unload_success = await self.async_unload(entry_id)
await entry.async_remove(self.hass)
self._entries.remove(entry)
self._async_schedule_save()
dev_reg, ent_reg = await asyncio.gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
)
dev_reg.async_clear_config_entry(entry_id)
ent_reg.async_clear_config_entry(entry_id)
# After we have fully removed an "ignore" config entry we can try and rediscover it so that a
# user is able to immediately start configuring it. We do this by starting a new flow with
# the 'unignore' step. If the integration doesn't implement async_step_unignore then
# this will be a no-op.
if entry.source == SOURCE_IGNORE:
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
entry.domain,
context={"source": SOURCE_UNIGNORE},
data={"unique_id": entry.unique_id},
)
)
return {"require_restart": not unload_success}
async def async_initialize(self) -> None:
"""Initialize config entry config."""
# Migrating for config entries stored before 0.73
config = await self.hass.helpers.storage.async_migrator(
self.hass.config.path(PATH_CONFIG),
self._store,
old_conf_migrate_func=_old_conf_migrator,
)
if config is None:
self._entries = []
return
self._entries = [
ConfigEntry(
version=entry["version"],
domain=entry["domain"],
entry_id=entry["entry_id"],
data=entry["data"],
source=entry["source"],
title=entry["title"],
# New in 0.79
connection_class=entry.get("connection_class", CONN_CLASS_UNKNOWN),
# New in 0.89
options=entry.get("options"),
# New in 0.98
system_options=entry.get("system_options", {}),
# New in 0.104
unique_id=entry.get("unique_id"),
)
for entry in config["entries"]
]
async def async_setup(self, entry_id: str) -> bool:
"""Set up a config entry.
Return True if entry has been successfully loaded.
"""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state != ENTRY_STATE_NOT_LOADED:
raise OperationNotAllowed
# Setup Component if not set up yet
if entry.domain in self.hass.config.components:
await entry.async_setup(self.hass)
else:
# Setting up the component will set up all its config entries
result = await async_setup_component(
self.hass, entry.domain, self._hass_config
)
if not result:
return result
return entry.state == ENTRY_STATE_LOADED
async def async_unload(self, entry_id: str) -> bool:
"""Unload a config entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
raise OperationNotAllowed
return await entry.async_unload(self.hass)
async def async_reload(self, entry_id: str) -> bool:
"""Reload an entry.
If an entry was not loaded, will just load.
"""
unload_result = await self.async_unload(entry_id)
if not unload_result:
return unload_result
return await self.async_setup(entry_id)
@callback
def async_update_entry(
self,
entry: ConfigEntry,
*,
unique_id: Union[str, dict, None] = _UNDEF,
data: dict = _UNDEF,
options: dict = _UNDEF,
system_options: dict = _UNDEF,
) -> None:
"""Update a config entry."""
if unique_id is not _UNDEF:
entry.unique_id = cast(Optional[str], unique_id)
if data is not _UNDEF:
entry.data = data
if options is not _UNDEF:
entry.options = options
if system_options is not _UNDEF:
entry.system_options.update(**system_options)
for listener_ref in entry.update_listeners:
listener = listener_ref()
self.hass.async_create_task(listener(self.hass, entry))
self._async_schedule_save()
async def async_forward_entry_setup(self, entry: ConfigEntry, domain: str) -> bool:
"""Forward the setup of an entry to a different component.
        By default an entry is set up with the component it belongs to. If that
        component also has related platforms, the component will have to
        forward the entry to be set up by that component.
You don't want to await this coroutine if it is called as part of the
setup of a component, because it can cause a deadlock.
"""
# Setup Component if not set up yet
if domain not in self.hass.config.components:
result = await async_setup_component(self.hass, domain, self._hass_config)
if not result:
return False
integration = await loader.async_get_integration(self.hass, domain)
await entry.async_setup(self.hass, integration=integration)
return True
async def async_forward_entry_unload(self, entry: ConfigEntry, domain: str) -> bool:
"""Forward the unloading of an entry to a different component."""
# It was never loaded.
if domain not in self.hass.config.components:
return True
integration = await loader.async_get_integration(self.hass, domain)
return await entry.async_unload(self.hass, integration=integration)
def _async_schedule_save(self) -> None:
"""Save the entity registry to a file."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> Dict[str, List[Dict[str, Any]]]:
"""Return data to save."""
return {"entries": [entry.as_dict() for entry in self._entries]}
async def _old_conf_migrator(old_config: Dict[str, Any]) -> Dict[str, Any]:
"""Migrate the pre-0.73 config format to the latest version."""
return {"entries": old_config}
class ConfigFlow(data_entry_flow.FlowHandler):
"""Base class for config flows with some helpers."""
def __init_subclass__(cls, domain: Optional[str] = None, **kwargs: Any) -> None:
"""Initialize a subclass, register if possible."""
super().__init_subclass__(**kwargs) # type: ignore
if domain is not None:
HANDLERS.register(domain)(cls)
CONNECTION_CLASS = CONN_CLASS_UNKNOWN
@property
def unique_id(self) -> Optional[str]:
"""Return unique ID if available."""
# pylint: disable=no-member
if not self.context:
return None
return cast(Optional[str], self.context.get("unique_id"))
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> "OptionsFlow":
"""Get the options flow for this handler."""
raise data_entry_flow.UnknownHandler
@callback
def _abort_if_unique_id_configured(self, updates: Dict[Any, Any] = None) -> None:
"""Abort if the unique ID is already configured."""
assert self.hass
if self.unique_id is None:
return
for entry in self._async_current_entries():
if entry.unique_id == self.unique_id:
if updates is not None and not updates.items() <= entry.data.items():
self.hass.config_entries.async_update_entry(
entry, data={**entry.data, **updates}
)
raise data_entry_flow.AbortFlow("already_configured")
async def async_set_unique_id(
self, unique_id: str, *, raise_on_progress: bool = True
) -> Optional[ConfigEntry]:
"""Set a unique ID for the config flow.
Returns optionally existing config entry with same ID.
"""
if raise_on_progress:
for progress in self._async_in_progress():
if progress["context"].get("unique_id") == unique_id:
raise data_entry_flow.AbortFlow("already_in_progress")
# pylint: disable=no-member
self.context["unique_id"] = unique_id
for entry in self._async_current_entries():
if entry.unique_id == unique_id:
return entry
return None
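    # Typical usage sketch (illustrative comment; the step below is
    # hypothetical integration code): a flow step claims a unique ID first and
    # then aborts if an entry with that ID is already configured.
    #
    #     async def async_step_user(self, user_input=None):
    #         ...
    #         await self.async_set_unique_id(device_serial)
    #         self._abort_if_unique_id_configured()
    #         return self.async_create_entry(title=device_name, data=data)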
@callback
def _async_current_entries(self) -> List[ConfigEntry]:
"""Return current entries."""
assert self.hass is not None
return self.hass.config_entries.async_entries(self.handler)
@callback
def _async_current_ids(self, include_ignore: bool = True) -> Set[Optional[str]]:
"""Return current unique IDs."""
assert self.hass is not None
return set(
entry.unique_id
for entry in self.hass.config_entries.async_entries(self.handler)
if include_ignore or entry.source != SOURCE_IGNORE
)
@callback
def _async_in_progress(self) -> List[Dict]:
"""Return other in progress flows for current domain."""
assert self.hass is not None
return [
flw
for flw in self.hass.config_entries.flow.async_progress()
if flw["handler"] == self.handler and flw["flow_id"] != self.flow_id
]
async def async_step_ignore(self, user_input: Dict[str, Any]) -> Dict[str, Any]:
"""Ignore this config flow."""
await self.async_set_unique_id(user_input["unique_id"], raise_on_progress=False)
return self.async_create_entry(title="Ignored", data={})
async def async_step_unignore(self, user_input: Dict[str, Any]) -> Dict[str, Any]:
"""Rediscover a config entry by it's unique_id."""
return self.async_abort(reason="not_implemented")
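# Illustrative comment (hypothetical integration code): a config flow that
# wants the "Stop Ignoring" button to trigger rediscovery can override
# async_step_unignore; the unignore flow is started with the unique_id of the
# removed "ignore" entry (see ConfigEntries.async_remove above).
#
#     async def async_step_unignore(self, user_input):
#         unique_id = user_input["unique_id"]
#         await self.async_set_unique_id(unique_id)
#         ...  # look the device up again and continue as a fresh discovery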
class OptionsFlowManager(data_entry_flow.FlowManager):
"""Flow to set options for a configuration entry."""
async def async_create_flow(
self,
handler_key: Any,
*,
context: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None,
) -> "OptionsFlow":
"""Create an options flow for a config entry.
        The entry_id and flow.handler are the same thing, used to map the entry to its flow.
"""
entry = self.hass.config_entries.async_get_entry(handler_key)
if entry is None:
raise UnknownEntry(handler_key)
if entry.domain not in HANDLERS:
raise data_entry_flow.UnknownHandler
flow = cast(OptionsFlow, HANDLERS[entry.domain].async_get_options_flow(entry))
return flow
async def async_finish_flow(
self, flow: data_entry_flow.FlowHandler, result: Dict[str, Any]
) -> Dict[str, Any]:
"""Finish an options flow and update options for configuration entry.
        The flow.handler and entry_id are the same thing, used to map the flow to its entry.
"""
flow = cast(OptionsFlow, flow)
entry = self.hass.config_entries.async_get_entry(flow.handler)
if entry is None:
raise UnknownEntry(flow.handler)
self.hass.config_entries.async_update_entry(entry, options=result["data"])
result["result"] = True
return result
class OptionsFlow(data_entry_flow.FlowHandler):
"""Base class for config option flows."""
handler: str
@attr.s(slots=True)
class SystemOptions:
"""Config entry system options."""
disable_new_entities = attr.ib(type=bool, default=False)
def update(self, *, disable_new_entities: bool) -> None:
"""Update properties."""
self.disable_new_entities = disable_new_entities
def as_dict(self) -> Dict[str, Any]:
"""Return dictionary version of this config entrys system options."""
return {"disable_new_entities": self.disable_new_entities}
class EntityRegistryDisabledHandler:
"""Handler to handle when entities related to config entries updating disabled_by."""
RELOAD_AFTER_UPDATE_DELAY = 30
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the handler."""
self.hass = hass
self.registry: Optional[entity_registry.EntityRegistry] = None
self.changed: Set[str] = set()
self._remove_call_later: Optional[Callable[[], None]] = None
@callback
def async_setup(self) -> None:
"""Set up the disable handler."""
self.hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, self._handle_entry_updated
)
async def _handle_entry_updated(self, event: Event) -> None:
"""Handle entity registry entry update."""
if (
event.data["action"] != "update"
or "disabled_by" not in event.data["changes"]
):
return
if self.registry is None:
self.registry = await entity_registry.async_get_registry(self.hass)
entity_entry = self.registry.async_get(event.data["entity_id"])
if (
# Stop if no entry found
entity_entry is None
# Stop if entry not connected to config entry
or entity_entry.config_entry_id is None
# Stop if the entry got disabled. In that case the entity handles it
# themselves.
or entity_entry.disabled_by
):
return
config_entry = self.hass.config_entries.async_get_entry(
entity_entry.config_entry_id
)
assert config_entry is not None
if config_entry.entry_id not in self.changed and await support_entry_unload(
self.hass, config_entry.domain
):
self.changed.add(config_entry.entry_id)
if not self.changed:
return
# We are going to delay reloading on *every* entity registry change so that
# if a user is happily clicking along, it will only reload at the end.
if self._remove_call_later:
self._remove_call_later()
self._remove_call_later = self.hass.helpers.event.async_call_later(
self.RELOAD_AFTER_UPDATE_DELAY, self._handle_reload
)
async def _handle_reload(self, _now: Any) -> None:
"""Handle a reload."""
self._remove_call_later = None
to_reload = self.changed
self.changed = set()
_LOGGER.info(
"Reloading config entries because disabled_by changed in entity registry: %s",
", ".join(self.changed),
)
await asyncio.gather(
*[self.hass.config_entries.async_reload(entry_id) for entry_id in to_reload]
)
async def support_entry_unload(hass: HomeAssistant, domain: str) -> bool:
"""Test if a domain supports entry unloading."""
integration = await loader.async_get_integration(hass, domain)
component = integration.get_component()
return hasattr(component, "async_unload_entry")
| 33.944284 | 109 | 0.615123 |
793e8d20de7e33f68d878ebad87ea2a818be1374 | 31,858 | py | Python | Lib/test/test_exception_group.py | ErikBjare/cpython | b68431fadb3150134ac6ccbf501cdfeaf4c75678 | ["0BSD"] | 5 | 2021-12-03T23:11:53.000Z | 2022-01-08T21:02:50.000Z | Lib/test/test_exception_group.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | ["0BSD"] | 4 | 2021-12-01T14:06:09.000Z | 2022-03-24T21:55:25.000Z | Lib/test/test_exception_group.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | ["0BSD"] | 1 | 2022-03-24T19:52:47.000Z | 2022-03-24T19:52:47.000Z | import collections.abc
import traceback
import types
import unittest
class TestExceptionGroupTypeHierarchy(unittest.TestCase):
def test_exception_group_types(self):
self.assertTrue(issubclass(ExceptionGroup, Exception))
self.assertTrue(issubclass(ExceptionGroup, BaseExceptionGroup))
self.assertTrue(issubclass(BaseExceptionGroup, BaseException))
def test_exception_is_not_generic_type(self):
with self.assertRaisesRegex(TypeError, 'Exception'):
Exception[OSError]
def test_exception_group_is_generic_type(self):
E = OSError
self.assertIsInstance(ExceptionGroup[E], types.GenericAlias)
self.assertIsInstance(BaseExceptionGroup[E], types.GenericAlias)
class BadConstructorArgs(unittest.TestCase):
def test_bad_EG_construction__too_many_args(self):
MSG = r'BaseExceptionGroup.__new__\(\) takes exactly 2 arguments'
with self.assertRaisesRegex(TypeError, MSG):
ExceptionGroup('no errors')
with self.assertRaisesRegex(TypeError, MSG):
ExceptionGroup([ValueError('no msg')])
with self.assertRaisesRegex(TypeError, MSG):
ExceptionGroup('eg', [ValueError('too')], [TypeError('many')])
def test_bad_EG_construction__bad_message(self):
MSG = 'argument 1 must be str, not '
with self.assertRaisesRegex(TypeError, MSG):
ExceptionGroup(ValueError(12), SyntaxError('bad syntax'))
with self.assertRaisesRegex(TypeError, MSG):
ExceptionGroup(None, [ValueError(12)])
def test_bad_EG_construction__bad_excs_sequence(self):
MSG = r'second argument \(exceptions\) must be a sequence'
with self.assertRaisesRegex(TypeError, MSG):
ExceptionGroup('errors not sequence', {ValueError(42)})
with self.assertRaisesRegex(TypeError, MSG):
ExceptionGroup("eg", None)
MSG = r'second argument \(exceptions\) must be a non-empty sequence'
with self.assertRaisesRegex(ValueError, MSG):
ExceptionGroup("eg", [])
def test_bad_EG_construction__nested_non_exceptions(self):
MSG = (r'Item [0-9]+ of second argument \(exceptions\)'
' is not an exception')
with self.assertRaisesRegex(ValueError, MSG):
            ExceptionGroup('expect instance, not type', [OSError])
with self.assertRaisesRegex(ValueError, MSG):
ExceptionGroup('bad error', ["not an exception"])
class InstanceCreation(unittest.TestCase):
def test_EG_wraps_Exceptions__creates_EG(self):
excs = [ValueError(1), TypeError(2)]
self.assertIs(
type(ExceptionGroup("eg", excs)),
ExceptionGroup)
def test_BEG_wraps_Exceptions__creates_EG(self):
excs = [ValueError(1), TypeError(2)]
self.assertIs(
type(BaseExceptionGroup("beg", excs)),
ExceptionGroup)
def test_EG_wraps_BaseException__raises_TypeError(self):
MSG= "Cannot nest BaseExceptions in an ExceptionGroup"
with self.assertRaisesRegex(TypeError, MSG):
eg = ExceptionGroup("eg", [ValueError(1), KeyboardInterrupt(2)])
def test_BEG_wraps_BaseException__creates_BEG(self):
beg = BaseExceptionGroup("beg", [ValueError(1), KeyboardInterrupt(2)])
self.assertIs(type(beg), BaseExceptionGroup)
def test_EG_subclass_wraps_anything(self):
class MyEG(ExceptionGroup):
pass
self.assertIs(
type(MyEG("eg", [ValueError(12), TypeError(42)])),
MyEG)
self.assertIs(
type(MyEG("eg", [ValueError(12), KeyboardInterrupt(42)])),
MyEG)
def test_BEG_subclass_wraps_anything(self):
class MyBEG(BaseExceptionGroup):
pass
self.assertIs(
type(MyBEG("eg", [ValueError(12), TypeError(42)])),
MyBEG)
self.assertIs(
type(MyBEG("eg", [ValueError(12), KeyboardInterrupt(42)])),
MyBEG)
class StrAndReprTests(unittest.TestCase):
def test_ExceptionGroup(self):
eg = BaseExceptionGroup(
'flat', [ValueError(1), TypeError(2)])
self.assertEqual(str(eg), "flat (2 sub-exceptions)")
self.assertEqual(repr(eg),
"ExceptionGroup('flat', [ValueError(1), TypeError(2)])")
eg = BaseExceptionGroup(
'nested', [eg, ValueError(1), eg, TypeError(2)])
self.assertEqual(str(eg), "nested (4 sub-exceptions)")
self.assertEqual(repr(eg),
"ExceptionGroup('nested', "
"[ExceptionGroup('flat', "
"[ValueError(1), TypeError(2)]), "
"ValueError(1), "
"ExceptionGroup('flat', "
"[ValueError(1), TypeError(2)]), TypeError(2)])")
def test_BaseExceptionGroup(self):
eg = BaseExceptionGroup(
'flat', [ValueError(1), KeyboardInterrupt(2)])
self.assertEqual(str(eg), "flat (2 sub-exceptions)")
self.assertEqual(repr(eg),
"BaseExceptionGroup("
"'flat', "
"[ValueError(1), KeyboardInterrupt(2)])")
eg = BaseExceptionGroup(
'nested', [eg, ValueError(1), eg])
self.assertEqual(str(eg), "nested (3 sub-exceptions)")
self.assertEqual(repr(eg),
"BaseExceptionGroup('nested', "
"[BaseExceptionGroup('flat', "
"[ValueError(1), KeyboardInterrupt(2)]), "
"ValueError(1), "
"BaseExceptionGroup('flat', "
"[ValueError(1), KeyboardInterrupt(2)])])")
def test_custom_exception(self):
class MyEG(ExceptionGroup):
pass
eg = MyEG(
'flat', [ValueError(1), TypeError(2)])
self.assertEqual(str(eg), "flat (2 sub-exceptions)")
self.assertEqual(repr(eg), "MyEG('flat', [ValueError(1), TypeError(2)])")
eg = MyEG(
'nested', [eg, ValueError(1), eg, TypeError(2)])
self.assertEqual(str(eg), "nested (4 sub-exceptions)")
self.assertEqual(repr(eg), (
"MyEG('nested', "
"[MyEG('flat', [ValueError(1), TypeError(2)]), "
"ValueError(1), "
"MyEG('flat', [ValueError(1), TypeError(2)]), "
"TypeError(2)])"))
def create_simple_eg():
excs = []
try:
try:
raise MemoryError("context and cause for ValueError(1)")
except MemoryError as e:
raise ValueError(1) from e
except ValueError as e:
excs.append(e)
try:
try:
raise OSError("context for TypeError")
except OSError as e:
raise TypeError(int)
except TypeError as e:
excs.append(e)
try:
try:
raise ImportError("context for ValueError(2)")
except ImportError as e:
raise ValueError(2)
except ValueError as e:
excs.append(e)
try:
raise ExceptionGroup('simple eg', excs)
except ExceptionGroup as e:
return e
class ExceptionGroupFields(unittest.TestCase):
def test_basics_ExceptionGroup_fields(self):
eg = create_simple_eg()
# check msg
self.assertEqual(eg.message, 'simple eg')
self.assertEqual(eg.args[0], 'simple eg')
# check cause and context
self.assertIsInstance(eg.exceptions[0], ValueError)
self.assertIsInstance(eg.exceptions[0].__cause__, MemoryError)
self.assertIsInstance(eg.exceptions[0].__context__, MemoryError)
self.assertIsInstance(eg.exceptions[1], TypeError)
self.assertIsNone(eg.exceptions[1].__cause__)
self.assertIsInstance(eg.exceptions[1].__context__, OSError)
self.assertIsInstance(eg.exceptions[2], ValueError)
self.assertIsNone(eg.exceptions[2].__cause__)
self.assertIsInstance(eg.exceptions[2].__context__, ImportError)
# check tracebacks
line0 = create_simple_eg.__code__.co_firstlineno
tb_linenos = [line0 + 27,
[line0 + 6, line0 + 14, line0 + 22]]
self.assertEqual(eg.__traceback__.tb_lineno, tb_linenos[0])
self.assertIsNone(eg.__traceback__.tb_next)
for i in range(3):
tb = eg.exceptions[i].__traceback__
self.assertIsNone(tb.tb_next)
self.assertEqual(tb.tb_lineno, tb_linenos[1][i])
def test_fields_are_readonly(self):
eg = ExceptionGroup('eg', [TypeError(1), OSError(2)])
self.assertEqual(type(eg.exceptions), tuple)
eg.message
with self.assertRaises(AttributeError):
eg.message = "new msg"
eg.exceptions
with self.assertRaises(AttributeError):
eg.exceptions = [OSError('xyz')]
class ExceptionGroupTestBase(unittest.TestCase):
def assertMatchesTemplate(self, exc, exc_type, template):
""" Assert that the exception matches the template
A template describes the shape of exc. If exc is a
leaf exception (i.e., not an exception group) then
template is an exception instance that has the
expected type and args value of exc. If exc is an
exception group, then template is a list of the
templates of its nested exceptions.
"""
if exc_type is not None:
self.assertIs(type(exc), exc_type)
if isinstance(exc, BaseExceptionGroup):
self.assertIsInstance(template, collections.abc.Sequence)
self.assertEqual(len(exc.exceptions), len(template))
for e, t in zip(exc.exceptions, template):
self.assertMatchesTemplate(e, None, t)
else:
self.assertIsInstance(template, BaseException)
self.assertEqual(type(exc), type(template))
self.assertEqual(exc.args, template.args)
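    # Template example (illustrative comment): the nested group
    # ExceptionGroup("root", [ExceptionGroup("nested", [TypeError(bytes)]),
    # ValueError(1)]) matches the template [[TypeError(bytes)], ValueError(1)]:
    # an inner list stands for a nested exception group, while a bare exception
    # instance stands for a leaf with the same type and args.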
class ExceptionGroupSubgroupTests(ExceptionGroupTestBase):
def setUp(self):
self.eg = create_simple_eg()
self.eg_template = [ValueError(1), TypeError(int), ValueError(2)]
def test_basics_subgroup_split__bad_arg_type(self):
bad_args = ["bad arg",
OSError('instance not type'),
[OSError, TypeError],
(OSError, 42)]
for arg in bad_args:
with self.assertRaises(TypeError):
self.eg.subgroup(arg)
with self.assertRaises(TypeError):
self.eg.split(arg)
def test_basics_subgroup_by_type__passthrough(self):
eg = self.eg
self.assertIs(eg, eg.subgroup(BaseException))
self.assertIs(eg, eg.subgroup(Exception))
self.assertIs(eg, eg.subgroup(BaseExceptionGroup))
self.assertIs(eg, eg.subgroup(ExceptionGroup))
def test_basics_subgroup_by_type__no_match(self):
self.assertIsNone(self.eg.subgroup(OSError))
def test_basics_subgroup_by_type__match(self):
eg = self.eg
testcases = [
# (match_type, result_template)
(ValueError, [ValueError(1), ValueError(2)]),
(TypeError, [TypeError(int)]),
((ValueError, TypeError), self.eg_template)]
for match_type, template in testcases:
with self.subTest(match=match_type):
subeg = eg.subgroup(match_type)
self.assertEqual(subeg.message, eg.message)
self.assertMatchesTemplate(subeg, ExceptionGroup, template)
def test_basics_subgroup_by_predicate__passthrough(self):
self.assertIs(self.eg, self.eg.subgroup(lambda e: True))
def test_basics_subgroup_by_predicate__no_match(self):
self.assertIsNone(self.eg.subgroup(lambda e: False))
def test_basics_subgroup_by_predicate__match(self):
eg = self.eg
testcases = [
# (match_type, result_template)
(ValueError, [ValueError(1), ValueError(2)]),
(TypeError, [TypeError(int)]),
((ValueError, TypeError), self.eg_template)]
for match_type, template in testcases:
subeg = eg.subgroup(lambda e: isinstance(e, match_type))
self.assertEqual(subeg.message, eg.message)
self.assertMatchesTemplate(subeg, ExceptionGroup, template)
class ExceptionGroupSplitTests(ExceptionGroupTestBase):
def setUp(self):
self.eg = create_simple_eg()
self.eg_template = [ValueError(1), TypeError(int), ValueError(2)]
def test_basics_split_by_type__passthrough(self):
for E in [BaseException, Exception,
BaseExceptionGroup, ExceptionGroup]:
match, rest = self.eg.split(E)
self.assertMatchesTemplate(
match, ExceptionGroup, self.eg_template)
self.assertIsNone(rest)
def test_basics_split_by_type__no_match(self):
match, rest = self.eg.split(OSError)
self.assertIsNone(match)
self.assertMatchesTemplate(
rest, ExceptionGroup, self.eg_template)
def test_basics_split_by_type__match(self):
eg = self.eg
VE = ValueError
TE = TypeError
testcases = [
# (matcher, match_template, rest_template)
(VE, [VE(1), VE(2)], [TE(int)]),
(TE, [TE(int)], [VE(1), VE(2)]),
((VE, TE), self.eg_template, None),
((OSError, VE), [VE(1), VE(2)], [TE(int)]),
]
for match_type, match_template, rest_template in testcases:
match, rest = eg.split(match_type)
self.assertEqual(match.message, eg.message)
self.assertMatchesTemplate(
match, ExceptionGroup, match_template)
if rest_template is not None:
self.assertEqual(rest.message, eg.message)
self.assertMatchesTemplate(
rest, ExceptionGroup, rest_template)
else:
self.assertIsNone(rest)
def test_basics_split_by_predicate__passthrough(self):
match, rest = self.eg.split(lambda e: True)
self.assertMatchesTemplate(match, ExceptionGroup, self.eg_template)
self.assertIsNone(rest)
def test_basics_split_by_predicate__no_match(self):
match, rest = self.eg.split(lambda e: False)
self.assertIsNone(match)
self.assertMatchesTemplate(rest, ExceptionGroup, self.eg_template)
def test_basics_split_by_predicate__match(self):
eg = self.eg
VE = ValueError
TE = TypeError
testcases = [
# (matcher, match_template, rest_template)
(VE, [VE(1), VE(2)], [TE(int)]),
(TE, [TE(int)], [VE(1), VE(2)]),
((VE, TE), self.eg_template, None),
]
for match_type, match_template, rest_template in testcases:
match, rest = eg.split(lambda e: isinstance(e, match_type))
self.assertEqual(match.message, eg.message)
self.assertMatchesTemplate(
match, ExceptionGroup, match_template)
if rest_template is not None:
self.assertEqual(rest.message, eg.message)
self.assertMatchesTemplate(
rest, ExceptionGroup, rest_template)
class DeepRecursionInSplitAndSubgroup(unittest.TestCase):
def make_deep_eg(self):
e = TypeError(1)
for i in range(2000):
e = ExceptionGroup('eg', [e])
return e
def test_deep_split(self):
e = self.make_deep_eg()
with self.assertRaises(RecursionError):
e.split(TypeError)
def test_deep_subgroup(self):
e = self.make_deep_eg()
with self.assertRaises(RecursionError):
e.subgroup(TypeError)
def leaf_generator(exc, tbs=None):
if tbs is None:
tbs = []
tbs.append(exc.__traceback__)
if isinstance(exc, BaseExceptionGroup):
for e in exc.exceptions:
yield from leaf_generator(e, tbs)
else:
# exc is a leaf exception and its traceback
# is the concatenation of the traceback
# segments in tbs
yield exc, tbs
tbs.pop()
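# A minimal usage sketch of leaf_generator (editor addition, not used by the
# tests below): flatten a small nested group and record, for each leaf, how
# many traceback segments wrap it (one per enclosing group plus its own; all
# segments are None here because nothing was actually raised).
def _leaf_generator_example():
    eg = ExceptionGroup(
        "eg", [ValueError(1), ExceptionGroup("nested", [TypeError(2)])])
    return [(type(e).__name__, len(tbs)) for e, tbs in leaf_generator(eg)]
    # -> [('ValueError', 2), ('TypeError', 3)]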
class LeafGeneratorTest(unittest.TestCase):
# The leaf_generator is mentioned in PEP 654 as a suggestion
    # on how to iterate over leaf nodes of an EG. It is also
    # used below as a test utility, so we test it here.
def test_leaf_generator(self):
eg = create_simple_eg()
self.assertSequenceEqual(
[e for e, _ in leaf_generator(eg)],
eg.exceptions)
for e, tbs in leaf_generator(eg):
self.assertSequenceEqual(
tbs, [eg.__traceback__, e.__traceback__])
def create_nested_eg():
excs = []
try:
try:
raise TypeError(bytes)
except TypeError as e:
raise ExceptionGroup("nested", [e])
except ExceptionGroup as e:
excs.append(e)
try:
try:
raise MemoryError('out of memory')
except MemoryError as e:
raise ValueError(1) from e
except ValueError as e:
excs.append(e)
try:
raise ExceptionGroup("root", excs)
except ExceptionGroup as eg:
return eg
class NestedExceptionGroupBasicsTest(ExceptionGroupTestBase):
def test_nested_group_matches_template(self):
eg = create_nested_eg()
self.assertMatchesTemplate(
eg,
ExceptionGroup,
[[TypeError(bytes)], ValueError(1)])
def test_nested_group_chaining(self):
eg = create_nested_eg()
self.assertIsInstance(eg.exceptions[1].__context__, MemoryError)
self.assertIsInstance(eg.exceptions[1].__cause__, MemoryError)
self.assertIsInstance(eg.exceptions[0].__context__, TypeError)
def test_nested_exception_group_tracebacks(self):
eg = create_nested_eg()
line0 = create_nested_eg.__code__.co_firstlineno
for (tb, expected) in [
(eg.__traceback__, line0 + 19),
(eg.exceptions[0].__traceback__, line0 + 6),
(eg.exceptions[1].__traceback__, line0 + 14),
(eg.exceptions[0].exceptions[0].__traceback__, line0 + 4),
]:
self.assertEqual(tb.tb_lineno, expected)
self.assertIsNone(tb.tb_next)
def test_iteration_full_tracebacks(self):
eg = create_nested_eg()
# check that iteration over leaves
# produces the expected tracebacks
self.assertEqual(len(list(leaf_generator(eg))), 2)
line0 = create_nested_eg.__code__.co_firstlineno
expected_tbs = [ [line0 + 19, line0 + 6, line0 + 4],
[line0 + 19, line0 + 14]]
for (i, (_, tbs)) in enumerate(leaf_generator(eg)):
self.assertSequenceEqual(
[tb.tb_lineno for tb in tbs],
expected_tbs[i])
class ExceptionGroupSplitTestBase(ExceptionGroupTestBase):
def split_exception_group(self, eg, types):
""" Split an EG and do some sanity checks on the result """
self.assertIsInstance(eg, BaseExceptionGroup)
match, rest = eg.split(types)
sg = eg.subgroup(types)
if match is not None:
self.assertIsInstance(match, BaseExceptionGroup)
for e,_ in leaf_generator(match):
self.assertIsInstance(e, types)
self.assertIsNotNone(sg)
self.assertIsInstance(sg, BaseExceptionGroup)
for e,_ in leaf_generator(sg):
self.assertIsInstance(e, types)
if rest is not None:
self.assertIsInstance(rest, BaseExceptionGroup)
def leaves(exc):
return [] if exc is None else [e for e,_ in leaf_generator(exc)]
# match and subgroup have the same leaves
self.assertSequenceEqual(leaves(match), leaves(sg))
match_leaves = leaves(match)
rest_leaves = leaves(rest)
# each leaf exception of eg is in exactly one of match and rest
self.assertEqual(
len(leaves(eg)),
len(leaves(match)) + len(leaves(rest)))
for e in leaves(eg):
self.assertNotEqual(
match and e in match_leaves,
rest and e in rest_leaves)
# message, cause and context, traceback and note equal to eg
for part in [match, rest, sg]:
if part is not None:
self.assertEqual(eg.message, part.message)
self.assertIs(eg.__cause__, part.__cause__)
self.assertIs(eg.__context__, part.__context__)
self.assertIs(eg.__traceback__, part.__traceback__)
self.assertIs(eg.__note__, part.__note__)
def tbs_for_leaf(leaf, eg):
for e, tbs in leaf_generator(eg):
if e is leaf:
return tbs
def tb_linenos(tbs):
return [tb.tb_lineno for tb in tbs if tb]
# full tracebacks match
for part in [match, rest, sg]:
for e in leaves(part):
self.assertSequenceEqual(
tb_linenos(tbs_for_leaf(e, eg)),
tb_linenos(tbs_for_leaf(e, part)))
return match, rest
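    # Concrete illustration (editor comment): splitting
    # ExceptionGroup("eg", [ValueError(1), TypeError(2)]) on ValueError yields
    # match == ExceptionGroup("eg", [ValueError(1)]) and
    # rest == ExceptionGroup("eg", [TypeError(2)]); both parts keep the
    # original message, __cause__, __context__, __traceback__ and __note__,
    # and every leaf of the original lands in exactly one of the two parts.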
class NestedExceptionGroupSplitTest(ExceptionGroupSplitTestBase):
def test_split_by_type(self):
class MyExceptionGroup(ExceptionGroup):
pass
def raiseVE(v):
raise ValueError(v)
def raiseTE(t):
raise TypeError(t)
def nested_group():
def level1(i):
excs = []
for f, arg in [(raiseVE, i), (raiseTE, int), (raiseVE, i+1)]:
try:
f(arg)
except Exception as e:
excs.append(e)
raise ExceptionGroup('msg1', excs)
def level2(i):
excs = []
for f, arg in [(level1, i), (level1, i+1), (raiseVE, i+2)]:
try:
f(arg)
except Exception as e:
excs.append(e)
raise MyExceptionGroup('msg2', excs)
def level3(i):
excs = []
for f, arg in [(level2, i+1), (raiseVE, i+2)]:
try:
f(arg)
except Exception as e:
excs.append(e)
raise ExceptionGroup('msg3', excs)
level3(5)
try:
nested_group()
except ExceptionGroup as e:
e.__note__ = f"the note: {id(e)}"
eg = e
eg_template = [
[
[ValueError(6), TypeError(int), ValueError(7)],
[ValueError(7), TypeError(int), ValueError(8)],
ValueError(8),
],
ValueError(7)]
valueErrors_template = [
[
[ValueError(6), ValueError(7)],
[ValueError(7), ValueError(8)],
ValueError(8),
],
ValueError(7)]
typeErrors_template = [[[TypeError(int)], [TypeError(int)]]]
self.assertMatchesTemplate(eg, ExceptionGroup, eg_template)
# Match Nothing
match, rest = self.split_exception_group(eg, SyntaxError)
self.assertIsNone(match)
self.assertMatchesTemplate(rest, ExceptionGroup, eg_template)
# Match Everything
match, rest = self.split_exception_group(eg, BaseException)
self.assertMatchesTemplate(match, ExceptionGroup, eg_template)
self.assertIsNone(rest)
match, rest = self.split_exception_group(eg, (ValueError, TypeError))
self.assertMatchesTemplate(match, ExceptionGroup, eg_template)
self.assertIsNone(rest)
# Match ValueErrors
match, rest = self.split_exception_group(eg, ValueError)
self.assertMatchesTemplate(match, ExceptionGroup, valueErrors_template)
self.assertMatchesTemplate(rest, ExceptionGroup, typeErrors_template)
# Match TypeErrors
match, rest = self.split_exception_group(eg, (TypeError, SyntaxError))
self.assertMatchesTemplate(match, ExceptionGroup, typeErrors_template)
self.assertMatchesTemplate(rest, ExceptionGroup, valueErrors_template)
# Match ExceptionGroup
match, rest = eg.split(ExceptionGroup)
self.assertIs(match, eg)
self.assertIsNone(rest)
# Match MyExceptionGroup (ExceptionGroup subclass)
match, rest = eg.split(MyExceptionGroup)
self.assertMatchesTemplate(match, ExceptionGroup, [eg_template[0]])
self.assertMatchesTemplate(rest, ExceptionGroup, [eg_template[1]])
def test_split_BaseExceptionGroup(self):
def exc(ex):
try:
raise ex
except BaseException as e:
return e
try:
raise BaseExceptionGroup(
"beg", [exc(ValueError(1)), exc(KeyboardInterrupt(2))])
except BaseExceptionGroup as e:
beg = e
# Match Nothing
match, rest = self.split_exception_group(beg, TypeError)
self.assertIsNone(match)
self.assertMatchesTemplate(
rest, BaseExceptionGroup, [ValueError(1), KeyboardInterrupt(2)])
# Match Everything
match, rest = self.split_exception_group(
beg, (ValueError, KeyboardInterrupt))
self.assertMatchesTemplate(
match, BaseExceptionGroup, [ValueError(1), KeyboardInterrupt(2)])
self.assertIsNone(rest)
# Match ValueErrors
match, rest = self.split_exception_group(beg, ValueError)
self.assertMatchesTemplate(
match, ExceptionGroup, [ValueError(1)])
self.assertMatchesTemplate(
rest, BaseExceptionGroup, [KeyboardInterrupt(2)])
# Match KeyboardInterrupts
match, rest = self.split_exception_group(beg, KeyboardInterrupt)
self.assertMatchesTemplate(
match, BaseExceptionGroup, [KeyboardInterrupt(2)])
self.assertMatchesTemplate(
rest, ExceptionGroup, [ValueError(1)])
class NestedExceptionGroupSubclassSplitTest(ExceptionGroupSplitTestBase):
def test_split_ExceptionGroup_subclass_no_derive_no_new_override(self):
class EG(ExceptionGroup):
pass
try:
try:
try:
raise TypeError(2)
except TypeError as te:
raise EG("nested", [te])
except EG as nested:
try:
raise ValueError(1)
except ValueError as ve:
raise EG("eg", [ve, nested])
except EG as e:
eg = e
self.assertMatchesTemplate(eg, EG, [ValueError(1), [TypeError(2)]])
# Match Nothing
match, rest = self.split_exception_group(eg, OSError)
self.assertIsNone(match)
self.assertMatchesTemplate(
rest, ExceptionGroup, [ValueError(1), [TypeError(2)]])
# Match Everything
match, rest = self.split_exception_group(eg, (ValueError, TypeError))
self.assertMatchesTemplate(
match, ExceptionGroup, [ValueError(1), [TypeError(2)]])
self.assertIsNone(rest)
# Match ValueErrors
match, rest = self.split_exception_group(eg, ValueError)
self.assertMatchesTemplate(match, ExceptionGroup, [ValueError(1)])
self.assertMatchesTemplate(rest, ExceptionGroup, [[TypeError(2)]])
# Match TypeErrors
match, rest = self.split_exception_group(eg, TypeError)
self.assertMatchesTemplate(match, ExceptionGroup, [[TypeError(2)]])
self.assertMatchesTemplate(rest, ExceptionGroup, [ValueError(1)])
def test_split_BaseExceptionGroup_subclass_no_derive_new_override(self):
class EG(BaseExceptionGroup):
def __new__(cls, message, excs, unused):
# The "unused" arg is here to show that split() doesn't call
# the actual class constructor from the default derive()
                # implementation (it would fail on the unused arg if it did,
                # because it assumes the BaseExceptionGroup.__new__ signature).
return super().__new__(cls, message, excs)
try:
raise EG("eg", [ValueError(1), KeyboardInterrupt(2)], "unused")
except EG as e:
eg = e
self.assertMatchesTemplate(
eg, EG, [ValueError(1), KeyboardInterrupt(2)])
# Match Nothing
match, rest = self.split_exception_group(eg, OSError)
self.assertIsNone(match)
self.assertMatchesTemplate(
rest, BaseExceptionGroup, [ValueError(1), KeyboardInterrupt(2)])
# Match Everything
match, rest = self.split_exception_group(
eg, (ValueError, KeyboardInterrupt))
self.assertMatchesTemplate(
match, BaseExceptionGroup, [ValueError(1), KeyboardInterrupt(2)])
self.assertIsNone(rest)
# Match ValueErrors
match, rest = self.split_exception_group(eg, ValueError)
self.assertMatchesTemplate(match, ExceptionGroup, [ValueError(1)])
self.assertMatchesTemplate(
rest, BaseExceptionGroup, [KeyboardInterrupt(2)])
# Match KeyboardInterrupt
match, rest = self.split_exception_group(eg, KeyboardInterrupt)
self.assertMatchesTemplate(
match, BaseExceptionGroup, [KeyboardInterrupt(2)])
self.assertMatchesTemplate(rest, ExceptionGroup, [ValueError(1)])
def test_split_ExceptionGroup_subclass_derive_and_new_overrides(self):
class EG(ExceptionGroup):
def __new__(cls, message, excs, code):
obj = super().__new__(cls, message, excs)
obj.code = code
return obj
def derive(self, excs):
return EG(self.message, excs, self.code)
try:
try:
try:
raise TypeError(2)
except TypeError as te:
raise EG("nested", [te], 101)
except EG as nested:
try:
raise ValueError(1)
except ValueError as ve:
raise EG("eg", [ve, nested], 42)
except EG as e:
eg = e
self.assertMatchesTemplate(eg, EG, [ValueError(1), [TypeError(2)]])
# Match Nothing
match, rest = self.split_exception_group(eg, OSError)
self.assertIsNone(match)
self.assertMatchesTemplate(rest, EG, [ValueError(1), [TypeError(2)]])
self.assertEqual(rest.code, 42)
self.assertEqual(rest.exceptions[1].code, 101)
# Match Everything
match, rest = self.split_exception_group(eg, (ValueError, TypeError))
self.assertMatchesTemplate(match, EG, [ValueError(1), [TypeError(2)]])
self.assertEqual(match.code, 42)
self.assertEqual(match.exceptions[1].code, 101)
self.assertIsNone(rest)
# Match ValueErrors
match, rest = self.split_exception_group(eg, ValueError)
self.assertMatchesTemplate(match, EG, [ValueError(1)])
self.assertEqual(match.code, 42)
self.assertMatchesTemplate(rest, EG, [[TypeError(2)]])
self.assertEqual(rest.code, 42)
self.assertEqual(rest.exceptions[0].code, 101)
# Match TypeErrors
match, rest = self.split_exception_group(eg, TypeError)
self.assertMatchesTemplate(match, EG, [[TypeError(2)]])
self.assertEqual(match.code, 42)
self.assertEqual(match.exceptions[0].code, 101)
self.assertMatchesTemplate(rest, EG, [ValueError(1)])
self.assertEqual(rest.code, 42)
if __name__ == '__main__':
unittest.main()
| 36.326112 | 81 | 0.60506 |
793e8d93fef2a82f83c9459a825b824fbf659e38 | 25,761 | py | Python | cumulusci/tasks/bulkdata/load.py | vazexqi/CumulusCI | 9075b30adc1ddcb7741df610d888330fa0233798 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/tasks/bulkdata/load.py | vazexqi/CumulusCI | 9075b30adc1ddcb7741df610d888330fa0233798 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/tasks/bulkdata/load.py | vazexqi/CumulusCI | 9075b30adc1ddcb7741df610d888330fa0233798 | [
"BSD-3-Clause"
] | 1 | 2021-06-03T01:10:17.000Z | 2021-06-03T01:10:17.000Z | from collections import defaultdict
from unittest.mock import MagicMock
from typing import Union
import tempfile
from contextlib import contextmanager
from sqlalchemy import Column, MetaData, Table, Unicode, create_engine, text, func
from sqlalchemy.orm import aliased, Session
from sqlalchemy.ext.automap import automap_base
from cumulusci.core.exceptions import BulkDataException, TaskOptionsError
from cumulusci.core.utils import process_bool_arg
from cumulusci.tasks.bulkdata.utils import (
SqlAlchemyMixin,
RowErrorChecker,
)
from cumulusci.tasks.bulkdata.dates import adjust_relative_dates
from cumulusci.tasks.bulkdata.step import (
DataOperationStatus,
DataOperationType,
DataOperationJobResult,
get_dml_operation,
)
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.utils import os_friendly_path
from cumulusci.tasks.bulkdata.mapping_parser import (
parse_from_yaml,
validate_and_inject_mapping,
MappingStep,
MappingLookup,
)
class LoadData(SqlAlchemyMixin, BaseSalesforceApiTask):
"""Perform Bulk API operations to load data defined by a mapping from a local store into an org."""
task_options = {
"database_url": {
"description": "The database url to a database containing the test data to load"
},
"mapping": {
"description": "The path to a yaml file containing mappings of the database fields to Salesforce object fields",
"required": True,
},
"start_step": {
"description": "If specified, skip steps before this one in the mapping",
"required": False,
},
"sql_path": {
"description": "If specified, a database will be created from an SQL script at the provided path"
},
"ignore_row_errors": {
"description": "If True, allow the load to continue even if individual rows fail to load."
},
"reset_oids": {
"description": "If True (the default), and the _sf_ids tables exist, reset them before continuing.",
"required": False,
},
"bulk_mode": {
"description": "Set to Serial to force serial mode on all jobs. Parallel is the default."
},
"inject_namespaces": {
"description": "If True, the package namespace prefix will be "
"automatically added to (or removed from) objects "
"and fields based on the name used in the org. Defaults to True."
},
"drop_missing_schema": {
"description": "Set to True to skip any missing objects or fields instead of stopping with an error."
},
}
row_warning_limit = 10
def _init_options(self, kwargs):
super(LoadData, self)._init_options(kwargs)
self.options["ignore_row_errors"] = process_bool_arg(
self.options.get("ignore_row_errors") or False
)
if self.options.get("database_url"):
# prefer database_url if it's set
self.options["sql_path"] = None
elif self.options.get("sql_path"):
self.options["sql_path"] = os_friendly_path(self.options["sql_path"])
self.options["database_url"] = None
else:
raise TaskOptionsError(
"You must set either the database_url or sql_path option."
)
self.reset_oids = self.options.get("reset_oids", True)
self.bulk_mode = (
self.options.get("bulk_mode") and self.options.get("bulk_mode").title()
)
if self.bulk_mode and self.bulk_mode not in ["Serial", "Parallel"]:
raise TaskOptionsError("bulk_mode must be either Serial or Parallel")
inject_namespaces = self.options.get("inject_namespaces")
self.options["inject_namespaces"] = process_bool_arg(
True if inject_namespaces is None else inject_namespaces
)
self.options["drop_missing_schema"] = process_bool_arg(
self.options.get("drop_missing_schema") or False
)
def _run_task(self):
self._init_mapping()
with self._init_db():
self._expand_mapping()
start_step = self.options.get("start_step")
started = False
for name, mapping in self.mapping.items():
# Skip steps until start_step
if not started and start_step and name != start_step:
self.logger.info(f"Skipping step: {name}")
continue
started = True
self.logger.info(f"Running step: {name}")
result = self._execute_step(mapping)
if result.status is DataOperationStatus.JOB_FAILURE:
raise BulkDataException(
f"Step {name} did not complete successfully: {','.join(result.job_errors)}"
)
if name in self.after_steps:
for after_name, after_step in self.after_steps[name].items():
self.logger.info(f"Running post-load step: {after_name}")
result = self._execute_step(after_step)
if result.status is DataOperationStatus.JOB_FAILURE:
raise BulkDataException(
f"Step {after_name} did not complete successfully: {','.join(result.job_errors)}"
)
def _execute_step(
self, mapping: MappingStep
) -> Union[DataOperationJobResult, MagicMock]:
"""Load data for a single step."""
if "RecordTypeId" in mapping.fields:
conn = self.session.connection()
self._load_record_types([mapping.sf_object], conn)
self.session.commit()
query = self._query_db(mapping)
bulk_mode = mapping.bulk_mode or self.bulk_mode or "Parallel"
step = get_dml_operation(
sobject=mapping.sf_object,
operation=mapping.action,
api_options={"batch_size": mapping.batch_size, "bulk_mode": bulk_mode},
context=self,
fields=mapping.get_load_field_list(),
api=mapping.api,
volume=query.count(),
)
with tempfile.TemporaryFile(mode="w+t") as local_ids:
step.start()
step.load_records(self._stream_queried_data(mapping, local_ids, query))
step.end()
if step.job_result.status is not DataOperationStatus.JOB_FAILURE:
local_ids.seek(0)
self._process_job_results(mapping, step, local_ids)
return step.job_result
def _stream_queried_data(self, mapping, local_ids, query):
"""Get data from the local db"""
statics = self._get_statics(mapping)
total_rows = 0
if mapping.anchor_date:
date_context = mapping.get_relative_date_context(
mapping.get_load_field_list(), self.org_config
)
for row in query.yield_per(10000):
total_rows += 1
# Add static values to row
pkey = row[0]
row = list(row[1:]) + statics
if mapping.anchor_date and (date_context[0] or date_context[1]):
row = adjust_relative_dates(
mapping, date_context, row, DataOperationType.INSERT
)
if mapping.action is DataOperationType.UPDATE:
if len(row) > 1 and all([f is None for f in row[1:]]):
# Skip update rows that contain no values
total_rows -= 1
continue
local_ids.write(str(pkey) + "\n")
yield row
self.logger.info(
f"Prepared {total_rows} rows for {mapping['action']} to {mapping['sf_object']}."
)
def _load_record_types(self, sobjects, conn):
"""Persist record types for the given sObjects into the database."""
for sobject in sobjects:
table_name = sobject + "_rt_target_mapping"
self._extract_record_types(sobject, table_name, conn)
def _get_statics(self, mapping):
"""Return the static values (not column names) to be appended to
records for this mapping."""
statics = list(mapping.static.values())
if mapping.record_type:
query = (
f"SELECT Id FROM RecordType WHERE SObjectType='{mapping.sf_object}'"
f"AND DeveloperName = '{mapping.record_type}' LIMIT 1"
)
records = self.sf.query(query)["records"]
if records:
record_type_id = records[0]["Id"]
else:
raise BulkDataException(f"Cannot find RecordType with query `{query}`")
statics.append(record_type_id)
return statics
def _query_db(self, mapping):
"""Build a query to retrieve data from the local db.
Includes columns from the mapping
as well as joining to the id tables to get real SF ids
for lookups.
"""
model = self.models[mapping.table]
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for name, f in mapping.fields.items():
if name not in ("Id", "RecordTypeId", "RecordType"):
columns.append(model.__table__.columns[f])
lookups = {
lookup_field: lookup
for lookup_field, lookup in mapping.lookups.items()
if not lookup.after
}
for lookup in lookups.values():
lookup.aliased_table = aliased(
self.metadata.tables[f"{lookup.table}_sf_ids"]
)
columns.append(lookup.aliased_table.columns.sf_id)
if "RecordTypeId" in mapping.fields:
rt_dest_table = self.metadata.tables[
mapping.get_destination_record_type_table()
]
columns.append(rt_dest_table.columns.record_type_id)
query = self.session.query(*columns)
if mapping.record_type and hasattr(model, "record_type"):
query = query.filter(model.record_type == mapping.record_type)
if mapping.filters:
filter_args = []
for f in mapping.filters:
filter_args.append(text(f))
query = query.filter(*filter_args)
if "RecordTypeId" in mapping.fields:
try:
rt_source_table = self.metadata.tables[
mapping.get_source_record_type_table()
]
except KeyError as e:
raise BulkDataException(
"A record type mapping table was not found in your dataset. "
f"Was it generated by extract_data? {e}",
) from e
rt_dest_table = self.metadata.tables[
mapping.get_destination_record_type_table()
]
query = query.outerjoin(
rt_source_table,
rt_source_table.columns.record_type_id
== getattr(model, mapping.fields["RecordTypeId"]),
)
query = query.outerjoin(
rt_dest_table,
rt_dest_table.columns.developer_name
== rt_source_table.columns.developer_name,
)
for sf_field, lookup in lookups.items():
# Outer join with lookup ids table:
# returns main obj even if lookup is null
key_field = lookup.get_lookup_key_field(model)
value_column = getattr(model, key_field)
query = query.outerjoin(
lookup.aliased_table,
lookup.aliased_table.columns.id == value_column,
)
# Order by foreign key to minimize lock contention
# by trying to keep lookup targets in the same batch
lookup_column = getattr(model, key_field)
query = query.order_by(lookup_column)
# Filter out non-person account Contact records.
# Contact records for person accounts were already created by the system.
if mapping.sf_object == "Contact" and self._can_load_person_accounts(mapping):
query = self._filter_out_person_account_records(query, model)
return query
def _process_job_results(self, mapping, step, local_ids):
"""Get the job results and process the results. If we're raising for
row-level errors, do so; if we're inserting, store the new Ids."""
if mapping.action is DataOperationType.INSERT:
id_table_name = self._initialize_id_table(mapping, self.reset_oids)
conn = self.session.connection()
results_generator = self._generate_results_id_map(step, local_ids)
# If we know we have no successful inserts, don't attempt to persist Ids.
# Do, however, drain the generator to get error-checking behavior.
if mapping.action is DataOperationType.INSERT and (
step.job_result.records_processed - step.job_result.total_row_errors
):
self._sql_bulk_insert_from_records(
connection=conn,
table=id_table_name,
columns=("id", "sf_id"),
record_iterable=results_generator,
)
else:
for r in results_generator:
pass # Drain generator to validate results
# Contact records for Person Accounts are inserted during an Account
# sf_object step. Insert records into the Contact ID table for
# person account Contact records so lookups to
# person account Contact records get populated downstream as expected.
if (
mapping.action is DataOperationType.INSERT
and mapping.sf_object == "Contact"
and self._can_load_person_accounts(mapping)
):
account_id_lookup = mapping.lookups.get("AccountId")
if account_id_lookup:
self._sql_bulk_insert_from_records(
connection=conn,
table=id_table_name,
columns=("id", "sf_id"),
record_iterable=self._generate_contact_id_map_for_person_accounts(
mapping, account_id_lookup, conn
),
)
if mapping.action is DataOperationType.INSERT:
self.session.commit()
def _generate_results_id_map(self, step, local_ids):
"""Consume results from load and prepare rows for id table.
Raise BulkDataException on row errors if configured to do so."""
error_checker = RowErrorChecker(
self.logger, self.options["ignore_row_errors"], self.row_warning_limit
)
local_ids = (lid.strip("\n") for lid in local_ids)
for result, local_id in zip(step.get_results(), local_ids):
if result.success:
yield (local_id, result.id)
else:
error_checker.check_for_row_error(result, local_id)
def _initialize_id_table(self, mapping, should_reset_table):
"""initalize or find table to hold the inserted SF Ids
The table has a name like xxx_sf_ids and has just two columns, id and sf_id.
If the table already exists, should_reset_table determines whether to
drop and recreate it or not.
"""
id_table_name = f"{mapping['table']}_sf_ids"
already_exists = id_table_name in self.metadata.tables
if already_exists and not should_reset_table:
return id_table_name
if not hasattr(self, "_initialized_id_tables"):
self._initialized_id_tables = set()
if id_table_name not in self._initialized_id_tables:
if already_exists:
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(
id_table_name,
self.metadata,
Column("id", Unicode(255), primary_key=True),
Column("sf_id", Unicode(18)),
)
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
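    # Illustrative sketch (hypothetical table name): a mapping with
    # table="accounts" gets an "accounts_sf_ids" table with two columns,
    # id (Unicode(255), primary key) holding the local row id and
    # sf_id (Unicode(18)) holding the Salesforce Id returned on insert.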
def _sqlite_load(self):
"""Read a SQLite script and initialize the in-memory database."""
conn = self.session.connection()
cursor = conn.connection.cursor()
with open(self.options["sql_path"], "r", encoding="utf-8") as f:
try:
cursor.executescript(f.read())
finally:
cursor.close()
# self.session.flush()
@contextmanager
def _init_db(self):
"""Initialize the database and automapper."""
# initialize the DB engine
with self._database_url() as database_url:
parent_engine = create_engine(database_url)
with parent_engine.connect() as connection:
# initialize the DB session
self.session = Session(connection)
if self.options.get("sql_path"):
self._sqlite_load()
# initialize DB metadata
self.metadata = MetaData()
self.metadata.bind = connection
# initialize the automap mapping
self.base = automap_base(bind=connection, metadata=self.metadata)
self.base.prepare(connection, reflect=True)
# Loop through mappings and reflect each referenced table
self.models = {}
for name, mapping in self.mapping.items():
if mapping.table not in self.models:
self.models[mapping.table] = self.base.classes[mapping.table]
# create any Record Type tables we need
if "RecordTypeId" in mapping.fields:
self._create_record_type_table(
mapping.get_destination_record_type_table()
)
self.metadata.create_all()
self._validate_org_has_person_accounts_enabled_if_person_account_data_exists()
yield
def _init_mapping(self):
"""Load a YAML mapping file."""
mapping_file_path = self.options["mapping"]
if not mapping_file_path:
raise TaskOptionsError("Mapping file path required")
self.mapping = parse_from_yaml(mapping_file_path)
validate_and_inject_mapping(
mapping=self.mapping,
org_config=self.org_config,
namespace=self.project_config.project__package__namespace,
data_operation=DataOperationType.INSERT,
inject_namespaces=self.options["inject_namespaces"],
drop_missing=self.options["drop_missing_schema"],
)
def _expand_mapping(self):
"""Walk the mapping and generate any required 'after' steps
to handle dependent and self-lookups."""
# Expand the mapping to handle dependent lookups
self.after_steps = defaultdict(dict)
for step in self.mapping.values():
if any([lookup.after for lookup in step.lookups.values()]):
# We have deferred/dependent lookups.
# Synthesize mapping steps for them.
sobject = step.sf_object
after_list = {
lookup.after for lookup in step.lookups.values() if lookup.after
}
for after in after_list:
lookups = {
lookup_field: lookup
for lookup_field, lookup in step.lookups.items()
if lookup.after == after
}
name = f"Update {sobject} Dependencies After {after}"
mapping = MappingStep(
sf_object=sobject,
api=step.api,
action="update",
table=step.table,
)
mapping.lookups["Id"] = MappingLookup(
name="Id",
table=step["table"],
key_field=self.models[
step["table"]
].__table__.primary_key.columns.keys()[0],
)
for lookup in lookups:
mapping.lookups[lookup] = lookups[lookup].copy()
mapping.lookups[lookup].after = None
self.after_steps[after][name] = mapping
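    # Illustrative sketch of the expansion above (hypothetical mapping): an
    # "Account" step whose "ParentId" lookup has after="Insert Account" gets a
    # synthesized step named "Update Account Dependencies After Insert Account"
    # with action "update", an "Id" lookup keyed to the source table's primary
    # key, and a copy of the "ParentId" lookup with its `after` cleared.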
def _validate_org_has_person_accounts_enabled_if_person_account_data_exists(self):
"""
        To ensure data is loaded from the dataset as expected, and to avoid partial
        failures, raise a BulkDataException if there exist Account or Contact records with
        IsPersonAccount set to 'true' but the org does not have Person Accounts enabled.
"""
for mapping in self.mapping.values():
if (
mapping.sf_object
in [
"Account",
"Contact",
]
and self._db_has_person_accounts_column(mapping)
):
table = self.models[mapping.table].__table__
if (
self.session.query(table)
.filter(table.columns.get("IsPersonAccount") == "true")
.first()
and not self.org_config.is_person_accounts_enabled
):
raise BulkDataException(
"Your dataset contains Person Account data but Person Accounts is not enabled for your org."
)
def _db_has_person_accounts_column(self, mapping):
"""Returns whether "IsPersonAccount" is a column in mapping's table."""
return (
self.models[mapping.table].__table__.columns.get("IsPersonAccount")
is not None
)
def _can_load_person_accounts(self, mapping) -> bool:
"""Returns whether person accounts can be loaded:
- The mapping has a "IsPersonAccount" column
- Person Accounts is enabled in the org.
"""
return (
self._db_has_person_accounts_column(mapping)
and self.org_config.is_person_accounts_enabled
)
def _filter_out_person_account_records(self, query, model):
return query.filter(
func.lower(model.__table__.columns.get("IsPersonAccount")) == "false"
)
def _generate_contact_id_map_for_person_accounts(
self, contact_mapping, account_id_lookup, conn
):
"""
        Yields (local_id, sf_id) for Contact records where IsPersonAccount
        is true, in a way that scales to large data volumes.
We know a Person Account record is related to one and only one Contact
record. Therefore, we can map local Contact IDs to Salesforce IDs
by previously inserted Account records:
- Query the DB to get the map: Salesforce Account ID ->
local Contact ID
- Query Salesforce to get the map: Salesforce Account ID ->
Salesforce Contact ID
- Merge the maps
"""
# Contact table columns
contact_model = self.models[contact_mapping.table]
contact_id_column = getattr(
contact_model, contact_model.__table__.primary_key.columns.keys()[0]
)
account_id_column = getattr(
contact_model, account_id_lookup.get_lookup_key_field(contact_model)
)
# Account ID table + column
account_sf_ids_table = account_id_lookup.aliased_table
account_sf_id_column = account_sf_ids_table.columns["sf_id"]
# Query the Contact table for person account contact records so we can
# create a Map: Account SF ID --> Contact ID. Outer join the
# Account SF IDs table to get each Contact's associated
# Account SF ID.
query = (
self.session.query(contact_id_column, account_sf_id_column)
.filter(
func.lower(contact_model.__table__.columns.get("IsPersonAccount"))
== "true"
)
.outerjoin(
account_sf_ids_table,
account_sf_ids_table.columns["id"] == account_id_column,
)
)
# Stream the results so we can process batches of 200 Contacts
# in case we have large data volumes.
query_result = conn.execution_options(stream_results=True).execute(
query.statement
)
while True:
# While we have a chunk to process
chunk = query_result.fetchmany(200)
if not chunk:
break
# Collect Map: Account SF ID --> Contact ID
contact_ids_by_account_sf_id = {record[1]: record[0] for record in chunk}
# Query Map: Account SF ID --> Contact SF ID
            # It's safe to use query_all since the chunk size is capped at 200.
for record in self.sf.query_all(
"SELECT Id, AccountId FROM Contact WHERE IsPersonAccount = true AND AccountId IN ('{}')".format(
"','".join(contact_ids_by_account_sf_id.keys())
)
)["records"]:
contact_id = contact_ids_by_account_sf_id.get(record["AccountId"])
contact_sf_id = record["Id"]
# Join maps together to get tuple (Contact ID, Contact SF ID) to insert into step's ID Table.
yield (contact_id, contact_sf_id)
| 40.632492 | 124 | 0.591437 |
793e8f586f4661f679f2e2a52aa1e3fb9841db72 | 17 | py | Python | python/testData/psi/FStringTerminatedByQuoteInsideStringLiteralInFormatPart.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/psi/FStringTerminatedByQuoteInsideStringLiteralInFormatPart.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/psi/FStringTerminatedByQuoteInsideStringLiteralInFormatPart.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | s = f'{42:{"'"}}' | 17 | 17 | 0.235294 |
793e901392b73523237f5290b5992bb69b90054a | 3,840 | py | Python | shapash/decomposition/contributions.py | peterdhansen/shapash | d866cced7aa01f6d162faa910e53d281d3e35e4c | [
"Apache-2.0"
] | 2 | 2021-01-15T13:40:00.000Z | 2021-01-15T13:40:58.000Z | shapash/decomposition/contributions.py | alisoncossette/shapash | d2a5e466450e8bef1e2eac055d78773747244489 | [
"Apache-2.0"
] | null | null | null | shapash/decomposition/contributions.py | alisoncossette/shapash | d2a5e466450e8bef1e2eac055d78773747244489 | [
"Apache-2.0"
] | null | null | null | """
Contributions
"""
import pandas as pd
import numpy as np
from shapash.utils.transform import preprocessing_tolist
from shapash.utils.transform import check_transformers
from shapash.utils.category_encoder_backend import calc_inv_contrib_ce
from shapash.utils.columntransformer_backend import calc_inv_contrib_ct
def inverse_transform_contributions(contributions, preprocessing=None):
"""
    Reverse the contributions given a preprocessing.
    Preprocessing can be:
- a single category_encoders
- a single ColumnTransformer
- list with multiple category_encoders with optional (dict, list of dict)
- list with a single ColumnTransformer with optional (dict, list of dict)
- dict
- list of dict
Parameters
----------
contributions : pandas.DataFrame
Contributions values.
preprocessing : category_encoders, ColumnTransformer, list, dict, optional (default: None)
The processing apply to the original data.
Returns
-------
pandas.Dataframe
Return the aggregate contributions.
"""
if not isinstance(contributions, pd.DataFrame):
raise Exception('Shap values must be a pandas dataframe.')
if preprocessing is None:
return contributions
else:
#Transform preprocessing into a list
list_encoding = preprocessing_tolist(preprocessing)
# check supported inverse
use_ct, use_ce = check_transformers(list_encoding)
# Apply Inverse Transform
x_contrib_invers = contributions.copy()
if use_ct:
for encoding in list_encoding:
x_contrib_invers = calc_inv_contrib_ct(x_contrib_invers, encoding)
else:
for encoding in list_encoding:
x_contrib_invers = calc_inv_contrib_ce(x_contrib_invers, encoding)
return x_contrib_invers
def rank_contributions(s_df, x_df):
"""
Function to sort contributions and input features
by decreasing contribution absolute values
Parameters
----------
s_df: pandas.DataFrame
Local contributions dataframe.
x_df: pandas.DataFrame
Input features.
Returns
-------
pandas.DataFrame
Local contributions sorted by decreasing absolute values.
pandas.DataFrame
Input features sorted by decreasing contributions absolute values.
pandas.DataFrame
Input features names sorted for each observation
by decreasing contributions absolute values.
"""
argsort = np.argsort(-np.abs(s_df.values), axis=1)
sorted_contrib = np.take_along_axis(s_df.values, argsort, axis=1)
sorted_features = np.take_along_axis(x_df.values, argsort, axis=1)
contrib_col = ['contribution_' + str(i) for i in range(s_df.shape[1])]
col = ['feature_' + str(i) for i in range(s_df.shape[1])]
s_dict = pd.DataFrame(data=argsort, columns=col, index=x_df.index)
s_ord = pd.DataFrame(data=sorted_contrib, columns=contrib_col, index=x_df.index)
x_ord = pd.DataFrame(data=sorted_features, columns=col, index=x_df.index)
return [s_ord, x_ord, s_dict]
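# Illustrative sketch of rank_contributions (hypothetical values): given
# contributions s_df = [[0.1, -0.5]] and inputs x_df = [[7, "a"]], the
# outputs are
#   s_ord  -> [[-0.5, 0.1]]  (columns contribution_0, contribution_1)
#   x_ord  -> [["a", 7]]     (columns feature_0, feature_1)
#   s_dict -> [[1, 0]]       (original column indices, by decreasing |contribution|)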
def assign_contributions(ranked):
"""
Turn a list of results into a dict.
Parameters
----------
ranked : list
The output of rank_contributions.
Returns
-------
dict
Same data but rearrange into a dict with explicit names.
Raises
------
ValueError
The output of rank_contributions should always be of length three.
"""
if len(ranked) != 3:
raise ValueError(
            'Expected length: 3, observed length: {}, '
            'please check the outputs of rank_contributions.'.format(len(ranked))
)
return {
'contrib_sorted': ranked[0],
'x_sorted': ranked[1],
'var_dict': ranked[2]
}
| 31.219512 | 94 | 0.67526 |
793e9020ee57eb84e3ee364212dfa7ea6d597ab1 | 1,749 | py | Python | scripts/_oldstuff/hdf5traits.py | heistermann/trmmlib | b32cf623737285073e4c61bd0e01a0fe8b26c329 | [
"MIT"
] | null | null | null | scripts/_oldstuff/hdf5traits.py | heistermann/trmmlib | b32cf623737285073e4c61bd0e01a0fe8b26c329 | [
"MIT"
] | null | null | null | scripts/_oldstuff/hdf5traits.py | heistermann/trmmlib | b32cf623737285073e4c61bd0e01a0fe8b26c329 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 04 13:08:35 2016
@author: heistermann
"""
# adapted with help from
# http://stackoverflow.com/questions/15023333/simple-tool-library-to-visualize-huge-python-dict
from traits.api import HasTraits, Instance
from traitsui.api import View, VGroup, Item, ValueEditor
from wradlib.io import read_generic_hdf5
def ex_load_hdf5():
filename = "X:/gpm/level2/2A.GPM.Ku.V620160118.20160327-S004128-E011127.V04A.RT-H5"
# load rainbow file contents to dict
rbdict = read_generic_hdf5(filename)#, loaddata=False)
class DictEditor(HasTraits):
Object = Instance(object)
def __init__(self, obj, **traits):
super(DictEditor, self).__init__(**traits)
self.Object = obj
def trait_view(self, name=None, view_elements=None):
return View(
VGroup(
Item('Object',
label='Debug',
id='debug',
editor=ValueEditor(), # ValueEditor()
style='custom',
dock='horizontal',
show_label=False), ),
title='Dictionary Editor',
width=800,
height=600,
resizable=True)
def dic(my_data):
b = DictEditor(my_data)
b.configure_traits()
dic(rbdict)
# =======================================================
if __name__ == '__main__':
# ex_load_hdf5()
filename = "X:/gpm/level2/2A.GPM.Ku.V620160118.20160327-S004128-E011127.V04A.RT-H5"
    # load hdf5 file contents to dict
out = read_generic_hdf5(filename)#, loaddata=False)
for key in out.keys():
        print(key)
| 30.155172 | 95 | 0.563751 |
793e906630e8a955a0d438d51b42c0514743474e | 50,575 | py | Python | synapse/storage/databases/main/stream.py | BearerPipelineTest/synapse-1 | 78b99de7c206b106340e12cdee0af9aa246bd5ad | [
"Apache-2.0"
] | null | null | null | synapse/storage/databases/main/stream.py | BearerPipelineTest/synapse-1 | 78b99de7c206b106340e12cdee0af9aa246bd5ad | [
"Apache-2.0"
] | null | null | null | synapse/storage/databases/main/stream.py | BearerPipelineTest/synapse-1 | 78b99de7c206b106340e12cdee0af9aa246bd5ad | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module is responsible for getting events from the DB for pagination
and event streaming.
The order it returns events in depends on whether we are streaming forwards or
are paginating backwards. We do this because we want to handle out-of-order
messages nicely, while still returning them in the correct order when we
paginate backwards.
This is implemented by keeping two ordering columns: stream_ordering and
topological_ordering. Stream ordering is basically insertion/received order
(except for events from backfill requests). The topological_ordering is a
weak ordering of events based on the pdu graph.
This means that we have to have two different types of tokens, depending on
what sort order was used:
- stream tokens are of the form: "s%d", which maps directly to the column
    - topological tokens: "t%d-%d", where the integers map to the topological
and stream ordering columns respectively.
"""
import logging
from typing import (
TYPE_CHECKING,
Any,
Collection,
Dict,
List,
Optional,
Set,
Tuple,
cast,
)
import attr
from frozendict import frozendict
from twisted.internet import defer
from synapse.api.filtering import Filter
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import PersistedEventPosition, RoomStreamToken
from synapse.util.caches.descriptors import cached
from synapse.util.caches.stream_change_cache import StreamChangeCache
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
MAX_STREAM_SIZE = 1000
_STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"
# Used as return values for pagination APIs
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _EventDictReturn:
event_id: str
topological_ordering: Optional[int]
stream_ordering: int
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _EventsAround:
events_before: List[EventBase]
events_after: List[EventBase]
start: RoomStreamToken
end: RoomStreamToken
def generate_pagination_where_clause(
direction: str,
column_names: Tuple[str, str],
from_token: Optional[Tuple[Optional[int], int]],
to_token: Optional[Tuple[Optional[int], int]],
engine: BaseDatabaseEngine,
) -> str:
"""Creates an SQL expression to bound the columns by the pagination
tokens.
For example creates an SQL expression like:
(6, 7) >= (topological_ordering, stream_ordering)
AND (5, 3) < (topological_ordering, stream_ordering)
would be generated for dir=b, from_token=(6, 7) and to_token=(5, 3).
Note that tokens are considered to be after the row they are in, e.g. if
a row A has a token T, then we consider A to be before T. This convention
is important when figuring out inequalities for the generated SQL, and
produces the following result:
- If paginating forwards then we exclude any rows matching the from
token, but include those that match the to token.
        - If paginating backwards then we include any rows matching the from
          token, but exclude those that match the to token.
Args:
direction: Whether we're paginating backwards("b") or forwards ("f").
column_names: The column names to bound. Must *not* be user defined as
these get inserted directly into the SQL statement without escapes.
from_token: The start point for the pagination. This is an exclusive
minimum bound if direction is "f", and an inclusive maximum bound if
direction is "b".
        to_token: The end point for the pagination. This is an inclusive
maximum bound if direction is "f", and an exclusive minimum bound if
direction is "b".
engine: The database engine to generate the clauses for
Returns:
The sql expression
"""
assert direction in ("b", "f")
where_clause = []
if from_token:
where_clause.append(
_make_generic_sql_bound(
bound=">=" if direction == "b" else "<",
column_names=column_names,
values=from_token,
engine=engine,
)
)
if to_token:
where_clause.append(
_make_generic_sql_bound(
bound="<" if direction == "b" else ">=",
column_names=column_names,
values=to_token,
engine=engine,
)
)
return " AND ".join(where_clause)
def _make_generic_sql_bound(
bound: str,
column_names: Tuple[str, str],
values: Tuple[Optional[int], int],
engine: BaseDatabaseEngine,
) -> str:
"""Create an SQL expression that bounds the given column names by the
values, e.g. create the equivalent of `(1, 2) < (col1, col2)`.
Only works with two columns.
Older versions of SQLite don't support that syntax so we have to expand it
out manually.
Args:
bound: The comparison operator to use. One of ">", "<", ">=",
"<=", where the values are on the left and columns on the right.
        column_names: The column names. Must *not* be user defined
as these get inserted directly into the SQL statement without
escapes.
values: The values to bound the columns by. If
the first value is None then only creates a bound on the second
column.
engine: The database engine to generate the SQL for
Returns:
The SQL statement
"""
assert bound in (">", "<", ">=", "<=")
name1, name2 = column_names
val1, val2 = values
if val1 is None:
val2 = int(val2)
return "(%d %s %s)" % (val2, bound, name2)
val1 = int(val1)
val2 = int(val2)
if isinstance(engine, PostgresEngine):
# Postgres doesn't optimise ``(x < a) OR (x=a AND y<b)`` as well
# as it optimises ``(x,y) < (a,b)`` on multicolumn indexes. So we
# use the later form when running against postgres.
return "((%d,%d) %s (%s,%s))" % (val1, val2, bound, name1, name2)
# We want to generate queries of e.g. the form:
#
# (val1 < name1 OR (val1 = name1 AND val2 <= name2))
#
# which is equivalent to (val1, val2) < (name1, name2)
return """(
{val1:d} {strict_bound} {name1}
OR ({val1:d} = {name1} AND {val2:d} {bound} {name2})
)""".format(
name1=name1,
val1=val1,
name2=name2,
val2=val2,
strict_bound=bound[0], # The first bound must always be strict equality here
bound=bound,
)
def _filter_results(
lower_token: Optional[RoomStreamToken],
upper_token: Optional[RoomStreamToken],
instance_name: str,
topological_ordering: int,
stream_ordering: int,
) -> bool:
"""Returns True if the event persisted by the given instance at the given
topological/stream_ordering falls between the two tokens (taking a None
token to mean unbounded).
Used to filter results from fetching events in the DB against the given
tokens. This is necessary to handle the case where the tokens include
position maps, which we handle by fetching more than necessary from the DB
and then filtering (rather than attempting to construct a complicated SQL
query).
"""
event_historical_tuple = (
topological_ordering,
stream_ordering,
)
if lower_token:
if lower_token.topological is not None:
# If these are historical tokens we compare the `(topological, stream)`
# tuples.
if event_historical_tuple <= lower_token.as_historical_tuple():
return False
else:
# If these are live tokens we compare the stream ordering against the
# writers stream position.
if stream_ordering <= lower_token.get_stream_pos_for_instance(
instance_name
):
return False
if upper_token:
if upper_token.topological is not None:
if upper_token.as_historical_tuple() < event_historical_tuple:
return False
else:
if upper_token.get_stream_pos_for_instance(instance_name) < stream_ordering:
return False
return True
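# Illustrative sketch of the filtering above (hypothetical values): with a
# "live" lower_token whose position for instance "master" is 5 and no
# upper_token, an event persisted by "master" at stream_ordering 6 is kept,
# while one at stream_ordering 5 is dropped (the token acts as an exclusive
# lower bound on the writer's stream position).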
def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]:
# NB: This may create SQL clauses that don't optimise well (and we don't
# have indices on all possible clauses). E.g. it may create
# "room_id == X AND room_id != X", which postgres doesn't optimise.
if not event_filter:
return "", []
clauses = []
args = []
if event_filter.types:
clauses.append(
"(%s)" % " OR ".join("event.type = ?" for _ in event_filter.types)
)
args.extend(event_filter.types)
for typ in event_filter.not_types:
clauses.append("event.type != ?")
args.append(typ)
if event_filter.senders:
clauses.append(
"(%s)" % " OR ".join("event.sender = ?" for _ in event_filter.senders)
)
args.extend(event_filter.senders)
for sender in event_filter.not_senders:
clauses.append("event.sender != ?")
args.append(sender)
if event_filter.rooms:
clauses.append(
"(%s)" % " OR ".join("event.room_id = ?" for _ in event_filter.rooms)
)
args.extend(event_filter.rooms)
for room_id in event_filter.not_rooms:
clauses.append("event.room_id != ?")
args.append(room_id)
if event_filter.contains_url:
clauses.append("event.contains_url = ?")
args.append(event_filter.contains_url)
# We're only applying the "labels" filter on the database query, because applying the
# "not_labels" filter via a SQL query is non-trivial. Instead, we let
# event_filter.check_fields apply it, which is not as efficient but makes the
# implementation simpler.
if event_filter.labels:
clauses.append("(%s)" % " OR ".join("label = ?" for _ in event_filter.labels))
args.extend(event_filter.labels)
# Filter on relation_senders / relation types from the joined tables.
if event_filter.related_by_senders:
clauses.append(
"(%s)"
% " OR ".join(
"related_event.sender = ?" for _ in event_filter.related_by_senders
)
)
args.extend(event_filter.related_by_senders)
if event_filter.related_by_rel_types:
clauses.append(
"(%s)"
% " OR ".join(
"relation_type = ?" for _ in event_filter.related_by_rel_types
)
)
args.extend(event_filter.related_by_rel_types)
return " AND ".join(clauses), args
class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self._instance_name = hs.get_instance_name()
self._send_federation = hs.should_send_federation()
self._federation_shard_config = hs.config.worker.federation_shard_config
# If we're a process that sends federation we may need to reset the
# `federation_stream_position` table to match the current sharding
# config. We don't do this now as otherwise two processes could conflict
# during startup which would cause one to die.
self._need_to_reset_federation_stream_positions = self._send_federation
events_max = self.get_room_max_stream_ordering()
event_cache_prefill, min_event_val = self.db_pool.get_cache_dict(
db_conn,
"events",
entity_column="room_id",
stream_column="stream_ordering",
max_value=events_max,
)
self._events_stream_cache = StreamChangeCache(
"EventsRoomStreamChangeCache",
min_event_val,
prefilled_cache=event_cache_prefill,
)
self._membership_stream_cache = StreamChangeCache(
"MembershipStreamChangeCache", events_max
)
self._stream_order_on_start = self.get_room_max_stream_ordering()
def get_room_max_stream_ordering(self) -> int:
"""Get the stream_ordering of regular events that we have committed up to
Returns the maximum stream id such that all stream ids less than or
equal to it have been successfully persisted.
"""
return self._stream_id_gen.get_current_token()
def get_room_min_stream_ordering(self) -> int:
"""Get the stream_ordering of backfilled events that we have committed up to
Backfilled events use *negative* stream orderings, so this returns the
minimum negative stream id such that all stream ids greater than or
equal to it have been successfully persisted.
"""
return self._backfill_id_gen.get_current_token()
def get_room_max_token(self) -> RoomStreamToken:
"""Get a `RoomStreamToken` that marks the current maximum persisted
position of the events stream. Useful to get a token that represents
"now".
The token returned is a "live" token that may have an instance_map
component.
"""
min_pos = self._stream_id_gen.get_current_token()
positions = {}
if isinstance(self._stream_id_gen, MultiWriterIdGenerator):
# The `min_pos` is the minimum position that we know all instances
# have finished persisting to, so we only care about instances whose
# positions are ahead of that. (Instance positions can be behind the
# min position as there are times we can work out that the minimum
# position is ahead of the naive minimum across all current
# positions. See MultiWriterIdGenerator for details)
positions = {
i: p
for i, p in self._stream_id_gen.get_positions().items()
if p > min_pos
}
return RoomStreamToken(None, min_pos, frozendict(positions))
async def get_room_events_stream_for_rooms(
self,
room_ids: Collection[str],
from_key: RoomStreamToken,
to_key: RoomStreamToken,
limit: int = 0,
order: str = "DESC",
) -> Dict[str, Tuple[List[EventBase], RoomStreamToken]]:
"""Get new room events in stream ordering since `from_key`.
Args:
room_ids
from_key: Token from which no events are returned before
to_key: Token from which no events are returned after. (This
is typically the current stream token)
limit: Maximum number of events to return
order: Either "DESC" or "ASC". Determines which events are
returned when the result is limited. If "DESC" then the most
recent `limit` events are returned, otherwise returns the
oldest `limit` events.
Returns:
A map from room id to a tuple containing:
- list of recent events in the room
- stream ordering key for the start of the chunk of events returned.
"""
room_ids = self._events_stream_cache.get_entities_changed(
room_ids, from_key.stream
)
if not room_ids:
return {}
results = {}
room_ids = list(room_ids)
for rm_ids in (room_ids[i : i + 20] for i in range(0, len(room_ids), 20)):
res = await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(
self.get_room_events_stream_for_room,
room_id,
from_key,
to_key,
limit,
order=order,
)
for room_id in rm_ids
],
consumeErrors=True,
)
)
results.update(dict(zip(rm_ids, res)))
return results
def get_rooms_that_changed(
self, room_ids: Collection[str], from_key: RoomStreamToken
) -> Set[str]:
"""Given a list of rooms and a token, return rooms where there may have
been changes.
"""
from_id = from_key.stream
return {
room_id
for room_id in room_ids
if self._events_stream_cache.has_entity_changed(room_id, from_id)
}
async def get_room_events_stream_for_room(
self,
room_id: str,
from_key: RoomStreamToken,
to_key: RoomStreamToken,
limit: int = 0,
order: str = "DESC",
) -> Tuple[List[EventBase], RoomStreamToken]:
"""Get new room events in stream ordering since `from_key`.
Args:
room_id
from_key: Token from which no events are returned before
to_key: Token from which no events are returned after. (This
is typically the current stream token)
limit: Maximum number of events to return
order: Either "DESC" or "ASC". Determines which events are
returned when the result is limited. If "DESC" then the most
recent `limit` events are returned, otherwise returns the
oldest `limit` events.
Returns:
The list of events (in ascending stream order) and the token from the start
of the chunk of events returned.
"""
if from_key == to_key:
return [], from_key
has_changed = self._events_stream_cache.has_entity_changed(
room_id, from_key.stream
)
if not has_changed:
return [], from_key
def f(txn: LoggingTransaction) -> List[_EventDictReturn]:
# To handle tokens with a non-empty instance_map we fetch more
# results than necessary and then filter down
min_from_id = from_key.stream
max_to_id = to_key.get_max_stream_pos()
sql = """
SELECT event_id, instance_name, topological_ordering, stream_ordering
FROM events
WHERE
room_id = ?
AND not outlier
AND stream_ordering > ? AND stream_ordering <= ?
ORDER BY stream_ordering %s LIMIT ?
""" % (
order,
)
txn.execute(sql, (room_id, min_from_id, max_to_id, 2 * limit))
rows = [
_EventDictReturn(event_id, None, stream_ordering)
for event_id, instance_name, topological_ordering, stream_ordering in txn
if _filter_results(
from_key,
to_key,
instance_name,
topological_ordering,
stream_ordering,
)
][:limit]
return rows
rows = await self.db_pool.runInteraction("get_room_events_stream_for_room", f)
ret = await self.get_events_as_list(
[r.event_id for r in rows], get_prev_content=True
)
self._set_before_and_after(ret, rows, topo_order=False)
if order.lower() == "desc":
ret.reverse()
if rows:
key = RoomStreamToken(None, min(r.stream_ordering for r in rows))
else:
# Assume we didn't get anything because there was nothing to
# get.
key = from_key
return ret, key
async def get_membership_changes_for_user(
self,
user_id: str,
from_key: RoomStreamToken,
to_key: RoomStreamToken,
excluded_rooms: Optional[List[str]] = None,
) -> List[EventBase]:
"""Fetch membership events for a given user.
All such events whose stream ordering `s` lies in the range
`from_key < s <= to_key` are returned. Events are ordered by ascending stream
order.
"""
# Start by ruling out cases where a DB query is not necessary.
if from_key == to_key:
return []
if from_key:
has_changed = self._membership_stream_cache.has_entity_changed(
user_id, int(from_key.stream)
)
if not has_changed:
return []
def f(txn: LoggingTransaction) -> List[_EventDictReturn]:
# To handle tokens with a non-empty instance_map we fetch more
# results than necessary and then filter down
min_from_id = from_key.stream
max_to_id = to_key.get_max_stream_pos()
args: List[Any] = [user_id, min_from_id, max_to_id]
ignore_room_clause = ""
if excluded_rooms is not None and len(excluded_rooms) > 0:
ignore_room_clause = "AND e.room_id NOT IN (%s)" % ",".join(
"?" for _ in excluded_rooms
)
args = args + excluded_rooms
sql = """
SELECT m.event_id, instance_name, topological_ordering, stream_ordering
FROM events AS e, room_memberships AS m
WHERE e.event_id = m.event_id
AND m.user_id = ?
AND e.stream_ordering > ? AND e.stream_ordering <= ?
%s
ORDER BY e.stream_ordering ASC
""" % (
ignore_room_clause,
)
txn.execute(sql, args)
rows = [
_EventDictReturn(event_id, None, stream_ordering)
for event_id, instance_name, topological_ordering, stream_ordering in txn
if _filter_results(
from_key,
to_key,
instance_name,
topological_ordering,
stream_ordering,
)
]
return rows
rows = await self.db_pool.runInteraction("get_membership_changes_for_user", f)
ret = await self.get_events_as_list(
[r.event_id for r in rows], get_prev_content=True
)
self._set_before_and_after(ret, rows, topo_order=False)
return ret
async def get_recent_events_for_room(
self, room_id: str, limit: int, end_token: RoomStreamToken
) -> Tuple[List[EventBase], RoomStreamToken]:
"""Get the most recent events in the room in topological ordering.
Args:
room_id
limit
end_token: The stream token representing now.
Returns:
A list of events and a token pointing to the start of the returned
events. The events returned are in ascending topological order.
"""
rows, token = await self.get_recent_event_ids_for_room(
room_id, limit, end_token
)
events = await self.get_events_as_list(
[r.event_id for r in rows], get_prev_content=True
)
self._set_before_and_after(events, rows)
return events, token
async def get_recent_event_ids_for_room(
self, room_id: str, limit: int, end_token: RoomStreamToken
) -> Tuple[List[_EventDictReturn], RoomStreamToken]:
"""Get the most recent events in the room in topological ordering.
Args:
room_id
limit
end_token: The stream token representing now.
Returns:
A list of _EventDictReturn and a token pointing to the start of the
returned events. The events returned are in ascending order.
"""
# Allow a zero limit here, and no-op.
if limit == 0:
return [], end_token
rows, token = await self.db_pool.runInteraction(
"get_recent_event_ids_for_room",
self._paginate_room_events_txn,
room_id,
from_token=end_token,
limit=limit,
)
# We want to return the results in ascending order.
rows.reverse()
return rows, token
async def get_room_event_before_stream_ordering(
self, room_id: str, stream_ordering: int
) -> Optional[Tuple[int, int, str]]:
"""Gets details of the first event in a room at or before a stream ordering
Args:
room_id:
stream_ordering:
Returns:
A tuple of (stream ordering, topological ordering, event_id)
"""
def _f(txn: LoggingTransaction) -> Optional[Tuple[int, int, str]]:
sql = (
"SELECT stream_ordering, topological_ordering, event_id"
" FROM events"
" WHERE room_id = ? AND stream_ordering <= ?"
" AND NOT outlier"
" ORDER BY stream_ordering DESC"
" LIMIT 1"
)
txn.execute(sql, (room_id, stream_ordering))
return cast(Optional[Tuple[int, int, str]], txn.fetchone())
return await self.db_pool.runInteraction(
"get_room_event_before_stream_ordering", _f
)
async def get_last_event_in_room_before_stream_ordering(
self,
room_id: str,
end_token: RoomStreamToken,
) -> Optional[EventBase]:
"""Returns the last event in a room at or before a stream ordering
Args:
room_id
end_token: The token used to stream from
Returns:
The most recent event.
"""
last_row = await self.get_room_event_before_stream_ordering(
room_id=room_id,
stream_ordering=end_token.stream,
)
if last_row:
_, _, event_id = last_row
event = await self.get_event(event_id, get_prev_content=True)
return event
return None
async def get_current_room_stream_token_for_room_id(
self, room_id: Optional[str] = None
) -> RoomStreamToken:
"""Returns the current position of the rooms stream.
By default, it returns a live token with the current global stream
token. Specifying a `room_id` causes it to return a historic token with
the room specific topological token.
"""
stream_ordering = self.get_room_max_stream_ordering()
if room_id is None:
return RoomStreamToken(None, stream_ordering)
else:
topo = await self.db_pool.runInteraction(
"_get_max_topological_txn", self._get_max_topological_txn, room_id
)
return RoomStreamToken(topo, stream_ordering)
def get_stream_id_for_event_txn(
self,
txn: LoggingTransaction,
event_id: str,
allow_none=False,
) -> int:
return self.db_pool.simple_select_one_onecol_txn(
txn=txn,
table="events",
keyvalues={"event_id": event_id},
retcol="stream_ordering",
allow_none=allow_none,
)
async def get_position_for_event(self, event_id: str) -> PersistedEventPosition:
"""Get the persisted position for an event"""
row = await self.db_pool.simple_select_one(
table="events",
keyvalues={"event_id": event_id},
retcols=("stream_ordering", "instance_name"),
desc="get_position_for_event",
)
return PersistedEventPosition(
row["instance_name"] or "master", row["stream_ordering"]
)
async def get_topological_token_for_event(self, event_id: str) -> RoomStreamToken:
"""The stream token for an event
Args:
event_id: The id of the event to look up a stream token for.
Raises:
StoreError if the event wasn't in the database.
Returns:
A `RoomStreamToken` topological token.
"""
row = await self.db_pool.simple_select_one(
table="events",
keyvalues={"event_id": event_id},
retcols=("stream_ordering", "topological_ordering"),
desc="get_topological_token_for_event",
)
return RoomStreamToken(row["topological_ordering"], row["stream_ordering"])
async def get_current_topological_token(self, room_id: str, stream_key: int) -> int:
"""Gets the topological token in a room after or at the given stream
ordering.
Args:
            room_id: The room to find the topological token in.
            stream_key: The stream ordering to search at or after.
        Returns:
            The smallest topological ordering of an event at or after the given
            stream ordering, or 0 if the room has no such event.
"""
sql = (
"SELECT coalesce(MIN(topological_ordering), 0) FROM events"
" WHERE room_id = ? AND stream_ordering >= ?"
)
row = await self.db_pool.execute(
"get_current_topological_token", None, sql, room_id, stream_key
)
return row[0][0] if row else 0
def _get_max_topological_txn(self, txn: LoggingTransaction, room_id: str) -> int:
txn.execute(
"SELECT MAX(topological_ordering) FROM events WHERE room_id = ?",
(room_id,),
)
rows = txn.fetchall()
return rows[0][0] if rows else 0
@staticmethod
def _set_before_and_after(
events: List[EventBase], rows: List[_EventDictReturn], topo_order: bool = True
) -> None:
"""Inserts ordering information to events' internal metadata from
the DB rows.
Args:
            events: The events to annotate, in the same order as `rows`.
            rows: The database rows the events were loaded from.
topo_order: Whether the events were ordered topologically or by stream
ordering. If true then all rows should have a non null
topological_ordering.
"""
for event, row in zip(events, rows):
stream = row.stream_ordering
if topo_order and row.topological_ordering:
topo: Optional[int] = row.topological_ordering
else:
topo = None
internal = event.internal_metadata
internal.before = RoomStreamToken(topo, stream - 1)
internal.after = RoomStreamToken(topo, stream)
internal.order = (int(topo) if topo else 0, int(stream))
async def get_events_around(
self,
room_id: str,
event_id: str,
before_limit: int,
after_limit: int,
event_filter: Optional[Filter] = None,
) -> _EventsAround:
"""Retrieve events and pagination tokens around a given event in a
room.
"""
results = await self.db_pool.runInteraction(
"get_events_around",
self._get_events_around_txn,
room_id,
event_id,
before_limit,
after_limit,
event_filter,
)
events_before = await self.get_events_as_list(
list(results["before"]["event_ids"]), get_prev_content=True
)
events_after = await self.get_events_as_list(
list(results["after"]["event_ids"]), get_prev_content=True
)
return _EventsAround(
events_before=events_before,
events_after=events_after,
start=results["before"]["token"],
end=results["after"]["token"],
)
def _get_events_around_txn(
self,
txn: LoggingTransaction,
room_id: str,
event_id: str,
before_limit: int,
after_limit: int,
event_filter: Optional[Filter],
) -> dict:
"""Retrieves event_ids and pagination tokens around a given event in a
room.
Args:
            room_id: The room the event belongs to.
            event_id: The event to find the surrounding events of.
            before_limit: The maximum number of events to return before the event.
            after_limit: The maximum number of events to return after the event.
            event_filter: If provided, filters the returned events.
        Returns:
            A dict with the keys "before" and "after", each mapping to a dict
            containing the matching "event_ids" and a pagination "token".
"""
results = self.db_pool.simple_select_one_txn(
txn,
"events",
keyvalues={"event_id": event_id, "room_id": room_id},
retcols=["stream_ordering", "topological_ordering"],
)
# This cannot happen as `allow_none=False`.
assert results is not None
# Paginating backwards includes the event at the token, but paginating
# forward doesn't.
before_token = RoomStreamToken(
results["topological_ordering"] - 1, results["stream_ordering"]
)
after_token = RoomStreamToken(
results["topological_ordering"], results["stream_ordering"]
)
rows, start_token = self._paginate_room_events_txn(
txn,
room_id,
before_token,
direction="b",
limit=before_limit,
event_filter=event_filter,
)
events_before = [r.event_id for r in rows]
rows, end_token = self._paginate_room_events_txn(
txn,
room_id,
after_token,
direction="f",
limit=after_limit,
event_filter=event_filter,
)
events_after = [r.event_id for r in rows]
return {
"before": {"event_ids": events_before, "token": start_token},
"after": {"event_ids": events_after, "token": end_token},
}
async def get_all_new_events_stream(
self, from_id: int, current_id: int, limit: int
) -> Tuple[int, List[EventBase]]:
"""Get all new events
Returns all events with from_id < stream_ordering <= current_id.
Args:
from_id: the stream_ordering of the last event we processed
current_id: the stream_ordering of the most recently processed event
limit: the maximum number of events to return
Returns:
A tuple of (next_id, events), where `next_id` is the next value to
pass as `from_id` (it will either be the stream_ordering of the
last returned event, or, if fewer than `limit` events were found,
the `current_id`).
"""
def get_all_new_events_stream_txn(
txn: LoggingTransaction,
) -> Tuple[int, List[str]]:
sql = (
"SELECT e.stream_ordering, e.event_id"
" FROM events AS e"
" WHERE"
" ? < e.stream_ordering AND e.stream_ordering <= ?"
" ORDER BY e.stream_ordering ASC"
" LIMIT ?"
)
txn.execute(sql, (from_id, current_id, limit))
rows = txn.fetchall()
upper_bound = current_id
if len(rows) == limit:
upper_bound = rows[-1][0]
return upper_bound, [row[1] for row in rows]
upper_bound, event_ids = await self.db_pool.runInteraction(
"get_all_new_events_stream", get_all_new_events_stream_txn
)
events = await self.get_events_as_list(event_ids)
return upper_bound, events
async def get_federation_out_pos(self, typ: str) -> int:
if self._need_to_reset_federation_stream_positions:
await self.db_pool.runInteraction(
"_reset_federation_positions_txn", self._reset_federation_positions_txn
)
self._need_to_reset_federation_stream_positions = False
return await self.db_pool.simple_select_one_onecol(
table="federation_stream_position",
retcol="stream_id",
keyvalues={"type": typ, "instance_name": self._instance_name},
desc="get_federation_out_pos",
)
async def update_federation_out_pos(self, typ: str, stream_id: int) -> None:
if self._need_to_reset_federation_stream_positions:
await self.db_pool.runInteraction(
"_reset_federation_positions_txn", self._reset_federation_positions_txn
)
self._need_to_reset_federation_stream_positions = False
await self.db_pool.simple_update_one(
table="federation_stream_position",
keyvalues={"type": typ, "instance_name": self._instance_name},
updatevalues={"stream_id": stream_id},
desc="update_federation_out_pos",
)
def _reset_federation_positions_txn(self, txn: LoggingTransaction) -> None:
"""Fiddles with the `federation_stream_position` table to make it match
the configured federation sender instances during start up.
"""
# The federation sender instances may have changed, so we need to
# massage the `federation_stream_position` table to have a row per type
# per instance sending federation. If there is a mismatch we update the
# table with the correct rows using the *minimum* stream ID seen. This
# may result in resending of events/EDUs to remote servers, but that is
# preferable to dropping them.
if not self._send_federation:
return
# Pull out the configured instances. If we don't have a shard config then
# we assume that we're the only instance sending.
configured_instances = self._federation_shard_config.instances
if not configured_instances:
configured_instances = [self._instance_name]
elif self._instance_name not in configured_instances:
return
instances_in_table = self.db_pool.simple_select_onecol_txn(
txn,
table="federation_stream_position",
keyvalues={},
retcol="instance_name",
)
if set(instances_in_table) == set(configured_instances):
# Nothing to do
return
sql = """
SELECT type, MIN(stream_id) FROM federation_stream_position
GROUP BY type
"""
txn.execute(sql)
min_positions = {typ: pos for typ, pos in txn} # Map from type -> min position
# Ensure we do actually have some values here
assert set(min_positions) == {"federation", "events"}
sql = """
DELETE FROM federation_stream_position
WHERE NOT (%s)
"""
clause, args = make_in_list_sql_clause(
txn.database_engine, "instance_name", configured_instances
)
txn.execute(sql % (clause,), args)
for typ, stream_id in min_positions.items():
self.db_pool.simple_upsert_txn(
txn,
table="federation_stream_position",
keyvalues={"type": typ, "instance_name": self._instance_name},
values={"stream_id": stream_id},
)
def has_room_changed_since(self, room_id: str, stream_id: int) -> bool:
return self._events_stream_cache.has_entity_changed(room_id, stream_id)
def _paginate_room_events_txn(
self,
txn: LoggingTransaction,
room_id: str,
from_token: RoomStreamToken,
to_token: Optional[RoomStreamToken] = None,
direction: str = "b",
limit: int = -1,
event_filter: Optional[Filter] = None,
) -> Tuple[List[_EventDictReturn], RoomStreamToken]:
"""Returns list of events before or after a given token.
Args:
            txn: The database transaction to use.
            room_id: The room to paginate events in.
            from_token: The token used to stream from.
            to_token: A token which, if given, limits the results to only those
                before it.
direction: Either 'b' or 'f' to indicate whether we are paginating
forwards or backwards from `from_key`.
limit: The maximum number of events to return.
event_filter: If provided filters the events to
those that match the filter.
Returns:
A list of _EventDictReturn and a token that points to the end of the
result set. If no events are returned then the end of the stream has
been reached (i.e. there are no events between `from_token` and
`to_token`), or `limit` is zero.
"""
assert int(limit) >= 0
# Tokens really represent positions between elements, but we use
# the convention of pointing to the event before the gap. Hence
# we have a bit of asymmetry when it comes to equalities.
args = [False, room_id]
if direction == "b":
order = "DESC"
else:
order = "ASC"
# The bounds for the stream tokens are complicated by the fact
# that we need to handle the instance_map part of the tokens. We do this
# by fetching all events between the min stream token and the maximum
# stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and
# then filtering the results.
if from_token.topological is not None:
from_bound: Tuple[Optional[int], int] = from_token.as_historical_tuple()
elif direction == "b":
from_bound = (
None,
from_token.get_max_stream_pos(),
)
else:
from_bound = (
None,
from_token.stream,
)
to_bound: Optional[Tuple[Optional[int], int]] = None
if to_token:
if to_token.topological is not None:
to_bound = to_token.as_historical_tuple()
elif direction == "b":
to_bound = (
None,
to_token.stream,
)
else:
to_bound = (
None,
to_token.get_max_stream_pos(),
)
bounds = generate_pagination_where_clause(
direction=direction,
column_names=("event.topological_ordering", "event.stream_ordering"),
from_token=from_bound,
to_token=to_bound,
engine=self.database_engine,
)
filter_clause, filter_args = filter_to_clause(event_filter)
if filter_clause:
bounds += " AND " + filter_clause
args.extend(filter_args)
# We fetch more events as we'll filter the result set
args.append(int(limit) * 2)
select_keywords = "SELECT"
join_clause = ""
# Using DISTINCT in this SELECT query is quite expensive, because it
# requires the engine to sort on the entire (not limited) result set,
# i.e. the entire events table. Only use it in scenarios that could result
# in the same event ID occurring multiple times in the results.
needs_distinct = False
if event_filter and event_filter.labels:
# If we're not filtering on a label, then joining on event_labels will
            # return as many rows for a single event as the number of labels it has. To
# avoid this, only join if we're filtering on at least one label.
join_clause += """
LEFT JOIN event_labels
USING (event_id, room_id, topological_ordering)
"""
if len(event_filter.labels) > 1:
# Multiple labels could cause the same event to appear multiple times.
needs_distinct = True
# If there is a filter on relation_senders and relation_types join to the
# relations table.
if event_filter and (
event_filter.related_by_senders or event_filter.related_by_rel_types
):
# Filtering by relations could cause the same event to appear multiple
# times (since there's no limit on the number of relations to an event).
needs_distinct = True
join_clause += """
LEFT JOIN event_relations AS relation ON (event.event_id = relation.relates_to_id)
"""
if event_filter.related_by_senders:
join_clause += """
LEFT JOIN events AS related_event ON (relation.event_id = related_event.event_id)
"""
if needs_distinct:
select_keywords += " DISTINCT"
sql = """
%(select_keywords)s
event.event_id, event.instance_name,
event.topological_ordering, event.stream_ordering
FROM events AS event
%(join_clause)s
WHERE event.outlier = ? AND event.room_id = ? AND %(bounds)s
ORDER BY event.topological_ordering %(order)s,
event.stream_ordering %(order)s LIMIT ?
""" % {
"select_keywords": select_keywords,
"join_clause": join_clause,
"bounds": bounds,
"order": order,
}
txn.execute(sql, args)
# Filter the result set.
rows = [
_EventDictReturn(event_id, topological_ordering, stream_ordering)
for event_id, instance_name, topological_ordering, stream_ordering in txn
if _filter_results(
lower_token=to_token if direction == "b" else from_token,
upper_token=from_token if direction == "b" else to_token,
instance_name=instance_name,
topological_ordering=topological_ordering,
stream_ordering=stream_ordering,
)
][:limit]
if rows:
topo = rows[-1].topological_ordering
toke = rows[-1].stream_ordering
if direction == "b":
# Tokens are positions between events.
# This token points *after* the last event in the chunk.
# We need it to point to the event before it in the chunk
# when we are going backwards so we subtract one from the
# stream part.
toke -= 1
next_token = RoomStreamToken(topo, toke)
else:
# TODO (erikj): We should work out what to do here instead.
next_token = to_token if to_token else from_token
return rows, next_token
async def paginate_room_events(
self,
room_id: str,
from_key: RoomStreamToken,
to_key: Optional[RoomStreamToken] = None,
direction: str = "b",
limit: int = -1,
event_filter: Optional[Filter] = None,
) -> Tuple[List[EventBase], RoomStreamToken]:
"""Returns list of events before or after a given token.
Args:
            room_id: The room to paginate events in.
            from_key: The token used to stream from.
            to_key: A token which, if given, limits the results to only those
                before it.
direction: Either 'b' or 'f' to indicate whether we are paginating
forwards or backwards from `from_key`.
limit: The maximum number of events to return.
event_filter: If provided filters the events to those that match the filter.
Returns:
The results as a list of events and a token that points to the end
of the result set. If no events are returned then the end of the
stream has been reached (i.e. there are no events between `from_key`
and `to_key`).
"""
rows, token = await self.db_pool.runInteraction(
"paginate_room_events",
self._paginate_room_events_txn,
room_id,
from_key,
to_key,
direction,
limit,
event_filter,
)
events = await self.get_events_as_list(
[r.event_id for r in rows], get_prev_content=True
)
self._set_before_and_after(events, rows)
return events, token
@cached()
async def get_id_for_instance(self, instance_name: str) -> int:
"""Get a unique, immutable ID that corresponds to the given Synapse worker instance."""
def _get_id_for_instance_txn(txn: LoggingTransaction) -> int:
instance_id = self.db_pool.simple_select_one_onecol_txn(
txn,
table="instance_map",
keyvalues={"instance_name": instance_name},
retcol="instance_id",
allow_none=True,
)
if instance_id is not None:
return instance_id
# If we don't have an entry upsert one.
#
# We could do this before the first check, and rely on the cache for
# efficiency, but each UPSERT causes the next ID to increment which
# can quickly bloat the size of the generated IDs for new instances.
self.db_pool.simple_upsert_txn(
txn,
table="instance_map",
keyvalues={"instance_name": instance_name},
values={},
)
return self.db_pool.simple_select_one_onecol_txn(
txn,
table="instance_map",
keyvalues={"instance_name": instance_name},
retcol="instance_id",
)
return await self.db_pool.runInteraction(
"get_id_for_instance", _get_id_for_instance_txn
)
@cached()
async def get_name_from_instance_id(self, instance_id: int) -> str:
"""Get the instance name from an ID previously returned by
`get_id_for_instance`.
"""
return await self.db_pool.simple_select_one_onecol(
table="instance_map",
keyvalues={"instance_id": instance_id},
retcol="instance_name",
desc="get_name_from_instance_id",
)
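# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of upstream Synapse): one way the
# pagination API above can be driven to walk a room's timeline backwards.
# The `store` (a stream store instance) and `room_id` names are assumptions
# made for this example.
async def _example_walk_timeline_backwards(store, room_id: str) -> None:
    # Start from "now": a live token at the current maximum stream ordering.
    token = await store.get_current_room_stream_token_for_room_id(room_id)
    while True:
        # Page backwards ("b") in chunks of 50 events; the returned token
        # points at the end of this chunk and seeds the next call.
        events, token = await store.paginate_room_events(
            room_id, from_key=token, direction="b", limit=50
        )
        if not events:
            # An empty page means the start of the room's history was reached.
            break
        for event in events:
            print(event.event_id, event.internal_metadata.order)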
| 35.666432 | 101 | 0.603697 |
793e918096ca989ab6e0a00b52b8e03a69f06e90 | 723 | py | Python | figlets/OLE.py | LeverImmy/SmojSubmit | 7b18812e8b9726184880d0016fc0d19679e50a8a | [
"MIT"
] | 12 | 2018-08-13T14:47:39.000Z | 2022-03-06T13:13:08.000Z | figlets/OLE.py | LeverImmy/SmojSubmit | 7b18812e8b9726184880d0016fc0d19679e50a8a | [
"MIT"
] | 3 | 2019-08-19T16:22:30.000Z | 2020-09-14T21:38:01.000Z | figlets/OLE.py | LeverImmy/SmojSubmit | 7b18812e8b9726184880d0016fc0d19679e50a8a | [
"MIT"
] | 2 | 2019-04-02T02:31:20.000Z | 2020-05-18T04:21:39.000Z | figlet = (
r" ___ _ _ _ _ _ _ _____ _ _ " + '\n'
r" / _ \ _ _| |_ _ __ _ _| |_ | | (_)_ __ ___ (_) |_ | ____|_ _____ ___ ___ __| | ___ __| |" + '\n'
r"| | | | | | | __| '_ \| | | | __| | | | | '_ ` _ \| | __| | _| \ \/ / __/ _ \/ _ \/ _` |/ _ \/ _` |" + '\n'
r"| |_| | |_| | |_| |_) | |_| | |_ | |___| | | | | | | | |_ | |___ > < (_| __/ __/ (_| | __/ (_| |" + '\n'
r" \___/ \__,_|\__| .__/ \__,_|\__| |_____|_|_| |_| |_|_|\__| |_____/_/\_\___\___|\___|\__,_|\___|\__,_|" + '\n'
r" |_| " + '\n'
)
| 80.333333 | 117 | 0.232365 |
793e92d6b6031cbfd2b7bc201aa42435574b6701 | 5,774 | py | Python | configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py | JKingKong/mmdetection | cfa22397194c592c25bd19e2f9f2f60f1ea699d3 | [
"Apache-2.0"
] | null | null | null | configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py | JKingKong/mmdetection | cfa22397194c592c25bd19e2f9f2f60f1ea699d3 | [
"Apache-2.0"
] | null | null | null | configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py | JKingKong/mmdetection | cfa22397194c592c25bd19e2f9f2f60f1ea699d3 | [
"Apache-2.0"
] | null | null | null | # model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1+1,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 300
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_carafe_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
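# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original config): how a config
# like this is commonly loaded for inspection. It assumes the mmcv/mmdetection
# 2.x-era `mmcv.Config` API and the usual `tools/train.py` entry point;
# adjust both to the versions actually in use.
if __name__ == '__main__':
    from mmcv import Config
    cfg = Config.fromfile('configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py')
    # Training itself is normally launched via, e.g.:
    #   python tools/train.py configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py
    print(cfg.model.neck.type, cfg.total_epochs, cfg.work_dir)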
| 30.550265 | 78 | 0.587807 |
793e92d75811f280ed1e3e3a6a92f8ca55df64f7 | 2,433 | py | Python | examples/plugins_manager.py | jspure/Pushkin-is-White-Theme | 1ef3757497237c94cdfc103e978af51a4e5f5459 | [
"MIT"
] | 5 | 2018-07-02T14:09:17.000Z | 2020-01-14T14:43:14.000Z | examples/plugins_manager.py | jspure/Pushkin-is-White-Theme | 1ef3757497237c94cdfc103e978af51a4e5f5459 | [
"MIT"
] | 2 | 2018-09-24T09:31:22.000Z | 2019-11-11T13:12:41.000Z | examples/plugins_manager.py | jspure/Pushkin-is-White-Theme | 1ef3757497237c94cdfc103e978af51a4e5f5459 | [
"MIT"
] | 1 | 2018-07-02T12:52:18.000Z | 2018-07-02T12:52:18.000Z | import os
import gixy
from gixy.plugins.plugin import Plugin
class PluginsManager(object):
def __init__(self, config=None):
self.imported = False
self.config = config
self._plugins = []
def import_plugins(self):
if self.imported:
return
files_list = os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'plugins'))
for plugin_file in files_list:
if not plugin_file.endswith('.py') or plugin_file.startswith('_'):
continue
__import__('gixy.plugins.' + os.path.splitext(plugin_file)[0], None, None, [''])
self.imported = True
def init_plugins(self):
self.import_plugins()
exclude = self.config.skips if self.config else None
include = self.config.plugins if self.config else None
severity = self.config.severity if self.config else None
for plugin_cls in Plugin.__subclasses__():
name = plugin_cls.__name__
if include and name not in include:
                # Skip plugins that were not explicitly included
continue
if exclude and name in exclude:
                # Skip plugins that were explicitly excluded
continue
if severity and not gixy.severity.is_acceptable(plugin_cls.severity, severity):
# Skip plugin by severity level
continue
if self.config and self.config.has_for(name):
options = self.config.get_for(name)
else:
options = plugin_cls.options
self._plugins.append(plugin_cls(options))
@property
def plugins(self):
if not self._plugins:
self.init_plugins()
return self._plugins
@property
def plugins_classes(self):
self.import_plugins()
return Plugin.__subclasses__()
def get_plugins_descriptions(self):
return map(lambda a: a.name, self.plugins)
def audit(self, directive):
for plugin in self.plugins:
if plugin.directives and directive.name not in plugin.directives:
continue
plugin.audit(directive)
def issues(self):
result = []
for plugin in self.plugins:
if not plugin.issues:
continue
result.extend(plugin.issues)
return result | 32.878378 | 107 | 0.577887 |
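# ---------------------------------------------------------------------------
# Illustrative usage sketch only (not part of the original module): how the
# manager is typically driven. The `directives` iterable stands in for a
# parsed nginx configuration tree produced elsewhere in gixy; it is an
# assumption of this example, not something defined in this file.
def run_audit(directives, config=None):
    manager = PluginsManager(config=config)
    for directive in directives:
        # Offer every directive to each plugin that registered interest in it.
        manager.audit(directive)
    # Collect the findings reported by all plugins.
    return manager.issues()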
793e92e63f1b73d3f2100daa6879429cc90389b3 | 10,841 | py | Python | jina/proto/jina_pb2.py | sthagen/jina-ai-jina | a854da4f7cbafcf5d699a505dacfa4f27014fb62 | [
"Apache-2.0"
] | null | null | null | jina/proto/jina_pb2.py | sthagen/jina-ai-jina | a854da4f7cbafcf5d699a505dacfa4f27014fb62 | [
"Apache-2.0"
] | null | null | null | jina/proto/jina_pb2.py | sthagen/jina-ai-jina | a854da4f7cbafcf5d699a505dacfa4f27014fb62 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jina.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import docarray.proto.docarray_pb2 as docarray__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\njina.proto\x12\x04jina\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x0e\x64ocarray.proto\"\x9f\x01\n\nRouteProto\x12\x10\n\x08\x65xecutor\x18\x01 \x01(\t\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x06status\x18\x04 \x01(\x0b\x32\x11.jina.StatusProto\"\xc3\x01\n\rJinaInfoProto\x12+\n\x04jina\x18\x01 \x03(\x0b\x32\x1d.jina.JinaInfoProto.JinaEntry\x12+\n\x04\x65nvs\x18\x02 \x03(\x0b\x32\x1d.jina.JinaInfoProto.EnvsEntry\x1a+\n\tJinaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc6\x01\n\x0bHeaderProto\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12!\n\x06status\x18\x02 \x01(\x0b\x32\x11.jina.StatusProto\x12\x1a\n\rexec_endpoint\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x0ftarget_executor\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07timeout\x18\x05 \x01(\rH\x02\x88\x01\x01\x42\x10\n\x0e_exec_endpointB\x12\n\x10_target_executorB\n\n\x08_timeout\"#\n\x0e\x45ndpointsProto\x12\x11\n\tendpoints\x18\x01 \x03(\t\"\xf9\x01\n\x0bStatusProto\x12*\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1c.jina.StatusProto.StatusCode\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x33\n\texception\x18\x03 \x01(\x0b\x32 .jina.StatusProto.ExceptionProto\x1aN\n\x0e\x45xceptionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x03(\t\x12\x0e\n\x06stacks\x18\x03 \x03(\t\x12\x10\n\x08\x65xecutor\x18\x04 \x01(\t\"$\n\nStatusCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"^\n\rRelatedEntity\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\x08shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x42\x0b\n\t_shard_id\"\xa0\x02\n\x10\x44\x61taRequestProto\x12!\n\x06header\x18\x01 \x01(\x0b\x32\x11.jina.HeaderProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12 \n\x06routes\x18\x03 \x03(\x0b\x32\x10.jina.RouteProto\x12\x35\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\'.jina.DataRequestProto.DataContentProto\x1a\x63\n\x10\x44\x61taContentProto\x12,\n\x04\x64ocs\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12\x14\n\ndocs_bytes\x18\x02 \x01(\x0cH\x00\x42\x0b\n\tdocuments\"@\n\x14\x44\x61taRequestListProto\x12(\n\x08requests\x18\x01 \x03(\x0b\x32\x16.jina.DataRequestProto2Z\n\x12JinaDataRequestRPC\x12\x44\n\x0cprocess_data\x12\x1a.jina.DataRequestListProto\x1a\x16.jina.DataRequestProto\"\x00\x32\x63\n\x18JinaSingleDataRequestRPC\x12G\n\x13process_single_data\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00\x32G\n\x07JinaRPC\x12<\n\x04\x43\x61ll\x12\x16.jina.DataRequestProto\x1a\x16.jina.DataRequestProto\"\x00(\x01\x30\x01\x32`\n\x18JinaDiscoverEndpointsRPC\x12\x44\n\x12\x65ndpoint_discovery\x12\x16.google.protobuf.Empty\x1a\x14.jina.EndpointsProto\"\x00\x32N\n\x14JinaGatewayDryRunRPC\x12\x36\n\x07\x64ry_run\x12\x16.google.protobuf.Empty\x1a\x11.jina.StatusProto\"\x00\x32G\n\x0bJinaInfoRPC\x12\x38\n\x07_status\x12\x16.google.protobuf.Empty\x1a\x13.jina.JinaInfoProto\"\x00\x62\x06proto3')
_ROUTEPROTO = DESCRIPTOR.message_types_by_name['RouteProto']
_JINAINFOPROTO = DESCRIPTOR.message_types_by_name['JinaInfoProto']
_JINAINFOPROTO_JINAENTRY = _JINAINFOPROTO.nested_types_by_name['JinaEntry']
_JINAINFOPROTO_ENVSENTRY = _JINAINFOPROTO.nested_types_by_name['EnvsEntry']
_HEADERPROTO = DESCRIPTOR.message_types_by_name['HeaderProto']
_ENDPOINTSPROTO = DESCRIPTOR.message_types_by_name['EndpointsProto']
_STATUSPROTO = DESCRIPTOR.message_types_by_name['StatusProto']
_STATUSPROTO_EXCEPTIONPROTO = _STATUSPROTO.nested_types_by_name['ExceptionProto']
_RELATEDENTITY = DESCRIPTOR.message_types_by_name['RelatedEntity']
_DATAREQUESTPROTO = DESCRIPTOR.message_types_by_name['DataRequestProto']
_DATAREQUESTPROTO_DATACONTENTPROTO = _DATAREQUESTPROTO.nested_types_by_name['DataContentProto']
_DATAREQUESTLISTPROTO = DESCRIPTOR.message_types_by_name['DataRequestListProto']
_STATUSPROTO_STATUSCODE = _STATUSPROTO.enum_types_by_name['StatusCode']
RouteProto = _reflection.GeneratedProtocolMessageType('RouteProto', (_message.Message,), {
'DESCRIPTOR' : _ROUTEPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.RouteProto)
})
_sym_db.RegisterMessage(RouteProto)
JinaInfoProto = _reflection.GeneratedProtocolMessageType('JinaInfoProto', (_message.Message,), {
'JinaEntry' : _reflection.GeneratedProtocolMessageType('JinaEntry', (_message.Message,), {
'DESCRIPTOR' : _JINAINFOPROTO_JINAENTRY,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.JinaInfoProto.JinaEntry)
})
,
'EnvsEntry' : _reflection.GeneratedProtocolMessageType('EnvsEntry', (_message.Message,), {
'DESCRIPTOR' : _JINAINFOPROTO_ENVSENTRY,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.JinaInfoProto.EnvsEntry)
})
,
'DESCRIPTOR' : _JINAINFOPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.JinaInfoProto)
})
_sym_db.RegisterMessage(JinaInfoProto)
_sym_db.RegisterMessage(JinaInfoProto.JinaEntry)
_sym_db.RegisterMessage(JinaInfoProto.EnvsEntry)
HeaderProto = _reflection.GeneratedProtocolMessageType('HeaderProto', (_message.Message,), {
'DESCRIPTOR' : _HEADERPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.HeaderProto)
})
_sym_db.RegisterMessage(HeaderProto)
EndpointsProto = _reflection.GeneratedProtocolMessageType('EndpointsProto', (_message.Message,), {
'DESCRIPTOR' : _ENDPOINTSPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.EndpointsProto)
})
_sym_db.RegisterMessage(EndpointsProto)
StatusProto = _reflection.GeneratedProtocolMessageType('StatusProto', (_message.Message,), {
'ExceptionProto' : _reflection.GeneratedProtocolMessageType('ExceptionProto', (_message.Message,), {
'DESCRIPTOR' : _STATUSPROTO_EXCEPTIONPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.StatusProto.ExceptionProto)
})
,
'DESCRIPTOR' : _STATUSPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.StatusProto)
})
_sym_db.RegisterMessage(StatusProto)
_sym_db.RegisterMessage(StatusProto.ExceptionProto)
RelatedEntity = _reflection.GeneratedProtocolMessageType('RelatedEntity', (_message.Message,), {
'DESCRIPTOR' : _RELATEDENTITY,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.RelatedEntity)
})
_sym_db.RegisterMessage(RelatedEntity)
DataRequestProto = _reflection.GeneratedProtocolMessageType('DataRequestProto', (_message.Message,), {
'DataContentProto' : _reflection.GeneratedProtocolMessageType('DataContentProto', (_message.Message,), {
'DESCRIPTOR' : _DATAREQUESTPROTO_DATACONTENTPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.DataRequestProto.DataContentProto)
})
,
'DESCRIPTOR' : _DATAREQUESTPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.DataRequestProto)
})
_sym_db.RegisterMessage(DataRequestProto)
_sym_db.RegisterMessage(DataRequestProto.DataContentProto)
DataRequestListProto = _reflection.GeneratedProtocolMessageType('DataRequestListProto', (_message.Message,), {
'DESCRIPTOR' : _DATAREQUESTLISTPROTO,
'__module__' : 'jina_pb2'
# @@protoc_insertion_point(class_scope:jina.DataRequestListProto)
})
_sym_db.RegisterMessage(DataRequestListProto)
_JINADATAREQUESTRPC = DESCRIPTOR.services_by_name['JinaDataRequestRPC']
_JINASINGLEDATAREQUESTRPC = DESCRIPTOR.services_by_name['JinaSingleDataRequestRPC']
_JINARPC = DESCRIPTOR.services_by_name['JinaRPC']
_JINADISCOVERENDPOINTSRPC = DESCRIPTOR.services_by_name['JinaDiscoverEndpointsRPC']
_JINAGATEWAYDRYRUNRPC = DESCRIPTOR.services_by_name['JinaGatewayDryRunRPC']
_JINAINFORPC = DESCRIPTOR.services_by_name['JinaInfoRPC']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_JINAINFOPROTO_JINAENTRY._options = None
_JINAINFOPROTO_JINAENTRY._serialized_options = b'8\001'
_JINAINFOPROTO_ENVSENTRY._options = None
_JINAINFOPROTO_ENVSENTRY._serialized_options = b'8\001'
_ROUTEPROTO._serialized_start=129
_ROUTEPROTO._serialized_end=288
_JINAINFOPROTO._serialized_start=291
_JINAINFOPROTO._serialized_end=486
_JINAINFOPROTO_JINAENTRY._serialized_start=398
_JINAINFOPROTO_JINAENTRY._serialized_end=441
_JINAINFOPROTO_ENVSENTRY._serialized_start=443
_JINAINFOPROTO_ENVSENTRY._serialized_end=486
_HEADERPROTO._serialized_start=489
_HEADERPROTO._serialized_end=687
_ENDPOINTSPROTO._serialized_start=689
_ENDPOINTSPROTO._serialized_end=724
_STATUSPROTO._serialized_start=727
_STATUSPROTO._serialized_end=976
_STATUSPROTO_EXCEPTIONPROTO._serialized_start=860
_STATUSPROTO_EXCEPTIONPROTO._serialized_end=938
_STATUSPROTO_STATUSCODE._serialized_start=940
_STATUSPROTO_STATUSCODE._serialized_end=976
_RELATEDENTITY._serialized_start=978
_RELATEDENTITY._serialized_end=1072
_DATAREQUESTPROTO._serialized_start=1075
_DATAREQUESTPROTO._serialized_end=1363
_DATAREQUESTPROTO_DATACONTENTPROTO._serialized_start=1264
_DATAREQUESTPROTO_DATACONTENTPROTO._serialized_end=1363
_DATAREQUESTLISTPROTO._serialized_start=1365
_DATAREQUESTLISTPROTO._serialized_end=1429
_JINADATAREQUESTRPC._serialized_start=1431
_JINADATAREQUESTRPC._serialized_end=1521
_JINASINGLEDATAREQUESTRPC._serialized_start=1523
_JINASINGLEDATAREQUESTRPC._serialized_end=1622
_JINARPC._serialized_start=1624
_JINARPC._serialized_end=1695
_JINADISCOVERENDPOINTSRPC._serialized_start=1697
_JINADISCOVERENDPOINTSRPC._serialized_end=1793
_JINAGATEWAYDRYRUNRPC._serialized_start=1795
_JINAGATEWAYDRYRUNRPC._serialized_end=1873
_JINAINFORPC._serialized_start=1875
_JINAINFORPC._serialized_end=1946
# @@protoc_insertion_point(module_scope)
| 60.904494 | 3,306 | 0.814685 |
793e92fc67806be9cae12afc54d655e82137fdd6 | 23 | py | Python | kde/__init__.py | dagobash/kde | bb00460a47cfe25563f401fad498f7cafaa51fcf | [
"MIT"
] | null | null | null | kde/__init__.py | dagobash/kde | bb00460a47cfe25563f401fad498f7cafaa51fcf | [
"MIT"
] | null | null | null | kde/__init__.py | dagobash/kde | bb00460a47cfe25563f401fad498f7cafaa51fcf | [
"MIT"
] | null | null | null | from kde.kde import KDE | 23 | 23 | 0.826087 |
793e93826ffbe91b74d0703d26a54205fe2a8707 | 12,497 | py | Python | evaluation_models/inception.py | KamilDeja/PyTorch-VAE | 7782de94bb3f0f11957932bdf2aacc307e8a12ff | [
"Apache-2.0"
] | null | null | null | evaluation_models/inception.py | KamilDeja/PyTorch-VAE | 7782de94bb3f0f11957932bdf2aacc307e8a12ff | [
"Apache-2.0"
] | null | null | null | evaluation_models/inception.py | KamilDeja/PyTorch-VAE | 7782de94bb3f0f11957932bdf2aacc307e8a12ff | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import os
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
class InceptionV3(nn.Module):
"""Pretrained InceptionV3 network returning feature maps"""
# Index of default block of inception to return,
# corresponds to output of final average pooling
DEFAULT_BLOCK_INDEX = 3
# Maps feature dimensionality to their output blocks indices
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
        192: 1,  # Second max pooling features
768: 2, # Pre-aux classifier features
2048: 3 # Final average pooling features
}
def __init__(self,
output_blocks=[DEFAULT_BLOCK_INDEX],
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True):
"""Build pretrained InceptionV3
Parameters
----------
output_blocks : list of int
Indices of blocks to return features of. Possible values are:
- 0: corresponds to output of first max pooling
- 1: corresponds to output of second max pooling
- 2: corresponds to output which is fed to aux classifier
- 3: corresponds to output of final average pooling
resize_input : bool
If true, bilinearly resizes input to width and height 299 before
feeding input to model. As the network without fully connected
layers is fully convolutional, it should be able to handle inputs
of arbitrary size, so resizing might not be strictly needed
normalize_input : bool
If true, scales the input from range (0, 1) to the range the
pretrained Inception network expects, namely (-1, 1)
requires_grad : bool
If true, parameters of the model require gradients. Possibly useful
for finetuning the network
use_fid_inception : bool
If true, uses the pretrained Inception model used in Tensorflow's
FID implementation. If false, uses the pretrained Inception model
available in torchvision. The FID Inception model has different
weights and a slightly different structure from torchvision's
Inception model. If you want to compute FID scores, you are
strongly advised to set this parameter to true to get comparable
results.
"""
super(InceptionV3, self).__init__()
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
assert self.last_needed_block <= 3, \
'Last possible output block index is 3'
self.blocks = nn.ModuleList()
if use_fid_inception:
inception = fid_inception_v3()
else:
inception = _inception_v3(pretrained=True)
# Block 0: input to maxpool1
block0 = [
inception.Conv2d_1a_3x3,
inception.Conv2d_2a_3x3,
inception.Conv2d_2b_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block0))
# Block 1: maxpool1 to maxpool2
if self.last_needed_block >= 1:
block1 = [
inception.Conv2d_3b_1x1,
inception.Conv2d_4a_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block1))
# Block 2: maxpool2 to aux classifier
if self.last_needed_block >= 2:
block2 = [
inception.Mixed_5b,
inception.Mixed_5c,
inception.Mixed_5d,
inception.Mixed_6a,
inception.Mixed_6b,
inception.Mixed_6c,
inception.Mixed_6d,
inception.Mixed_6e,
]
self.blocks.append(nn.Sequential(*block2))
# Block 3: aux classifier to final avgpool
if self.last_needed_block >= 3:
block3 = [
inception.Mixed_7a,
inception.Mixed_7b,
inception.Mixed_7c,
nn.AdaptiveAvgPool2d(output_size=(1, 1))
]
self.blocks.append(nn.Sequential(*block3))
for param in self.parameters():
param.requires_grad = requires_grad
def forward(self, inp):
"""Get Inception feature maps
Parameters
----------
inp : torch.autograd.Variable
Input tensor of shape Bx3xHxW. Values are expected to be in
range (0, 1)
Returns
-------
List of torch.autograd.Variable, corresponding to the selected output
block, sorted ascending by index
"""
outp = []
x = inp
if self.resize_input:
x = F.interpolate(x,
size=(299, 299),
mode='bilinear',
align_corners=False)
if self.normalize_input:
x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
for idx, block in enumerate(self.blocks):
x = block(x)
if idx in self.output_blocks:
outp.append(x)
if idx == self.last_needed_block:
break
return outp
def _inception_v3(*args, **kwargs):
"""Wraps `torchvision.models.inception_v3`
    Skips default weight initialization if supported by torchvision version.
See https://github.com/mseitzer/pytorch-fid/issues/28.
"""
try:
version = tuple(map(int, torchvision.__version__.split('.')[:2]))
except ValueError:
# Just a caution against weird version strings
version = (0,)
if version >= (0, 6):
kwargs['init_weights'] = False
return torchvision.models.inception_v3(*args, **kwargs)
def fid_inception_v3():
"""Build pretrained Inception model for FID computation
The Inception model for FID computation uses a different set of weights
and has a slightly different structure than torchvision's Inception.
This method first constructs torchvision's Inception and then patches the
necessary parts that are different in the FID Inception model.
"""
inception = _inception_v3(num_classes=1008,
aux_logits=False,
pretrained=False)
inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
inception.Mixed_7b = FIDInceptionE_1(1280)
inception.Mixed_7c = FIDInceptionE_2(2048)
model_path = "evaluation_models/inception"
if os.path.exists(model_path):
print("Loading cached inception model for validation")
state_dict = torch.load(model_path)
else:
print("Downloading inception model for validation")
state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
torch.save(state_dict, model_path)
inception.load_state_dict(state_dict)
return inception
class FIDInceptionA(torchvision.models.inception.InceptionA):
"""InceptionA block patched for FID computation"""
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionC(torchvision.models.inception.InceptionC):
"""InceptionC block patched for FID computation"""
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
"""First InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
"""Second InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: The FID Inception model uses max pooling instead of average
# pooling. This is likely an error in this specific Inception
# implementation, as other Inception models use average pooling here
# (which matches the description in the paper).
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
| 36.648094 | 126 | 0.638633 |
793e946d0ed8db9b1060ad6efc9ac50eb6a8519c | 2,207 | py | Python | bin/run_sep_bfs.py | SEP-Graph/ppopp19-artifact | 576b2ddd78bd5efee3207fb0b3394bd79f61c3d7 | [
"Apache-2.0"
] | 1 | 2020-03-04T09:15:33.000Z | 2020-03-04T09:15:33.000Z | bin/run_sep_bfs.py | SEP-Graph/ppopp19-artifact | 576b2ddd78bd5efee3207fb0b3394bd79f61c3d7 | [
"Apache-2.0"
] | null | null | null | bin/run_sep_bfs.py | SEP-Graph/ppopp19-artifact | 576b2ddd78bd5efee3207fb0b3394bd79f61c3d7 | [
"Apache-2.0"
] | 1 | 2018-12-29T18:12:03.000Z | 2018-12-29T18:12:03.000Z | #!/usr/bin/env python3
from os.path import exists, join
from time import time
from sys import exit
from subprocess import getstatusoutput
from common import DATASET_ROOT, WORKSPACE_ROOT, LOG_ROOT, parse_all_metadata, get_gpu_ram
SEP_PATH = join(WORKSPACE_ROOT, "sep-graph")
SEP_BUILD_PATH = join(SEP_PATH, "build")
BFS_BINARY_PATH = join(SEP_BUILD_PATH, "hybrid_bfs")
if not exists(BFS_BINARY_PATH):
exit("Could not found BFS binary")
for data_dirname, metadata in parse_all_metadata().items():
link = next((x for x in metadata["links"] if "undirected" in x and x.endswith("gr")), None)
data_filename = link.split("/")[-1]
data_path = join(join(DATASET_ROOT, data_dirname), data_filename)
if not exists(data_path):
exit("Could not found file %s" % data_path)
# required parameter
is_sparse = metadata["sparse"]
source_node = metadata["source_node"]
# If the total RAM more than 12GB, we will store CSR and CSC format in global memory. (for better performance)
# If the total RAM less than 12GB, we only store CSR format, because the dataset is undirected!
treat_as_undirected = get_gpu_ram() < 12000
# optional parameter
alpha = None
beta = None
if not bool(is_sparse):
alpha = metadata["SEP_bfs_alpha"]
beta = metadata["SEP_bfs_beta"]
block_size = metadata.get("block_size", "256")
timestamp = str(int(time()))
log_path = join(LOG_ROOT, "SEP_bfs_%s_%s.json" % (data_dirname, timestamp))
cmd = "%s" \
" -graphfile=%s" \
" -undirected=%s" \
" -source_node=%s" \
" -sparse=%s" \
" %s" \
" -block_size=%s" \
" -json=%s" % (
BFS_BINARY_PATH,
data_path,
treat_as_undirected,
source_node,
is_sparse,
"" if bool(is_sparse) else "-alpha=%s -beta=%s" % (alpha, beta), block_size, log_path)
print('Evaluating BFS implemented on SEP-Graph for "%s" dataset' % data_dirname)
# run the program
status, output = getstatusoutput(cmd)
if status != 0:
print('Failed to run: "%s"' % cmd)
exit(output)
print("--------------")
| 33.953846 | 114 | 0.631174 |
793e952748361e48fd7753e76fbf3d8eec66ae09 | 222 | py | Python | Python3/Lists/clear_pop_remove.py | norbertosanchezdichi/TIL | 2e9719ddd288022f53b094a42679e849bdbcc625 | [
"MIT"
] | null | null | null | Python3/Lists/clear_pop_remove.py | norbertosanchezdichi/TIL | 2e9719ddd288022f53b094a42679e849bdbcc625 | [
"MIT"
] | null | null | null | Python3/Lists/clear_pop_remove.py | norbertosanchezdichi/TIL | 2e9719ddd288022f53b094a42679e849bdbcc625 | [
"MIT"
] | null | null | null | list = [10, 'heh', 'i\'m a dude', 'he\'s a dude', 'she\'s a dude']
print(list)
last_item = list.pop()
print(list)
print(f'Last item \'{last_item}\' has been popped.')
list.remove(10)
print(list)
list.clear()
print(list) | 18.5 | 66 | 0.63964 |
793e95efedcc7167fea997037908a13906af2351 | 10,251 | py | Python | BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/inf.py | KrzysztofKoch1/edk2 | 4d621893471c6299de06aeac56f4c6cddc5c9ebe | [
"BSD-2-Clause"
] | 36 | 2017-03-09T08:14:35.000Z | 2022-03-21T03:44:33.000Z | BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/inf.py | KrzysztofKoch1/edk2 | 4d621893471c6299de06aeac56f4c6cddc5c9ebe | [
"BSD-2-Clause"
] | 5 | 2019-09-17T22:39:25.000Z | 2021-04-22T01:44:17.000Z | BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/inf.py | KrzysztofKoch1/edk2 | 4d621893471c6299de06aeac56f4c6cddc5c9ebe | [
"BSD-2-Clause"
] | 14 | 2019-08-05T00:28:56.000Z | 2022-03-28T09:11:00.000Z | ## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import plugins.EdkPlugins.basemodel.ini as ini
import re, os
from plugins.EdkPlugins.basemodel.message import *
class INFFile(ini.BaseINIFile):
_libobjs = {}
def GetSectionInstance(self, parent, name, isCombined=False):
return INFSection(parent, name, isCombined)
def GetProduceLibraryClass(self):
obj = self.GetDefine("LIBRARY_CLASS")
if obj is None: return None
return obj.split('|')[0].strip()
def GetSectionObjectsByName(self, name, arch=None):
arr = []
sects = self.GetSectionByName(name)
for sect in sects:
            # skip unmatched architecture content
if not sect.IsArchMatch(arch):
continue
for obj in sect.GetObjects():
arr.append(obj)
return arr
def GetSourceObjects(self, arch=None, tool=None):
arr = []
sects = self.GetSectionByName('sources')
for sect in sects:
            # skip unmatched architecture content
if not sect.IsArchMatch(arch):
continue
for obj in sect.GetObjects():
if not obj.IsMatchFamily(tool):
continue
arr.append(obj)
return arr
def Parse(self):
if not ini.BaseINIFile.Parse(self):
return False
classname = self.GetProduceLibraryClass()
if classname is not None:
libobjdict = INFFile._libobjs
if classname in libobjdict:
if self not in libobjdict[classname]:
libobjdict[classname].append(self)
else:
libobjdict[classname] = [self]
return True
def GetBaseName(self):
return self.GetDefine("BASE_NAME").strip()
def GetModuleRootPath(self):
return os.path.dirname(self.GetFilename())
def Clear(self):
classname = self.GetProduceLibraryClass()
if classname is not None:
libobjdict = INFFile._libobjs
libobjdict[classname].remove(self)
if len(libobjdict[classname]) == 0:
del libobjdict[classname]
ini.BaseINIFile.Clear(self)
class INFSection(ini.BaseINISection):
def GetSectionINIObject(self, parent):
type = self.GetType()
if type.lower() == 'libraryclasses':
return INFLibraryClassObject(self)
if type.lower() == 'sources':
return INFSourceObject(self)
if type.lower().find('pcd') != -1:
return INFPcdObject(self)
if type.lower() == 'packages':
return INFDependentPackageObject(self)
if type.lower() in ['guids', 'protocols', 'ppis']:
return INFGuidObject(self)
if type.lower() == 'defines':
return INFDefineSectionObject(self)
return INFSectionObject(self)
def GetType(self):
arr = self._name.split('.')
return arr[0].strip()
def GetArch(self):
arr = self._name.split('.')
if len(arr) == 1:
return 'common'
return arr[1]
def IsArchMatch(self, arch):
if arch is None or self.GetArch() == 'common':
return True
if self.GetArch().lower() != arch.lower():
return False
return True
class INFSectionObject(ini.BaseINISectionObject):
def GetArch(self):
return self.GetParent().GetArch()
class INFDefineSectionObject(INFSectionObject):
def __init__(self, parent):
INFSectionObject.__init__(self, parent)
self._key = None
self._value = None
def Parse(self):
assert (self._start == self._end), 'The object in define section must be in single line'
line = self.GetLineByOffset(self._start).strip()
line = line.split('#')[0]
arr = line.split('=')
if len(arr) != 2:
ErrorMsg('Invalid define section object',
self.GetFilename(),
self._start
)
return False
self._key = arr[0].strip()
self._value = arr[1].strip()
return True
def GetKey(self):
return self._key
def GetValue(self):
return self._value
class INFLibraryClassObject(INFSectionObject):
_objs = {}
def __init__(self, parent):
INFSectionObject.__init__(self, parent)
self._classname = None
def GetClass(self):
return self._classname
def Parse(self):
self._classname = self.GetLineByOffset(self._start).split('#')[0].strip()
objdict = INFLibraryClassObject._objs
if self._classname in objdict:
objdict[self._classname].append(self)
else:
objdict[self._classname] = [self]
return True
def Destroy(self):
objdict = INFLibraryClassObject._objs
objdict[self._classname].remove(self)
if len(objdict[self._classname]) == 0:
del objdict[self._classname]
def GetName(self):
return self._classname
@staticmethod
def GetObjectDict():
return INFLibraryClassObject._objs
class INFDependentPackageObject(INFSectionObject):
def GetPath(self):
return self.GetLineByOffset(self._start).split('#')[0].strip()
class INFSourceObject(INFSectionObject):
_objs = {}
def __init__(self, parent):
INFSectionObject.__init__(self, parent)
self.mSourcename = None
self.mToolCode = None
self.mFamily = None
self.mTagName = None
self.mFeaturePcd = None
self.mFilename = None
def GetSourcePath(self):
return self.mSourcename
def GetSourceFullPath(self):
path = os.path.dirname(self.GetFilename())
path = os.path.join(path, self.GetSourcePath())
return os.path.normpath(path)
def GetToolCode(self):
return self.mToolCode
def GetFamily(self):
return self.mFamily
def GetTagName(self):
return self.mTagName
def GetFeaturePcd(self):
return self.mFeaturePcd
def Parse(self):
line = self.GetLineByOffset(self._start).strip().split('#')[0]
arr = line.split('|')
self.mSourcename = arr[0].strip()
if len(arr) >= 2:
self.mFamily = arr[1].strip()
if len(arr) >= 3:
self.mTagName = arr[2].strip()
if len(arr) >= 4:
self.mToolCode = arr[3].strip()
if len(arr) >= 5:
self.mFeaturePcd = arr[4].strip()
self.mFilename = os.path.basename(self.GetSourceFullPath())
objdict = INFSourceObject._objs
if self.mFilename not in objdict:
objdict[self.mFilename] = [self]
else:
objdict[self.mFilename].append(self)
return True
def GetName(self):
return self.mFilename
def Destroy(self):
objdict = INFSourceObject._objs
objdict[self.mFilename].remove(self)
if len(objdict[self.mFilename]) == 0:
del objdict[self.mFilename]
def IsMatchFamily(self, family):
if family is None:
return True
if self.mFamily is not None:
if family.strip().lower() == self.mFamily.lower():
return True
else:
return False
else:
fname = self.GetSourcePath()
if fname.endswith('.S') and family.lower() != 'gcc':
return False
if fname.endswith('.s') and (self.GetArch().lower() != 'ipf' and self.GetArch().lower() != 'common'):
return False
if fname.lower().endswith('.asm') and (family.lower() != 'msft' and family.lower() != 'intel'):
return False
return True
@staticmethod
def GetObjectDict():
return INFSourceObject._objs
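# --- Illustrative note (not part of the original source) ---
# A [Sources] entry is a '|'-separated record that Parse() maps positionally;
# a hypothetical line
#   Ia32/Cpu.asm | MSFT | DEBUG | CUSTOM_TOOL | gSomeFeaturePcd
# would fill:
#   mSourcename='Ia32/Cpu.asm', mFamily='MSFT', mTagName='DEBUG',
#   mToolCode='CUSTOM_TOOL', mFeaturePcd='gSomeFeaturePcd';
# missing trailing fields simply remain None.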
class INFPcdObject(INFSectionObject):
_objs = {}
def __init__(self, parent):
INFSectionObject.__init__(self, parent)
self.mPcdType = None
self.mDefaultValue = None
self.mPcdName = None
@staticmethod
def GetObjectDict():
return INFPcdObject._objs
def Parse(self):
line = self.GetLineByOffset(self._start).strip().split('#')[0]
arr = line.split('|')
self.mPcdName = arr[0].strip()
if len(arr) >= 2:
self.mDefaultValue = arr[1].strip()
objdict = INFPcdObject._objs
if self.GetName() in objdict:
if self not in objdict[self.GetName()]:
objdict[self.GetName()].append(self)
else:
objdict[self.GetName()] = [self]
return True
def GetPcdName(self):
return self.mPcdName
def GetPcdType(self):
return self.GetParent().GetType()
def GetName(self):
return self.mPcdName.split('.')[1]
def Destroy(self):
objdict = INFPcdObject._objs
objdict[self.GetName()].remove(self)
if len(objdict[self.GetName()]) == 0:
del objdict[self.GetName()]
class INFGuidObject(INFSectionObject):
def __init__(self, parent):
INFSectionObject.__init__(self, parent)
self._name = None
def Parse(self):
line = self.GetLineByOffset(self._start).strip().split('#')[0].split("|")[0]
self._name = line.strip()
return True
def GetName(self):
return self._name
| 29.973684 | 114 | 0.571652 |
793e960d734dfd294165763109e5cc8dbd30b549 | 751 | py | Python | actions/hallucinationScoutAction.py | SC2-ND-bot/The-PPPP-Bot | f1f4bda5f773347d9d997f4a84b4514a98932d55 | [
"MIT"
] | null | null | null | actions/hallucinationScoutAction.py | SC2-ND-bot/The-PPPP-Bot | f1f4bda5f773347d9d997f4a84b4514a98932d55 | [
"MIT"
] | null | null | null | actions/hallucinationScoutAction.py | SC2-ND-bot/The-PPPP-Bot | f1f4bda5f773347d9d997f4a84b4514a98932d55 | [
"MIT"
] | null | null | null | from actions.action import Action
class HallucinationScoutAction(Action):
def __init__(self):
super().__init__()
self.cost = 2.0
self.scoutLocations = None
self.effects["scouting"] = True
def __repr__(self):
return "Hallucination Action Class"
def reset(self):
self.scoutLocations = None
def checkProceduralPrecondition(self, gameObject, agent):
unit = agent.getUnit(gameObject)
if not unit.is_hallucination:
return False
self.scoutLocations = [gameObject.enemy_start_locations[0]]
return self.scoutLocations is not None
def perform(self, gameObject, agent, firstAction):
unit = agent.getUnit(gameObject)
for index, location in enumerate(self.scoutLocations):
gameObject.do(unit.move(location, index != 0))
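# --- Illustrative usage sketch (not part of the original bot; `bot_object` and
# `agent` stand for the BotAI instance and the GOAP agent) ---
#   action = HallucinationScoutAction()
#   if action.checkProceduralPrecondition(bot_object, agent):
#       action.perform(bot_object, agent, firstAction=True)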
| 24.225806 | 61 | 0.75233 |
793e9612e2aecad7fc00708c116f259334f1be4b | 309 | py | Python | rllib/offline/is_estimator.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 1 | 2022-03-07T06:40:06.000Z | 2022-03-07T06:40:06.000Z | rllib/offline/is_estimator.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | rllib/offline/is_estimator.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 1 | 2019-09-24T16:24:49.000Z | 2019-09-24T16:24:49.000Z | from ray.rllib.offline.estimators.importance_sampling import ImportanceSampling
from ray.rllib.utils.deprecation import Deprecated
@Deprecated(
new="ray.rllib.offline.estimators.importance_sampling::ImportanceSampling",
error=False,
)
class ImportanceSamplingEstimator(ImportanceSampling):
pass
| 28.090909 | 79 | 0.825243 |
793e96a2ffcde6865164c93b510a37dff5314e65 | 6,680 | py | Python | roadpin_backend/app/cron_data/cron_new_taipei_city.py | g0v/roadpin | c2919552dd3ce0e3614a35466bae6d6a740e9368 | [
"CC-BY-3.0",
"Apache-2.0"
] | 5 | 2015-04-20T17:16:56.000Z | 2018-12-25T11:14:22.000Z | roadpin_backend/app/cron_data/cron_new_taipei_city.py | g0v/roadpin | c2919552dd3ce0e3614a35466bae6d6a740e9368 | [
"CC-BY-3.0",
"Apache-2.0"
] | 1 | 2016-06-20T02:55:27.000Z | 2016-06-21T15:37:19.000Z | roadpin_backend/app/cron_data/cron_new_taipei_city.py | g0v/roadpin | c2919552dd3ce0e3614a35466bae6d6a740e9368 | [
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import ujson as json
from StringIO import StringIO
from datetime import datetime
import sys
import argparse
from lxml import html
from twisted.internet import reactor
from scrapy import log, signals
from scrapy.crawler import Crawler
from scrapy.settings import Settings
from scrapy.xlib.pydispatch import dispatcher
from app import cfg
from app import util
#from app.crawler.new_taipei_city.new_taipei_city.spiders.new_taipei_city_spider import NewTaipeiCitySpider
from app.cron_data import cron_taipei_city
from app.cron_data import process_data
def cron_new_taipei_city():
while True:
error_code = _cron_new_taipei_city()
_sleep()
pass
'''
def _cron_new_taipei_city():
dispatcher.connect(stop_reactor, signal=signals.spider_closed)
spider = NewTaipeiCitySpider()
crawler = Crawler(Settings())
crawler.configure()
crawler.crawl(spider)
crawler.start()
log.start()
log.msg('Running reactor...')
reactor.run() # the script will block here until the spider is closed
log.msg('Reactor stopped.')
'''
def _cron_new_taipei_city():
#params = _get_params()
(error_code, results) = _crawl_data()
return error_code
def _get_params():
'''
1. lookup the latest data in mongo.
2. return the latest params of data.
'''
latest_dig = util.get_cache('cron_new_taipei_city_latest_dig')
return {'latest_dig': latest_dig}
def _crawl_data():
results = _crawl_dig()
return (S_OK, results)
def _crawl_dig():
the_url = 'http://61.60.124.185/tpctempdig/InfoAllList.asp'
start_timestamp = 946684800
end_timestamp = util.get_timestamp() + 86400 * 366
start_datetime = util.timestamp_to_datetime(start_timestamp)
end_datetime = util.timestamp_to_datetime(end_timestamp)
params = {
'sortflag': '',
'sorttype': '',
'TargetLB': '',
'qry2': 1,
'startyear': start_datetime.year,
'startmonth': start_datetime.month,
'startday': start_datetime.day,
'endyear': end_datetime.year,
'endmonth': end_datetime.month,
'endday': end_datetime.day,
}
http_data = util.http_multipost({the_url: params})
#cfg.logger.debug('http_data: %s', http_data)
(latest_timestamp, dig_data) = _parse_dig(http_data[the_url])
[_put_to_db(each_data) for each_data in dig_data]
util.save_cache('cron_new_taipei_city_latest_dig', {'latest_timestamp': latest_timestamp})
def _parse_dig(http_data):
#cfg.logger.debug('http_data_type: %s', http_data.__class__.__name__)
http_data_ascii = http_data.encode('iso-8859-1')
data_utf8 = util.big5_to_utf8(http_data_ascii)
#cfg.logger.debug('data_utf8: %s', data_utf8)
(latest_timestamp, data_list) = _parse_dig_data(data_utf8)
return (latest_timestamp, data_list)
def _parse_dig_data(data):
cfg.logger.debug('data: %s', data)
doc = html.parse(StringIO(data))
cfg.logger.debug('doc: %s', doc)
elements = doc.xpath("//tr[@class='g3']")
cfg.logger.debug('len(elements): %s elements: %s', len(elements), elements)
results = [_parse_element(elem) for elem in elements]
results = [result for result in results if result]
latest_timestamp = 0
for result in results:
start_timestamp = result.get('start_timestamp', 0)
if start_timestamp > latest_timestamp:
latest_timestamp = start_timestamp
return (latest_timestamp, results)
def _parse_element(elem):
text = elem.text_content()
#cfg.logger.debug('text: %s', text)
return _parse_text(text)
def _parse_text(text):
cfg.logger.debug('text: %s', text)
_columns = [ 'OK_UNITpro', 'IDpro', 'APP_NAMEpro', 'LOCATIONpro', 'CB_DATEpro', 'APPROVE_DATEpro']
f = StringIO(text)
lines = f.readlines()
lines = [line for line in lines if line.strip()]
n_lines = len(lines)
if n_lines != 6:
cfg.logger.error('lines != 6: lines: %s', lines)
return {}
cfg.logger.debug('lines: %s', lines)
result = {column: lines[idx].strip() for (idx, column) in enumerate(_columns) if column}
if not result.get('OK_UNITpro', ''):
result = {}
if not result.get('CB_DATEpro', ''):
result = {}
cfg.logger.debug('result: %s', result)
(start_timestamp, end_timestamp) = _parse_time_period(result)
geo = _parse_geo(result)
result['start_timestamp'] = start_timestamp
result['end_timestamp'] = end_timestamp
result['geo'] = geo
return result
def _put_to_db(data):
category = 'new_taipei_city_dig_point'
the_idx = data['IDpro']
start_timestamp = data.get('start_timestamp', 0)
end_timestamp = data.get('end_timestamp', 0)
geo = data.get('geo', {})
cfg.logger.debug('to process_data: the_idx: %s data: %s', the_idx, data)
process_data('新北市', category, the_idx, start_timestamp, end_timestamp, geo, data)
def _parse_time_period(data):
time_period = data.get('CB_DATEpro', '~')
return _parse_time_period_core(time_period)
def _parse_time_period_core(time_period):
time_period_split = time_period.split('~')
if len(time_period_split) != 2:
return (0, MAX_TIMESTAMP)
start_date = time_period_split[0]
end_date = time_period_split[1]
start_timestamp = _parse_date(start_date)
end_timestamp = _parse_date(end_date)
if end_timestamp == 0:
end_timestamp = MAX_TIMESTAMP
return (start_timestamp, end_timestamp)
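# --- Illustrative note (not part of the original module) ---
# A CB_DATEpro value such as '2014/01/01~2014/02/28' is split on '~' and each
# side goes through _parse_date(), yielding (start, end) timestamps. Note that
# MAX_TIMESTAMP is referenced here but neither defined nor imported in this
# file as shown; it is assumed to come from a shared constants module.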
def _parse_date(the_date):
the_date_list = the_date.split('/')
if len(the_date_list) != 3:
return 0
the_year = util._int(the_date_list[0])
the_month = util._int(the_date_list[1])
the_day = util._int(the_date_list[2])
cfg.logger.debug('the_date: %s the_year: %s the_month: %s the_day: %s', the_date, the_year, the_month, the_day)
the_datetime = datetime(the_year, the_month, the_day)
the_timestamp = util.datetime_to_timestamp(the_datetime)
return the_timestamp
def _parse_geo(data):
return {}
def _sleep():
time_sleep = util._int(cfg.config.get('time_sleep', 86400))
cfg.logger.debug('to sleep: time_sleep: %s', time_sleep)
time.sleep(time_sleep)
def parse_args():
''' '''
parser = argparse.ArgumentParser(description='roadpin_backend')
parser.add_argument('-i', '--ini', type=str, required=True, help="ini filename")
args = parser.parse_args()
return (S_OK, args)
if __name__ == '__main__':
(error_code, args) = parse_args()
cfg.init({"ini_filename": args.ini})
cron_new_taipei_city()
| 27.377049 | 115 | 0.688024 |
793e96c05ec2725f94d692e932e132dd7f3d02b7 | 1,902 | py | Python | api_basebone/core/decorators.py | git-men/bsm-django | 46d1fcbd8ca379d20a3396fd7ea529ccf998f59d | [
"MIT"
] | 90 | 2020-12-07T04:49:43.000Z | 2022-03-31T08:24:35.000Z | api_basebone/core/decorators.py | flyowl/lightning | 946c98986c1c42bf8c28f203cdf8512262283c25 | [
"MIT"
] | 4 | 2021-01-11T16:10:55.000Z | 2022-02-18T12:13:23.000Z | api_basebone/core/decorators.py | flyowl/lightning | 946c98986c1c42bf8c28f203cdf8512262283c25 | [
"MIT"
] | 16 | 2020-12-07T12:32:05.000Z | 2022-01-30T05:36:51.000Z | from functools import wraps
from api_basebone.export.specs import FieldType
BSM_BATCH_ACTION = 'bsm_action_map'
BSM_CLIENT_BATCH_ACTION = 'bsm_client_action_map'
BSM_ADMIN_COMPUTED_FIELDS_MAP = 'bsm_admin_computed_fields_map'
def action(model, verbose_name='', manage=True):
"""
Hook for registering batch actions on the management (admin) side.
It attaches the decorated action to the given model class.
Params:
model class the model class
verbose_name str human-readable description of the function
manage bool True for the management side; pass False for the client side
"""
def middle(func):
@wraps(func)
def wrapper(request, queryset):
return func(request, queryset)
if verbose_name:
wrapper.short_description = verbose_name
end_slug = BSM_BATCH_ACTION if manage else BSM_CLIENT_BATCH_ACTION
bsm_action_map = getattr(model, end_slug, None)
if bsm_action_map is None:
setattr(model, end_slug, {})
action_map = getattr(model, end_slug)
action_map[func.__name__.lower()] = wrapper
return wrapper
return middle
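# --- Illustrative usage sketch (the `Order` model and `approved` field are
# hypothetical) ---
#   @action(Order, verbose_name='batch approve')
#   def batch_approve(request, queryset):
#       queryset.update(approved=True)
#
#   # afterwards Order.bsm_action_map['batch_approve'] is the wrapped callable
#   # that the management side can invoke as a batch operation.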
def basebone_admin_property(model, display_name, field_type=None):
"""
Computed property field for the admin side; the configured value is exposed when the admin configuration is generated.
FIXME: currently only used in the bsm admin classes.
Params:
display str human-readable name of the field
field_type str type of the field
"""
# default to string if no field type is specified
if not field_type:
field_type = FieldType.STRING
def middle(func):
@wraps(func)
def wrapper(self, instance):
return func(self, instance)
name = func.__name__
computed_property = getattr(model, BSM_ADMIN_COMPUTED_FIELDS_MAP, None)
if not isinstance(computed_property, dict):
setattr(model, BSM_ADMIN_COMPUTED_FIELDS_MAP, {})
if name not in model.bsm_admin_computed_fields_map:
model.bsm_admin_computed_fields_map[name] = {
'display_name': display_name,
'field_type': field_type,
}
return wrapper
return middle
| 26.054795 | 79 | 0.654574 |
793e988b46aef7286ce6b56e7eba8e88be95400d | 619 | py | Python | effdet/data/parsers/parser_factory.py | SKA-INAF/efficientdet-pytorch | 8967bab88288d11e5547a7efa391adc0c987be47 | [
"Apache-2.0"
] | null | null | null | effdet/data/parsers/parser_factory.py | SKA-INAF/efficientdet-pytorch | 8967bab88288d11e5547a7efa391adc0c987be47 | [
"Apache-2.0"
] | null | null | null | effdet/data/parsers/parser_factory.py | SKA-INAF/efficientdet-pytorch | 8967bab88288d11e5547a7efa391adc0c987be47 | [
"Apache-2.0"
] | null | null | null | """ Parser factory
Copyright 2020 Ross Wightman
"""
from .parser_coco import CocoParser
from .parser_voc import VocParser
from .parser_open_images import OpenImagesParser
from .parser_radio_galaxy import RadioGalaxyParser
def create_parser(name, **kwargs):
if name == 'coco':
parser = CocoParser(**kwargs)
elif name == 'voc':
parser = VocParser(**kwargs)
elif name == 'openimages':
parser = OpenImagesParser(**kwargs)
elif name == 'radiogalaxy':
parser = RadioGalaxyParser(**kwargs)
else:
assert False, f'Unknown dataset parser ({name})'
return parser
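# Illustrative usage sketch (the keyword arguments are assumptions; accepted
# kwargs depend on the chosen parser class):
#   parser = create_parser('radiogalaxy', ann_filename='annotations.json')
# Any name other than coco/voc/openimages/radiogalaxy trips the assert above.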
| 26.913043 | 56 | 0.684976 |
793e98b3b0af2be75fe4928d6d5fc6fe72ffef4e | 1,960 | py | Python | tridentstream/services/config/configservice.py | tridentstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | 6 | 2020-01-03T14:50:09.000Z | 2021-09-13T01:44:31.000Z | tridentstream/services/config/configservice.py | tidalstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | null | null | null | tridentstream/services/config/configservice.py | tidalstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | null | null | null | from unplugged import Schema
from ...plugins import ConfigPlugin
from .models import DefaultSetting, Setting
class ConfigConfigPlugin(ConfigPlugin):
plugin_name = "config"
config_schema = Schema
def __init__(self, config):
super(ConfigConfigPlugin, self).__init__(config)
self.schemas = {}
def get_default_config(self, namespace, key):
for default_type in self.default_types:
try:
return DefaultSetting.objects.get(
default_type=default_type, namespace=namespace, key=key
).value
except DefaultSetting.DoesNotExist:
pass
return None
def get_user_config(self, user, namespace, key):
try:
return Setting.objects.get(user=user, namespace=namespace, key=key).value
except Setting.DoesNotExist:
return self.get_default_config(namespace, key)
def set_default_config(self, default_type, namespace, key, value):
try:
s = DefaultSetting.objects.get(
default_type=default_type, namespace=namespace, key=key
)
except DefaultSetting.DoesNotExist:
s = DefaultSetting(default_type=default_type, namespace=namespace, key=key)
if value is None:
s.delete()
else:
s.value = value
s.save()
def set_user_config(self, user, namespace, key, value):
try:
s = Setting.objects.get(user=user, namespace=namespace, key=key)
except Setting.DoesNotExist:
s = Setting(user=user, namespace=namespace, key=key)
if value is None:
s.delete()
else:
s.value = value
s.save()
def set_config_schema(self, namespace, key, schema):
self.schemas[(namespace, key)] = schema
def get_config_schema(self, namespace, key):
return self.schemas.get((namespace, key))
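# Illustrative behaviour note (namespace/key values are examples):
#   get_user_config(user, 'player', 'volume') returns the user's own Setting if
#   one exists, otherwise the first matching DefaultSetting found while walking
#   self.default_types, so per-user values always shadow the defaults.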
| 31.111111 | 87 | 0.616837 |
793e9b1e0c97e3cd85ca94e6190e7809ee037606 | 684 | py | Python | tf2-neural-style-transfer/settings.py | ganfanhang/DeepLearningExamples | 1d940c60b2f45b925599cea81e97a40ae81c4d76 | [
"Apache-2.0"
] | 274 | 2020-01-28T08:12:55.000Z | 2022-03-28T02:28:31.000Z | tf2-neural-style-transfer/settings.py | ronlado74/DeepLearningExamples | 061c64abde7eb76ab7683616ebb9090814f13362 | [
"Apache-2.0"
] | 16 | 2020-03-23T04:01:37.000Z | 2022-02-10T01:52:51.000Z | tf2-neural-style-transfer/settings.py | ronlado74/DeepLearningExamples | 061c64abde7eb76ab7683616ebb9090814f13362 | [
"Apache-2.0"
] | 241 | 2020-02-03T03:17:42.000Z | 2022-03-28T09:04:05.000Z | # -*- coding: utf-8 -*-
# @File : settings.py
# @Author : AaronJny
# @Time : 2020/03/13
# @Desc :
# Content feature layers and their loss weights
CONTENT_LAYERS = {'block4_conv2': 0.5, 'block5_conv2': 0.5}
# Style feature layers and their loss weights
STYLE_LAYERS = {'block1_conv1': 0.2, 'block2_conv1': 0.2, 'block3_conv1': 0.2, 'block4_conv1': 0.2,
'block5_conv1': 0.2}
# Path to the content image
CONTENT_IMAGE_PATH = './images/content.jpg'
# Path to the style image
STYLE_IMAGE_PATH = './images/style.jpg'
# Directory where generated images are saved
OUTPUT_DIR = './output'
# Overall weight of the content loss
CONTENT_LOSS_FACTOR = 1
# Overall weight of the style loss
STYLE_LOSS_FACTOR = 100
# Image width
WIDTH = 450
# Image height
HEIGHT = 300
# Number of training epochs
EPOCHS = 20
# Training steps per epoch
STEPS_PER_EPOCH = 100
# Learning rate
LEARNING_RATE = 0.03
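# --- Illustrative sketch (not part of the original project) ---
# These layer dictionaries are typically consumed by building a VGG19 feature
# extractor whose outputs are the listed content and style layers. A minimal
# sketch of that wiring, assuming TensorFlow 2.x is available:
def build_vgg19_feature_extractor():
    import tensorflow as tf  # local import keeps settings.py lightweight
    vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False
    layer_names = list(CONTENT_LAYERS.keys()) + list(STYLE_LAYERS.keys())
    outputs = [vgg.get_layer(name).output for name in layer_names]
    return tf.keras.Model(vgg.input, outputs)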
| 19 | 99 | 0.671053 |
793e9b77aee1b283dcf82ea1e797a217923808a0 | 6,543 | py | Python | sar/scripts/uav_sar_simulation.py | cpswarm/complex_behaviors | 68f2a07180f6056f32c0ed16e9e21ac57794e2ed | [
"Apache-2.0"
] | 4 | 2019-09-18T20:42:59.000Z | 2021-02-17T04:50:28.000Z | sar/scripts/uav_sar_simulation.py | cpswarm/complex_behaviors | 68f2a07180f6056f32c0ed16e9e21ac57794e2ed | [
"Apache-2.0"
] | null | null | null | sar/scripts/uav_sar_simulation.py | cpswarm/complex_behaviors | 68f2a07180f6056f32c0ed16e9e21ac57794e2ed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
import rospy
import smach
import smach_ros
import mavros_msgs
import std_srvs.srv
from cpswarm_msgs.msg import *
from swarmros.msg import *
# define state Idle
class Idle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['succeeded'])
def execute(self, userdata):
rospy.loginfo('Executing state Idle')
rospy.sleep(10.0)
return 'succeeded'
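# Illustrative note: a custom SMACH state such as Idle is registered on a state
# machine with its outcome mapped to the next state, exactly as done in main()
# below:
#   smach.StateMachine.add('Idle', Idle(), transitions={'succeeded': 'Takeoff'})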
def main():
rospy.init_node('state_machine_node')
if not rospy.has_param('~altitude'):
rospy.logerr('Altitude not specified, cannot perform simulation!')
return
# Create a TOP level SMACH state machine
top_sm = smach.StateMachine(['succeeded', 'preempted', 'aborted'])
# Open the container
with top_sm:
# ===================================== SarThreads =====================================
# Callback for custom outcomes from SarThreads
def out_cb(outcome_map):
if outcome_map['AbortEventMonitoring'] == 'invalid':
rospy.loginfo('Returning missionAbort Event')
return 'missionAbort'
return 'aborted'
# Create a Concurrence container
sarthreads_concurrence = smach.Concurrence(
outcomes=['missionAbort', 'aborted'],
default_outcome='missionAbort',
child_termination_cb=lambda so: True,
outcome_cb=out_cb)
# Open the container
with sarthreads_concurrence:
# ===================================== SarBehavior =====================================
# Create a State Machine container
sarbehavior_sm = smach.StateMachine(
outcomes=['succeeded', 'preempted', 'aborted'])
# Open the container
with sarbehavior_sm:
# ADD Idle to SarBehavior #
smach.StateMachine.add('Idle',
Idle(),
transitions={'succeeded':'Takeoff'})
# ADD Takeoff to SarBehavior #
smach.StateMachine.add('Takeoff',
smach_ros.SimpleActionState('cmd/takeoff',
TakeoffAction,
goal=TakeoffGoal(rospy.get_param('~altitude'))),
transitions={'succeeded':'Coverage'})
# ADD Coverage to SarBehavior #
smach.StateMachine.add('Coverage',
smach_ros.SimpleActionState('uav_coverage',
CoverageAction,
goal=CoverageGoal(rospy.get_param('~altitude')),
result_slots=['target_id', 'target_pose']),
transitions={'succeeded':'Tracking'},
remapping={'target_id':'target_id', 'target_pose':'target_pose'})
# ===================================== SelectRoverThreads =====================================
# Callback for custom outcomes
def selectrover_outcb(outcome_map):
if outcome_map['LostEventMonitoring'] == 'invalid':
rospy.loginfo('Returning target_lost Event')
return 'target_lost'
return outcome_map['TaskAllocation']
# Create a Concurrence container
selectrover_concurrence = smach.Concurrence(
input_keys=['target_id','target_pose'],
outcomes=['succeeded', 'aborted', 'target_lost'],
default_outcome='aborted',
child_termination_cb=lambda so: True,
outcome_cb=selectrover_outcb)
# Open the container
with selectrover_concurrence:
# ADD TaskAllocation to SelectRoverThreads #
smach.Concurrence.add('TaskAllocation',
smach_ros.SimpleActionState('cmd/task_allocation_auction',
TaskAllocationAction,
goal_slots=['task_id', 'task_pose'],
result_slots=['task_id', 'winner', 'task_pose']),
remapping={'task_id':'target_id', 'task_pose':'target_pose'})
# ADD LostEventMonitoring to SelectRoverThreads #
smach.Concurrence.add('LostEventMonitoring',
smach_ros.MonitorState('target_lost',
TargetPositionEvent,
cond_cb=lambda ud, msg: False))
# ===================================== SelectRoverThreads END =====================================
# ADD SelectRoverThreads to SarBehavior #
smach.StateMachine.add('SelectRover',
selectrover_concurrence,
transitions={'succeeded':'Tracking', 'aborted':'SelectRover', 'target_lost':'LocalCoverage'})
def tracking_goal_cb(userdata, goal):
tracking_goal = TrackingGoal()
tracking_goal.target = userdata.target
tracking_goal.altitude = rospy.get_param('~altitude')
return tracking_goal
# ADD Tracking to SarBehavior #
smach.StateMachine.add('Tracking',
smach_ros.SimpleActionState('uav_tracking',
TrackingAction,
input_keys=['target'],
goal_cb=tracking_goal_cb),
transitions={'succeeded':'Coverage', 'aborted':'LocalCoverage'},
remapping={'target':'target_id'})
# ADD LocalCoverage to SarBehavior #
smach.StateMachine.add('LocalCoverage',
smach_ros.SimpleActionState('uav_local_coverage',
CoverageAction,
goal=CoverageGoal(rospy.get_param('~altitude')),
result_slots=['target_id', 'target_pose']),
transitions={'aborted':'Coverage', 'succeeded':'SelectRover'},
remapping={'target_id':'target_id', 'target_pose':'target_pose'})
# ===================================== SarBehavior END =====================================
# ADD SarBehavior to SarThreads #
smach.Concurrence.add('SarBehavior', sarbehavior_sm)
# ADD AbortEventMonitoring to SarThreads #
smach.Concurrence.add('AbortEventMonitoring',
smach_ros.MonitorState('bridge/events/mission_abort',
SimpleEvent,
cond_cb=lambda ud, msg: False))
# ===================================== SarThreads END =====================================
# ADD SarThreads to TOP state #
smach.StateMachine.add('SarThreads',
sarthreads_concurrence,
transitions={'missionAbort':'MissionAbort'})
# ===================================== MissionAbort =====================================
# Create a State Machine container
missionabort_sm = smach.StateMachine(
outcomes=['succeeded', 'preempted', 'aborted'])
# Open the container
with missionabort_sm:
# ADD Land to MissionAbort #
smach.StateMachine.add('Land',
smach_ros.ServiceState('cmd/land',
std_srvs.srv.Empty),
transitions={})
# ===================================== MissionAbort END =====================================
# ADD MissionAbort to TOP state #
smach.StateMachine.add('MissionAbort',
missionabort_sm,
transitions={'succeeded':'SarThreads'})
# Create and start the introspection server (uncomment if needed)
sis = smach_ros.IntrospectionServer('smach_server', top_sm, '/SM_TOP')
sis.start()
# Execute SMACH plan
outcome = top_sm.execute()
# Wait for ctrl-c to stop the application
rospy.spin()
sis.stop()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| 32.231527 | 105 | 0.644811 |
793e9b9444545d79865f4e935068955867db83bc | 1,367 | py | Python | src/api-service/tests/test_task_config.py | tonybaloney/onefuzz | e0f2e9ed5aae006e0054387de7a0ff8c83c8f722 | [
"MIT"
] | 2,692 | 2020-09-15T17:54:21.000Z | 2022-03-31T11:58:57.000Z | src/api-service/tests/test_task_config.py | tonybaloney/onefuzz | e0f2e9ed5aae006e0054387de7a0ff8c83c8f722 | [
"MIT"
] | 980 | 2020-09-18T18:23:01.000Z | 2022-03-30T22:20:43.000Z | src/api-service/tests/test_task_config.py | nharper285/onefuzz | 1de2cc841d6fc885f8bcb6d032bf5b96ddb52493 | [
"MIT"
] | 177 | 2020-09-16T00:10:56.000Z | 2022-03-30T21:18:10.000Z | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Tuple
import pytest
from __app__.onefuzzlib.tasks.config import is_valid_blob_name
BlobNameTestCase = Tuple[str, bool]
BLOB_NAME_TEST_CASES = [
# Valid
("fuzz.exe", True),
("bin/fuzz.exe", True),
("/".join("a" * 254), True),
("a" * 1024, True),
# Invalid (absolute)
("/fuzz.exe", False),
("/bin/fuzz.exe", False),
# Invalid (special dirs)
("./fuzz.exe", False),
("././fuzz.exe", False),
("./bin/fuzz.exe", False),
("./bin/./fuzz.exe", False),
("../fuzz.exe", False),
("../bin/fuzz.exe", False),
(".././fuzz.exe", False),
("../bin/./fuzz.exe", False),
# Out of Azure size bounds
("", False),
(" ", False),
("/".join("a" * 255), False),
("a" * 1025, False),
# Paths with invalid segments.
("a.", False),
("a..", False),
("a./b", False),
("a/b./c", False),
("a./", False),
("a../", False),
("a./b/", False),
("a/b./c/", False),
("a//", False),
]
@pytest.mark.parametrize("blob_name_test_case", BLOB_NAME_TEST_CASES)
def test_is_valid_blob_name(blob_name_test_case: BlobNameTestCase) -> None:
blob_name, expected = blob_name_test_case
is_valid = is_valid_blob_name(blob_name)
assert is_valid == expected
| 23.568966 | 75 | 0.572056 |
793e9baebb5a0d66ce73900fcee710f26f661ad3 | 1,205 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/xdata/apis/ListInstanceInfoRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/xdata/apis/ListInstanceInfoRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/xdata/apis/ListInstanceInfoRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ListInstanceInfoRequest(JDCloudRequest):
"""
Query the instance information belonging to the user
"""
def __init__(self, parameters, header=None, version="v1"):
super(ListInstanceInfoRequest, self).__init__(
'/regions/{regionId}/dwInstance', 'GET', header, version)
self.parameters = parameters
class ListInstanceInfoParameters(object):
def __init__(self, regionId, ):
"""
:param regionId: Region ID
"""
self.regionId = regionId
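# Illustrative usage sketch (the region value is an example):
#   params = ListInstanceInfoParameters('cn-north-1')
#   request = ListInstanceInfoRequest(params)
# The request object is then executed through the SDK's client machinery.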
| 28.690476 | 75 | 0.714523 |
793e9bb6c6e2f422d158d436958dc5110b478ebc | 2,603 | py | Python | code/data_clean.py | dongpengfei826153155/fiddler2jmeter | 68fcbe197bf0414eff0525f52df056cc89884259 | [
"Apache-2.0"
] | 13 | 2020-09-04T06:45:29.000Z | 2021-11-29T01:46:13.000Z | code/data_clean.py | dongpengfei826153155/fiddler2jmeter | 68fcbe197bf0414eff0525f52df056cc89884259 | [
"Apache-2.0"
] | null | null | null | code/data_clean.py | dongpengfei826153155/fiddler2jmeter | 68fcbe197bf0414eff0525f52df056cc89884259 | [
"Apache-2.0"
] | 7 | 2020-09-06T04:26:34.000Z | 2021-11-29T01:46:11.000Z | import re
# Data cleaning and processing
class DataClean:
def __init__(self, jmeter_datas):
self.jmeter_datas = jmeter_datas
def select_jmeter_data(self, host_name=None, filter_url=None, distinct=False):
'''
Clean the data and keep only the requests we want.
:param host_name: regexp matched against the HOST
:param filter_url: URLs to filter out
eg:/(.*)\.(css|ico|jpg|png|gif|bmp|wav|js|jpe)(\?.*)?$ ----> filters out css|ico|jpg|png|gif|bmp|wav|js|jpe
:return:
'''
select_jmeter_data = []
# print(self.jmeter_datas)
for i, jmeter_data in enumerate(self.jmeter_datas):
# keep it when the host is non-empty and matches host_name, and the url does not match filter_url
try:
if jmeter_data['server_name'] is not None \
and re.match(host_name, jmeter_data['server_name'], re.IGNORECASE) is not None \
and re.match(filter_url, jmeter_data['path'], re.IGNORECASE) is None:
# filter out duplicate url requests
if distinct and len(select_jmeter_data) > 0:
distinct_list = [f"{i['server_name']}{i['path']}" for i in select_jmeter_data]
if f"{jmeter_data['server_name']}{jmeter_data['path']}" in distinct_list:
# print(f"{jmeter_data['server_name']}{jmeter_data['path']}")
continue
select_jmeter_data.append(jmeter_data)
except Exception as e:
print('There is a problem with the regular expressions:\nhostname: {} \nfilter_url: {}'.format(host_name, filter_url))
return select_jmeter_data
def get_header_parameter(self, select_jmeter_data, host_name=None):
'''
re.match(host_name, jmeter_data['server_name'], re.IGNORECASE) is not None
Extract the headers common to all records.
:param select_jmeter_data: the data set
:param host_name: the domain name
:return:
'''
# take one record as the seed for the intersection computation
header_parameter = set()
if host_name is not None:
for i in range(len(select_jmeter_data) - 1):
# print(select_jmeter_data[i]['Header'][0])
if re.match(host_name, select_jmeter_data[i]['Header'][0][1], re.IGNORECASE) is not None:
header_parameter = set(select_jmeter_data[i]['Header'])
break
else:
header_parameter = select_jmeter_data[0]['Header']
# compute the intersection of headers
for i in range(len(select_jmeter_data) - 1):
if select_jmeter_data[i]['Header'] in [('Host', host_name)]:
header_parameter = header_parameter & set(select_jmeter_data[i + 1]['Header'])
return header_parameter
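# --- Illustrative usage sketch (sample records are assumptions) ---
# Each record is expected to carry 'server_name', 'path' and a 'Header' list of
# (name, value) tuples:
#   records = [
#       {'server_name': 'api.example.com', 'path': '/login',
#        'Header': [('Host', 'api.example.com'), ('Accept', '*/*')]},
#   ]
#   cleaner = DataClean(records)
#   kept = cleaner.select_jmeter_data(host_name=r'.*example\.com',
#                                     filter_url=r'/(.*)\.(css|js)(\?.*)?$')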
| 41.31746 | 107 | 0.573569 |
793e9bd27063a005f26670ee28f494df45b4513d | 227 | py | Python | tests/context.py | ezrasavard/compound | d32e8f3ebbc1fc0209e94810a48461097b04515e | [
"MIT"
] | 1 | 2017-08-14T14:37:40.000Z | 2017-08-14T14:37:40.000Z | tests/context.py | ezrasavard/compound | d32e8f3ebbc1fc0209e94810a48461097b04515e | [
"MIT"
] | null | null | null | tests/context.py | ezrasavard/compound | d32e8f3ebbc1fc0209e94810a48461097b04515e | [
"MIT"
] | null | null | null | import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'../compoundfin')))
import compoundfin
| 25.222222 | 74 | 0.709251 |
793e9bee8aedbc33e10729f3b21cb11aaa089df1 | 11,189 | py | Python | clients/client/python/ory_client/model/project_slug.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/model/project_slug.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/model/project_slug.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | null | null | null | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.42
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from ory_client.exceptions import ApiAttributeError
class ProjectSlug(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'slug': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'slug': 'slug', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ProjectSlug - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
slug (str): The Project Slug. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ProjectSlug - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
slug (str): The Project Slug. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.536965 | 194 | 0.57199 |
793e9d9ef264fbf24b869b02296d4aa621ab42f1 | 28,573 | py | Python | alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/tomcat/plugins/custom_wf_plugin/plugin/workflows.py | alien4cloud/alien4cloud-cloudify4-provider | 97faee855255eb0c3ce25bb3075c29acd11a63c5 | [
"Apache-2.0"
] | null | null | null | alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/tomcat/plugins/custom_wf_plugin/plugin/workflows.py | alien4cloud/alien4cloud-cloudify4-provider | 97faee855255eb0c3ce25bb3075c29acd11a63c5 | [
"Apache-2.0"
] | 3 | 2015-12-04T15:27:22.000Z | 2016-04-08T11:32:43.000Z | alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/tomcat/plugins/custom_wf_plugin/plugin/workflows.py | alien4cloud/alien4cloud-cloudify4-provider | 97faee855255eb0c3ce25bb3075c29acd11a63c5 | [
"Apache-2.0"
] | 16 | 2015-01-29T10:05:09.000Z | 2019-06-24T19:23:54.000Z | from cloudify.decorators import workflow
from cloudify.workflows import ctx
from cloudify.workflows import tasks as workflow_tasks
from utils import set_state_task
from utils import operation_task
from utils import link_tasks
from utils import CustomContext
from utils import generate_native_node_workflows
from utils import _get_all_nodes
from utils import _get_all_nodes_instances
from utils import _get_all_modified_node_instances
from utils import is_host_node
from workflow import WfStartEvent
from workflow import build_pre_event
# subworkflow 'install' for host 'Server'
def install_host_server(ctx, graph, custom_context):
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Java')
set_state_task(ctx, graph, 'Java', 'started', 'Java_started', custom_context)
set_state_task(ctx, graph, 'Java', 'creating', 'Java_creating', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.create', 'create_Tomcat', custom_context)
set_state_task(ctx, graph, 'War', 'configured', 'War_configured', custom_context)
set_state_task(ctx, graph, 'War', 'starting', 'War_starting', custom_context)
set_state_task(ctx, graph, 'War', 'started', 'War_started', custom_context)
set_state_task(ctx, graph, 'Java', 'created', 'Java_created', custom_context)
set_state_task(ctx, graph, 'War', 'configuring', 'War_configuring', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'starting', 'Tomcat_starting', custom_context)
operation_task(ctx, graph, 'War', 'cloudify.interfaces.lifecycle.configure', 'configure_War', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'creating', 'Tomcat_creating', custom_context)
set_state_task(ctx, graph, 'Java', 'starting', 'Java_starting', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'created', 'Tomcat_created', custom_context)
set_state_task(ctx, graph, 'War', 'created', 'War_created', custom_context)
operation_task(ctx, graph, 'Java', 'cloudify.interfaces.lifecycle.start', 'start_Java', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'initial', 'Tomcat_initial', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'configuring', 'Tomcat_configuring', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'started', 'Tomcat_started', custom_context)
operation_task(ctx, graph, 'Java', 'cloudify.interfaces.lifecycle.configure', 'configure_Java', custom_context)
operation_task(ctx, graph, 'Java', 'cloudify.interfaces.lifecycle.create', 'create_Java', custom_context)
custom_context.register_native_delegate_wf_step('Server', 'Server_install')
set_state_task(ctx, graph, 'War', 'creating', 'War_creating', custom_context)
operation_task(ctx, graph, 'War', 'cloudify.interfaces.lifecycle.start', 'start_War', custom_context)
set_state_task(ctx, graph, 'Java', 'configured', 'Java_configured', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'configured', 'Tomcat_configured', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.configure', 'configure_Tomcat', custom_context)
set_state_task(ctx, graph, 'War', 'initial', 'War_initial', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.start', 'start_Tomcat', custom_context)
set_state_task(ctx, graph, 'Java', 'configuring', 'Java_configuring', custom_context)
set_state_task(ctx, graph, 'Java', 'initial', 'Java_initial', custom_context)
generate_native_node_workflows(ctx, graph, custom_context, 'install')
link_tasks(graph, 'Tomcat_configuring', 'Java_started', custom_context)
link_tasks(graph, 'create_Java', 'Java_creating', custom_context)
link_tasks(graph, 'Tomcat_created', 'create_Tomcat', custom_context)
link_tasks(graph, 'War_starting', 'War_configured', custom_context)
link_tasks(graph, 'start_War', 'War_starting', custom_context)
link_tasks(graph, 'Java_configuring', 'Java_created', custom_context)
link_tasks(graph, 'configure_War', 'War_configuring', custom_context)
link_tasks(graph, 'start_Tomcat', 'Tomcat_starting', custom_context)
link_tasks(graph, 'War_configured', 'configure_War', custom_context)
link_tasks(graph, 'create_Tomcat', 'Tomcat_creating', custom_context)
link_tasks(graph, 'start_Java', 'Java_starting', custom_context)
link_tasks(graph, 'Tomcat_configuring', 'Tomcat_created', custom_context)
link_tasks(graph, 'Java_configuring', 'Tomcat_created', custom_context)
link_tasks(graph, 'War_configuring', 'War_created', custom_context)
link_tasks(graph, 'Java_started', 'start_Java', custom_context)
link_tasks(graph, 'Tomcat_creating', 'Tomcat_initial', custom_context)
link_tasks(graph, 'configure_Tomcat', 'Tomcat_configuring', custom_context)
link_tasks(graph, 'War_initial', 'Tomcat_started', custom_context)
link_tasks(graph, 'Java_configured', 'configure_Java', custom_context)
link_tasks(graph, 'Java_created', 'create_Java', custom_context)
link_tasks(graph, 'Tomcat_initial', 'Server_install', custom_context)
link_tasks(graph, 'Java_initial', 'Server_install', custom_context)
link_tasks(graph, 'War_created', 'War_creating', custom_context)
link_tasks(graph, 'War_started', 'start_War', custom_context)
link_tasks(graph, 'Java_starting', 'Java_configured', custom_context)
link_tasks(graph, 'Tomcat_starting', 'Tomcat_configured', custom_context)
link_tasks(graph, 'Tomcat_configured', 'configure_Tomcat', custom_context)
link_tasks(graph, 'War_creating', 'War_initial', custom_context)
link_tasks(graph, 'Tomcat_started', 'start_Tomcat', custom_context)
link_tasks(graph, 'configure_Java', 'Java_configuring', custom_context)
link_tasks(graph, 'Java_creating', 'Java_initial', custom_context)
# subworkflow 'uninstall' for host 'Server'
def uninstall_host_server(ctx, graph, custom_context):
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
set_state_task(ctx, graph, 'War', 'deleting', 'War_deleting', custom_context)
set_state_task(ctx, graph, 'Java', 'stopping', 'Java_stopping', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'deleted', 'Tomcat_deleted', custom_context)
set_state_task(ctx, graph, 'War', 'stopping', 'War_stopping', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.stop', 'stop_Tomcat', custom_context)
set_state_task(ctx, graph, 'Java', 'deleting', 'Java_deleting', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'stopped', 'Tomcat_stopped', custom_context)
custom_context.register_native_delegate_wf_step('Server', 'Server_uninstall')
set_state_task(ctx, graph, 'War', 'stopped', 'War_stopped', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'stopping', 'Tomcat_stopping', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'deleting', 'Tomcat_deleting', custom_context)
set_state_task(ctx, graph, 'Java', 'stopped', 'Java_stopped', custom_context)
set_state_task(ctx, graph, 'War', 'deleted', 'War_deleted', custom_context)
set_state_task(ctx, graph, 'Java', 'deleted', 'Java_deleted', custom_context)
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
link_tasks(graph, 'War_deleted', 'War_deleting', custom_context)
link_tasks(graph, 'Java_stopped', 'Java_stopping', custom_context)
link_tasks(graph, 'Server_uninstall', 'Tomcat_deleted', custom_context)
link_tasks(graph, 'War_stopped', 'War_stopping', custom_context)
link_tasks(graph, 'Tomcat_stopped', 'stop_Tomcat', custom_context)
link_tasks(graph, 'Java_deleted', 'Java_deleting', custom_context)
link_tasks(graph, 'Tomcat_deleting', 'Tomcat_stopped', custom_context)
link_tasks(graph, 'War_deleting', 'War_stopped', custom_context)
link_tasks(graph, 'stop_Tomcat', 'Tomcat_stopping', custom_context)
link_tasks(graph, 'Tomcat_deleted', 'Tomcat_deleting', custom_context)
link_tasks(graph, 'Java_deleting', 'Java_stopped', custom_context)
link_tasks(graph, 'Tomcat_stopping', 'War_deleted', custom_context)
link_tasks(graph, 'Server_uninstall', 'Java_deleted', custom_context)
def install_host(ctx, graph, custom_context, compute):
options = {}
options['Server'] = install_host_server
options[compute](ctx, graph, custom_context)
def uninstall_host(ctx, graph, custom_context, compute):
options = {}
options['Server'] = uninstall_host_server
options[compute](ctx, graph, custom_context)
@workflow
def a4c_install(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('install')))
_a4c_install(ctx, graph, custom_context)
return graph.execute()
@workflow
def a4c_uninstall(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('uninstall')))
_a4c_uninstall(ctx, graph, custom_context)
return graph.execute()
def _a4c_install(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Java')
set_state_task(ctx, graph, 'Java', 'started', 'Java_started', custom_context)
set_state_task(ctx, graph, 'Java', 'creating', 'Java_creating', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.create', 'create_Tomcat', custom_context)
set_state_task(ctx, graph, 'War', 'configured', 'War_configured', custom_context)
set_state_task(ctx, graph, 'War', 'starting', 'War_starting', custom_context)
set_state_task(ctx, graph, 'War', 'started', 'War_started', custom_context)
set_state_task(ctx, graph, 'Java', 'created', 'Java_created', custom_context)
set_state_task(ctx, graph, 'War', 'configuring', 'War_configuring', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'starting', 'Tomcat_starting', custom_context)
operation_task(ctx, graph, 'War', 'cloudify.interfaces.lifecycle.configure', 'configure_War', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'creating', 'Tomcat_creating', custom_context)
set_state_task(ctx, graph, 'Java', 'starting', 'Java_starting', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'created', 'Tomcat_created', custom_context)
set_state_task(ctx, graph, 'War', 'created', 'War_created', custom_context)
operation_task(ctx, graph, 'Java', 'cloudify.interfaces.lifecycle.start', 'start_Java', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'initial', 'Tomcat_initial', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'configuring', 'Tomcat_configuring', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'started', 'Tomcat_started', custom_context)
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_install')
operation_task(ctx, graph, 'Java', 'cloudify.interfaces.lifecycle.configure', 'configure_Java', custom_context)
operation_task(ctx, graph, 'Java', 'cloudify.interfaces.lifecycle.create', 'create_Java', custom_context)
custom_context.register_native_delegate_wf_step('Server', 'Server_install')
set_state_task(ctx, graph, 'War', 'creating', 'War_creating', custom_context)
operation_task(ctx, graph, 'War', 'cloudify.interfaces.lifecycle.start', 'start_War', custom_context)
set_state_task(ctx, graph, 'Java', 'configured', 'Java_configured', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'configured', 'Tomcat_configured', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.configure', 'configure_Tomcat', custom_context)
set_state_task(ctx, graph, 'War', 'initial', 'War_initial', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.start', 'start_Tomcat', custom_context)
set_state_task(ctx, graph, 'Java', 'configuring', 'Java_configuring', custom_context)
set_state_task(ctx, graph, 'Java', 'initial', 'Java_initial', custom_context)
generate_native_node_workflows(ctx, graph, custom_context, 'install')
link_tasks(graph, 'Java_started', 'start_Java', custom_context)
link_tasks(graph, 'Java_creating', 'Java_initial', custom_context)
link_tasks(graph, 'create_Tomcat', 'Tomcat_creating', custom_context)
link_tasks(graph, 'War_configured', 'configure_War', custom_context)
link_tasks(graph, 'War_starting', 'War_configured', custom_context)
link_tasks(graph, 'War_started', 'start_War', custom_context)
link_tasks(graph, 'Java_created', 'create_Java', custom_context)
link_tasks(graph, 'War_configuring', 'War_created', custom_context)
link_tasks(graph, 'Tomcat_starting', 'Tomcat_configured', custom_context)
link_tasks(graph, 'configure_War', 'War_configuring', custom_context)
link_tasks(graph, 'Tomcat_creating', 'Tomcat_initial', custom_context)
link_tasks(graph, 'Java_starting', 'Java_configured', custom_context)
link_tasks(graph, 'Tomcat_created', 'create_Tomcat', custom_context)
link_tasks(graph, 'War_created', 'War_creating', custom_context)
link_tasks(graph, 'start_Java', 'Java_starting', custom_context)
link_tasks(graph, 'Tomcat_initial', 'Server_install', custom_context)
link_tasks(graph, 'Tomcat_configuring', 'Java_started', custom_context)
link_tasks(graph, 'Tomcat_configuring', 'Tomcat_created', custom_context)
link_tasks(graph, 'Tomcat_started', 'start_Tomcat', custom_context)
link_tasks(graph, 'configure_Java', 'Java_configuring', custom_context)
link_tasks(graph, 'create_Java', 'Java_creating', custom_context)
link_tasks(graph, 'War_creating', 'War_initial', custom_context)
link_tasks(graph, 'start_War', 'War_starting', custom_context)
link_tasks(graph, 'Java_configured', 'configure_Java', custom_context)
link_tasks(graph, 'Tomcat_configured', 'configure_Tomcat', custom_context)
link_tasks(graph, 'configure_Tomcat', 'Tomcat_configuring', custom_context)
link_tasks(graph, 'War_initial', 'Tomcat_started', custom_context)
link_tasks(graph, 'start_Tomcat', 'Tomcat_starting', custom_context)
link_tasks(graph, 'Java_configuring', 'Tomcat_created', custom_context)
link_tasks(graph, 'Java_configuring', 'Java_created', custom_context)
link_tasks(graph, 'Java_initial', 'Server_install', custom_context)
def _a4c_uninstall(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Tomcat')
custom_context.add_customized_wf_node('Java')
custom_context.add_customized_wf_node('War')
custom_context.add_customized_wf_node('Java')
set_state_task(ctx, graph, 'War', 'deleting', 'War_deleting', custom_context)
set_state_task(ctx, graph, 'Java', 'stopping', 'Java_stopping', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'deleted', 'Tomcat_deleted', custom_context)
set_state_task(ctx, graph, 'War', 'stopping', 'War_stopping', custom_context)
operation_task(ctx, graph, 'Tomcat', 'cloudify.interfaces.lifecycle.stop', 'stop_Tomcat', custom_context)
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_uninstall')
set_state_task(ctx, graph, 'Java', 'deleting', 'Java_deleting', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'stopped', 'Tomcat_stopped', custom_context)
custom_context.register_native_delegate_wf_step('Server', 'Server_uninstall')
set_state_task(ctx, graph, 'War', 'stopped', 'War_stopped', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'stopping', 'Tomcat_stopping', custom_context)
set_state_task(ctx, graph, 'Tomcat', 'deleting', 'Tomcat_deleting', custom_context)
set_state_task(ctx, graph, 'Java', 'stopped', 'Java_stopped', custom_context)
set_state_task(ctx, graph, 'War', 'deleted', 'War_deleted', custom_context)
set_state_task(ctx, graph, 'Java', 'deleted', 'Java_deleted', custom_context)
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
link_tasks(graph, 'War_deleting', 'War_stopped', custom_context)
link_tasks(graph, 'Tomcat_deleted', 'Tomcat_deleting', custom_context)
link_tasks(graph, 'stop_Tomcat', 'Tomcat_stopping', custom_context)
link_tasks(graph, 'Java_deleting', 'Java_stopped', custom_context)
link_tasks(graph, 'Tomcat_stopped', 'stop_Tomcat', custom_context)
link_tasks(graph, 'Server_uninstall', 'Tomcat_deleted', custom_context)
link_tasks(graph, 'Server_uninstall', 'Java_deleted', custom_context)
link_tasks(graph, 'War_stopped', 'War_stopping', custom_context)
link_tasks(graph, 'Tomcat_stopping', 'War_deleted', custom_context)
link_tasks(graph, 'Tomcat_deleting', 'Tomcat_stopped', custom_context)
link_tasks(graph, 'Java_stopped', 'Java_stopping', custom_context)
link_tasks(graph, 'War_deleted', 'War_deleting', custom_context)
link_tasks(graph, 'Java_deleted', 'Java_deleting', custom_context)
def _get_scaling_group_name_from_node_id(ctx, node_id):
    scaling_groups = ctx.deployment.scaling_groups
    for group_name, scaling_group in scaling_groups.iteritems():
for member in scaling_group['members']:
if member == node_id:
ctx.logger.info("Node {} found in scaling group {}".format(node_id, group_name))
return group_name
return None
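# Illustrative note (a sketch inferred from the code above, not a guaranteed schema):
# ctx.deployment.scaling_groups maps a group name to a dict that, as used in this file,
# carries at least 'members' and 'properties', e.g. roughly
#   {'server_group': {'members': ['Server'], 'properties': {'current_instances': 1}}}
# so _get_scaling_group_name_from_node_id(ctx, 'Server') would return 'server_group'.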
@workflow
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
delta = int(delta)
scalable_entity_name = _get_scaling_group_name_from_node_id(ctx, node_id)
scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
if scalable_entity_name:
curr_num_instances = scaling_group['properties']['current_instances']
planned_num_instances = curr_num_instances + delta
scale_id = scalable_entity_name
else:
scaled_node = ctx.get_node(scalable_entity_name)
if not scaled_node:
raise ValueError("Node {0} doesn't exist".format(scalable_entity_name))
if not is_host_node(scaled_node):
raise ValueError("Node {0} is not a host. This workflow can only scale hosts".format(scalable_entity_name))
if delta == 0:
ctx.logger.info('delta parameter is 0, so no scaling will take place.')
return
curr_num_instances = scaled_node.number_of_instances
planned_num_instances = curr_num_instances + delta
scale_id = scaled_node.id
if planned_num_instances < 1:
raise ValueError('Provided delta: {0} is illegal. current number of'
'instances of node/group {1} is {2}'
.format(delta, scalable_entity_name, curr_num_instances))
modification = ctx.deployment.start_modification({
scale_id: {
'instances': planned_num_instances
}
})
ctx.logger.info('Deployment modification started. [modification_id={0} : {1}]'.format(modification.id, dir(modification)))
try:
if delta > 0:
ctx.logger.info('Scaling host/group {0} adding {1} instances'.format(scalable_entity_name, delta))
added_and_related = _get_all_nodes(modification.added)
added = _get_all_modified_node_instances(added_and_related, 'added')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'install')))
custom_context = CustomContext(ctx, added, added_and_related)
install_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Scale failed. Uninstalling node/group {0}'.format(scalable_entity_name))
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
try:
custom_context = CustomContext(ctx, added, added_and_related)
uninstall_host(ctx, graph, custom_context, scalable_entity_name)
graph.execute()
except:
ctx.logger.error('Node {0} uninstallation following scale failure has failed'.format(scalable_entity_name))
raise
else:
ctx.logger.info('Unscaling host/group {0} removing {1} instances'.format(scalable_entity_name, delta))
removed_and_related = _get_all_nodes(modification.removed)
removed = _get_all_modified_node_instances(removed_and_related, 'removed')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'uninstall')))
custom_context = CustomContext(ctx, removed, removed_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Unscale failed.')
raise
except:
ctx.logger.warn('Rolling back deployment modification. [modification_id={0}]'.format(modification.id))
try:
modification.rollback()
except:
ctx.logger.warn('Deployment modification rollback failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
raise
else:
try:
modification.finish()
except:
ctx.logger.warn('Deployment modification finish failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
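# Summary of a4c_scale above: start_modification() plans the instance change, the added or
# removed sub-graph is installed/uninstalled on a fresh task graph, and the modification is
# finished on success or rolled back on failure so the deployment model stays consistent.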
@workflow
def a4c_heal(
ctx,
node_instance_id,
diagnose_value='Not provided',
**kwargs):
"""Reinstalls the whole subgraph of the system topology
The subgraph consists of all the nodes that are hosted in the
failing node's compute and the compute itself.
Additionally it unlinks and establishes appropriate relationships
:param ctx: cloudify context
:param node_id: failing node's id
:param diagnose_value: diagnosed reason of failure
"""
ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}"
.format(node_instance_id, diagnose_value))
failing_node = ctx.get_node_instance(node_instance_id)
host_instance_id = failing_node._node_instance.host_id
failing_node_host = ctx.get_node_instance(host_instance_id)
node_id = failing_node_host.node_id
subgraph_node_instances = failing_node_host.get_contained_subgraph()
added_and_related = _get_all_nodes(ctx)
try:
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'uninstall')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
graph.execute()
except:
ctx.logger.error('Uninstall while healing failed.')
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'install')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
install_host(ctx, graph, custom_context, node_id)
graph.execute()
#following code can be pasted in src/test/python/workflows/context.py for simulation
#def _build_nodes(ctx):
#types = []
#types.append('alien.nodes.Java')
#types.append('tosca.nodes.SoftwareComponent')
#types.append('tosca.nodes.Root')
#node_Java = _build_node(ctx, 'Java', types, 1)
#types = []
#types.append('alien.nodes.Tomcat')
#types.append('tosca.nodes.WebServer')
#types.append('tosca.nodes.SoftwareComponent')
#types.append('tosca.nodes.Root')
#node_Tomcat = _build_node(ctx, 'Tomcat', types, 1)
#types = []
#types.append('alien.nodes.openstack.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_Server = _build_node(ctx, 'Server', types, 1)
#types = []
#types.append('alien.nodes.War')
#types.append('alien.nodes.LoadBalancedWebApplication')
#types.append('tosca.nodes.Root')
#node_War = _build_node(ctx, 'War', types, 1)
#types = []
#types.append('alien.nodes.openstack.PublicNetwork')
#types.append('alien.nodes.PublicNetwork')
#types.append('tosca.nodes.Network')
#types.append('tosca.nodes.Root')
#node_NetPub = _build_node(ctx, 'NetPub', types, 1)
#_add_relationship(node_Java, node_Server)
#_add_relationship(node_Tomcat, node_Server)
#_add_relationship(node_Tomcat, node_Java)
#_add_relationship(node_Server, node_NetPub)
#_add_relationship(node_War, node_Tomcat)
| 58.075203 | 127 | 0.73146 |
793e9da2fa742db3574445e0e028ad412928ff35 | 611 | py | Python | ns/flow/flow.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | 3 | 2021-06-17T01:57:43.000Z | 2021-12-16T11:53:31.000Z | ns/flow/flow.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | null | null | null | ns/flow/flow.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | null | null | null | class Flow:
def __init__(self,
fid,
src,
dst,
size=None,
start_time=None,
finish_time=None,
pkt_gen=None,
pkt_sink=None) -> None:
self.fid = fid
self.src = src
self.dst = dst
self.size = size
self.start_time = start_time
self.finish_time = finish_time
self.pkt_gen = pkt_gen
self.pkt_sink = pkt_sink
self.path = None
def __repr__(self) -> str:
return f"Flow {self.fid} on {self.path}"
| 25.458333 | 48 | 0.466448 |
793e9e17169f69013d2ece32d6c799ced555978c | 590 | py | Python | voxel_globe/websockets/migrations/0002_log_message_timestamp.py | ngageoint/voxel-globe | 91f386de652b704942165889c10468b2c4cf4eec | [
"MIT"
] | 28 | 2015-07-27T23:57:24.000Z | 2020-04-05T15:10:52.000Z | voxel_globe/websockets/migrations/0002_log_message_timestamp.py | VisionSystemsInc/voxel_globe | 6eb3fca5586726428e9d914f7b730ca164c64a52 | [
"MIT"
] | 50 | 2016-02-11T15:50:22.000Z | 2016-10-27T22:38:27.000Z | voxel_globe/websockets/migrations/0002_log_message_timestamp.py | ngageoint/voxel-globe | 91f386de652b704942165889c10468b2c4cf4eec | [
"MIT"
] | 8 | 2015-07-27T19:22:03.000Z | 2021-01-04T09:44:48.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('websockets', '0001_update_websocket_models'),
]
operations = [
migrations.AddField(
model_name='logmessagemodel',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2016, 8, 16, 12, 54, 35, 276042, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
]
| 25.652174 | 130 | 0.655932 |
793e9f5b22b07dba8a759e852f6676f028ee19ff | 102 | py | Python | src/app/config_default.py | cla7aye15I4nd/zhiyuan-salon-Q | 3e7c91fc466f6aeef7950034d9ed84928e539cb9 | [
"MIT"
] | 2 | 2021-05-25T07:33:44.000Z | 2021-05-25T07:35:11.000Z | src/app/config_default.py | cla7aye15I4nd/zhiyuan-salon-Q | 3e7c91fc466f6aeef7950034d9ed84928e539cb9 | [
"MIT"
] | null | null | null | src/app/config_default.py | cla7aye15I4nd/zhiyuan-salon-Q | 3e7c91fc466f6aeef7950034d9ed84928e539cb9 | [
"MIT"
] | null | null | null | SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///./user.db'
SECRET_KEY = ''
| 25.5 | 47 | 0.77451 |
793e9f9916e17c1a9232f00431eaeb26731c1e0b | 5,361 | py | Python | tests/protocols/test_irc.py | spaceone/circuits | ed6d5464f1f83034109ed3d23d126c715450cfd2 | [
"MIT"
] | null | null | null | tests/protocols/test_irc.py | spaceone/circuits | ed6d5464f1f83034109ed3d23d126c715450cfd2 | [
"MIT"
] | null | null | null | tests/protocols/test_irc.py | spaceone/circuits | ed6d5464f1f83034109ed3d23d126c715450cfd2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pytest
from pytest import fixture
from circuits import Component, Event, handler
from circuits.net.events import read, write
from circuits.protocols.irc import (
AWAY, INVITE, IRC, JOIN, KICK, MODE, NAMES, NICK, NOTICE, PART, PASS, PONG,
PRIVMSG, QUIT, TOPIC, USER, WHOIS, irc_color_to_ansi, joinprefix, parsemsg,
parseprefix, strip,
)
from circuits.six import b, u
class App(Component):
def init(self):
IRC().register(self)
self.data = []
self.events = []
@handler(False)
def reset(self):
self.data = []
self.events = []
@handler()
def _on_event(self, event, *args, **kwargs):
self.events.append(event)
def request(self, message):
self.fire(write(bytes(message)))
def write(self, data):
self.data.append(data)
@fixture(scope="function")
def app(request):
app = App()
while len(app):
app.flush()
return app
def test_strip():
s = ":\x01\x02test\x02\x01"
s = strip(s)
assert s == "\x01\x02test\x02\x01"
s = ":\x01\x02test\x02\x01"
s = strip(s, color=True)
assert s == "test"
def test_joinprefix():
nick, ident, host = "test", "foo", "localhost"
s = joinprefix(nick, ident, host)
assert s == "test!foo@localhost"
def test_parsemsg():
s = b(":foo!bar@localhost NICK foobar")
source, command, args = parsemsg(s)
assert source == (u("foo"), u("bar"), u("localhost"))
assert command == "NICK"
assert args == [u("foobar")]
s = b("")
source, command, args = parsemsg(s)
assert source == (None, None, None)
assert command is None
assert args == []
def test_parseprefix():
s = "test!foo@localhost"
nick, ident, host = parseprefix(s)
assert nick == "test"
assert ident == "foo"
assert host == "localhost"
s = "test"
nick, ident, host = parseprefix(s)
assert nick == "test"
assert ident is None
assert host is None
@pytest.mark.parametrize("event,data", [
(PASS("secret"), b"PASS secret\r\n"),
(
USER("foo", "localhost", "localhost", "Test Client"),
b"USER foo localhost localhost :Test Client\r\n"
),
(NICK("test"), b"NICK test\r\n"),
(PONG("localhost"), b"PONG :localhost\r\n"),
(QUIT(), b"QUIT Leaving\r\n"),
(QUIT("Test"), b"QUIT Test\r\n"),
(QUIT("Test Message"), b"QUIT :Test Message\r\n"),
(JOIN("#test"), b"JOIN #test\r\n"),
(JOIN("#test", "secret"), b"JOIN #test secret\r\n"),
(PART("#test"), b"PART #test\r\n"),
(PRIVMSG("test", "Hello"), b"PRIVMSG test Hello\r\n"),
(PRIVMSG("test", "Hello World"), b"PRIVMSG test :Hello World\r\n"),
(NOTICE("test", "Hello"), b"NOTICE test Hello\r\n"),
(NOTICE("test", "Hello World"), b"NOTICE test :Hello World\r\n"),
(KICK("#test", "test"), b"KICK #test test :\r\n"),
(KICK("#test", "test", "Bye"), b"KICK #test test Bye\r\n"),
(KICK("#test", "test", "Good Bye!"), b"KICK #test test :Good Bye!\r\n"),
(TOPIC("#test", "Hello World!"), b"TOPIC #test :Hello World!\r\n"),
(MODE("+i"), b"MODE +i\r\n"),
(MODE("#test", "+o", "test"), b"MODE #test +o test\r\n"),
(INVITE("test", "#test"), b"INVITE test #test\r\n"),
(NAMES(), b"NAMES\r\n"),
(NAMES("#test"), b"NAMES #test\r\n"),
(AWAY("I am away."), b"AWAY :I am away.\r\n"),
(WHOIS("somenick"), b"WHOIS :somenick\r\n"),
])
def test_commands(event, data):
message = event.args[0]
    assert bytes(message) == data
@pytest.mark.parametrize("data,event", [
(
b":localhost NOTICE * :*** Looking up your hostname...\r\n",
Event.create(
"notice", (u("localhost"), None, None), u("*"),
u("*** Looking up your hostname..."),
)
),
])
def test_responses(app, data, event):
app.reset()
app.fire(read(data))
while len(app):
app.flush()
e = app.events[-1]
assert event.name == e.name
assert event.args == e.args
assert event.kwargs == e.kwargs
@pytest.mark.parametrize('inp,out', [
('hi \x02bold\x02 \x1ditalic\x1d \x1funderline\x1f \x1estrikethrough\x1e', 'hi \x02bold\x02 \x1b[03mitalic\x1b[23m \x1b[04munderline\x1b[24m \x1b[09mstrikethrough\x1b[29m'),
('\x0300white\x03 \x0301black\x03 \x0302blue\x03 \x0303green\x03 \x0304red\x03 ', '\x1b[37mwhite\x1b[39;49m \x1b[30mblack\x1b[39;49m \x1b[34mblue\x1b[39;49m \x1b[32mgreen\x1b[39;49m \x1b[31mred\x1b[39;49m '),
('\x0305brown\x03 \x0306magenta\x03 \x0307orange\x03 \x0308yellow\x03 ', '\x1b[36mbrown\x1b[39;49m \x1b[35mmagenta\x1b[39;49m \x1b[33morange\x1b[39;49m \x1b[93myellow\x1b[39;49m '),
('\x0309lightgreen\x03 \x0310cyan\x03 \x0311lightcyan\x03 \x0312lightblue\x03 ', '\x1b[92mlightgreen\x1b[39;49m \x1b[36mcyan\x1b[39;49m \x1b[96mlightcyan\x1b[39;49m \x1b[94mlightblue\x1b[39;49m '),
('\x0313pink\x03 \x0314grey\x03 \x0315lightgrey\x03', '\x1b[95mpink\x1b[39;49m \x1b[90mgrey\x1b[39;49m \x1b[37mlightgrey\x1b[39;49m'),
('\x0300white\x03 \x0301,01black\x03 \x0301,02blue\x03 \x0301,03green\x03 \x0301,04red\x03 ', '\x1b[37mwhite\x1b[39;49m \x1b[30;40mblack\x1b[39;49m \x1b[30;44mblue\x1b[39;49m \x1b[30;42mgreen\x1b[39;49m \x1b[30;41mred\x1b[39;49m '),
('\x0f', '\x1b[m'),
('\x0302blue', '\x1b[34mblue\x1b[m'),
])
def test_ansi_color(inp, out):
assert irc_color_to_ansi(inp) == out
| 33.092593 | 236 | 0.611267 |
793ea103f6ede73dc586316a863d7fffd5fc6dff | 134 | py | Python | src/browserist/browser/scroll/to_top_of_page.py | jakob-bagterp/browserist | 76bd916dd217b7da3759fd6ec3374191002dc091 | [
"Apache-2.0"
] | 2 | 2022-02-20T10:03:19.000Z | 2022-03-22T11:17:10.000Z | src/browserist/browser/scroll/to_top_of_page.py | jakob-bagterp/browserist | 76bd916dd217b7da3759fd6ec3374191002dc091 | [
"Apache-2.0"
] | null | null | null | src/browserist/browser/scroll/to_top_of_page.py | jakob-bagterp/browserist | 76bd916dd217b7da3759fd6ec3374191002dc091 | [
"Apache-2.0"
] | null | null | null | from .to_position import scroll_to_position
def scroll_to_top_of_page(driver: object) -> None:
scroll_to_position(driver, 0, 0)
| 22.333333 | 50 | 0.783582 |
793ea113d6d4fd41b35d10c3564067d48b713dd1 | 1,891 | py | Python | benchmark/startPyquil2707.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil2707.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil2707.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=10
prog += H(3) # number=40
prog += CZ(0,3) # number=41
prog += H(3) # number=42
prog += CNOT(0,3) # number=33
prog += X(3) # number=34
prog += CNOT(0,3) # number=35
prog += CNOT(0,3) # number=25
prog += CNOT(0,3) # number=12
prog += H(2) # number=30
prog += CZ(0,2) # number=31
prog += H(2) # number=32
prog += X(2) # number=21
prog += H(2) # number=36
prog += CZ(0,2) # number=37
prog += H(2) # number=38
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(3) # number=16
prog += CZ(1,3) # number=17
prog += H(3) # number=18
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += H(2) # number=39
prog += H(0) # number=26
prog += CZ(3,0) # number=27
prog += H(0) # number=28
prog += CNOT(3,0) # number=14
prog += Y(2) # number=29
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2707.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 25.213333 | 64 | 0.56055 |
793ea1f806e761fd116844a9b2e81f1c386aa10b | 4,700 | py | Python | kgcnn/literature/GATv2.py | thegodone/gcnn_keras | 2009b9ab9a07c1a369849478812fcc2cb9799945 | [
"MIT"
] | null | null | null | kgcnn/literature/GATv2.py | thegodone/gcnn_keras | 2009b9ab9a07c1a369849478812fcc2cb9799945 | [
"MIT"
] | null | null | null | kgcnn/literature/GATv2.py | thegodone/gcnn_keras | 2009b9ab9a07c1a369849478812fcc2cb9799945 | [
"MIT"
] | null | null | null | import tensorflow as tf
import tensorflow.keras as ks
from kgcnn.layers.casting import ChangeTensorType
from kgcnn.layers.conv.attention import AttentionHeadGATV2
from kgcnn.layers.keras import Concatenate, Dense, Average, Activation
from kgcnn.layers.mlp import MLP
from kgcnn.layers.pool.pooling import PoolingNodes
from kgcnn.utils.models import generate_embedding, update_model_kwargs
# Graph Attention Networks by Veličković et al. (2018)
# https://arxiv.org/abs/1710.10903
# Improved by
# How Attentive are Graph Attention Networks?
# by Brody et al. (2021)
model_default = {'name': "GATv2",
'inputs': [{'shape': (None,), 'name': "node_attributes", 'dtype': 'float32', 'ragged': True},
{'shape': (None,), 'name': "edge_attributes", 'dtype': 'float32', 'ragged': True},
{'shape': (None, 2), 'name': "edge_indices", 'dtype': 'int64', 'ragged': True}],
'input_embedding': {"node": {"input_dim": 95, "output_dim": 64},
"edge": {"input_dim": 5, "output_dim": 64}},
'output_embedding': 'graph',
'output_mlp': {"use_bias": [True, True, False], "units": [25, 10, 1],
"activation": ['relu', 'relu', 'sigmoid']},
'attention_args': {"units": 32, "use_final_activation": False, "use_edge_features": True,
"has_self_loops": True, "activation": "kgcnn>leaky_relu", "use_bias": True},
'pooling_nodes_args': {'pooling_method': 'mean'},
'depth': 3, 'attention_heads_num': 5,
'attention_heads_concat': False, 'verbose': 1
}
@update_model_kwargs(model_default)
def make_model(inputs=None,
input_embedding=None,
output_embedding=None,
output_mlp=None,
attention_args=None,
pooling_nodes_args=None,
depth=None,
attention_heads_num=None,
attention_heads_concat=None,
**kwargs):
"""Make GATv2 graph network via functional API. Default parameters can be found in :obj:`model_default`.
Args:
inputs (list): List of dictionaries unpacked in :obj:`tf.keras.layers.Input`. Order must match model definition.
input_embedding (dict): Dictionary of embedding arguments for nodes etc. unpacked in `Embedding` layers.
output_embedding (str): Main embedding task for graph network. Either "node", ("edge") or "graph".
output_mlp (dict): Dictionary of layer arguments unpacked in the final classification `MLP` layer block.
Defines number of model outputs and activation.
attention_args (dict): Dictionary of layer arguments unpacked in `AttentionHeadGATV2` layer.
pooling_nodes_args (dict): Dictionary of layer arguments unpacked in `PoolingNodes` layer.
depth (int): Number of graph embedding units or depth of the network.
attention_heads_num (int): Number of attention heads to use.
attention_heads_concat (bool): Whether to concat attention heads. Otherwise average heads.
Returns:
tf.keras.models.Model
"""
# Make input
node_input = ks.layers.Input(**inputs[0])
edge_input = ks.layers.Input(**inputs[1])
edge_index_input = ks.layers.Input(**inputs[2])
# Embedding, if no feature dimension
n = generate_embedding(node_input, inputs[0]['shape'], input_embedding['node'])
ed = generate_embedding(edge_input, inputs[1]['shape'], input_embedding['edge'])
edi = edge_index_input
# Model
nk = Dense(units=attention_args["units"], activation="linear")(n)
for i in range(0, depth):
heads = [AttentionHeadGATV2(**attention_args)([nk, ed, edi]) for _ in range(attention_heads_num)]
if attention_heads_concat:
nk = Concatenate(axis=-1)(heads)
else:
nk = Average()(heads)
nk = Activation(activation=attention_args["activation"])(nk)
n = nk
# Output embedding choice
if output_embedding == 'graph':
out = PoolingNodes(**pooling_nodes_args)(n)
out = MLP(**output_mlp)(out)
main_output = ks.layers.Flatten()(out) # will be dense
elif output_embedding == 'node':
out = MLP(**output_mlp)(n)
main_output = ChangeTensorType(input_tensor_type="ragged", output_tensor_type="tensor")(out)
else:
raise ValueError("Unsupported graph embedding for `GATv2`")
# Define model output
model = tf.keras.models.Model(inputs=[node_input, edge_input, edge_index_input], outputs=main_output)
return model
| 47.959184 | 120 | 0.635532 |
793ea2cbdc428bd05e80cd8878ca62180763c3b3 | 1,676 | py | Python | synergy/mx/tree_details.py | hwknsj/scheduler | 6740331360f49083c208085fb5a60ce80ebf418b | [
"BSD-3-Clause"
] | null | null | null | synergy/mx/tree_details.py | hwknsj/scheduler | 6740331360f49083c208085fb5a60ce80ebf418b | [
"BSD-3-Clause"
] | null | null | null | synergy/mx/tree_details.py | hwknsj/scheduler | 6740331360f49083c208085fb5a60ce80ebf418b | [
"BSD-3-Clause"
] | null | null | null | __author__ = 'Bohdan Mushkevych'
from werkzeug.utils import cached_property
from synergy.mx.base_request_handler import BaseRequestHandler
from synergy.mx.rest_model_factory import create_rest_timetable_tree, create_rest_managed_scheduler_entry
class TreeDetails(BaseRequestHandler):
def __init__(self, request, **values):
super(TreeDetails, self).__init__(request, **values)
def _get_tree_details(self, tree_name):
tree_obj = self.scheduler.timetable.trees[tree_name]
rest_tree = create_rest_timetable_tree(self.scheduler.timetable, tree_obj)
for process_name in tree_obj.process_hierarchy:
thread_handler = self.scheduler.managed_handlers[process_name]
rest_process = create_rest_managed_scheduler_entry(thread_handler,
self.scheduler.timetable,
self.scheduler.gc)
rest_tree.processes[process_name] = rest_process.document
return rest_tree
@cached_property
def tree_details(self):
tree_name = self.request.args.get('tree_name')
if tree_name:
return self._get_tree_details(tree_name).document
else:
return dict()
# @cached_property
def mx_page_trees(self, mx_page):
""" return trees assigned to given MX Page """
resp = dict()
for tree_name, tree in self.scheduler.timetable.trees.items():
if tree.mx_page == mx_page:
rest_tree = self._get_tree_details(tree_name)
resp[tree.tree_name] = rest_tree.document
return resp
| 40.878049 | 105 | 0.655131 |
793ea2d497a8995a92e9bde98facf0a6c8112edd | 4,257 | py | Python | phaino/models/gaussian.py | vinicius-pirees/phaino | 4aa720505fef55b416149df664c4a8787e1491e5 | [
"MIT"
] | null | null | null | phaino/models/gaussian.py | vinicius-pirees/phaino | 4aa720505fef55b416149df664c4a8787e1491e5 | [
"MIT"
] | 4 | 2020-09-26T01:13:19.000Z | 2022-02-10T01:50:11.000Z | phaino/models/gaussian.py | vinicius-pirees/phaino | 4aa720505fef55b416149df664c4a8787e1491e5 | [
"MIT"
] | null | null | null | import os
import numpy as np
import scipy
from numpy import inf
from phaino.utils.spatio_temporal_gradient_features import process_frames
from phaino.utils.commons import frame_to_gray, reduce_frame
import pickle
from datetime import datetime
from phaino.config.config import PhainoConfiguration
config = PhainoConfiguration()
phaino_config = config.get_config()
MODELS_DIR = phaino_config['models']['directory']
MODEL_DIR = os.path.join(os.path.join(MODELS_DIR, 'gaussian'))
def measures(X):
means = X.mean(axis=0)
variances = np.mean((X - means) ** 2, axis=0 )
stds = np.sqrt(variances)
return means, variances, stds
def init_model_dir(model_dir):
os.makedirs(model_dir, exist_ok=True)
class Gaussian:
def __init__(self):
self.means = None
self.variances = None
self.stds = None
self.model_dir = MODEL_DIR
init_model_dir(MODEL_DIR)
def update_model(self, X):
self.means, self.variances, self.stds = measures(X)
def get_measures(self):
return self.means, self.variances, self.stds
def evaluate(self, x):
calc_gaussian = lambda x, mu, sigma: (1.0 / (sigma * np.sqrt(2 * np.pi))) * np.exp( -(x - mu)**2 / (2 * sigma**2) )
gaussian = calc_gaussian(x, self.means, self.stds)
#return np.sum(np.log(gaussian))
# Test approach
result = np.log(gaussian)
result[result == -inf] = -10000000
result[result == inf] = 10000000
result = np.nan_to_num(result)
return np.sum(result)
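    # Note on evaluate(): it returns the log-likelihood of x under independent per-feature
    # Gaussians, i.e. sum_i log N(x_i | mean_i, std_i), with +/-inf and NaN terms clamped
    # so a single degenerate feature (std close to 0) does not dominate the score.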
def gaussian_model_frame(self, frame):
frame = frame_to_gray(frame)
reduced_frame = reduce_frame(frame)
return reduced_frame
def input_frames_transform(self, train_frames):
input_frames = []
for frame in range(0,train_frames.shape[0]):
            input_frames.append(self.gaussian_model_frame(train_frames[frame]))
return np.array(input_frames)
def frame_predictions(self, test_video, pred_list, clip_size=5, threshold=0):
frame_predicitions_dict = {}
video_len = test_video.shape[0]
eval_index = 0
cnt = 1
for frame_num in range(0,video_len):
pred = pred_list[eval_index]
if pred < threshold:
detection = 1
else:
detection = 0
frame_predicitions_dict[frame_num] = detection
if cnt == clip_size:
eval_index += 1
cnt = 0
cnt += 1
return frame_predicitions_dict
def frame_predictions_show(self, test_video, frame_predictions_dict):
pass
def get_training_set(self, input_frames):
        return process_frames(input_frames)
def use_last_model(self, path):
model = self.load_model(path)
self.means = model['means']
self.variances = model['variances']
self.stds = model['stds']
def fit(self, training_set):
self.update_model(training_set)
self.save_model(self.means, self.variances, self.stds)
def get_last_model_path(self):
model_list = os.listdir(self.model_dir)
if len(model_list) == 0:
print('No model yet')
else:
model_list.sort()
last_model = model_list[-1]
return os.path.join(self.model_dir,last_model)
def new_model_path(self, name):
return os.path.join(self.model_dir, name + '_' + datetime.now().strftime("%Y%m%d_%H%M%S"))
def save_model(self, means, variances, stds):
with open(self.new_model_path('gaussian'), 'wb') as handle:
pickle.dump({'means': means, 'variances': variances, 'stds': stds}, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_last_model(self):
path = self.get_last_model_path()
return self.load_model(path)
def load_model(self, path):
with open(path, 'rb') as handle:
model = pickle.load(handle)
self.means = model['means']
self.variances = model['variances']
self.stds = model['stds']
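# Usage sketch (illustrative only; note that fit() also pickles the model under MODEL_DIR):
#   g = Gaussian()
#   g.fit(training_set)          # training_set: 2D array, one feature vector per row
#   score = g.evaluate(sample)   # higher log-likelihood means closer to the training data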
| 29.5625 | 123 | 0.606765 |
793ea35449988c46c2ba3832d970f1d6491e4913 | 1,274 | py | Python | src/boost_histogram/_internal/traits.py | kgizdov/boost-histogram | a456b89c703a2e69b32ca7de23448a133d5c59a3 | [
"BSD-3-Clause"
] | null | null | null | src/boost_histogram/_internal/traits.py | kgizdov/boost-histogram | a456b89c703a2e69b32ca7de23448a133d5c59a3 | [
"BSD-3-Clause"
] | null | null | null | src/boost_histogram/_internal/traits.py | kgizdov/boost-histogram | a456b89c703a2e69b32ca7de23448a133d5c59a3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# This is basically a dataclass from Python 3.7, with frozen=True
from typing import Any
_traits = (
"underflow",
"overflow",
"circular",
"growth",
"continuous",
"ordered",
)
# This can be converted to an immutable dataclass once Python < 3.7 is dropped.
class Traits(object):
__slots__ = _traits
def __init__(
self,
underflow=False,
overflow=False,
circular=False,
growth=False,
continuous=False,
ordered=False,
):
# type: (bool, bool, bool, bool, bool, bool) -> None
for name in _traits:
setattr(self, name, locals()[name])
def __eq__(self, other):
# type: (Any) -> bool
return all(getattr(self, name) == getattr(other, name) for name in _traits)
def __ne__(self, other):
# type: (Any) -> bool
return not self == other
@property
def discrete(self):
# type: () -> bool
"True if axis is not continuous"
return not self.continuous # type: ignore
def __repr__(self):
# type: () -> str
args = ("{}={}".format(name, getattr(self, name)) for name in _traits)
return "{}({})".format(self.__class__.__name__, ", ".join(args))
| 24.037736 | 83 | 0.567504 |
793ea36c6eec604fe884e406cff74048c297eee7 | 1,092 | py | Python | tests/tests_geomstats/test_errors.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 10 | 2018-01-28T17:16:44.000Z | 2022-02-27T02:42:41.000Z | tests/tests_geomstats/test_errors.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 67 | 2018-01-05T17:15:32.000Z | 2018-05-11T18:50:30.000Z | tests/tests_geomstats/test_errors.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 3 | 2021-11-12T23:57:46.000Z | 2021-12-04T10:05:42.000Z | """Unit tests for errors."""
import pytest
import geomstats.backend as gs
import geomstats.errors
import geomstats.tests
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.spd_matrices import SPDMatrices
class TestBackends(geomstats.tests.TestCase):
def test_check_belongs(self):
euclidean = Euclidean(5)
point = gs.array([1, 2])
with pytest.raises(RuntimeError):
geomstats.errors.check_belongs(point, euclidean)
@staticmethod
def test_check_belongs_with_tol():
spd = SPDMatrices(5)
point = spd.random_point()
geomstats.errors.check_belongs(point, spd)
def test_check_integer(self):
a = -2
with pytest.raises(ValueError):
geomstats.errors.check_integer(a, "a")
def test_check_parameter_accepted_values(self):
param = "lefttt"
accepted_values = ["left", "right"]
with pytest.raises(ValueError):
geomstats.errors.check_parameter_accepted_values(
param, "left_or_right", accepted_values
)
| 27.3 | 61 | 0.675824 |
793ea3e9afbe9e94040e74850dbcb754760637f7 | 15,610 | py | Python | motifx/motifx.py | EthanVn/MotifX | 4258ac72823bdc335f4f3d01fd03c8dc495e14ec | [
"MIT"
] | 1 | 2021-05-04T17:43:47.000Z | 2021-05-04T17:43:47.000Z | motifx/motifx.py | EthanVn/MotifX | 4258ac72823bdc335f4f3d01fd03c8dc495e14ec | [
"MIT"
] | null | null | null | motifx/motifx.py | EthanVn/MotifX | 4258ac72823bdc335f4f3d01fd03c8dc495e14ec | [
"MIT"
] | null | null | null | from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
from .cache import Cache
import numpy as np
class MotifX(object):
def __init__(self, matrix, reformat=True, dtype=np.int32):
self.cache = Cache(matrix, reformat, dtype)
    def M1(self) -> csr_matrix:
UT_csr: csr_matrix = self.cache.UT_csr
U_U_csr: csr_matrix = self.cache.U_U_csr
# C = (U * U) .* U';
C: csr_matrix = U_U_csr.multiply(UT_csr)
# W = C + C';
return C + C.transpose()
def M2(self) -> csr_matrix:
B_csr: csr_matrix = self.cache.B_csr
UT_csr: csr_matrix = self.cache.UT_csr
B_U_csr: csr_matrix = self.cache.B_U_csr
U_B_csr: csr_matrix = self.cache.U_B_csr
U_U_csr: csr_matrix = self.cache.U_U_csr
# C = (B * U) .* U' + (U * B) .* U' + (U * U) .* B;
C: csr_matrix = B_U_csr.multiply(UT_csr) + U_B_csr.multiply(UT_csr) + U_U_csr.multiply(B_csr)
# W = C + C';
return C + C.transpose()
def M3(self) -> csr_matrix:
B_csr: csr_matrix = self.cache.B_csr
U_csr: csr_matrix = self.cache.U_csr
B_B_csr: csr_matrix = self.cache.B_B_csr
B_U_csr: csr_matrix = self.cache.B_U_csr
U_B_csr: csr_matrix = self.cache.U_B_csr
# C = (B * B) .* U + (B * U) .* B + (U * B) .* B;
C: csr_matrix = B_B_csr.multiply(U_csr) + B_U_csr.multiply(B_csr) + U_B_csr.multiply(B_csr)
# W = C+ C';
return C + C.transpose()
def M4(self) -> csr_matrix:
B_csr: csr_matrix = self.cache.B_csr
B_B_csr: csr_matrix = self.cache.B_B_csr
# W = (B * B) .* B;
return B_B_csr.multiply(B_csr)
def M5(self) -> csr_matrix:
U_csr: csr_matrix = self.cache.U_csr
U_U_csr: csr_matrix = self.cache.U_U_csr
UT_U_csr: csr_matrix = self.cache.UT_U_csr
U_UT_csr: csr_matrix = self.cache.U_UT_csr
# T1 = (U * U ) .* U;
T1: csr_matrix = U_U_csr.multiply(U_csr)
# T2 = (U' * U ) .* U;
T2: csr_matrix = UT_U_csr.multiply(U_csr)
# T3 = (U * U') .* U;
T3: csr_matrix = U_UT_csr.multiply(U_csr)
# C = T1 + T2 + T3;
C: csr_matrix = T1 + T2 + T3
# W = C + C';
return C + C.transpose()
def M6(self) -> csr_matrix:
B_csr: csr_matrix = self.cache.B_csr
U_csr: csr_matrix = self.cache.U_csr
U_B_csr: csr_matrix = self.cache.U_B_csr
UT_U_csr: csr_matrix = self.cache.UT_U_csr
# C1 = (U * B) .* U;
C1: csr_matrix = U_B_csr.multiply(U_csr)
# C1 = C1 + C1';
C1: csr_matrix = C1 + C1.transpose()
# C2 = (U' * U) .* B;
C2 = UT_U_csr.multiply(B_csr)
# W = C1 + C2;
return C1 + C2
def M7(self) -> csr_matrix:
B_csr: csr_matrix = self.cache.B_csr
UT_csr: csr_matrix = self.cache.UT_csr
UT_B_csr: csr_matrix = self.cache.UT_B_csr
U_UT_csr: csr_matrix = self.cache.U_UT_csr
# C1 = (U' * B) .* U';
C1: csr_matrix = UT_B_csr.multiply(UT_csr)
# C1 = C1 + C1';
C1 = C1 + C1.transpose()
# C2 = (U * U') .* B;
C2: csr_matrix = U_UT_csr.multiply(B_csr)
# W = C1 + C2;
return C1 + C2
def M8(self) -> csr_matrix:
W_lst = {}
dtype = self.cache.dtype
shape = self.cache.shape
A_dict: dict = self.cache.A_dict
U_row_find: list = self.cache.U_row_find
# W = zeros(size(G));
W: lil_matrix = lil_matrix(shape, dtype=dtype)
# N = size(G, 1);
# for i = 1:N
for i in range(shape[0]):
# J = find(U(i, :));
J = U_row_find[i][0]
# for j1 = 1:length(J)
for j1 in range(U_row_find[i][1]):
# for j2 = (j1+1):length(J)
for j2 in range(j1 + 1, U_row_find[i][1]):
# k1 = J(j1);
k1 = J[j1]
# k2 = J(j2);
k2 = J[j2]
# if A(k1, k2) == 0 & & A(k2, k1) == 0
if not A_dict.get((k1, k2)) and not A_dict.get((k2, k1)):
# W(i, k1) = W(i, k1) + 1;
W_lst[(i, k1)] = W_lst.get((i, k1), 0) + 1
# W(i, k2) = W(i, k2) + 1;
W_lst[(i, k2)] = W_lst.get((i, k2), 0) + 1
# W(k1, k2) = W(k1, k2) + 1;
W_lst[(k1, k2)] = W_lst.get((k1, k2), 0) + 1
row, col, data = [], [], []
for (i, j), x in W_lst.items():
row.append(i)
col.append(j)
data.append(x)
W._set_arrayXarray(np.array(row), np.array(col), np.array(data, dtype=dtype))
# W = sparse(W + W');
return W + W.transpose()
def M9(self) -> csr_matrix:
W_lst = {}
dtype = self.cache.dtype
shape = self.cache.shape
A_dict: dict = self.cache.A_dict
U_row_find: list = self.cache.U_row_find
U_col_find: list = self.cache.U_col_find
# W = zeros(size(G));
W: lil_matrix = lil_matrix(shape, dtype=dtype)
# N = size(G, 1);
# for i = 1:N
for i in range(shape[0]):
# J1 = find(U(i, :));
J1 = U_row_find[i][0]
# J2 = find(U(:, i));
J2 = U_col_find[i][0]
# for j1 = 1:length(J1)
for j1 in range(U_row_find[i][1]):
# for j2 = 1:length(J2)
for j2 in range(U_col_find[i][1]):
# k1 = J1(j1);
k1 = J1[j1]
# k2 = J2(j2);
k2 = J2[j2]
# if A(k1, k2) == 0 & & A(k2, k1) == 0
if not A_dict.get((k1, k2)) and not A_dict.get((k2, k1)):
# W(i, k1) = W(i, k1) + 1;
W_lst[(i, k1)] = W_lst.get((i, k1), 0) + 1
# W(i, k2) = W(i, k2) + 1;
W_lst[(i, k2)] = W_lst.get((i, k2), 0) + 1
# W(k1, k2) = W(k1, k2) + 1;
W_lst[(k1, k2)] = W_lst.get((k1, k2), 0) + 1
row, col, data = [], [], []
for (i, j), x in W_lst.items():
row.append(i)
col.append(j)
data.append(x)
W._set_arrayXarray(np.array(row), np.array(col), np.array(data, dtype=dtype))
# W = sparse(W + W');
return W + W.transpose()
def M10(self) -> csr_matrix:
W_lst = {}
dtype = self.cache.dtype
shape = self.cache.shape
A_dict: dict = self.cache.A_dict
U_row_find_AT: list = self.cache.U_row_find_AT
# W = zeros(size(G));
W: lil_matrix = lil_matrix(shape, dtype=dtype)
# N = size(G, 1);
# for i = 1:N
for i in range(shape[0]):
# J = find(U(i, :));
J = U_row_find_AT[i][0]
# for j1 = 1:length(J)
for j1 in range(U_row_find_AT[i][1]):
# for j2 = (j1+1):length(J)
for j2 in range(j1 + 1, U_row_find_AT[i][1]):
# k1 = J(j1);
k1 = J[j1]
# k2 = J(j2);
k2 = J[j2]
# if A(k1, k2) == 0 && A(k2, k1) == 0
if not A_dict.get((k1, k2)) and not A_dict.get((k2, k1)):
# W(i, k1) = W(i, k1) + 1;
W_lst[(i, k1)] = W_lst.get((i, k1), 0) + 1
# W(i, k2) = W(i, k2) + 1;
W_lst[(i, k2)] = W_lst.get((i, k2), 0) + 1
# W(k1, k2) = W(k1, k2) + 1;
W_lst[(k1, k2)] = W_lst.get((k1, k2), 0) + 1
row, col, data = [], [], []
for (i, j), x in W_lst.items():
row.append(i)
col.append(j)
data.append(x)
W._set_arrayXarray(np.array(row), np.array(col), np.array(data, dtype=dtype))
# W = sparse(W + W');
return W + W.transpose()
def M11(self) -> csr_matrix:
W_lst = {}
dtype = self.cache.dtype
shape = self.cache.shape
A_dict: dict = self.cache.A_dict
B_row_find: list = self.cache.B_row_find
U_row_find: list = self.cache.U_row_find
# W = zeros(size(G));
W: lil_matrix = lil_matrix(shape, dtype=dtype)
# N = size(G, 1);
# for i = 1:N
for i in range(shape[0]):
# J1 = find(B(i, :));
J1 = B_row_find[i][0]
# J2 = find(U(i, :));
J2 = U_row_find[i][0]
# for j1 = 1:length(J1)
for j1 in range(B_row_find[i][1]):
# for j2 = 1:length(J2)
for j2 in range(U_row_find[i][1]):
# k1 = J1(j1);
k1 = J1[j1]
# k2 = J2(j2);
k2 = J2[j2]
# if A(k1, k2) == 0 && A(k2, k1) == 0
if not A_dict.get((k1, k2)) and not A_dict.get((k2, k1)):
# W(i, k1) = W(i, k1) + 1;
W_lst[(i, k1)] = W_lst.get((i, k1), 0) + 1
# W(i, k2) = W(i, k2) + 1;
W_lst[(i, k2)] = W_lst.get((i, k2), 0) + 1
# W(k1, k2) = W(k1, k2) + 1;
W_lst[(k1, k2)] = W_lst.get((k1, k2), 0) + 1
row, col, data = [], [], []
for (i, j), x in W_lst.items():
row.append(i)
col.append(j)
data.append(x)
W._set_arrayXarray(np.array(row), np.array(col), np.array(data, dtype=dtype))
# W = sparse(W + W');
return W + W.transpose()
def M12(self) -> csr_matrix:
W_lst = {}
dtype = self.cache.dtype
shape = self.cache.shape
A_dict: dict = self.cache.A_dict
B_row_find: list = self.cache.B_row_find
U_row_find_AT: list = self.cache.U_row_find_AT
# W = zeros(size(G));
W: lil_matrix = lil_matrix(shape, dtype=dtype)
# N = size(G, 1);
# for i = 1:N
for i in range(shape[0]):
# J1 = find(B(i, :));
J1 = B_row_find[i][0]
# J2 = find(U(i, :));
J2 = U_row_find_AT[i][0]
# for j1 = 1:length(J1)
for j1 in range(B_row_find[i][1]):
# for j2 = 1:length(J2)
for j2 in range(U_row_find_AT[i][1]):
# k1 = J1(j1);
k1 = J1[j1]
# k2 = J2(j2);
k2 = J2[j2]
# if A(k1, k2) == 0 && A(k2, k1) == 0
if not A_dict.get((k1, k2)) and not A_dict.get((k2, k1)):
# W(i, k1) = W(i, k1) + 1;
W_lst[(i, k1)] = W_lst.get((i, k1), 0) + 1
# W(i, k2) = W(i, k2) + 1;
W_lst[(i, k2)] = W_lst.get((i, k2), 0) + 1
# W(k1, k2) = W(k1, k2) + 1;
W_lst[(k1, k2)] = W_lst.get((k1, k2), 0) + 1
row, col, data = [], [], []
for (i, j), x in W_lst.items():
row.append(i)
col.append(j)
data.append(x)
W._set_arrayXarray(np.array(row), np.array(col), np.array(data, dtype=dtype))
# W = sparse(W + W');
return W + W.transpose()
def M13(self) -> csr_matrix:
W_lst = {}
dtype = self.cache.dtype
shape = self.cache.shape
A_dict: dict = self.cache.A_dict
B_row_find: list = self.cache.B_row_find
# W = zeros(size(G));
W: lil_matrix = lil_matrix(shape, dtype=dtype)
# N = size(G, 1);
# for i = 1:N
for i in range(shape[0]):
# J = find(B(i, :));
J = B_row_find[i][0]
# for j1 = 1:length(J)
for j1 in range(B_row_find[i][1]):
# for j2 = (j1+1):length(J)
for j2 in range(j1 + 1, B_row_find[i][1]):
# k1 = J(j1);
k1 = J[j1]
# k2 = J(j2);
k2 = J[j2]
# if A(k1, k2) == 0 && A(k2, k1) == 0
if not A_dict.get((k1, k2)) and not A_dict.get((k2, k1)):
# W(i, k1) = W(i, k1) + 1;
W_lst[(i, k1)] = W_lst.get((i, k1), 0) + 1
# W(i, k2) = W(i, k2) + 1;
W_lst[(i, k2)] = W_lst.get((i, k2), 0) + 1
# W(k1, k2) = W(k1, k2) + 1;
W_lst[(k1, k2)] = W_lst.get((k1, k2), 0) + 1
row, col, data = [], [], []
for (i, j), x in W_lst.items():
row.append(i)
col.append(j)
data.append(x)
W._set_arrayXarray(np.array(row), np.array(col), np.array(data, dtype=dtype))
# W = sparse(W + W');
return W + W.transpose()
def Bifan(self) -> csr_matrix:
W_lst = {}
dtype = self.cache.dtype
shape = self.cache.shape
A_dict: dict = self.cache.A_dict
U_row_find: list = self.cache.U_row_find
# W = zeros(size(G));
W: lil_matrix = lil_matrix(shape, dtype=dtype)
# NA = ~A & ~A';
# [ai, aj] = find(triu(NA, 1));
NA_dict, ai, aj = {}, [], []
for i in range(0, shape[0]):
for j in range(i + 1, shape[0]):
if not A_dict.get((i, j)) and not A_dict.get((j, i)):
NA_dict[(i, j)] = 1
NA_dict[(j, i)] = 1
ai.append(i)
aj.append(j)
# for ind = 1:length(ai)
for ind in range(len(ai)):
# x = ai(ind);
x = ai[ind]
# y = aj(ind);
y = aj[ind]
# xout = find(U(x,:));
xout = U_row_find[x][0]
# yout = find(U(y,:));
yout = U_row_find[y][0]
# common = intersect(xout, yout);
common: list = np.intersect1d(xout, yout).tolist()
# nc = length(common)
nc = len(common)
# for i = 1:nc
for i in range(nc):
# for j = (i+1):nc
for j in range(i + 1, nc):
# w = common(i);
w = common[i]
# v = common(j);
v = common[j]
# if NA(w, v) == 1
if NA_dict.get((w, v)):
# W(x, y) = W(x, y) + 1;
W_lst[(x, y)] = W_lst.get((x, y), 0) + 1
# W(x, w) = W(x, w) + 1;
W_lst[(x, w)] = W_lst.get((x, w), 0) + 1
# W(x, v) = W(x, v) + 1;
W_lst[(x, v)] = W_lst.get((x, v), 0) + 1
# W(y, w) = W(y, w) + 1;
W_lst[(y, w)] = W_lst.get((y, w), 0) + 1
# W(y, v) = W(y, v) + 1;
W_lst[(y, v)] = W_lst.get((y, v), 0) + 1
# W(w, v) = W(w, v) + 1;
W_lst[(w, v)] = W_lst.get((w, v), 0) + 1
row, col, data = [], [], []
for (i, j), x in W_lst.items():
row.append(i)
col.append(j)
data.append(x)
W._set_arrayXarray(np.array(row), np.array(col), np.array(data, dtype=dtype))
# W = sparse(W + W');
return W + W.transpose()
def Edge(self) -> csc_matrix:
return self.cache.G_csr.copy()
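# Usage sketch (illustrative; assumes `adj` is a square adjacency matrix accepted by Cache):
#   W = MotifX(adj).M4()   # weighted co-occurrence in fully bidirectional triangles
# M1-M13 and Bifan each return a symmetric sparse motif-weight matrix (W = C + C'),
# while Edge() simply returns a copy of the underlying graph matrix.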
| 39.821429 | 101 | 0.422165 |
793ea4815e32580a920ae62b55656db101afb927 | 2,565 | py | Python | graph_plots/Data/data.py | DanShai/kivy-graph | 6537901d521247a13e186aaa8ecbaffdffdaf7ea | [
"MIT"
] | 3 | 2018-11-28T13:35:35.000Z | 2021-09-12T15:54:28.000Z | graph_plots/Data/data.py | DanShai/kivy-graph | 6537901d521247a13e186aaa8ecbaffdffdaf7ea | [
"MIT"
] | null | null | null | graph_plots/Data/data.py | DanShai/kivy-graph | 6537901d521247a13e186aaa8ecbaffdffdaf7ea | [
"MIT"
] | 1 | 2021-05-03T18:48:01.000Z | 2021-05-03T18:48:01.000Z | '''
@author: dan
'''
from __future__ import division
import numpy as np
from random import randint
from copy import deepcopy
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.preprocessing import MinMaxScaler
class Adata(object):
def make2DDatas(self):
datasets = [make_moons(n_samples=100, noise=0.1, random_state=13),
make_circles(n_samples=100, noise=0.1,
factor=0.5, random_state=123),
make_classification(n_features=3, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1)]
X, Y = datasets[0] # moons or discs
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
return {"X": X, "Y": Y}
def make3DDatas(self):
# rng = np.random.RandomState(2)
# xx += 2 * rng.uniform(size=xx.shape)
# ln = (xx,yy)
# datasets = [make_moons(n_samples=300, noise=0.3, random_state=0),
# make_circles(n_samples=300,noise=0.2, factor=0.5, random_state=1),
# ln]
datasets = [make_moons(n_samples=100, noise=0.1, random_state=123),
make_circles(n_samples=100, noise=0.1,
factor=0.5, random_state=123),
make_classification(n_features=3, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1)]
X, Y = datasets[2] # moons or discs
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
return {"X": X, "Y": Y}
def make1DDatas(self):
#X = np.random.normal(size=100)
#XX=np.arange(0, 105, 1)
#XX = np.random.randint(low=-50, high=50, size=1000)
X = np.linspace(-10., 11., num=100)
Y = (X - 2) * np.cos(2 * X)
#YY = XX**2 + XX - 1
        # Make sure that X is 2D
#N = 1000
#s = 10
#XX = s*np.random.rand(N)
#XX = np.sort(XX)
#YY = np.sin(XX) + 0.1*np.random.randn(N)
X = X[:, np.newaxis]
return {"X": X, "Y": Y}
def makeHistData(self):
a = np.random.randint(0, size=20, high=10)
Y, X = self.np_frequecy(a)
ldatas = zip(X, Y)
return ldatas
def np_frequecy(self, dras):
al = np.array(dras)
al = al.ravel()
al = np.sort(al, axis=None)
mx = max(al)
hist_bins = np.histogram(al, bins=np.linspace(
0, mx+1, num=(mx+2), dtype=np.int))
return hist_bins
| 32.468354 | 126 | 0.560234 |
793ea489f21d95b34570aed36b75cb5cc33c7755 | 2,072 | py | Python | b2validators/document.py | math-s/b2bit-validators | 35beb903eaf26524485a0b7ec9efc73c5f103fc5 | [
"MIT"
] | null | null | null | b2validators/document.py | math-s/b2bit-validators | 35beb903eaf26524485a0b7ec9efc73c5f103fc5 | [
"MIT"
] | null | null | null | b2validators/document.py | math-s/b2bit-validators | 35beb903eaf26524485a0b7ec9efc73c5f103fc5 | [
"MIT"
] | null | null | null | import re
from b2validators.exceptions import ValidationError
def validate_cnpj(value):
cnpj = re.sub("[^0-9]", "", value)
if len(cnpj) < 14:
raise ValidationError("O CNPJ precisa ter 14 dígitos.")
expected_cnpj = [int(digit) for digit in cnpj[:12] if digit.isdigit()]
cnpj_test = [int(digit) for digit in cnpj if digit.isdigit()]
weights = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
result = []
for idx, w in enumerate(weights):
x = w*expected_cnpj[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cnpj.append(0)
else:
expected_cnpj.append(11 - remainder)
weights = [6] + weights
result = []
for idx, w in enumerate(weights):
x = w*expected_cnpj[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cnpj.append(0)
else:
expected_cnpj.append(11 - remainder)
if cnpj_test != expected_cnpj:
raise ValidationError("CNPJ inválido")
return value
def validate_cpf(value):
cpf = re.sub("[^0-9]", "", value)
if len(cpf) != 11:
raise ValidationError("O CPF deve ter 11 dígitos.")
expected_cpf = [int(digit) for digit in cpf][:9]
cpf_test = [int(digit) for digit in cpf]
weights = [10, 9, 8, 7, 6, 5, 4, 3, 2]
result = []
for idx, w in enumerate(weights):
x = w*expected_cpf[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cpf.append(0)
else:
expected_cpf.append(11 - remainder)
weights = [11] + weights
result = []
for idx, w in enumerate(weights):
x = w*expected_cpf[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cpf.append(0)
else:
expected_cpf.append(11 - remainder)
if cpf_test != expected_cpf:
raise ValidationError("CPF inválido.")
return value
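# Both validators implement the Brazilian modulo-11 check-digit scheme: each verification
# digit is 11 - (weighted_sum % 11), or 0 when that remainder is below 2.
# Worked example (a commonly used valid test number, shown for illustration only):
#   validate_cpf("529.982.247-25")   # remainders 9 and 6 -> check digits 2 and 5, accepted
#   validate_cpf("529.982.247-00")   # raises ValidationError("CPF inválido.")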
| 26.909091 | 74 | 0.601834 |
793ea49f27f6fc24701cf4cb928bea13ae0cab0e | 9,568 | py | Python | Alignment/APEEstimation/test/testApeestimator_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | Alignment/APEEstimation/test/testApeestimator_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | Alignment/APEEstimation/test/testApeestimator_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | import os
import FWCore.ParameterSet.Config as cms
##
## Setup command line options
##
import FWCore.ParameterSet.VarParsing as VarParsing
import sys
options = VarParsing.VarParsing ('standard')
options.register('sample', 'wlnu', VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Input sample")
options.register('isTest', True, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.bool, "Test run")
# get and parse the command line arguments
if( hasattr(sys, "argv") ):
for args in sys.argv :
arg = args.split(',')
for val in arg:
val = val.split('=')
if(len(val)==2):
setattr(options,val[0], val[1])
print "Input sample: ", options.sample
print "Test run: ", options.isTest
##
## Process definition
##
process = cms.Process("ApeEstimator")
##
## Message Logger
##
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.categories.append('SectorBuilder')
process.MessageLogger.categories.append('ResidualErrorBinning')
process.MessageLogger.categories.append('HitSelector')
process.MessageLogger.categories.append('CalculateAPE')
process.MessageLogger.categories.append('ApeEstimator')
#process.MessageLogger.categories.append('TrackRefitter')
process.MessageLogger.categories.append('AlignmentTrackSelector')
process.MessageLogger.cerr.INFO.limit = 0
process.MessageLogger.cerr.default.limit = -1 # Do not use =0, else all error messages (except those listed below) are suppressed
process.MessageLogger.cerr.SectorBuilder = cms.untracked.PSet(limit = cms.untracked.int32(-1))
process.MessageLogger.cerr.HitSelector = cms.untracked.PSet(limit = cms.untracked.int32(-1))
process.MessageLogger.cerr.CalculateAPE = cms.untracked.PSet(limit = cms.untracked.int32(-1))
process.MessageLogger.cerr.ApeEstimator = cms.untracked.PSet(limit = cms.untracked.int32(-1))
process.MessageLogger.cerr.AlignmentTrackSelector = cms.untracked.PSet(limit = cms.untracked.int32(-1))
process.MessageLogger.cerr.FwkReport.reportEvery = 1000 ## really show only every 1000th
##
## Process options
##
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
)
##
## Input sample definition
##
isData1 = isData2 = False
isData = False
isQcd = isWlnu = isZmumu = isZtautau = isZmumu10 = isZmumu20 = False
isMc = False
if options.sample == 'data1':
isData1 = True
isData = True
elif options.sample == 'data2':
isData2 = True
isData = True
elif options.sample == 'qcd':
isQcd = True
isMc = True
elif options.sample == 'wlnu':
isWlnu = True
isMc = True
elif options.sample == 'zmumu':
isZmumu = True
isMc = True
elif options.sample == 'ztautau':
isZtautau = True
isMc = True
elif options.sample == 'zmumu10':
isZmumu10 = True
isMc = True
elif options.sample == 'zmumu20':
isZmumu20 = True
isMc = True
else:
    print 'ERROR --- incorrect data sample: ', options.sample
exit(8888)
##
## Input Files
##
if isData1:
process.load("Alignment.APEEstimation.samples.Data_TkAlMuonIsolated_Run2011A_May10ReReco_ApeSkim_cff")
elif isData2:
process.load("Alignment.APEEstimationsamples.Data_TkAlMuonIsolated_Run2011A_PromptV4_ApeSkim_cff")
elif isQcd:
process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer11_qcd_ApeSkim_cff")
elif isWlnu:
process.load("Alignment.APEEstimation.samples.Mc_WJetsToLNu_74XTest_ApeSkim_cff")
elif isZmumu10:
process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer11_zmumu10_ApeSkim_cff")
elif isZmumu20:
process.load("Alignment.APEEstimation.samples.Mc_TkAlMuonIsolated_Summer11_zmumu20_ApeSkim_cff")
##
## Number of Events (should be after input file)
##
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
if options.isTest: process.maxEvents.input = 10001
##
## Check run and event numbers for Dublicates --- only for real data
##
#process.source.duplicateCheckMode = cms.untracked.string("noDuplicateCheck")
#process.source.duplicateCheckMode = cms.untracked.string("checkEachFile")
process.source.duplicateCheckMode = cms.untracked.string("checkEachRealDataFile")
#process.source.duplicateCheckMode = cms.untracked.string("checkAllFilesOpened") # default value
##
## Whole Refitter Sequence
##
process.load("Alignment.APEEstimation.TrackRefitter_38T_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_design', '')
##### To be used when running on Phys14MC with a CMSSW version > 72X
#process.GlobalTag.toGet = cms.VPSet(
# cms.PSet(
# record = cms.string("BeamSpotObjectsRcd"),
# tag = cms.string("Realistic8TeVCollisions_START50_V13_v1_mc"),
# connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
# )
#)
print "Using global tag "+process.GlobalTag.globaltag._value
##
## New pixel templates
##
process.GlobalTag.toGet = cms.VPSet(
cms.PSet(
record = cms.string("SiPixelTemplateDBObjectRcd"),
tag = cms.string("SiPixelTemplateDBObject_38T_v3_mc"),
connect = cms.untracked.string("frontier://FrontierProd/CMS_CONDITIONS"),
)
)
##
## Alignment and APE
##
import CalibTracker.Configuration.Common.PoolDBESSource_cfi
## Choose Alignment (w/o touching APE)
if isMc:
process.myTrackerAlignment = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
connect = 'frontier://FrontierProd/CMS_CONDITIONS', # or your sqlite file
toGet = [
cms.PSet(
record = cms.string('TrackerAlignmentRcd'),
tag = cms.string('TrackerIdealGeometry210_mc') # 'TrackerAlignment_2009_v2_offline'
),
],
)
    process.es_prefer_trackerAlignment = cms.ESPrefer("PoolDBESSource","myTrackerAlignment")
if isData:
# Recent geometry
process.myTrackerAlignment = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
connect = 'frontier://FrontierProd/CMS_CONDITIONS',
toGet = [
cms.PSet(
record = cms.string('TrackerAlignmentRcd'),
tag = cms.string('TrackerAlignment_GR10_v6_offline'),
),
],
)
process.es_prefer_trackerAlignment = cms.ESPrefer("PoolDBESSource","myTrackerAlignment")
# Kinks and bows
process.myTrackerAlignmentKinksAndBows = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
connect = 'frontier://FrontierProd/CMS_CONDITIONS',
toGet = [
cms.PSet(
record = cms.string('TrackerSurfaceDeformationRcd'),
tag = cms.string('TrackerSurfaceDeformations_v1_offline'),
),
],
)
process.es_prefer_trackerAlignmentKinksAndBows = cms.ESPrefer("PoolDBESSource","myTrackerAlignmentKinksAndBows")
## APE (set to zero)
process.myTrackerAlignmentErr = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone(
connect = 'frontier://FrontierProd/CMS_CONDITIONS',
toGet = [
cms.PSet(
record = cms.string('TrackerAlignmentErrorExtendedRcd'),
tag = cms.string('TrackerIdealGeometryErrorsExtended210_mc')
),
],
)
process.es_prefer_trackerAlignmentErr = cms.ESPrefer("PoolDBESSource","myTrackerAlignmentErr")
##
## Trigger Selection
##
process.load("Alignment.APEEstimation.TriggerSelection_cff")
##
## ApeEstimator
##
from Alignment.APEEstimation.ApeEstimator_cff import *
process.ApeEstimator1 = ApeEstimator.clone(
#~ tjTkAssociationMapTag = "TrackRefitterHighPurityForApeEstimator",
tjTkAssociationMapTag = "TrackRefitterForApeEstimator",
maxTracksPerEvent = 0,
applyTrackCuts = False,
Sectors = RecentSectors,
analyzerMode = False,
calculateApe = True
)
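# The HitSelector cut lists below are emptied on purpose, presumably to disable
# these hit-quality selections so that every hit enters the APE calculation.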
process.ApeEstimator1.HitSelector.width = []
process.ApeEstimator1.HitSelector.maxIndex = []
process.ApeEstimator1.HitSelector.widthProj = []
process.ApeEstimator1.HitSelector.widthDiff = []
process.ApeEstimator1.HitSelector.edgeStrips = []
process.ApeEstimator1.HitSelector.sOverN = []
process.ApeEstimator1.HitSelector.maxCharge = []
process.ApeEstimator1.HitSelector.chargeOnEdges = []
process.ApeEstimator1.HitSelector.probX = []
process.ApeEstimator1.HitSelector.phiSensX = []
process.ApeEstimator1.HitSelector.phiSensY = []
process.ApeEstimator1.HitSelector.errXHit = []
process.ApeEstimator1.HitSelector.chargePixel = []
process.ApeEstimator1.HitSelector.widthX = []
process.ApeEstimator1.HitSelector.widthY = []
process.ApeEstimator1.HitSelector.logClusterProbability = []
process.ApeEstimator1.HitSelector.isOnEdge = []
process.ApeEstimator1.HitSelector.qBin = []
process.ApeEstimator2 = process.ApeEstimator1.clone(
Sectors = ValidationSectors,
analyzerMode = True,
calculateApe = False,
)
process.ApeEstimator3 = process.ApeEstimator2.clone(
zoomHists = False,
)
##
## Output File Configuration
##
outputName = os.environ['CMSSW_BASE'] + '/src/Alignment/APEEstimation/hists/'
if options.isTest:
outputName = outputName + 'test_'
outputName = outputName + options.sample + '.root'
process.TFileService = cms.Service("TFileService",
fileName = cms.string(outputName),
closeFileFast = cms.untracked.bool(True)
)
##
## Path
##
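# The path runs the trigger selection and the high-purity refit sequences first;
# the three ApeEstimator modules (grouped with '+') then all analyse the refitted tracks.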
process.p = cms.Path(
process.TriggerSelectionSequence*
process.RefitterHighPuritySequence*
(process.ApeEstimator1+
process.ApeEstimator2+
process.ApeEstimator3
)
)
| 30.56869 | 134 | 0.749373 |
793ea5500fe5a11ec6ba119f9343eda150fb0c14 | 1,048 | py | Python | Line.py | TheoXiong7/chessboard-processing | cce2b03700881c07787b0def9a23e8606325939c | [
"MIT"
] | null | null | null | Line.py | TheoXiong7/chessboard-processing | cce2b03700881c07787b0def9a23e8606325939c | [
"MIT"
] | null | null | null | Line.py | TheoXiong7/chessboard-processing | cce2b03700881c07787b0def9a23e8606325939c | [
"MIT"
] | null | null | null | import numpy as np
class Line:
def __init__(self,x1,x2,y1,y2):
'''
Creates a Line object
'''
# Endpoints
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
# Change in x and y
self.dx = self.x2 - self.x1
self.dy = self.y2 - self.y1
# Orientation
if abs(self.dx) > abs(self.dy):
self.orientation = 'horizontal'
else:
self.orientation = 'vertical'
def find_intersection(self,other):
'''
Finds intersection of this line and other. One line must be horizontal
and the other must be vertical
'''
# Determinant for finding points of intersection
x = ((self.x1*self.y2 - self.y1*self.x2)*(other.x1-other.x2) - (self.x1-self.x2)*(other.x1*other.y2 - other.y1*other.x2))/ ((self.x1-self.x2)*(other.y1-other.y2) - (self.y1-self.y2)*(other.x1-other.x2))
y = ((self.x1*self.y2 - self.y1*self.x2)*(other.y1-other.y2) - (self.y1-self.y2)*(other.x1*other.y2 - other.y1*other.x2))/ ((self.x1-self.x2)*(other.y1-other.y2) - (self.y1-self.y2)*(other.x1-other.x2))
x = int(x)
y = int(y)
return x,y
| 24.372093 | 204 | 0.640267 |
793ea67ca80fd354ec4d3482a5408825a4ed8031 | 5,383 | py | Python | build/PureCloudPlatformClientV2/models/bu_async_schedule_run_response.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | libs/PureCloudPlatformClientV2/models/bu_async_schedule_run_response.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | libs/PureCloudPlatformClientV2/models/bu_async_schedule_run_response.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class BuAsyncScheduleRunResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuAsyncScheduleRunResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'status': 'str',
'operation_id': 'str',
'result': 'BuScheduleRun'
}
self.attribute_map = {
'status': 'status',
'operation_id': 'operationId',
'result': 'result'
}
self._status = None
self._operation_id = None
self._result = None
@property
def status(self):
"""
Gets the status of this BuAsyncScheduleRunResponse.
The status of the operation
:return: The status of this BuAsyncScheduleRunResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this BuAsyncScheduleRunResponse.
The status of the operation
:param status: The status of this BuAsyncScheduleRunResponse.
:type: str
"""
allowed_values = ["Processing", "Complete", "Canceled", "Error"]
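        # Values outside allowed_values (e.g. enum members added in a newer API
        # version) are mapped to a sentinel string instead of raising an error.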
if status.lower() not in map(str.lower, allowed_values):
# print("Invalid value for status -> " + status)
self._status = "outdated_sdk_version"
else:
self._status = status
@property
def operation_id(self):
"""
Gets the operation_id of this BuAsyncScheduleRunResponse.
The ID for the operation
:return: The operation_id of this BuAsyncScheduleRunResponse.
:rtype: str
"""
return self._operation_id
@operation_id.setter
def operation_id(self, operation_id):
"""
Sets the operation_id of this BuAsyncScheduleRunResponse.
The ID for the operation
:param operation_id: The operation_id of this BuAsyncScheduleRunResponse.
:type: str
"""
self._operation_id = operation_id
@property
def result(self):
"""
Gets the result of this BuAsyncScheduleRunResponse.
The result of the operation. Null unless status == Complete
:return: The result of this BuAsyncScheduleRunResponse.
:rtype: BuScheduleRun
"""
return self._result
@result.setter
def result(self, result):
"""
Sets the result of this BuAsyncScheduleRunResponse.
The result of the operation. Null unless status == Complete
:param result: The result of this BuAsyncScheduleRunResponse.
:type: BuScheduleRun
"""
self._result = result
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
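        # Recursively serialise nested swagger models, lists and dicts into plain values.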
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.786096 | 81 | 0.582761 |
793ea69aff6bb07f73aff3e7089f7cc2a617fa16 | 15,148 | py | Python | salt/modules/ciscoconfparse_mod.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | salt/modules/ciscoconfparse_mod.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 1 | 2017-07-10T21:44:39.000Z | 2017-07-10T21:44:39.000Z | salt/modules/ciscoconfparse_mod.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | """
Execution module for `ciscoconfparse <http://www.pennington.net/py/ciscoconfparse/index.html>`_
.. versionadded:: 2019.2.0
This module can be used for basic configuration parsing, audit or validation
for a variety of network platforms having Cisco IOS style configuration (one
space indentation), including: Cisco IOS, Cisco Nexus, Cisco IOS-XR,
Cisco IOS-XR, Cisco ASA, Arista EOS, Brocade, HP Switches, Dell PowerConnect
Switches, or Extreme Networks devices. In newer versions, ``ciscoconfparse``
provides support for brace-delimited configuration style as well, for platforms
such as: Juniper Junos, Palo Alto, or F5 Networks.
See http://www.pennington.net/py/ciscoconfparse/index.html for further details.
:depends: ciscoconfparse
This module depends on the Python library with the same name,
``ciscoconfparse`` - to install execute: ``pip install ciscoconfparse``.
"""
from salt.exceptions import SaltException
# Import Salt modules
try:
import ciscoconfparse
HAS_CISCOCONFPARSE = True
except ImportError:
HAS_CISCOCONFPARSE = False
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
__virtualname__ = "ciscoconfparse"
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
if HAS_CISCOCONFPARSE:
return HAS_CISCOCONFPARSE
else:
return (False, "Missing dependency ciscoconfparse")
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
def _get_ccp(config=None, config_path=None, saltenv="base"):
    """
    Build a ``ciscoconfparse.CiscoConfParse`` object from raw configuration text,
    or from a file path fetched through the Salt fileserver.
    """
if config_path:
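        # Fetch the file through Salt's cp module so that salt://, http(s)://,
        # ftp://, s3:// and similar fileserver URIs are all supported.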
config = __salt__["cp.get_file_str"](config_path, saltenv=saltenv)
if config is False:
raise SaltException("{} is not available".format(config_path))
if isinstance(config, str):
config = config.splitlines()
ccp = ciscoconfparse.CiscoConfParse(config)
return ccp
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
def find_objects(config=None, config_path=None, regex=None, saltenv="base"):
"""
Return all the line objects that match the expression in the ``regex``
argument.
.. warning::
This function is mostly valuable when invoked from other Salt
components (i.e., execution modules, states, templates etc.). For CLI
usage, please consider using
:py:func:`ciscoconfparse.find_lines <salt.ciscoconfparse_mod.find_lines>`
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
regex
The regular expression to match the lines against.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
Usage example:
.. code-block:: python
objects = __salt__['ciscoconfparse.find_objects'](config_path='salt://path/to/config.txt',
regex='Gigabit')
for obj in objects:
print(obj.text)
"""
ccp = _get_ccp(config=config, config_path=config_path, saltenv=saltenv)
lines = ccp.find_objects(regex)
return lines
def find_lines(config=None, config_path=None, regex=None, saltenv="base"):
"""
Return all the lines (as text) that match the expression in the ``regex``
argument.
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
regex
The regular expression to match the lines against.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' ciscoconfparse.find_lines config_path=https://bit.ly/2mAdq7z regex='ip address'
Output example:
.. code-block:: text
cisco-ios-router:
- ip address dhcp
- ip address 172.20.0.1 255.255.255.0
- no ip address
"""
lines = find_objects(
config=config, config_path=config_path, regex=regex, saltenv=saltenv
)
return [line.text for line in lines]
def find_objects_w_child(
config=None,
config_path=None,
parent_regex=None,
child_regex=None,
ignore_ws=False,
saltenv="base",
):
"""
Parse through the children of all parent lines matching ``parent_regex``,
and return a list of child objects, which matched the ``child_regex``.
.. warning::
This function is mostly valuable when invoked from other Salt
components (i.e., execution modules, states, templates etc.). For CLI
usage, please consider using
:py:func:`ciscoconfparse.find_lines_w_child <salt.ciscoconfparse_mod.find_lines_w_child>`
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
parent_regex
The regular expression to match the parent lines against.
child_regex
The regular expression to match the child lines against.
ignore_ws: ``False``
Whether to ignore the white spaces.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
Usage example:
.. code-block:: python
objects = __salt__['ciscoconfparse.find_objects_w_child'](config_path='https://bit.ly/2mAdq7z',
parent_regex='line con',
child_regex='stopbits')
for obj in objects:
print(obj.text)
"""
ccp = _get_ccp(config=config, config_path=config_path, saltenv=saltenv)
lines = ccp.find_objects_w_child(parent_regex, child_regex, ignore_ws=ignore_ws)
return lines
def find_lines_w_child(
config=None,
config_path=None,
parent_regex=None,
child_regex=None,
ignore_ws=False,
saltenv="base",
):
r"""
Return a list of parent lines (as text) matching the regular expression
``parent_regex`` that have children lines matching ``child_regex``.
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
parent_regex
The regular expression to match the parent lines against.
child_regex
The regular expression to match the child lines against.
ignore_ws: ``False``
Whether to ignore the white spaces.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' ciscoconfparse.find_lines_w_child config_path=https://bit.ly/2mAdq7z parent_line='line con' child_line='stopbits'
salt '*' ciscoconfparse.find_lines_w_child config_path=https://bit.ly/2uIRxau parent_regex='ge-(.*)' child_regex='unit \d+'
"""
lines = find_objects_w_child(
config=config,
config_path=config_path,
parent_regex=parent_regex,
child_regex=child_regex,
ignore_ws=ignore_ws,
saltenv=saltenv,
)
return [line.text for line in lines]
def find_objects_wo_child(
config=None,
config_path=None,
parent_regex=None,
child_regex=None,
ignore_ws=False,
saltenv="base",
):
"""
Return a list of parent ``ciscoconfparse.IOSCfgLine`` objects, which matched
the ``parent_regex`` and whose children did *not* match ``child_regex``.
Only the parent ``ciscoconfparse.IOSCfgLine`` objects will be returned. For
simplicity, this method only finds oldest ancestors without immediate
children that match.
.. warning::
This function is mostly valuable when invoked from other Salt
components (i.e., execution modules, states, templates etc.). For CLI
usage, please consider using
:py:func:`ciscoconfparse.find_lines_wo_child <salt.ciscoconfparse_mod.find_lines_wo_child>`
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
parent_regex
The regular expression to match the parent lines against.
child_regex
The regular expression to match the child lines against.
ignore_ws: ``False``
Whether to ignore the white spaces.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
Usage example:
.. code-block:: python
objects = __salt__['ciscoconfparse.find_objects_wo_child'](config_path='https://bit.ly/2mAdq7z',
parent_regex='line con',
child_regex='stopbits')
for obj in objects:
print(obj.text)
"""
ccp = _get_ccp(config=config, config_path=config_path, saltenv=saltenv)
lines = ccp.find_objects_wo_child(parent_regex, child_regex, ignore_ws=ignore_ws)
return lines
def find_lines_wo_child(
config=None,
config_path=None,
parent_regex=None,
child_regex=None,
ignore_ws=False,
saltenv="base",
):
"""
Return a list of parent ``ciscoconfparse.IOSCfgLine`` lines as text, which
matched the ``parent_regex`` and whose children did *not* match ``child_regex``.
Only the parent ``ciscoconfparse.IOSCfgLine`` text lines will be returned.
For simplicity, this method only finds oldest ancestors without immediate
children that match.
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
parent_regex
The regular expression to match the parent lines against.
child_regex
The regular expression to match the child lines against.
ignore_ws: ``False``
Whether to ignore the white spaces.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' ciscoconfparse.find_lines_wo_child config_path=https://bit.ly/2mAdq7z parent_line='line con' child_line='stopbits'
"""
lines = find_objects_wo_child(
config=config,
config_path=config_path,
parent_regex=parent_regex,
child_regex=child_regex,
ignore_ws=ignore_ws,
saltenv=saltenv,
)
return [line.text for line in lines]
def filter_lines(
config=None, config_path=None, parent_regex=None, child_regex=None, saltenv="base"
):
"""
Return a list of detailed matches, for the configuration blocks (parent-child
relationship) whose parent respects the regular expressions configured via
the ``parent_regex`` argument, and the child matches the ``child_regex``
regular expression. The result is a list of dictionaries with the following
keys:
- ``match``: a boolean value that tells whether ``child_regex`` matched any
children lines.
- ``parent``: the parent line (as text).
- ``child``: the child line (as text). If no child line matched, this field
will be ``None``.
Note that the return list contains the elements that matched the parent
condition, the ``parent_regex`` regular expression. Therefore, the ``parent``
field will always have a valid value, while ``match`` and ``child`` may
    default to ``False`` and ``None`` respectively when there is no child match.
CLI Example:
.. code-block:: bash
salt '*' ciscoconfparse.filter_lines config_path=https://bit.ly/2mAdq7z parent_regex='Gigabit' child_regex='shutdown'
Example output (for the example above):
.. code-block:: python
[
{
'parent': 'interface GigabitEthernet1',
'match': False,
'child': None
},
{
'parent': 'interface GigabitEthernet2',
'match': True,
'child': ' shutdown'
},
{
'parent': 'interface GigabitEthernet3',
'match': True,
'child': ' shutdown'
}
]
"""
ret = []
ccp = _get_ccp(config=config, config_path=config_path, saltenv=saltenv)
parent_lines = ccp.find_objects(parent_regex)
for parent_line in parent_lines:
child_lines = parent_line.re_search_children(child_regex)
if child_lines:
for child_line in child_lines:
ret.append(
{
"match": True,
"parent": parent_line.text,
"child": child_line.text,
}
)
else:
ret.append({"match": False, "parent": parent_line.text, "child": None})
return ret
| 33.365639 | 131 | 0.610378 |
793ea701b1fe2c5bdd74adb61e8865a1413a48f6 | 10,214 | py | Python | src/reader/chemdner_corpus.py | admukhty/IHP | b812938582c77d7ab275f8ea04316a38b576323c | [
"MIT"
] | null | null | null | src/reader/chemdner_corpus.py | admukhty/IHP | b812938582c77d7ab275f8ea04316a38b576323c | [
"MIT"
] | null | null | null | src/reader/chemdner_corpus.py | admukhty/IHP | b812938582c77d7ab275f8ea04316a38b576323c | [
"MIT"
] | null | null | null | import codecs
import time
import sys
import logging
import argparse
import pickle
from operator import itemgetter
from pycorenlp import StanfordCoreNLP
import progressbar as pb
from subprocess import check_output
from text.corpus import Corpus
from text.document import Document
from config import config
class ChemdnerCorpus(Corpus):
"""Chemdner corpus from BioCreative IV and V"""
def __init__(self, corpusdir, **kwargs):
super(ChemdnerCorpus, self).__init__(corpusdir, **kwargs)
self.subtypes = ["IDENTIFIER", "MULTIPLE", "FAMILY", "FORMULA", "SYSTEMATIC", "ABBREVIATION", "TRIVIAL"]
def load_corpus(self, corenlpserver, process=True):
"""Load the CHEMDNER corpus file on the dir element"""
# open filename and parse lines
total_lines = sum(1 for line in open(self.path))
widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.Timer()]
pbar = pb.ProgressBar(widgets=widgets, maxval=total_lines).start()
n_lines = 1
time_per_abs = []
with codecs.open(self.path, 'r', "utf-8") as inputfile:
for line in inputfile:
t = time.time()
# each line is PMID title abs
tsv = line.split('\t')
doctext = tsv[2].strip().replace("<", "(").replace(">", ")")
newdoc = Document(doctext, process=False,
did=tsv[0], title=tsv[1].strip())
newdoc.sentence_tokenize("biomedical")
if process:
newdoc.process_document(corenlpserver, "biomedical")
self.documents[newdoc.did] = newdoc
n_lines += 1
abs_time = time.time() - t
time_per_abs.append(abs_time)
pbar.update(n_lines+1)
pbar.finish()
abs_avg = sum(time_per_abs)*1.0/len(time_per_abs)
logging.info("average time per abstract: %ss" % abs_avg)
def load_annotations(self, ann_dir, entitytype="chemical"):
# total_lines = sum(1 for line in open(ann_dir))
# n_lines = 1
logging.info("loading annotations file...")
with codecs.open(ann_dir, 'r', "utf-8") as inputfile:
for line in inputfile:
# logging.info("processing annotation %s/%s" % (n_lines, total_lines))
pmid, doct, start, end, text, chemt = line.strip().split('\t')
#pmid = "PMID" + pmid
if pmid in self.documents:
if entitytype == "all" or entitytype == "chemical" or entitytype == chemt:
self.documents[pmid].tag_chemdner_entity(int(start), int(end),
chemt, text=text, doct=doct)
else:
logging.info("%s not found!" % pmid)
def write_chemdner_files(results, models, goldset, ths, rules):
    """Write results files for the CHEMDNER CEMP and CPD tasks."""
print("saving results to {}".format(results.path + ".tsv"))
with codecs.open(results.path + ".tsv", 'w', 'utf-8') as outfile:
cpdlines, max_entities = results.corpus.write_chemdner_results(models, outfile, ths, rules)
cpdlines = sorted(cpdlines, key=itemgetter(2))
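    # CPD output: each line holds an id built from the first two fields, a binary
    # prediction, a rank, and a confidence (the entity count normalised by the
    # maximum, or 1 when no entities were found).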
with open(results.path + "_cpd.tsv", "w") as cpdfile:
for i, l in enumerate(cpdlines):
if l[2] == 0:
cpdfile.write("{}_{}\t0\t{}\t1\n".format(l[0], l[1], i+1))
else:
cpdfile.write("{}_{}\t1\t{}\t{}\n".format(l[0], l[1], i+1, l[2]*1.0/max_entities))
def run_chemdner_evaluation(goldstd, results, format=""):
"""
Use the official BioCreative evaluation script (should be installed in the system)
:param goldstd: Gold standard file path
:param results: Results file path
:param: format option
:return: Output of the evaluation script
"""
cem_command = ["bc-evaluate", results, goldstd]
if format != "":
cem_command = cem_command[:1] + [format] + cem_command[1:]
r = check_output(cem_command)
return r
def get_chemdner_gold_ann_set(goldann="CHEMDNER/CHEMDNER_TEST_ANNOTATION/chemdner_ann_test_13-09-13.txt"):
"""
Load the CHEMDNER annotations to a set
:param goldann: Path to CHEMDNER annotation file
:return: Set of gold standard annotations
"""
with codecs.open(goldann, 'r', 'utf-8') as goldfile:
gold = goldfile.readlines()
goldlist = []
for line in gold:
#pmid, T/A, start, end
x = line.strip().split('\t')
goldlist.append((x[0], x[1] + ":" + x[2] + ":" + x[3], '1'))
#print goldlist[0:2]
goldset = set(goldlist)
return goldset, None
def main():
start_time = time.time()
parser = argparse.ArgumentParser(description='')
parser.add_argument("actions", default="classify", help="Actions to be performed.",
choices=["load_corpus"])
parser.add_argument("--goldstd", default="", dest="goldstd", nargs="+",
help="Gold standard to be used. Will override corpus, annotations",
choices=config.paths.keys())
parser.add_argument("--submodels", default="", nargs='+', help="sub types of classifiers"),
parser.add_argument("-i", "--input", dest="input", action="store",
default='''Administration of a higher dose of indinavir should be \\
considered when coadministering with megestrol acetate.''',
help="Text to classify.")
parser.add_argument("--corpus", dest="corpus", nargs=2,
default=["chemdner", "CHEMDNER/CHEMDNER_SAMPLE_JUNE25/chemdner_sample_abstracts.txt"],
help="format path")
parser.add_argument("--annotations", dest="annotations")
parser.add_argument("--tag", dest="tag", default="0", help="Tag to identify the text.")
parser.add_argument("--models", dest="models", help="model destination path, without extension")
parser.add_argument("--entitytype", dest="etype", help="type of entities to be considered", default="all")
parser.add_argument("--doctype", dest="doctype", help="type of document to be considered", default="all")
parser.add_argument("--annotated", action="store_true", default=False, dest="annotated",
help="True if the input has <entity> tags.")
parser.add_argument("-o", "--output", "--format", dest="output",
nargs=2, help="format path; output formats: xml, html, tsv, text, chemdner.")
parser.add_argument("--crf", dest="crf", help="CRF implementation", default="stanford",
choices=["stanford", "crfsuite"])
parser.add_argument("--log", action="store", dest="loglevel", default="WARNING", help="Log level")
parser.add_argument("--kernel", action="store", dest="kernel", default="svmtk", help="Kernel for relation extraction")
parser.add_argument("--pairtype1", action="store", dest="pairtype1")
parser.add_argument("--pairtype2", action="store", dest="pairtype2")
options = parser.parse_args()
# set logger
numeric_level = getattr(logging, options.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.loglevel)
while len(logging.root.handlers) > 0:
logging.root.removeHandler(logging.root.handlers[-1])
logging_format = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s:%(funcName)s %(message)s'
logging.basicConfig(level=numeric_level, format=logging_format)
logging.getLogger().setLevel(numeric_level)
logging.info("Processing action {0} on {1}".format(options.actions, options.goldstd))
# set configuration variables based on the goldstd option if the corpus has a gold standard,
# or on corpus and annotation options
# pre-processing options
if options.actions == "load_corpus":
if len(options.goldstd) > 1:
print("load only one corpus each time")
sys.exit()
options.goldstd = options.goldstd[0]
corpus_format = config.paths[options.goldstd]["format"]
corpus_path = config.paths[options.goldstd]["text"]
corpus_ann = config.paths[options.goldstd]["annotations"]
corenlp_client = StanfordCoreNLP('http://localhost:9000')
if corpus_format == "chemdner":
corpus = ChemdnerCorpus(corpus_path)
#corpus.save()
if options.goldstd == "chemdner_traindev":
# merge chemdner_train and chemdner_dev
tpath = config.paths["chemdner_train"]["corpus"]
tcorpus = pickle.load(open(tpath, 'rb'))
dpath = config.paths["chemdner_dev"]["corpus"]
dcorpus = pickle.load(open(dpath, 'rb'))
corpus.documents.update(tcorpus.documents)
corpus.documents.update(dcorpus.documents)
elif options.goldstd == "cemp_test_divide":
logging.info("loading corpus %s" % corpus_path)
corpus.load_corpus(corenlp_client, process=False)
            docs = list(corpus.documents.keys())  # list() so the keys can be sliced into folds below
step = int(len(docs)/10)
logging.info("step: {}".format(str(step)))
for i in range(10):
logging.info("processing cemp_test{}: {} - {}".format(str(i), int(step*i), int(step*i+step)))
sub_corpus_path = config.paths["cemp_test" + str(i)]["corpus"]
sub_corpus = ChemdnerCorpus(sub_corpus_path)
sub_docs = docs[int(step*i):int(step*i+step)]
for di, d in enumerate(sub_docs):
logging.info("fold {}: processing {}/{}".format(i, di, step))
sub_corpus.documents[d] = corpus.documents[d]
del corpus.documents[d]
sub_corpus.documents[d].process_document(corenlp_client)
sub_corpus.save()
if __name__ == "__main__":
main() | 51.847716 | 123 | 0.593205 |
793ea7fe5fa58fc77b16c972f51062c8574a14db | 8,272 | py | Python | docs/conf.py | bwall/yara | 43ffdcc226325e208d01172cd27870d3e046d79f | [
"Apache-2.0"
] | 6 | 2019-09-02T14:21:04.000Z | 2021-10-01T01:54:46.000Z | docs/conf.py | Neo23x0/yara | 7cf4b24b052d9eede9b8288c0c250b58efaa13e1 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | Neo23x0/yara | 7cf4b24b052d9eede9b8288c0c250b58efaa13e1 | [
"Apache-2.0"
] | 8 | 2016-02-07T08:11:18.000Z | 2021-09-01T13:22:51.000Z | # -*- coding: utf-8 -*-
#
# yara documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 11:04:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'yara'
copyright = u'2014-2015, Victor M. Alvarez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.4'
# The full version, including alpha/beta/rc tags.
release = '3.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'yaradoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'yara.tex', u'yara Documentation',
u'Victor M. Alvarez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'yara', u'yara Documentation',
[u'Victor M. Alvarez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'yara', u'yara Documentation',
u'Victor M. Alvarez', 'yara', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.865672 | 79 | 0.717239 |
793ea81f14931c4897720d87aa1f572077adc71a | 838 | py | Python | tests/test_data.py | keggsmurph21/jam | 1aee2c56961d58def4e1f24a3dea824ade04f02d | [
"MIT"
] | 1 | 2020-10-16T03:31:25.000Z | 2020-10-16T03:31:25.000Z | tests/test_data.py | keggsmurph21/jam | 1aee2c56961d58def4e1f24a3dea824ade04f02d | [
"MIT"
] | null | null | null | tests/test_data.py | keggsmurph21/jam | 1aee2c56961d58def4e1f24a3dea824ade04f02d | [
"MIT"
] | null | null | null | nested = {
"json": ["rigid", "better for data interchange"],
"toml": ["simple and readable", "easier to implement"],
"yaml": ["slim and flexible", "better for configuration"],
"object": {
"key": "value",
"array": [{"null_value": None}, {"boolean": True}, {"integer": 1}],
},
"paragraph": "Blank lines denote\nparagraph breaks\n",
"content": "Or we\ncan auto\nconvert line breaks\nto save space",
}
flatter = {
"json": ["rigid", "better for data interchange"],
"toml": ["simple and readable", "easier to implement"],
"yaml": ["slim and flexible", "better for configuration"],
"object": {
"key": "value",
"array": "omitted",
},
"paragraph": "Blank lines denote\nparagraph breaks\n",
"content": "Or we\ncan auto\nconvert line breaks\nto save space",
}
| 34.916667 | 75 | 0.593079 |
793ea927834cdb96d321e16ae1409fa9b5ece1d3 | 5,536 | py | Python | examples/examples.py | gjdv/DegiroAPI | 578b97b1add42f6d5e031be8eb034314c51ec30f | [
"MIT"
] | null | null | null | examples/examples.py | gjdv/DegiroAPI | 578b97b1add42f6d5e031be8eb034314c51ec30f | [
"MIT"
] | null | null | null | examples/examples.py | gjdv/DegiroAPI | 578b97b1add42f6d5e031be8eb034314c51ec30f | [
"MIT"
] | 1 | 2022-01-23T11:15:17.000Z | 2022-01-23T11:15:17.000Z | import degiroapi
from degiroapi.product import Product
from degiroapi.order import Order
from degiroapi.utils import pretty_json
from datetime import datetime, timedelta
# login
degiro = degiroapi.DeGiro()
degiro.login("username", "password")
# login with 2fa
otp = input("Input Google Authenticator password:")
degiro.login("username", "password", oneTimePassword=otp.strip())
# logout
degiro.logout()
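# NOTE: the snippets below are independent usage examples; they assume an active
# session, so run them before calling degiro.logout() if executing this file top to bottom.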
# print the current cash funds
cashfunds = degiro.getdata(degiroapi.Data.Type.CASHFUNDS)
for data in cashfunds:
print(data)
# print the current portfolio (True to filter Products with size 0, False to show all)
portfolio = degiro.getdata(degiroapi.Data.Type.PORTFOLIO, True)
for data in portfolio:
print(data)
# download portfolio as csv file
csv_portfolio = degiro.download_csv('PORTFOLIO', datetime(2019, 1, 1), datetime.now())
with open('portfolio.csv', 'w') as file:
file.write(csv_portfolio)
# output one search result
products = degiro.search_products('Pfizer')
print(Product(products[0]).id)
print(Product(products[0]).name)
print(Product(products[0]).symbol)
print(Product(products[0]).isin)
print(Product(products[0]).currency)
print(Product(products[0]).product_type)
print(Product(products[0]).tradable)
print(Product(products[0]).close_price)
print(Product(products[0]).close_price_date)
# output multiple search result
products = degiro.search_products('Pfizer', 3)
print(Product(products[0]).id)
print(Product(products[1]).id)
print(Product(products[2]).id)
# printing info for a specified product ID:
info = degiro.product_info(5322419)
print(info["id"], info["name"], info["currency"], info["closePrice"])
# print transactions
transactions = degiro.transactions(datetime(2019, 1, 1), datetime.now())
print(pretty_json(transactions))
# print order history (maximum timespan 90 days)
orders = degiro.orders(datetime.now() - timedelta(days=90), datetime.now())
print(pretty_json(orders))
# printing order history (maximum timespan 90 days), with argument True return only open orders
orders = degiro.orders(datetime.now() - timedelta(days=90), datetime.now(), True)
print(pretty_json(orders))
# deleting an open order
orders = degiro.orders(datetime.now() - timedelta(days=1), datetime.now(), True)
degiro.delete_order(orders[0]['orderId'])
degiro.delete_order("f278d56f-eaa0-4dc7-b067-45c6b4b3d74f")
# getting realtime and historical data from a stock
products = degiro.search_products('nrz')
# Interval can be set to One_Day, One_Week, One_Month, Three_Months, Six_Months, One_Year, Three_Years, Five_Years, Max
realprice = degiro.real_time_price(Product(products[0]).id, degiroapi.Interval.Type.One_Day)
# realtime data
print(realprice[0]['data']['lastPrice'])
print(pretty_json(realprice[0]['data']))
# historical data
print(realprice[1]['data'])
# get s&p 500 stock list
sp5symbols = []
products = degiro.get_stock_list(14, 846)
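# (the numeric arguments identify the specific list to fetch; the pair differs
# per index, e.g. (6, 906) for the DAX below)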
for product in products:
sp5symbols.append(Product(product).symbol)
# get german30 stock list
daxsymbols = []
products = degiro.get_stock_list(6, 906)
for product in products:
daxsymbols.append(Product(product).symbol)
# placing an order(dependent on the order type)
# set a limit order price to which the order gets executed
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size, limit(the limit price)
degiro.buyorder(Order.Type.LIMIT, Product(products[0]).id, Order.Time.GTC, 1, 30)
# sets a limit order when the stoploss price is reached(not bought for more than the limit at the stop loss price)
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size, limit(the limit price), stop_loss(stop loss price)
degiro.buyorder(Order.Type.STOPLIMIT, Product(products[0]).id, Order.Time.GTC, 1, 38, 38)
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size
degiro.buyorder(Order.Type.MARKET, Product(products[0]).id, Order.Time.GTC, 1)
# the stop loss price has to be higher than the current price, when current price reaches the stoploss price the order is placed
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size, don't change none, stop_loss(stop loss price)
degiro.buyorder(Order.Type.STOPLOSS, Product(products[0]).id, Order.Time.GTC, 1, None, 38)
# selling a product
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size, limit(the limit price)
degiro.sellorder(Order.Type.LIMIT, Product(products[0]).id, Order.Time.GTC, 1, 40)
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size, limit(the limit price), stop_loss(stop loss price)
degiro.sellorder(Order.Type.STOPLIMIT, Product(products[0]).id, Order.Time.GTC, 1, 37, 38)
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size
degiro.sellorder(Order.Type.MARKET, Product(products[0]).id, Order.Time.GTC, 1)
# order type, product id, execution time type (either Order.Time.DAY for "valid on a daily basis", or Order.Time.GTC for "unlimited"), size, don't change none, stop_loss(stop loss price)
degiro.sellorder(Order.Type.STOPLOSS, Product(products[0]).id, Order.Time.GTC, 1, None, 38)
| 43.590551 | 191 | 0.758851 |
793ea9435bd170011bc32e6d01e7ada53f178603 | 2,593 | py | Python | spider.py | trinathtiru/jaipal | 232b7a6742b4ec695b1eda5aa6010b1d8fb274a8 | [
"Apache-2.0"
] | null | null | null | spider.py | trinathtiru/jaipal | 232b7a6742b4ec695b1eda5aa6010b1d8fb274a8 | [
"Apache-2.0"
] | null | null | null | spider.py | trinathtiru/jaipal | 232b7a6742b4ec695b1eda5aa6010b1d8fb274a8 | [
"Apache-2.0"
] | null | null | null | from urllib.request import urlopen
from link_finder import LinkFinder
from domain import *
from general import *
class Spider:
project_name = ''
base_url = ''
domain_name = ''
queue_file = ''
crawled_file = ''
queue = set()
crawled = set()
def __init__(self, project_name, base_url, domain_name):
Spider.project_name = project_name
Spider.base_url = base_url
Spider.domain_name = domain_name
Spider.queue_file = Spider.project_name + '/queue.txt'
Spider.crawled_file = Spider.project_name + '/crawled.txt'
self.boot()
self.crawl_page('First spider', Spider.base_url)
# Creates directory and files for project on first run and starts the spider
@staticmethod
def boot():
create_project_dir(Spider.project_name)
create_data_files(Spider.project_name, Spider.base_url)
Spider.queue = file_to_set(Spider.queue_file)
Spider.crawled = file_to_set(Spider.crawled_file)
# Updates user display, fills queue and updates files
@staticmethod
def crawl_page(thread_name, page_url):
if page_url not in Spider.crawled:
print(thread_name + ' now crawling ' + page_url)
print('Queue ' + str(len(Spider.queue)) + ' | Crawled ' + str(len(Spider.crawled)))
Spider.add_links_to_queue(Spider.gather_links(page_url))
Spider.queue.remove(page_url)
Spider.crawled.add(page_url)
Spider.update_files()
# Converts raw response data into readable information and checks for proper html formatting
@staticmethod
def gather_links(page_url):
html_string = ''
try:
response = urlopen(page_url)
if 'text/html' in response.getheader('Content-Type'):
html_bytes = response.read()
html_string = html_bytes.decode("utf-8")
finder = LinkFinder(Spider.base_url, page_url)
finder.feed(html_string)
except Exception as e:
print(str(e))
return set()
return finder.page_links()
    # Adds eligible links (same domain, not already queued or crawled) to the queue
@staticmethod
def add_links_to_queue(links):
for url in links:
if (url in Spider.queue) or (url in Spider.crawled):
continue
if Spider.domain_name != get_domain_name(url):
continue
Spider.queue.add(url)
@staticmethod
def update_files():
set_to_file(Spider.queue, Spider.queue_file)
set_to_file(Spider.crawled, Spider.crawled_file)
| 35.520548 | 96 | 0.639028 |
793ea9e4082cfdb90bd39771017d990592d4bcb9 | 4,916 | py | Python | examples/app_based_example.py | dokime7/flask-cors | caa7ba1e0df3f65e254fe9e31121491ad0e93a60 | [
"MIT"
] | null | null | null | examples/app_based_example.py | dokime7/flask-cors | caa7ba1e0df3f65e254fe9e31121491ad0e93a60 | [
"MIT"
] | null | null | null | examples/app_based_example.py | dokime7/flask-cors | caa7ba1e0df3f65e254fe9e31121491ad0e93a60 | [
"MIT"
] | null | null | null | """
Flask-Cors example
===================
This is a tiny Flask Application demonstrating Flask-Cors, making it simple
to add cross origin support to your flask app!
:copyright: (c) 2016 by Cory Dolphin.
:license: MIT/X11, see LICENSE for more details.
"""
from flask import Flask, jsonify
import logging
try:
from flask_cors import CORS # The typical way to import flask-cors
except ImportError:
# Path hack allows examples to be run without installation.
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
from flask_cors import CORS
app = Flask('FlaskCorsAppBasedExample')
logging.basicConfig(level=logging.INFO)
# To enable logging for flask-cors,
logging.getLogger('flask_cors').level = logging.DEBUG
# One of the simplest configurations. Exposes all resources matching /api/* to
# CORS and allows the Content-Type header, which is necessary to POST JSON
# cross origin.
CORS(app, resources=r'/api/*')
@app.route("/")
def helloWorld():
'''
Since the path '/' does not match the regular expression r'/api/*',
this route does not have CORS headers set.
'''
return '''
<html>
<h1>Hello CORS!</h1>
<h3> End to end editable example with jquery! </h3>
<a class="jsbin-embed" href="http://jsbin.com/zazitas/embed?js,console">JS Bin on jsbin.com</a>
<script src="//static.jsbin.com/js/embed.min.js?3.35.12"></script>
</html>
'''
@app.route("/api/v1/users/")
def list_users():
'''
Since the path matches the regular expression r'/api/*', this resource
automatically has CORS headers set. The expected result is as follows:
$ curl --include -X GET http://127.0.0.1:5000/api/v1/users/ \
--header Origin:www.examplesite.com
HTTP/1.0 200 OK
Access-Control-Allow-Headers: Content-Type
Access-Control-Allow-Origin: *
Content-Length: 21
Content-Type: application/json
Date: Sat, 09 Aug 2014 00:26:41 GMT
Server: Werkzeug/0.9.4 Python/2.7.8
{
"success": true
}
'''
return jsonify(user="joe")
@app.route("/api/v1/users/create", methods=['POST'])
def create_user():
'''
Since the path matches the regular expression r'/api/*', this resource
automatically has CORS headers set.
Browsers will first make a preflight request to verify that the resource
allows cross-origin POSTs with a JSON Content-Type, which can be simulated
as:
$ curl --include -X OPTIONS http://127.0.0.1:5000/api/v1/users/create \
--header Access-Control-Request-Method:POST \
--header Access-Control-Request-Headers:Content-Type \
--header Origin:www.examplesite.com
>> HTTP/1.0 200 OK
Content-Type: text/html; charset=utf-8
Allow: POST, OPTIONS
Access-Control-Allow-Origin: *
Access-Control-Allow-Headers: Content-Type
Access-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT
Content-Length: 0
Server: Werkzeug/0.9.6 Python/2.7.9
Date: Sat, 31 Jan 2015 22:25:22 GMT
$ curl --include -X POST http://127.0.0.1:5000/api/v1/users/create \
--header Content-Type:application/json \
--header Origin:www.examplesite.com
>> HTTP/1.0 200 OK
Content-Type: application/json
Content-Length: 21
Access-Control-Allow-Origin: *
Server: Werkzeug/0.9.6 Python/2.7.9
Date: Sat, 31 Jan 2015 22:25:04 GMT
{
"success": true
}
'''
return jsonify(success=True)
@app.route("/api/exception")
def get_exception():
'''
Since the path matches the regular expression r'/api/*', this resource
automatically has CORS headers set.
Browsers will first make a preflight request to verify that the resource
allows cross-origin POSTs with a JSON Content-Type, which can be simulated
as:
    $ curl --include -X OPTIONS http://127.0.0.1:5000/api/exception \
--header Access-Control-Request-Method:POST \
--header Access-Control-Request-Headers:Content-Type \
--header Origin:www.examplesite.com
>> HTTP/1.0 200 OK
Content-Type: text/html; charset=utf-8
Allow: POST, OPTIONS
Access-Control-Allow-Origin: *
Access-Control-Allow-Headers: Content-Type
Access-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT
Content-Length: 0
Server: Werkzeug/0.9.6 Python/2.7.9
Date: Sat, 31 Jan 2015 22:25:22 GMT
'''
raise Exception("example")
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request. %s', e)
    return "An internal error occurred", 500
if __name__ == "__main__":
app.run(debug=True)
| 32.556291 | 99 | 0.641985 |
793eaa435d535277f474f280ac2b7b63e68bfbc0 | 8,196 | py | Python | homeassistant/components/zwave_js/entity.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 5 | 2020-12-15T04:09:01.000Z | 2022-03-11T21:34:24.000Z | homeassistant/components/zwave_js/entity.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 77 | 2020-07-16T16:43:09.000Z | 2022-03-31T06:14:37.000Z | homeassistant/components/zwave_js/entity.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """Generic Z-Wave Entity Class."""
from __future__ import annotations
import logging
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import NodeStatus
from zwave_js_server.model.value import Value as ZwaveValue, get_value_id
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .helpers import get_device_id, get_unique_id
LOGGER = logging.getLogger(__name__)
EVENT_VALUE_UPDATED = "value updated"
EVENT_DEAD = "dead"
EVENT_ALIVE = "alive"
class ZWaveBaseEntity(Entity):
"""Generic Entity Class for a Z-Wave Device."""
def __init__(
self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
) -> None:
"""Initialize a generic Z-Wave device entity."""
self.config_entry = config_entry
self.client = client
self.info = info
# entities requiring additional values, can add extra ids to this list
self.watched_value_ids = {self.info.primary_value.value_id}
if self.info.additional_value_ids_to_watch:
self.watched_value_ids = self.watched_value_ids.union(
self.info.additional_value_ids_to_watch
)
# Entity class attributes
self._attr_name = self.generate_name()
self._attr_unique_id = get_unique_id(
self.client.driver.controller.home_id, self.info.primary_value.value_id
)
self._attr_entity_registry_enabled_default = (
self.info.entity_registry_enabled_default
)
self._attr_assumed_state = self.info.assumed_state
# device is precreated in main handler
self._attr_device_info = {
"identifiers": {get_device_id(self.client, self.info.node)},
}
@callback
def on_value_update(self) -> None:
"""Call when one of the watched values change.
To be overridden by platforms needing this event.
"""
async def async_poll_value(self, refresh_all_values: bool) -> None:
"""Poll a value."""
if not refresh_all_values:
self.hass.async_create_task(
self.info.node.async_poll_value(self.info.primary_value)
)
LOGGER.info(
(
"Refreshing primary value %s for %s, "
"state update may be delayed for devices on battery"
),
self.info.primary_value,
self.entity_id,
)
return
for value_id in self.watched_value_ids:
self.hass.async_create_task(self.info.node.async_poll_value(value_id))
LOGGER.info(
(
"Refreshing values %s for %s, state update may be delayed for "
"devices on battery"
),
", ".join(self.watched_value_ids),
self.entity_id,
)
async def async_added_to_hass(self) -> None:
"""Call when entity is added."""
# Add value_changed callbacks.
self.async_on_remove(
self.info.node.on(EVENT_VALUE_UPDATED, self._value_changed)
)
for status_event in (EVENT_ALIVE, EVENT_DEAD):
self.async_on_remove(
self.info.node.on(status_event, self._node_status_alive_or_dead)
)
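        # Connect the per-entity dispatcher signal (sent when a value refresh is
        # requested, e.g. by the integration's poll/refresh service) to async_poll_value.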
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{DOMAIN}_{self.unique_id}_poll_value",
self.async_poll_value,
)
)
def generate_name(
self,
include_value_name: bool = False,
alternate_value_name: str | None = None,
additional_info: list[str] | None = None,
name_suffix: str | None = None,
) -> str:
"""Generate entity name."""
if additional_info is None:
additional_info = []
name: str = (
self.info.node.name
or self.info.node.device_config.description
or f"Node {self.info.node.node_id}"
)
if name_suffix:
name = f"{name} {name_suffix}"
if include_value_name:
value_name = (
alternate_value_name
or self.info.primary_value.metadata.label
or self.info.primary_value.property_key_name
or self.info.primary_value.property_name
)
name = f"{name}: {value_name}"
for item in additional_info:
if item:
name += f" - {item}"
# append endpoint if > 1
if self.info.primary_value.endpoint > 1:
name += f" ({self.info.primary_value.endpoint})"
return name
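    # Illustrative example (assumed node data, not from the original code): with
    # include_value_name=True, a node named "Living Room Dimmer" whose primary value
    # label is "Brightness" on endpoint 2 would yield "Living Room Dimmer: Brightness (2)".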
@property
def available(self) -> bool:
"""Return entity availability."""
return (
self.client.connected
and bool(self.info.node.ready)
and self.info.node.status != NodeStatus.DEAD
)
@callback
def _node_status_alive_or_dead(self, event_data: dict) -> None:
"""
Call when node status changes to alive or dead.
Should not be overridden by subclasses.
"""
self.async_write_ha_state()
@callback
def _value_changed(self, event_data: dict) -> None:
"""Call when (one of) our watched values changes.
Should not be overridden by subclasses.
"""
value_id = event_data["value"].value_id
if value_id not in self.watched_value_ids:
return
value = self.info.node.values[value_id]
LOGGER.debug(
"[%s] Value %s/%s changed to: %s",
self.entity_id,
value.property_,
value.property_key_name,
value.value,
)
self.on_value_update()
self.async_write_ha_state()
@callback
def get_zwave_value(
self,
value_property: str | int,
command_class: int | None = None,
endpoint: int | None = None,
value_property_key: int | None = None,
add_to_watched_value_ids: bool = True,
check_all_endpoints: bool = False,
) -> ZwaveValue | None:
"""Return specific ZwaveValue on this ZwaveNode."""
# use commandclass and endpoint from primary value if omitted
return_value = None
if command_class is None:
command_class = self.info.primary_value.command_class
if endpoint is None:
endpoint = self.info.primary_value.endpoint
# lookup value by value_id
value_id = get_value_id(
self.info.node,
command_class,
value_property,
endpoint=endpoint,
property_key=value_property_key,
)
return_value = self.info.node.values.get(value_id)
# If we haven't found a value and check_all_endpoints is True, we should
# return the first value we can find on any other endpoint
if return_value is None and check_all_endpoints:
for endpoint_idx in self.info.node.endpoints:
if endpoint_idx != self.info.primary_value.endpoint:
value_id = get_value_id(
self.info.node,
command_class,
value_property,
endpoint=endpoint_idx,
property_key=value_property_key,
)
return_value = self.info.node.values.get(value_id)
if return_value:
break
# add to watched_ids list so we will be triggered when the value updates
if (
return_value
and return_value.value_id not in self.watched_value_ids
and add_to_watched_value_ids
):
self.watched_value_ids.add(return_value.value_id)
return return_value
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
| 33.453061 | 86 | 0.598829 |
793eaa6c3b9605410c7f4668cd9db5ac18caa17f | 5,412 | py | Python | pyqtgraph/graphicsItems/ArrowItem.py | StSav012/pyqtgraph | 65e17c4e3707eb3bd4d91cdc13504d9b150f4360 | [
"MIT"
] | 1 | 2022-01-30T20:04:51.000Z | 2022-01-30T20:04:51.000Z | pyqtgraph/graphicsItems/ArrowItem.py | StSav012/pyqtgraph | 65e17c4e3707eb3bd4d91cdc13504d9b150f4360 | [
"MIT"
] | null | null | null | pyqtgraph/graphicsItems/ArrowItem.py | StSav012/pyqtgraph | 65e17c4e3707eb3bd4d91cdc13504d9b150f4360 | [
"MIT"
] | null | null | null | from math import hypot
from .. import functions as fn
from ..Qt import QtGui, QtWidgets
__all__ = ['ArrowItem']
class ArrowItem(QtWidgets.QGraphicsPathItem):
"""
For displaying scale-invariant arrows.
For arrows pointing to a location on a curve, see CurveArrow
"""
def __init__(self, parent=None, **opts):
"""
Arrows can be initialized with any keyword arguments accepted by
the setStyle() method.
"""
self.opts = {}
QtWidgets.QGraphicsPathItem.__init__(self, parent)
if 'size' in opts:
opts['headLen'] = opts['size']
if 'width' in opts:
opts['headWidth'] = opts['width']
pos = opts.pop('pos', (0, 0))
defaultOpts = {
'pxMode': True,
'angle': -150, ## If the angle is 0, the arrow points left
'headLen': 20,
'headWidth': None,
'tipAngle': 25,
'baseAngle': 0,
'tailLen': None,
'tailWidth': 3,
'pen': (200,200,200),
'brush': (50,50,200),
}
defaultOpts.update(opts)
self.setStyle(**defaultOpts)
# for backward compatibility
self.setPos(*pos)
def setStyle(self, **opts):
"""
Changes the appearance of the arrow.
All arguments are optional:
====================== =================================================
**Keyword Arguments:**
        angle                  Orientation of the arrow in degrees. An angle of
                               0 points the arrow to the left; the default set
                               in __init__ is -150.
headLen Length of the arrow head, from tip to base.
default=20
headWidth Width of the arrow head at its base. If
headWidth is specified, it overrides tipAngle.
tipAngle Angle of the tip of the arrow in degrees. Smaller
values make a 'sharper' arrow. default=25
baseAngle Angle of the base of the arrow head. Default is
0, which means that the base of the arrow head
is perpendicular to the arrow tail.
tailLen Length of the arrow tail, measured from the base
of the arrow head to the end of the tail. If
this value is None, no tail will be drawn.
default=None
tailWidth Width of the tail. default=3
pen The pen used to draw the outline of the arrow.
brush The brush used to fill the arrow.
pxMode If True, then the arrow is drawn as a fixed size
regardless of the scale of its parents (including
the ViewBox zoom level).
====================== =================================================
"""
arrowOpts = ['headLen', 'tipAngle', 'baseAngle', 'tailLen', 'tailWidth', 'headWidth']
allowedOpts = ['angle', 'pen', 'brush', 'pxMode'] + arrowOpts
needUpdate = False
for k,v in opts.items():
if k not in allowedOpts:
raise KeyError('Invalid arrow style option "%s"' % k)
if self.opts.get(k) != v:
needUpdate = True
self.opts[k] = v
if not needUpdate:
return
opt = dict([(k,self.opts[k]) for k in arrowOpts if k in self.opts])
tr = QtGui.QTransform()
tr.rotate(self.opts['angle'])
self.path = tr.map(fn.makeArrowPath(**opt))
self.setPath(self.path)
self.setPen(fn.mkPen(self.opts['pen']))
self.setBrush(fn.mkBrush(self.opts['brush']))
if self.opts['pxMode']:
self.setFlags(self.flags() | self.GraphicsItemFlag.ItemIgnoresTransformations)
else:
self.setFlags(self.flags() & ~self.GraphicsItemFlag.ItemIgnoresTransformations)
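    # A brief usage sketch (illustrative only, not part of the original file); 'plot_item'
    # is an assumed existing pyqtgraph PlotItem:
    #   arrow = ArrowItem(angle=-160, tipAngle=60, headLen=40, tailLen=40, tailWidth=10,
    #                     pen=None, brush='y')
    #   arrow.setPos(10, 0)
    #   plot_item.addItem(arrow)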
def paint(self, p, *args):
p.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing)
super().paint(p, *args)
#p.setPen(fn.mkPen('r'))
#p.setBrush(fn.mkBrush(None))
#p.drawRect(self.boundingRect())
def shape(self):
#if not self.opts['pxMode']:
#return QtWidgets.QGraphicsPathItem.shape(self)
return self.path
## dataBounds and pixelPadding methods are provided to ensure ViewBox can
## properly auto-range
def dataBounds(self, ax, frac, orthoRange=None):
pw = 0
pen = self.pen()
if not pen.isCosmetic():
pw = pen.width() * 0.7072
if self.opts['pxMode']:
return [0,0]
else:
br = self.boundingRect()
if ax == 0:
return [br.left()-pw, br.right()+pw]
else:
return [br.top()-pw, br.bottom()+pw]
def pixelPadding(self):
pad = 0
if self.opts['pxMode']:
br = self.boundingRect()
pad += hypot(br.width(), br.height())
pen = self.pen()
if pen.isCosmetic():
pad += max(1, pen.width()) * 0.7072
return pad
| 36.567568 | 93 | 0.49612 |
793eaacc92317d3ab8302c3131c199248127e3c9 | 3,807 | py | Python | desertbot/modules/utils/StringUtils.py | DesertBot/DesertBot | 33b4fe03dae5ead23003e18b511179e0b03b061d | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-20T17:10:10.000Z | 2021-11-17T18:58:04.000Z | desertbot/modules/utils/StringUtils.py | DesertBot/DesertBot | 33b4fe03dae5ead23003e18b511179e0b03b061d | [
"MIT",
"BSD-3-Clause"
] | 109 | 2015-08-20T13:16:35.000Z | 2022-01-21T19:40:35.000Z | desertbot/modules/utils/StringUtils.py | DesertBot/DesertBot | 33b4fe03dae5ead23003e18b511179e0b03b061d | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-29T05:55:01.000Z | 2021-02-05T19:19:39.000Z | import json
import re
from collections import OrderedDict
from typing import List
from pyxdameraulevenshtein import normalized_damerau_levenshtein_distance as ndld
from twisted.plugin import IPlugin
from zope.interface import implementer
from desertbot.message import IRCMessage
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand
from desertbot.response import IRCResponse
@implementer(IPlugin, IModule)
class StringUtils(BotCommand):
def triggers(self):
return ["tojson", "fromjson", "prevmsg", "prev_or_args"]
def actions(self):
return super(StringUtils, self).actions() + [('closest-matches', 1, self.closestMatches),
('message-channel', 1, self._storeMessage),
('message-user', 1, self._storeMessage),
('action-channel', 1, self._storeMessage),
('action-user', 1, self._storeMessage)]
def closestMatches(self, search: str, wordList: List[str],
numMatches: int, threshold: float) -> List[str]:
similarities = sorted([(ndld(search, word), word) for word in wordList])
closeMatches = [word for (diff, word) in similarities if diff <= threshold]
topN = closeMatches[:numMatches]
return topN
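    # Illustrative behaviour sketch (assumed inputs): closestMatches("helo",
    # ["hello", "help", "yellow"], numMatches=2, threshold=0.4) ranks candidates by
    # normalized Damerau-Levenshtein distance and keeps those within 0.4 of the query,
    # returning e.g. ["hello", "help"].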
def _tojson(self, message: IRCMessage):
"""converts input string to json-escaped string"""
return IRCResponse(json.dumps(message.parameters), message.replyTo)
def _fromjson(self, message: IRCMessage):
"""un-escapes json strings"""
return IRCResponse(str(json.loads(message.parameters)), message.replyTo)
def _storeMessage(self, message: IRCMessage):
"""stores the current message for _prevmsg to return later"""
if message.command and message.command.lower() in self.bot.moduleHandler.mappedTriggers:
# ignore bot commands
return
if 'tracking' in message.metadata:
# ignore internal messages from alias processing
if any(m in message.metadata['tracking'] for m in ['Sub', 'Chain', 'Alias']):
return
self.messages[message.replyTo] = message
def _prevmsg(self, message: IRCMessage):
"""returns the previous message from the current channel"""
if message.replyTo not in self.messages:
return IRCResponse("No previous message stored for this channel yet", message.replyTo)
msg = self.messages[message.replyTo]
return IRCResponse(msg.messageString, message.replyTo)
def _prev_or_args(self, message: IRCMessage):
"""returns the previous message from the current channel,
or the command arguments if given"""
if message.parameters:
return IRCResponse(message.parameters, message.replyTo)
else:
return self._prevmsg(message)
commands = OrderedDict([
('tojson', _tojson),
('fromjson', _fromjson),
('prevmsg', _prevmsg),
('prev_or_args', _prev_or_args),
])
def execute(self, message: IRCMessage):
command = message.command.lower()
if command in self.commands:
return self.commands[command](self, message)
else:
return IRCResponse(f'"{message.command}" is not a recognized StringUtils command', message.replyTo)
def help(self, query):
command = query[0].lower()
if command in self.commands:
doc = re.sub(r"\s+", " ", self.commands[command].__doc__)
return f"{self.bot.commandChar}{command} - {doc}"
def onLoad(self):
self.messages = {}
stringUtils = StringUtils()
| 39.65625 | 111 | 0.63357 |
793eac9111aa62412e756ed54e444241ef79474e | 35,590 | py | Python | mmdet/models/dense_heads/gfl_head_fixlastscale.py | wuguikel/DCFANet | 3d6a7d767f89b7c95692d89bfb2951aeca2740f6 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/gfl_head_fixlastscale.py | wuguikel/DCFANet | 3d6a7d767f89b7c95692d89bfb2951aeca2740f6 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/gfl_head_fixlastscale.py | wuguikel/DCFANet | 3d6a7d767f89b7c95692d89bfb2951aeca2740f6 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32
from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps,
build_assigner, build_sampler, distance2bbox,
images_to_levels, multi_apply, multiclass_nms,
reduce_mean, unmap)
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead
from .cross_head_modules import BasicCrossBlock, BasicTransBlock
class Integral(nn.Module):
"""A fixed layer for calculating integral result from distribution.
This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,
P(y_i) denotes the softmax vector that represents the discrete distribution
y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}
Args:
reg_max (int): The maximal value of the discrete set. Default: 16. You
may want to reset it according to your new dataset or related
settings.
"""
def __init__(self, reg_max=16):
super(Integral, self).__init__()
self.reg_max = reg_max
self.register_buffer('project',
torch.linspace(0, self.reg_max, self.reg_max + 1))
def forward(self, x):
"""Forward feature from the regression head to get integral result of
bounding box location.
Args:
x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
n is self.reg_max.
Returns:
x (Tensor): Integral result of box locations, i.e., distance
offsets from the box center in four directions, shape (N, 4).
"""
x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
return x
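# A minimal numeric sketch of what Integral computes (editorial example, assuming reg_max=2
# for brevity): softmax turns each group of reg_max+1 logits into a discrete distribution,
# and the expectation over {0, 1, 2} gives that side's offset.
#   layer = Integral(reg_max=2)
#   layer(torch.zeros(1, 12))  # uniform distributions -> tensor([[1., 1., 1., 1.]])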
@HEADS.register_module()
class GFLHead(AnchorHead):
"""Generalized Focal Loss: Learning Qualified and Distributed Bounding
Boxes for Dense Object Detection.
GFL head structure is similar with ATSS, however GFL uses
1) joint representation for classification and localization quality, and
2) flexible General distribution for bounding box locations,
which are supervised by
Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively
https://arxiv.org/abs/2006.04388
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
stacked_convs (int): Number of conv layers in cls and reg tower.
Default: 4.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='GN', num_groups=32, requires_grad=True).
loss_qfl (dict): Config of Quality Focal Loss (QFL).
reg_max (int): Max value of integral set :math: `{0, ..., reg_max}`
in QFL setting. Default: 16.
Example:
>>> self = GFLHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_quality_score, bbox_pred = self.forward(feats)
>>> assert len(cls_quality_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=3,
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
reg_max=16,
none_DenseRes=False,
none_Cross=False,
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reg_max = reg_max
self.none_DenseRes = none_DenseRes
self.none_Cross = none_Cross
super(GFLHead, self).__init__(num_classes, in_channels, **kwargs)
self.sampling = False
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# SSD sampling=False so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
if self.reg_max > 0:
self.integral = Integral(self.reg_max)
self.loss_dfl = build_loss(loss_dfl)
def _init_layers(self):
"""Initialize layers of the head."""
# self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.cross_convs = nn.ModuleList()
if not self.none_DenseRes:
self.scalesX = nn.ModuleList()
convlayers_scale = [0.8, 0.64, 0.51, 0.42, 0.38]
convlayers_num = [2, 2, 2, 2, 2, 2]
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
BasicTransBlock(
chn,
self.feat_channels,
convlayers_num[i],
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
BasicTransBlock(
chn,
self.feat_channels,
convlayers_num[i],
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
if i>0:
if not self.none_DenseRes:
self.scalesX.append(
Scale(convlayers_scale[i-1]))
self.cross_convs.append(
BasicCrossBlock(
chn,
self.feat_channels,
noneCross=self.none_Cross,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
assert self.num_anchors == 1, 'anchor free version'
self.gfl_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.gfl_reg = nn.Conv2d(
self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
# ==========================================================
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.anchor_generator.strides])
def init_weights(self):
"""Initialize weights of the head."""
for m in self.cls_convs:
# normal_init(m.conv, std=0.01)
m.init_weights()
for m in self.reg_convs:
# normal_init(m.conv, std=0.01)
m.init_weights()
for m in self.cross_convs:
m.init_weights()
bias_cls = bias_init_with_prob(0.01)
normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
normal_init(self.gfl_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification and quality (IoU)
joint scores for all scale levels, each is a 4D-tensor,
the channel number is num_classes.
bbox_preds (list[Tensor]): Box distribution logits for all
scale levels, each is a 4D-tensor, the channel number is
4*(n+1), n is max value of integral set.
"""
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
Returns:
tuple:
cls_score (Tensor): Cls and quality joint scores for a single
scale level the channel number is num_classes.
bbox_pred (Tensor): Box distribution logits for a single scale
level, the channel number is 4*(n+1), n is max value of
integral set.
"""
# cls_feat = x
# reg_feat = x
# for cls_conv in self.cls_convs:
# cls_feat = cls_conv(cls_feat)
# for reg_conv in self.reg_convs:
# reg_feat = reg_conv(reg_feat)
# cls_score = self.gfl_cls(cls_feat)
# # bbox_pred = scale(self.gfl_reg(reg_feat)).float()
# bbox_pred_nonscale = self.gfl_reg(reg_feat)
# bbox_pred = scale(bbox_pred_nonscale).float()
cls_feat = x
reg_feat = x
if not self.none_DenseRes:
cls_featX = torch.zeros_like(cls_feat)
reg_featX = torch.zeros_like(reg_feat)
for i in range(self.stacked_convs):
if i>0:
# cls_feat =cls_feat + cls_feat_pre
# reg_feat =reg_feat + reg_feat_pre
cls_feat, reg_feat = self.cross_convs[i-1](cls_feat, reg_feat)
# cls_feat_pre = cls_feat
# reg_feat_pre = reg_feat
cls_feat = self.cls_convs[i](cls_feat)
reg_feat = self.reg_convs[i](reg_feat)
if not self.none_DenseRes:
if i<self.stacked_convs-1:
cls_featX += self.scalesX[i](cls_feat)
reg_featX += self.scalesX[i](reg_feat)
else:
cls_featX += cls_feat
reg_featX += reg_feat
if self.none_DenseRes:
cls_featX = cls_feat
reg_featX = reg_feat
cls_score = self.gfl_cls(cls_featX)
bbox_pred = scale(self.gfl_reg(reg_featX)).float()
        return cls_score, bbox_pred  # processed further in loss -> loss_single
def anchor_center(self, anchors):
"""Get anchor centers from anchors.
Args:
anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
Returns:
Tensor: Anchor centers with shape (N, 2), "xy" format.
"""
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
return torch.stack([anchors_cx, anchors_cy], dim=-1)
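    # Quick sanity example (editorial):
    #   anchor_center(torch.tensor([[0., 0., 8., 8.]]))  # -> tensor([[4., 4.]])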
def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
bbox_targets, stride, num_total_samples):
"""Compute loss of a single scale level.
Args:
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
cls_score (Tensor): Cls and quality joint scores for each scale
level has shape (N, num_classes, H, W).
bbox_pred (Tensor): Box distribution logits for each scale
level with shape (N, 4*(n+1), H, W), n is max value of integral
                set. # Tensor dims, in order: batch size, 4*(number of bbox regression bins, similar to histogram bins, + 1), image height, image width
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
bbox_targets (Tensor): BBox regression targets of each anchor wight
shape (N, num_total_anchors, 4).
stride (tuple): Stride in this scale level.
num_total_samples (int): Number of positive samples that is
reduced over all GPUs.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert stride[0] == stride[1], 'h stride is not equal to w stride!'
anchors = anchors.reshape(-1, 4)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(-1, 4 * (self.reg_max + 1))
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
        # compute the bbox regression loss only for positive samples
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
score = label_weights.new_zeros(labels.shape)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_anchors = anchors[pos_inds]
pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
weight_targets = cls_score.detach().sigmoid()
weight_targets = weight_targets.max(dim=1)[0][pos_inds]
if self.reg_max > 0:
pos_bbox_pred_corners = self.integral(pos_bbox_pred)
else:
pos_bbox_pred_corners = pos_bbox_pred
# bb(r,l,t,b) <---> reg distance
pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
pos_bbox_pred_corners)
pos_decode_bbox_targets = pos_bbox_targets / stride[0]
score[pos_inds] = bbox_overlaps(
pos_decode_bbox_pred.detach(),
pos_decode_bbox_targets,
is_aligned=True)
if self.reg_max > 0:
pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
target_corners = bbox2distance(pos_anchor_centers,
pos_decode_bbox_targets,
self.reg_max).reshape(-1)
# regression loss
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_decode_bbox_targets,
weight=weight_targets,
avg_factor=1.0)
# dfl loss
if self.reg_max > 0:
loss_dfl = self.loss_dfl(
pred_corners,
target_corners,
weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
avg_factor=4.0)
else:
loss_dfl = bbox_pred.sum() * 0
else:
loss_bbox = bbox_pred.sum() * 0
# loss_bbox2 = bbox_pred.sum() * 0
loss_dfl = bbox_pred.sum() * 0
weight_targets = torch.tensor(0).cuda()
# weight_targets2 = torch.tensor(0).cuda()
# cls (qfl) loss
        # loss_cls is specified by this block in the config; it inherits the default
        # loss_cls members from AnchorHead and is not re-defined in GFL:
# loss_cls=dict(
# type='QualityFocalLoss',
# use_sigmoid=False,
# beta=2.0,
# loss_weight=1.0),
loss_cls = self.loss_cls(
cls_score, (labels, score),
weight=label_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()
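    # Editorial note: the (labels, score) tuple passed to QualityFocalLoss above couples the
    # class target with the predicted/target IoU computed in this method, i.e. the joint
    # classification-quality representation described in the class docstring.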
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Cls and quality scores for each scale
level has shape (N, num_classes, H, W).
bbox_preds (list[Tensor]): Box distribution logits for each scale
level with shape (N, 4*(n+1), H, W), n is max value of integral
set.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
        # get_anchors() is implemented in mmdet/models/dense_heads/anchor_head.py
        # presumably it generates anchors from the configured aspect-ratio and scale lists,
        # but without the GT as input, how is valid_flag_list computed?
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        # get_targets() is implemented in mmdet/models/dense_heads/anchor_head.py
        # presumably it selects the valid anchors according to the GT boxes
        # the key part is the sample implementation in _get_target_single
        # assign and sample are the core operations of anchor targeting.
        # assign is usually IoU-based; mmdet also has ATSS-based and point-based variants.
        # sample is usually random; there are also OHEM-based and pseudo-label-based samplers.
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
        # note: return directly if the whole batch has no valid anchors
if cls_reg_targets is None:
return None
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = reduce_mean(
torch.tensor(num_total_pos).cuda()).item()
num_total_samples = max(num_total_samples, 1.0)
losses_cls, losses_bbox, losses_dfl,\
avg_factor = multi_apply(
self.loss_single,
anchor_list,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
self.anchor_generator.strides,
num_total_samples=num_total_samples)
avg_factor = sum(avg_factor)
avg_factor = reduce_mean(avg_factor).item()
losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
if self.reg_max > 0:
losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
# avg_factor2 = sum(avg_factor2)
# avg_factor2 = reduce_mean(avg_factor2).item()
# losses_bbox2 = list(map(lambda x: x / avg_factor2, losses_bbox2))
if self.reg_max > 0:
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)
else:
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox)
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into labeled boxes.
Args:
cls_scores (list[Tensor]): Box scores for a single scale level
has shape (num_classes, H, W).
bbox_preds (list[Tensor]): Box distribution logits for a single
scale level with shape (4*(n+1), H, W), n is max value of
integral set.
mlvl_anchors (list[Tensor]): Box reference for a single scale level
with shape (num_total_anchors, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
scale_factor (ndarray): Scale factor of the image arange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple(Tensor):
det_bboxes (Tensor): Bbox predictions in shape (N, 5), where
the first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
between 0 and 1.
det_labels (Tensor): A (N,) tensor where each item is the
predicted class label of the corresponding box.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, stride, anchors in zip(
cls_scores, bbox_preds, self.anchor_generator.strides,
mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
assert stride[0] == stride[1]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0)
if self.reg_max > 0:
bbox_pred = self.integral(bbox_pred) * stride[0]
else:
bbox_pred = bbox_pred.reshape(-1,4) * stride[0]
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = distance2bbox(
self.anchor_center(anchors), bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
if with_nms:
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
else:
return mlvl_bboxes, mlvl_scores
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg=None,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the predicted class labelof the
corresponding box.
Example:
>>> import mmcv
>>> self = AnchorHead(
>>> num_classes=9,
>>> in_channels=1,
>>> anchor_generator=dict(
>>> type='AnchorGenerator',
>>> scales=[8],
>>> ratios=[0.5, 1.0, 2.0],
>>> strides=[4,]))
>>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
>>> cfg = mmcv.Config(dict(
>>> score_thr=0.00,
>>> nms=dict(type='nms', iou_thr=1.0),
>>> max_per_img=10))
>>> feat = torch.rand(1, 1, 3, 3)
>>> cls_score, bbox_pred = self.forward_single(feat)
>>> # note the input lists are over different levels, not images
>>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
>>> result_list = self.get_bboxes(cls_scores, bbox_preds,
>>> img_metas, cfg)
>>> det_bboxes, det_labels = result_list[0]
>>> assert len(result_list) == 1
>>> assert det_bboxes.shape[1] == 5
>>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
if with_nms:
# some heads don't support with_nms argument
proposals = self._get_bboxes_single(cls_score_list,
bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
else:
proposals = self._get_bboxes_single(cls_score_list,
bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale,
with_nms)
result_list.append(proposals)
return result_list
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True):
"""Get targets for GFL head.
This method is almost the same as `AnchorHead.get_targets()`. Besides
returning the targets as the parent method does, it also returns the
anchors as the first element of the returned tuple.
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
num_level_anchors_list = [num_level_anchors] * num_imgs
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
self._get_target_single,
anchor_list,
valid_flag_list,
num_level_anchors_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, bbox_weights_list, num_total_pos,
num_total_neg)
def _get_target_single(self,
flat_anchors,
valid_flags,
num_level_anchors,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression, classification targets for anchors in a single
image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
num_level_anchors Tensor): Number of anchors of each scale level.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
img_meta (dict): Meta info of the image.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: N is the number of total anchors in the image.
anchors (Tensor): All anchors in the image with shape (N, 4).
labels (Tensor): Labels of all anchors in the image with shape
(N,).
label_weights (Tensor): Label weights of all anchor in the
image with shape (N,).
bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4).
pos_inds (Tensor): Indices of postive anchor with shape
(num_pos,).
neg_inds (Tensor): Indices of negative anchor with shape
(num_neg,).
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 7
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
num_level_anchors_inside = self.get_num_level_anchors_inside(
num_level_anchors, inside_flags)
assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
gt_bboxes, gt_bboxes_ignore,
gt_labels)
sampling_result = self.sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
# Only rpn gives gt_labels as None
# Foreground is the first class
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (anchors, labels, label_weights, bbox_targets, bbox_weights,
pos_inds, neg_inds)
def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
split_inside_flags = torch.split(inside_flags, num_level_anchors)
num_level_anchors_inside = [
int(flags.sum()) for flags in split_inside_flags
]
return num_level_anchors_inside
| 42.931242 | 80 | 0.558191 |
793eaecccff9f663269eef488d71f4b7da1b97ef | 7,547 | py | Python | builder/main.py | stanford-ssi/platform-atmelsam | d9fe6a3699c687e213e71bee91dd5f3836e79247 | [
"Apache-2.0"
] | null | null | null | builder/main.py | stanford-ssi/platform-atmelsam | d9fe6a3699c687e213e71bee91dd5f3836e79247 | [
"Apache-2.0"
] | null | null | null | builder/main.py | stanford-ssi/platform-atmelsam | d9fe6a3699c687e213e71bee91dd5f3836e79247 | [
"Apache-2.0"
] | 1 | 2022-01-15T13:19:10.000Z | 2022-01-15T13:19:10.000Z | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import basename, join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Builder, Default, DefaultEnvironment)
from platformio.util import get_serialports
def BeforeUpload(target, source, env): # pylint: disable=W0613,W0621
env.AutodetectUploadPort()
upload_options = {}
if "BOARD" in env:
upload_options = env.BoardConfig().get("upload", {})
if not upload_options.get("disable_flushing", False):
env.FlushSerialBuffer("$UPLOAD_PORT")
before_ports = get_serialports()
if upload_options.get("use_1200bps_touch", False):
env.TouchSerialPort("$UPLOAD_PORT", 1200)
if upload_options.get("wait_for_upload_port", False):
env.Replace(UPLOAD_PORT=env.WaitForNewSerialPort(before_ports))
# use only port name for BOSSA
if ("/" in env.subst("$UPLOAD_PORT") and
env.subst("$UPLOAD_PROTOCOL") == "sam-ba"):
env.Replace(UPLOAD_PORT=basename(env.subst("$UPLOAD_PORT")))
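# Editorial note: the 1200 bps "touch" above follows the SAM-BA/Arduino convention -- opening
# and closing the port at 1200 baud asks the MCU to reboot into its bootloader, which usually
# re-enumerates on a different serial port, hence the WaitForNewSerialPort() call.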
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
build_mcu = env.get("BOARD_MCU", board.get("build.mcu", ""))
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".hex"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_firm = join("$BUILD_DIR", "${PROGNAME}.%s" %
("hex" if upload_protocol == "stk500v2" else "bin"))
else:
target_elf = env.BuildProgram()
if upload_protocol == "stk500v2":
target_firm = env.ElfToHex(
join("$BUILD_DIR", "${PROGNAME}"), target_elf)
else:
target_firm = env.ElfToBin(
join("$BUILD_DIR", "${PROGNAME}"), target_elf)
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
#
# Target: Print binary size
#
target_size = env.Alias(
"size", target_elf,
env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
debug_tools = board.get("debug.tools", {})
upload_actions = []
if upload_protocol.startswith("blackmagic"):
env.Replace(
UPLOADER="$GDB",
UPLOADERFLAGS=[
"-nx",
"--batch",
"-ex", "target extended-remote $UPLOAD_PORT",
"-ex", "monitor %s_scan" %
("jtag" if upload_protocol == "blackmagic-jtag" else "swdp"),
"-ex", "attach 1",
"-ex", "load",
"-ex", "compare-sections",
"-ex", "kill"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $BUILD_DIR/${PROGNAME}.elf"
)
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for BlackMagic port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol == "sam-ba":
env.Replace(
UPLOADER="bossac",
UPLOADERFLAGS=[
"--port", '"$UPLOAD_PORT"',
"--erase",
"--write",
"--verify",
"--reset"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCES"
)
if board.get("build.core") == "adafruit":
env.Append(
UPLOADERFLAGS=["-U", "--offset",
board.get("upload.section_start")])
else:
env.Append(UPLOADERFLAGS=[
"-U", "true"
if env.BoardConfig().get("upload.native_usb", False) else "false"
])
if "sam3x8e" in build_mcu:
env.Append(UPLOADERFLAGS=["--boot"])
if int(ARGUMENTS.get("PIOVERBOSE", 0)):
env.Prepend(UPLOADERFLAGS=["--info", "--debug"])
upload_actions = [
env.VerboseAction(BeforeUpload, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol == "stk500v2":
env.Replace(
UPLOADER="avrdude",
UPLOADERFLAGS=[
"-p", "atmega2560", # Arduino M0/Tian upload hook
"-C", join(
platform.get_package_dir("tool-avrdude") or "",
"avrdude.conf"),
"-c", "$UPLOAD_PROTOCOL",
"-P", '"$UPLOAD_PORT"',
"-b", "$UPLOAD_SPEED",
"-u"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS -U flash:w:$SOURCES:i"
)
if int(ARGUMENTS.get("PIOVERBOSE", 0)):
env.Prepend(UPLOADERFLAGS=["-v"])
upload_actions = [
env.VerboseAction(BeforeUpload, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol in debug_tools:
env.Replace(
UPLOADER="openocd",
UPLOADERFLAGS=debug_tools.get(upload_protocol).get("server").get(
"arguments", []) + [
"-c",
"program {{$SOURCE}} verify reset %s; shutdown" %
board.get("upload.section_start", "")
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS"
)
env['UPLOADERFLAGS'] = [
f.replace("$PACKAGE_DIR", platform.get_package_dir("tool-openocd") or "")
for f in env['UPLOADERFLAGS']
]
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
# custom upload tool
elif "UPLOADCMD" in env:
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("upload", target_firm, upload_actions))
#
# Setup default targets
#
Default([target_buildprog, target_size])
| 30.309237 | 86 | 0.591891 |
793eaf12e8df5b69fbe0c625b09e8a6a110c4160 | 6,192 | py | Python | pretrained-model/vocoder/universal-melgan/universal-melgan-1024.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 111 | 2020-08-31T04:58:54.000Z | 2022-03-29T15:44:18.000Z | pretrained-model/vocoder/universal-melgan/universal-melgan-1024.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 14 | 2020-12-16T07:27:22.000Z | 2022-03-15T17:39:01.000Z | pretrained-model/vocoder/universal-melgan/universal-melgan-1024.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 29 | 2021-02-09T08:57:15.000Z | 2022-03-12T14:09:19.000Z | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
import numpy as np
from glob import glob
from itertools import cycle
import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import universal_melgan as melgan
from malaya_speech.train.model import melgan as melgan_loss
import malaya_speech.config
from malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss
import random
mels = glob('output-universal/mels/*.npy')
mels.extend(glob('speech-augmentation/mels/*.npy'))
random.shuffle(mels)
file_cycle = cycle(mels)
def generate(batch_max_steps=8192, hop_size=256):
while True:
f = next(file_cycle)
mel = np.load(f)
audio = np.load(f.replace('mels', 'audios'))
batch_max_frames = batch_max_steps // hop_size
if len(audio) < len(mel) * hop_size:
audio = np.pad(audio, [[0, len(mel) * hop_size - len(audio)]])
if len(mel) > batch_max_frames:
interval_start = 0
interval_end = len(mel) - batch_max_frames
start_frame = random.randint(interval_start, interval_end)
start_step = start_frame * hop_size
audio = audio[start_step: start_step + batch_max_steps]
mel = mel[start_frame: start_frame + batch_max_frames, :]
else:
audio = np.pad(audio, [[0, batch_max_steps - len(audio)]])
mel = np.pad(mel, [[0, batch_max_frames - len(mel)], [0, 0]])
yield {'mel': mel, 'audio': audio}
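# Quick sanity check of the windowing above (editorial): with batch_max_steps=8192 and
# hop_size=256, batch_max_frames = 8192 // 256 = 32, so each yielded sample pairs a
# (32, 80) mel window with the matching 8192-sample audio slice starting at
# start_frame * hop_size.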
dataset = tf.data.Dataset.from_generator(
generate,
{'mel': tf.float32, 'audio': tf.float32},
output_shapes={
'mel': tf.TensorShape([None, 80]),
'audio': tf.TensorShape([None]),
},
)
dataset = dataset.shuffle(32)
dataset = dataset.padded_batch(
32,
padded_shapes={
'audio': tf.TensorShape([None]),
'mel': tf.TensorShape([None, 80]),
},
padding_values={
'audio': tf.constant(0, dtype=tf.float32),
'mel': tf.constant(0, dtype=tf.float32),
},
)
features = dataset.make_one_shot_iterator().get_next()
melgan_config = malaya_speech.config.universal_melgan_config
melgan_config['melgan_generator_params']['filters'] = 1024
generator = melgan.Generator(
melgan.GeneratorConfig(**melgan_config['melgan_generator_params']),
name='universalmelgan-generator',
)
discriminator = melgan.MultiScaleDiscriminator(
melgan.WaveFormDiscriminatorConfig(
**melgan_config['melgan_waveform_discriminator_params']
),
melgan.STFTDiscriminatorConfig(
**melgan_config['melgan_stft_discriminator_params']
),
name='universalmelgan-discriminator',
)
mels_loss = melgan_loss.loss.TFMelSpectrogram()
mse_loss = tf.keras.losses.MeanSquaredError()
mae_loss = tf.keras.losses.MeanAbsoluteError()
def compute_per_example_generator_losses(audios, outputs):
y_hat = outputs
p_hat = discriminator(y_hat)
p = discriminator(tf.expand_dims(audios, 2))
adv_loss = 0.0
for i in range(len(p_hat)):
adv_loss += mse_loss(tf.ones_like(p_hat[i][-1]), p_hat[i][-1])
adv_loss /= i + 1
fm_loss = 0.0
for i in range(len(p_hat)):
for j in range(len(p_hat[i]) - 1):
fm_loss += mae_loss(p[i][j], p_hat[i][j])
fm_loss /= (i + 1) * (j + 1)
adv_loss += 10 * fm_loss
per_example_losses = adv_loss
a = calculate_2d_loss(audios, tf.squeeze(y_hat, -1), loss_fn=mels_loss)
dict_metrics_losses = {
'adversarial_loss': adv_loss,
'fm_loss': fm_loss,
'gen_loss': adv_loss,
'mels_spectrogram_loss': tf.reduce_mean(a),
}
return per_example_losses, dict_metrics_losses
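# Editorial note: the generator objective above is the usual (universal) MelGAN recipe --
# least-squares adversarial terms averaged over discriminator scales plus a feature-matching
# term weighted by 10; the mel-spectrogram loss is only logged in dict_metrics_losses and is
# not added to per_example_losses.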
def compute_per_example_discriminator_losses(audios, gen_outputs):
y_hat = gen_outputs
y = tf.expand_dims(audios, 2)
p = discriminator(y)
p_hat = discriminator(y_hat)
real_loss = 0.0
fake_loss = 0.0
for i in range(len(p)):
real_loss += mse_loss(tf.ones_like(p[i][-1]), p[i][-1])
fake_loss += mse_loss(tf.zeros_like(p_hat[i][-1]), p_hat[i][-1])
real_loss /= i + 1
fake_loss /= i + 1
dis_loss = real_loss + fake_loss
per_example_losses = dis_loss
dict_metrics_losses = {
'real_loss': real_loss,
'fake_loss': fake_loss,
'dis_loss': dis_loss,
}
return per_example_losses, dict_metrics_losses
y_hat = generator(features['mel'], training=True)
audios = features['audio']
per_example_losses, generator_losses = compute_per_example_generator_losses(
audios, y_hat
)
generator_loss = tf.reduce_mean(per_example_losses)
y_hat = generator(features['mel'], training=True)
audios = features['audio']
per_example_losses, discriminator_losses = compute_per_example_discriminator_losses(
audios, y_hat
)
discriminator_loss = tf.reduce_mean(per_example_losses)
for k, v in generator_losses.items():
tf.summary.scalar(k, v)
for k, v in discriminator_losses.items():
tf.summary.scalar(k, v)
summaries = tf.summary.merge_all()
t_vars = tf.trainable_variables()
d_vars = [
var
for var in t_vars
if var.name.startswith('universalmelgan-discriminator')
]
g_vars = [
var for var in t_vars if var.name.startswith('universalmelgan-generator')
]
d_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5, beta2=0.9).minimize(
discriminator_loss, var_list=d_vars
)
g_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5, beta2=0.9).minimize(
generator_loss, var_list=g_vars
)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
checkpoint = 5000
write_tensorboard = 100
epoch = 1_000_000
path = 'universal-melgan-1024'
writer = tf.summary.FileWriter(f'./{path}')
ckpt_path = tf.train.latest_checkpoint(path)
if ckpt_path:
saver.restore(sess, ckpt_path)
for i in range(epoch):
g_loss, _ = sess.run([generator_loss, g_optimizer])
d_loss, _ = sess.run([discriminator_loss, d_optimizer])
s = sess.run(summaries)
writer.add_summary(s, i)
if i % checkpoint == 0:
saver.save(sess, f'{path}/model.ckpt', global_step=i)
if i % write_tensorboard == 0:
writer.add_summary(s, i)
print(i, g_loss, d_loss)
| 28.534562 | 84 | 0.683463 |
793eafbf7cda80db650a33dde634dfb2104e463f | 6,493 | py | Python | Run.py | raghuslash/3D-BoundingBox | 8969f18ab585af1e2da0e6deae0f1cc72ce289e1 | [
"MIT"
] | 287 | 2018-12-09T11:53:53.000Z | 2022-03-24T12:12:07.000Z | Run.py | raghuslash/3D-BoundingBox | 8969f18ab585af1e2da0e6deae0f1cc72ce289e1 | [
"MIT"
] | 23 | 2019-02-28T06:56:14.000Z | 2022-03-04T23:15:54.000Z | Run.py | raghuslash/3D-BoundingBox | 8969f18ab585af1e2da0e6deae0f1cc72ce289e1 | [
"MIT"
] | 69 | 2018-12-17T12:20:31.000Z | 2022-03-23T08:50:05.000Z | """
Images must be in ./Kitti/testing/image_2/ and camera matrices in ./Kitti/testing/calib/
Uses YOLO to obtain 2D box, PyTorch to get 3D box, plots both
SPACE bar for next image, any other key to exit
"""
from torch_lib.Dataset import *
from library.Math import *
from library.Plotting import *
from torch_lib import Model, ClassAverages
from yolo.yolo import cv_Yolo
import os
import time
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models import vgg
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument("--image-dir", default="eval/image_2/",
help="Relative path to the directory containing images to detect. Default \
is eval/image_2/")
# TODO: support multiple cal matrix input types
parser.add_argument("--cal-dir", default="camera_cal/",
help="Relative path to the directory containing camera calibration form KITTI. \
Default is camera_cal/")
parser.add_argument("--video", action="store_true",
help="Weather or not to advance frame-by-frame as fast as possible. \
By default, this will pull images from ./eval/video")
parser.add_argument("--show-yolo", action="store_true",
help="Show the 2D BoundingBox detecions on a separate image")
parser.add_argument("--hide-debug", action="store_true",
help="Supress the printing of each 3d location")
def plot_regressed_3d_bbox(img, cam_to_img, box_2d, dimensions, alpha, theta_ray, img_2d=None):
# the math! returns X, the corners used for constraint
location, X = calc_location(dimensions, cam_to_img, box_2d, alpha, theta_ray)
orient = alpha + theta_ray
if img_2d is not None:
plot_2d_box(img_2d, box_2d)
plot_3d_box(img, cam_to_img, orient, dimensions, location) # 3d boxes
return location
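# Editorial note: the global yaw used for plotting is orient = alpha + theta_ray, i.e. the
# network's local (observation) angle plus the ray angle from the camera to the box centre,
# matching the KITTI alpha / rotation_y relation.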
def main():
FLAGS = parser.parse_args()
# load torch
weights_path = os.path.abspath(os.path.dirname(__file__)) + '/weights'
model_lst = [x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')]
if len(model_lst) == 0:
print('No previous model found, please train first!')
exit()
else:
print('Using previous model %s'%model_lst[-1])
my_vgg = vgg.vgg19_bn(pretrained=True)
# TODO: load bins from file or something
model = Model.Model(features=my_vgg.features, bins=2).cuda()
checkpoint = torch.load(weights_path + '/%s'%model_lst[-1])
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
# load yolo
yolo_path = os.path.abspath(os.path.dirname(__file__)) + '/weights'
yolo = cv_Yolo(yolo_path)
averages = ClassAverages.ClassAverages()
# TODO: clean up how this is done. flag?
angle_bins = generate_bins(2)
image_dir = FLAGS.image_dir
cal_dir = FLAGS.cal_dir
if FLAGS.video:
if FLAGS.image_dir == "eval/image_2/" and FLAGS.cal_dir == "camera_cal/":
image_dir = "eval/video/2011_09_26/image_2/"
cal_dir = "eval/video/2011_09_26/"
img_path = os.path.abspath(os.path.dirname(__file__)) + "/" + image_dir
# using P_rect from global calibration file
calib_path = os.path.abspath(os.path.dirname(__file__)) + "/" + cal_dir
calib_file = calib_path + "calib_cam_to_cam.txt"
# using P from each frame
# calib_path = os.path.abspath(os.path.dirname(__file__)) + '/Kitti/testing/calib/'
try:
ids = [x.split('.')[0] for x in sorted(os.listdir(img_path))]
except:
print("\nError: no images in %s"%img_path)
exit()
for img_id in ids:
start_time = time.time()
img_file = img_path + img_id + ".png"
# P for each frame
# calib_file = calib_path + id + ".txt"
truth_img = cv2.imread(img_file)
img = np.copy(truth_img)
yolo_img = np.copy(truth_img)
detections = yolo.detect(yolo_img)
for detection in detections:
if not averages.recognized_class(detection.detected_class):
continue
# this is throwing when the 2d bbox is invalid
# TODO: better check
try:
detectedObject = DetectedObject(img, detection.detected_class, detection.box_2d, calib_file)
except:
continue
theta_ray = detectedObject.theta_ray
input_img = detectedObject.img
proj_matrix = detectedObject.proj_matrix
box_2d = detection.box_2d
detected_class = detection.detected_class
input_tensor = torch.zeros([1,3,224,224]).cuda()
input_tensor[0,:,:,:] = input_img
[orient, conf, dim] = model(input_tensor)
orient = orient.cpu().data.numpy()[0, :, :]
conf = conf.cpu().data.numpy()[0, :]
dim = dim.cpu().data.numpy()[0, :]
dim += averages.get_item(detected_class)
argmax = np.argmax(conf)
orient = orient[argmax, :]
cos = orient[0]
sin = orient[1]
alpha = np.arctan2(sin, cos)
alpha += angle_bins[argmax]
alpha -= np.pi
if FLAGS.show_yolo:
location = plot_regressed_3d_bbox(img, proj_matrix, box_2d, dim, alpha, theta_ray, truth_img)
else:
location = plot_regressed_3d_bbox(img, proj_matrix, box_2d, dim, alpha, theta_ray)
if not FLAGS.hide_debug:
print('Estimated pose: %s'%location)
if FLAGS.show_yolo:
numpy_vertical = np.concatenate((truth_img, img), axis=0)
cv2.imshow('SPACE for next image, any other key to exit', numpy_vertical)
else:
cv2.imshow('3D detections', img)
if not FLAGS.hide_debug:
print("\n")
print('Got %s poses in %.3f seconds'%(len(detections), time.time() - start_time))
print('-------------')
if FLAGS.video:
cv2.waitKey(1)
else:
if cv2.waitKey(0) != 32: # space bar
exit()
if __name__ == '__main__':
main()
| 32.143564 | 109 | 0.6142 |
793eb07c8974fbede5a5ee2bf3c4b09557c72b7b | 20,709 | py | Python | azure-mgmt-compute/tests/test_mgmt_compute.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | null | null | null | azure-mgmt-compute/tests/test_mgmt_compute.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-compute/tests/test_mgmt_compute.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
from collections import namedtuple
import azure.mgmt.compute
import azure.mgmt.network.models
import azure.mgmt.storage.models
from devtools_testutils import (
AzureMgmtTestCase,
ResourceGroupPreparer,
)
ComputeResourceNames = namedtuple(
'ComputeResourceNames',
['storage', 'vm' ,'network', 'nic', 'subnet'],
)
class MgmtComputeTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtComputeTest, self).setUp()
self.compute_client = self.create_mgmt_client(
azure.mgmt.compute.ComputeManagementClient
)
self.linux_img_ref_id = "/" + self.compute_client.config.subscription_id + "/services/images/b4590d9e3ed742e4a1d46e5424aa335e__sles12-azure-guest-priority.x86-64-0.4.3-build1.1"
self.windows_img_ref_id = "/" + self.compute_client.config.subscription_id + "/services/images/a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-Datacenter-201503.01-en.us-127GB.vhd"
if not self.is_playback():
self.storage_client = self.create_mgmt_client(
azure.mgmt.storage.StorageManagementClient
)
self.network_client = self.create_mgmt_client(
azure.mgmt.network.NetworkManagementClient
)
def get_resource_names(self, base):
return ComputeResourceNames(
self.get_resource_name(base + 'stor'),
self.get_resource_name(base + 'vm'),
self.get_resource_name(base + 'net'),
self.get_resource_name(base + 'nic'),
self.get_resource_name(base + 'sub'),
)
def create_storage_account(self, group_name, location, storage_name):
params_create = azure.mgmt.storage.models.StorageAccountCreateParameters(
sku=azure.mgmt.storage.models.Sku(name=azure.mgmt.storage.models.SkuName.standard_lrs),
kind=azure.mgmt.storage.models.Kind.storage,
location=location
)
result_create = self.storage_client.storage_accounts.create(
group_name,
storage_name,
params_create,
)
result_create.wait()
def create_virtual_network(self, group_name, location, network_name, subnet_name):
params_create = azure.mgmt.network.models.VirtualNetwork(
location=location,
address_space=azure.mgmt.network.models.AddressSpace(
address_prefixes=[
'10.0.0.0/16',
],
),
subnets=[
azure.mgmt.network.models.Subnet(
name=subnet_name,
address_prefix='10.0.0.0/24',
),
],
)
azure_operation_poller = self.network_client.virtual_networks.create_or_update(
group_name,
network_name,
params_create,
)
result_create = azure_operation_poller.result()
self.assertEqual(result_create.name, network_name)
result_get = self.network_client.subnets.get(
group_name,
network_name,
subnet_name,
)
self.assertEqual(result_get.name, subnet_name)
return result_get
def create_network_interface(self, group_name, location, interface_name, subnet):
config_name = 'pyarmconfig'
params_create = azure.mgmt.network.models.NetworkInterface(
location=location,
ip_configurations=[
azure.mgmt.network.models.NetworkInterfaceIPConfiguration(
name=config_name,
# bug in Swagger azure.mgmt.network.models.enums.IPAllocationMethod.dynamic,
private_ip_allocation_method="Dynamic",
subnet=subnet,
),
],
)
result_create = self.network_client.network_interfaces.create_or_update(
group_name,
interface_name,
params_create,
)
result_create = result_create.result()
self.assertEqual(result_create.name, interface_name)
return result_create.id
def get_os_profile(self, group_name):
virtual_machines_models = self.compute_client.virtual_machines.models
return virtual_machines_models.OSProfile(
admin_username='Foo12',
admin_password='BaR@123' + group_name,
computer_name='test',
)
def get_hardware_profile(self):
virtual_machines_models = self.compute_client.virtual_machines.models
return virtual_machines_models.HardwareProfile(
vm_size=virtual_machines_models.VirtualMachineSizeTypes.standard_a0
)
def get_storage_profile(self, os_vhd_uri):
virtual_machines_models = self.compute_client.virtual_machines.models
return virtual_machines_models.StorageProfile(
os_disk=virtual_machines_models.OSDisk(
caching=virtual_machines_models.CachingTypes.none,
create_option=virtual_machines_models.DiskCreateOptionTypes.from_image,
name='test',
vhd=virtual_machines_models.VirtualHardDisk(
uri=os_vhd_uri,
),
),
)
def get_network_profile(self, network_interface_id):
virtual_machines_models = self.compute_client.virtual_machines.models
return virtual_machines_models.NetworkProfile(
network_interfaces=[
virtual_machines_models.NetworkInterfaceReference(
id=network_interface_id,
),
],
)
def get_vhd_uri(self, storage_name, vhd_name):
return 'https://{0}.blob.core.windows.net/vhds/{1}.vhd'.format(
storage_name,
vhd_name,
)
@ResourceGroupPreparer()
def test_virtual_machines_operations(self, resource_group, location):
virtual_machines_models = self.compute_client.virtual_machines.models
names = self.get_resource_names('pyvmir')
os_vhd_uri = self.get_vhd_uri(names.storage, 'osdisk')
if not self.is_playback():
self.create_storage_account(resource_group.name, location, names.storage)
subnet = self.create_virtual_network(resource_group.name, location, names.network, names.subnet)
nic_id = self.create_network_interface(resource_group.name, location, names.nic, subnet)
else:
nic_id = ("/subscriptions/00000000-0000-0000-0000-000000000000"
"/resourceGroups/test_mgmt_compute_test_virtual_machines_operations122014cf"
"/providers/Microsoft.Network/networkInterfaces/pyvmirnic122014cf")
storage_profile = self.get_storage_profile(os_vhd_uri)
storage_profile.image_reference = virtual_machines_models.ImageReference(
publisher='Canonical',
offer='UbuntuServer',
sku='16.04.0-LTS',
version='latest'
)
params_create = virtual_machines_models.VirtualMachine(
location=location,
os_profile=self.get_os_profile(resource_group.name),
hardware_profile=self.get_hardware_profile(),
network_profile=self.get_network_profile(nic_id),
storage_profile=storage_profile,
)
# Create VM test
result_create = self.compute_client.virtual_machines.create_or_update(
resource_group.name,
names.vm,
params_create,
)
vm_result = result_create.result()
self.assertEqual(vm_result.name, names.vm)
# Get by name
result_get = self.compute_client.virtual_machines.get(
resource_group.name,
names.vm
)
self.assertEqual(result_get.name, names.vm)
self.assertIsNone(result_get.instance_view)
# Get instanceView
result_iv = self.compute_client.virtual_machines.get(
resource_group.name,
names.vm,
expand=virtual_machines_models.InstanceViewTypes.instance_view
)
self.assertTrue(result_iv.instance_view)
# Deallocate
async_vm_deallocate = self.compute_client.virtual_machines.deallocate(resource_group.name, names.vm)
async_vm_deallocate.wait()
# Start VM
async_vm_start =self.compute_client.virtual_machines.start(resource_group.name, names.vm)
async_vm_start.wait()
# Restart VM
async_vm_restart = self.compute_client.virtual_machines.restart(resource_group.name, names.vm)
async_vm_restart.wait()
# Stop VM
async_vm_stop = self.compute_client.virtual_machines.power_off(resource_group.name, names.vm)
async_vm_stop.wait()
# List in resouce group
vms_rg = list(self.compute_client.virtual_machines.list(resource_group.name))
self.assertEqual(len(vms_rg), 1)
# Delete
async_vm_delete = self.compute_client.virtual_machines.delete(resource_group.name, names.vm)
async_vm_delete.wait()
@ResourceGroupPreparer()
def test_virtual_machine_capture(self, resource_group, location):
virtual_machines_models = self.compute_client.virtual_machines.models
names = self.get_resource_names('pyvmir')
os_vhd_uri = self.get_vhd_uri(names.storage, 'osdisk')
if not self.is_playback():
self.create_storage_account(resource_group.name, location, names.storage)
subnet = self.create_virtual_network(resource_group.name, location, names.network, names.subnet)
nic_id = self.create_network_interface(resource_group.name, location, names.nic, subnet)
else:
nic_id = ("/subscriptions/00000000-0000-0000-0000-000000000000"
"/resourceGroups/test_mgmt_compute_test_virtual_machine_capturec0f9130c"
"/providers/Microsoft.Network/networkInterfaces/pyvmirnicc0f9130c")
storage_profile = self.get_storage_profile(os_vhd_uri)
storage_profile.image_reference = virtual_machines_models.ImageReference(
publisher='Canonical',
offer='UbuntuServer',
sku='16.04.0-LTS',
version='latest'
)
params_create = virtual_machines_models.VirtualMachine(
location=location,
os_profile=self.get_os_profile(resource_group.name),
hardware_profile=self.get_hardware_profile(),
network_profile=self.get_network_profile(nic_id),
storage_profile=storage_profile,
)
# Create VM test
result_create = self.compute_client.virtual_machines.create_or_update(
resource_group.name,
names.vm,
params_create,
)
vm_result = result_create.result()
self.assertEqual(vm_result.name, names.vm)
# Deallocate
async_vm_deallocate = self.compute_client.virtual_machines.deallocate(resource_group.name, names.vm)
async_vm_deallocate.wait()
# Generalize (possible because deallocated)
self.compute_client.virtual_machines.generalize(resource_group.name, names.vm)
# Capture VM (VM must be generalized before)
async_capture = self.compute_client.virtual_machines.capture(
resource_group.name,
names.vm,
{
"vhd_prefix":"pslib",
"destination_container_name":"dest",
"overwrite_vhds": True
}
)
capture_result = async_capture.result()
assert capture_result.content_version == "1.0.0.0"
@ResourceGroupPreparer()
def test_vm_extensions(self, resource_group, location):
#WARNING: this test may take 40 mins to complete against live server
virtual_machines_models = self.compute_client.virtual_machines.models
names = self.get_resource_names('pyvmext')
os_vhd_uri = self.get_vhd_uri(names.storage, 'osdisk')
ext_name = names.vm + 'AccessAgent'
if not self.is_playback():
self.create_storage_account(resource_group.name, location, names.storage)
subnet = self.create_virtual_network(resource_group.name, location, names.network, names.subnet)
nic_id = self.create_network_interface(resource_group.name, location, names.nic, subnet)
else:
nic_id = ("/subscriptions/00000000-0000-0000-0000-000000000000"
"/resourceGroups/test_mgmt_compute_test_vm_extensions15a60f10"
"/providers/Microsoft.Network/networkInterfaces/pyvmextnic15a60f10")
storage_profile = self.get_storage_profile(os_vhd_uri)
storage_profile.image_reference = virtual_machines_models.ImageReference(
publisher='MicrosoftWindowsServer',
offer='WindowsServer',
sku='2016-Datacenter',
version='latest'
)
params_create = virtual_machines_models.VirtualMachine(
location=location,
os_profile=self.get_os_profile(resource_group.name),
hardware_profile=self.get_hardware_profile(),
network_profile=self.get_network_profile(nic_id),
storage_profile=storage_profile,
)
result_create = self.compute_client.virtual_machines.create_or_update(
resource_group.name,
names.vm,
params_create,
)
result_create.wait()
params_create = virtual_machines_models.VirtualMachineExtension(
location=location,
publisher='Microsoft.Compute',
virtual_machine_extension_type='VMAccessAgent',
type_handler_version='2.0',
auto_upgrade_minor_version=True,
settings={},
protected_settings={},
)
result_create = self.compute_client.virtual_machine_extensions.create_or_update(
resource_group.name,
names.vm,
ext_name,
params_create,
)
result_create.wait()
result_get = self.compute_client.virtual_machine_extensions.get(
resource_group.name,
names.vm,
ext_name,
)
self.assertEqual(result_get.name, ext_name)
result_delete = self.compute_client.virtual_machine_extensions.delete(
resource_group.name,
names.vm,
ext_name,
)
result_delete.wait()
def test_vm_extension_images(self):
result_list_pub = self.compute_client.virtual_machine_images.list_publishers(
self.region,
)
for res in result_list_pub:
publisher_name = res.name
result_list = self.compute_client.virtual_machine_extension_images.list_types(
self.region,
publisher_name,
)
for res in result_list:
type_name = res.name
result_list_versions = self.compute_client.virtual_machine_extension_images.list_versions(
self.region,
publisher_name,
type_name,
)
for res in result_list_versions:
version = res.name
result_get = self.compute_client.virtual_machine_extension_images.get(
self.region,
publisher_name,
type_name,
version,
)
return
def test_vm_images(self):
location = "westus"
result_list_pub = self.compute_client.virtual_machine_images.list_publishers(
location
)
self.assertGreater(len(result_list_pub), 0)
for res in result_list_pub:
publisher_name = res.name
result_list_offers = self.compute_client.virtual_machine_images.list_offers(
location,
publisher_name
)
for res in result_list_offers:
offer = res.name
result_list_skus = self.compute_client.virtual_machine_images.list_skus(
location,
publisher_name,
offer
)
for res in result_list_skus:
skus = res.name
result_list = self.compute_client.virtual_machine_images.list(
location,
publisher_name,
offer,
skus
)
for res in result_list:
version = res.name
result_get = self.compute_client.virtual_machine_images.get(
location,
publisher_name,
offer,
skus,
version
)
print('PUBLISHER: {0}, OFFER: {1}, SKUS: {2}, VERSION: {3}'.format(
publisher_name,
offer,
skus,
version,
))
return
@ResourceGroupPreparer()
def test_availability_sets(self, resource_group, location):
availability_sets_models = self.compute_client.availability_sets.models
availability_set_name = self.get_resource_name('pyarmset')
params_create = availability_sets_models.AvailabilitySet(
location=location,
platform_fault_domain_count=2,
platform_update_domain_count=4,
tags={
'tag1': 'value1',
},
)
result_create = self.compute_client.availability_sets.create_or_update(
resource_group.name,
availability_set_name,
params_create,
)
self.assertEqual(result_create.name, availability_set_name)
result_get = self.compute_client.availability_sets.get(
resource_group.name,
availability_set_name,
)
self.assertEqual(result_get.name, availability_set_name)
self.assertEqual(
result_get.platform_fault_domain_count,
params_create.platform_fault_domain_count,
)
self.assertEqual(
result_get.platform_update_domain_count,
params_create.platform_update_domain_count,
)
result_list = self.compute_client.availability_sets.list(
resource_group.name,
)
result_list = list(result_list)
result_list_sizes = self.compute_client.availability_sets.list_available_sizes(
resource_group.name,
availability_set_name,
)
result_list_sizes = list(result_list_sizes)
self.compute_client.availability_sets.delete(
resource_group.name,
availability_set_name,
)
def test_usage(self):
location = "westus"
usages = list(self.compute_client.usage.list(location))
self.assertGreater(len(usages), 0)
def test_vm_sizes(self):
location = "westus"
virtual_machine_sizes = list(self.compute_client.virtual_machine_sizes.list(location))
self.assertGreater(len(virtual_machine_sizes), 0)
def test_run_command(self):
# FIXME, test unfinished
run_commands_models = self.compute_client.virtual_machines.models
run_command_parameters = run_commands_models.RunCommandInput(
command_id="RunShellScript",
script=[
'echo $1 $2'
],
parameters=[
run_commands_models.RunCommandInputParameter(name="arg1", value="hello"),
run_commands_models.RunCommandInputParameter(name="arg2", value="world"),
]
)
run_command_parameters = {
'command_id': 'RunShellScript',
'script': [
'echo $arg1'
],
'parameters': [
{'name':"arg1", 'value':"hello world"}
]
}
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 37.790146 | 194 | 0.612874 |
793eb15e446e70f77a79cca2e5f6d20af52000ec | 4,402 | py | Python | src/train.py | ShahRutav/dmcontrol-generalization-benchmark | 99365bd1d71581a67a9e7626c5ac676450c158a9 | [
"MIT"
] | 75 | 2020-11-30T07:59:09.000Z | 2022-03-30T21:26:53.000Z | src/train.py | ShahRutav/dmcontrol-generalization-benchmark | 99365bd1d71581a67a9e7626c5ac676450c158a9 | [
"MIT"
] | 12 | 2021-02-02T09:03:44.000Z | 2022-03-29T19:17:39.000Z | src/train.py | ShahRutav/dmcontrol-generalization-benchmark | 99365bd1d71581a67a9e7626c5ac676450c158a9 | [
"MIT"
] | 18 | 2020-12-04T03:48:34.000Z | 2022-03-21T15:10:08.000Z | import torch
import os
import numpy as np
import gym
import utils
import time
from arguments import parse_args
from env.wrappers import make_env
from algorithms.factory import make_agent
from logger import Logger
from video import VideoRecorder
def evaluate(env, agent, video, num_episodes, L, step, test_env=False):
episode_rewards = []
for i in range(num_episodes):
obs = env.reset()
video.init(enabled=(i==0))
done = False
episode_reward = 0
while not done:
with utils.eval_mode(agent):
action = agent.select_action(obs)
obs, reward, done, _ = env.step(action)
video.record(env)
episode_reward += reward
if L is not None:
_test_env = '_test_env' if test_env else ''
video.save(f'{step}{_test_env}.mp4')
L.log(f'eval/episode_reward{_test_env}', episode_reward, step)
episode_rewards.append(episode_reward)
return np.mean(episode_rewards)
def main(args):
# Set seed
utils.set_seed_everywhere(args.seed)
# Initialize environments
gym.logger.set_level(40)
env = make_env(
domain_name=args.domain_name,
task_name=args.task_name,
seed=args.seed,
episode_length=args.episode_length,
action_repeat=args.action_repeat,
image_size=args.image_size,
mode='train'
)
test_env = make_env(
domain_name=args.domain_name,
task_name=args.task_name,
seed=args.seed+42,
episode_length=args.episode_length,
action_repeat=args.action_repeat,
image_size=args.image_size,
mode=args.eval_mode,
intensity=args.distracting_cs_intensity
) if args.eval_mode is not None else None
# Create working directory
work_dir = os.path.join(args.log_dir, args.domain_name+'_'+args.task_name, args.algorithm, str(args.seed))
print('Working directory:', work_dir)
assert not os.path.exists(os.path.join(work_dir, 'train.log')), 'specified working directory already exists'
utils.make_dir(work_dir)
model_dir = utils.make_dir(os.path.join(work_dir, 'model'))
video_dir = utils.make_dir(os.path.join(work_dir, 'video'))
video = VideoRecorder(video_dir if args.save_video else None, height=448, width=448)
utils.write_info(args, os.path.join(work_dir, 'info.log'))
# Prepare agent
assert torch.cuda.is_available(), 'must have cuda enabled'
replay_buffer = utils.ReplayBuffer(
obs_shape=env.observation_space.shape,
action_shape=env.action_space.shape,
capacity=args.train_steps,
batch_size=args.batch_size
)
cropped_obs_shape = (3*args.frame_stack, args.image_crop_size, args.image_crop_size)
print('Observations:', env.observation_space.shape)
print('Cropped observations:', cropped_obs_shape)
agent = make_agent(
obs_shape=cropped_obs_shape,
action_shape=env.action_space.shape,
args=args
)
start_step, episode, episode_reward, done = 0, 0, 0, True
L = Logger(work_dir)
start_time = time.time()
for step in range(start_step, args.train_steps+1):
if done:
if step > start_step:
L.log('train/duration', time.time() - start_time, step)
start_time = time.time()
L.dump(step)
# Evaluate agent periodically
if step % args.eval_freq == 0:
print('Evaluating:', work_dir)
L.log('eval/episode', episode, step)
evaluate(env, agent, video, args.eval_episodes, L, step)
if test_env is not None:
evaluate(test_env, agent, video, args.eval_episodes, L, step, test_env=True)
L.dump(step)
# Save agent periodically
if step > start_step and step % args.save_freq == 0:
torch.save(agent, os.path.join(model_dir, f'{step}.pt'))
L.log('train/episode_reward', episode_reward, step)
obs = env.reset()
done = False
episode_reward = 0
episode_step = 0
episode += 1
L.log('train/episode', episode, step)
# Sample action for data collection
if step < args.init_steps:
action = env.action_space.sample()
else:
with utils.eval_mode(agent):
action = agent.sample_action(obs)
# Run training update
if step >= args.init_steps:
num_updates = args.init_steps if step == args.init_steps else 1
for _ in range(num_updates):
agent.update(replay_buffer, L, step)
# Take step
next_obs, reward, done, _ = env.step(action)
done_bool = 0 if episode_step + 1 == env._max_episode_steps else float(done)
replay_buffer.add(obs, action, reward, next_obs, done_bool)
episode_reward += reward
obs = next_obs
episode_step += 1
print('Completed training for', work_dir)
if __name__ == '__main__':
args = parse_args()
main(args)
| 29.152318 | 109 | 0.732849 |
793eb251385fd93206d930f9b70651b5fe101398 | 2,384 | py | Python | wenker/location_updater/states.py | CitizenScienceCenter/c3s_tools | 36479905ffbeb2bdabbc2be145dfe4fe7258ef5d | [
"Apache-2.0"
] | null | null | null | wenker/location_updater/states.py | CitizenScienceCenter/c3s_tools | 36479905ffbeb2bdabbc2be145dfe4fe7258ef5d | [
"Apache-2.0"
] | 1 | 2022-03-22T22:11:21.000Z | 2022-03-22T22:11:21.000Z | wenker/location_updater/states.py | CitizenScienceCenter/c3s_tools | 36479905ffbeb2bdabbc2be145dfe4fe7258ef5d | [
"Apache-2.0"
] | null | null | null | import psycopg2
import csv, json
BOGEN = '/home/encima/Nextcloud/Documents/uzh/wenkerdaten.csv'
DB_STRING = 'postgresql://pybossa:testing@localhost:5430/cs'
UPDATE_STRING = "UPDATE tasks SET info = info || '(%s)' WHERE id = %s;"
UPDATE = "UPDATE tasks SET info = info || '{}' WHERE id = '{}';"
SELECT_STRING = "SELECT * FROM tasks WHERE info ->> 'path'::text like '%{}%'::text"
cantons = {
'BE': 'Bern',
'TG': 'Thurgau',
'LU': 'Luzern',
'SO': 'Solothurn',
'ZH': 'Zürich',
'UR': 'Uri',
'SZ': 'Schwyz',
'OW': 'Obwalden',
'NW': 'Nidwalden',
'GL': 'Glarus',
'ZG': 'Zug',
    'FR': 'Fribourg',
'BS': 'Basel-Stadt',
'BL': 'Basel-Landschaft',
'SH': 'Schaffhausen',
    'AR': 'Appenzell Ausserrhoden',
    'AI': 'Appenzell Innerrhoden',
'SG': 'St. Gallen',
'GR': 'Grisons',
'AG': 'Aargau',
'TI': 'Ticino',
'VD': 'Vaud',
'VS': 'Valais',
'NE': 'Neuchâtel',
'GE': 'Geneva',
'JU': 'Jura'
}
class LocationUpdate:
def __init__(self):
self.conn = psycopg2.connect(DB_STRING)
def find(self, csv_file, place):
for row in csv.reader(csv_file):
# print(ascii(row[2]), ascii(place))
if ascii(place.lower()) in ascii(row[2].lower()):
print(row[8])
def process(self):
cursor = self.conn.cursor()
cursor.execute("select id, info from tasks where info->>'SchoolState' = '';")
res = cursor.fetchall()
count = 1
with open(BOGEN) as sheets:
wenker_map = []
states = []
places = []
for row in csv.reader(sheets):
wenker_map.append({'state': row[8], 'place': row[2]})
states.append(row[8])
places.append(ascii(row[2].lower()))
for r in res:
place = ascii(r[1]['title'].lower())
if place in places:
idx = places.index(place)
if len(states[idx]) > 0:
canton = cantons[states[idx]]
info = {
'SchoolState': canton
}
# print(cursor.execute(UPDATE_STRING, (json.dumps(info), r[0])))
print(UPDATE.format((json.dumps(info)), r[0]))
cursor.close()
if __name__ == "__main__":
i = LocationUpdate()
i.process()
| 27.402299 | 85 | 0.50797 |
793eb2e88583657e20377067053657265a9b8905 | 103 | py | Python | tests/test_basic.py | neozenith/invoke-databricks-wheel-tasks | 4f13d00c826f5f46652443dbfd4d35c19b869503 | [
"MIT"
] | null | null | null | tests/test_basic.py | neozenith/invoke-databricks-wheel-tasks | 4f13d00c826f5f46652443dbfd4d35c19b869503 | [
"MIT"
] | null | null | null | tests/test_basic.py | neozenith/invoke-databricks-wheel-tasks | 4f13d00c826f5f46652443dbfd4d35c19b869503 | [
"MIT"
] | null | null | null | def test_placholder():
"""Placeholder test to pass CI until creating actual test suite."""
...
| 25.75 | 71 | 0.669903 |
793eb361e30c185e1bf8333d5957c3f5bce04999 | 472 | py | Python | python-ds-practice/14_compact/compact.py | MostFunGuy/SpringboardProjectsPublic | bbda3ba26ecf8a09e62df81583122cae83acc1e6 | [
"MIT"
] | null | null | null | python-ds-practice/14_compact/compact.py | MostFunGuy/SpringboardProjectsPublic | bbda3ba26ecf8a09e62df81583122cae83acc1e6 | [
"MIT"
] | null | null | null | python-ds-practice/14_compact/compact.py | MostFunGuy/SpringboardProjectsPublic | bbda3ba26ecf8a09e62df81583122cae83acc1e6 | [
"MIT"
] | null | null | null | def compact(lst):
"""Return a copy of lst with non-true elements removed.
>>> compact([0, 1, 2, '', [], False, (), None, 'All done'])
[1, 2, 'All done']
"""
return_list = []
for item in lst:
if item:
return_list.append(item)
return return_list
print(F"compact.py: compact([0, 1, 2, '', [], False, (), None, 'All done']) = [1, 2, 'All done'] = {compact([0, 1, 2, '', [], False, (), None, 'All done'])}") | 27.764706 | 158 | 0.493644 |
793eb542cb6384a6e7a5f7bce47d5f6d0a0b9d97 | 1,757 | py | Python | test/record/parser/test_response_whois_nic_ck_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_whois_nic_ck_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_whois_nic_ck_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null |
# This file is autogenerated. Do not edit it manually.
# If you want to change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.ck/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicCkStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.ck/status_registered.txt"
host = "whois.nic.ck"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 2)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns1.google.com")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns2.google.com")
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.created_on)
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2011-10-04'))
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2013-11-01'))
| 34.45098 | 87 | 0.695504 |
793eb54e2a86c814466b30183a60e9cd18f7e2c6 | 1,988 | py | Python | python/40.combination-sum-ii.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 10 | 2019-09-15T00:23:57.000Z | 2022-01-05T12:53:42.000Z | python/40.combination-sum-ii.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 3 | 2021-06-30T00:39:26.000Z | 2021-08-01T07:13:59.000Z | python/40.combination-sum-ii.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 6 | 2020-02-08T02:55:22.000Z | 2022-01-02T22:48:18.000Z | #
# @lc app=leetcode id=40 lang=python3
#
# [40] Combination Sum II
#
# https://leetcode.com/problems/combination-sum-ii/description/
#
# algorithms
# Medium (49.91%)
# Likes: 2542
# Dislikes: 86
# Total Accepted: 392.7K
# Total Submissions: 782K
# Testcase Example: '[10,1,2,7,6,1,5]\n8'
#
# Given a collection of candidate numbers (candidates) and a target number
# (target), find all unique combinations in candidates where the candidate
# numbers sum to target.
#
# Each number in candidates may only be used once in the combination.
#
# Note: The solution set must not contain duplicate combinations.
#
#
# Example 1:
#
#
# Input: candidates = [10,1,2,7,6,1,5], target = 8
# Output:
# [
# [1,1,6],
# [1,2,5],
# [1,7],
# [2,6]
# ]
#
#
# Example 2:
#
#
# Input: candidates = [2,5,2,1,2], target = 5
# Output:
# [
# [1,2,2],
# [5]
# ]
#
#
#
# Constraints:
#
#
# 1 <= candidates.length <= 100
# 1 <= candidates[i] <= 50
# 1 <= target <= 30
#
#
#
# @lc code=start
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
if not candidates or len(candidates) == 0:
return []
res = []
visited = [0 for _ in range(len(candidates))]
candidates.sort()
self._dfs(candidates, target, [], res, 0, visited)
return res
def _dfs(self, candidates, target, curr, res, start, visited):
if target < 0:
return
if target == 0:
res.append(curr[:])
return
for i in range(start, len(candidates)):
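            # Skip duplicates: after sorting, if this candidate equals the previous one
            # and that previous copy is not part of the current path, re-using it here
            # would only rebuild an already-explored combination.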
if i != start and candidates[i] == candidates[i - 1] and visited[i - 1] == 0:
continue
if candidates[i] > target:
continue
curr.append(candidates[i])
visited[i] = 1
self._dfs(candidates, target - candidates[i], curr, res, i + 1, visited)
curr.pop()
visited[i] = 0
# @lc code=end
| 20.926316 | 89 | 0.561871 |
793eb57377e9b4cf44881d9d01ed94107f7f710e | 7,177 | py | Python | code/ssw_composite_cmam_optimized2-wss_hotspots.py | kuchaale/grl_2020 | ddeca6955e6435c38d18be6ebca874fa037142b3 | [
"MIT"
] | null | null | null | code/ssw_composite_cmam_optimized2-wss_hotspots.py | kuchaale/grl_2020 | ddeca6955e6435c38d18be6ebca874fa037142b3 | [
"MIT"
] | null | null | null | code/ssw_composite_cmam_optimized2-wss_hotspots.py | kuchaale/grl_2020 | ddeca6955e6435c38d18be6ebca874fa037142b3 | [
"MIT"
] | null | null | null | import xarray as xr
import sys
import pandas as pd
import glob
from scipy import stats
import xarray.ufuncs as xrf
from itertools import product
from cftime import DatetimeNoLeap
#from dask.distributed import Client
#client = Client(set_as_default = True)
def open_date_file(file_path):
df = pd.read_csv(file_path, index_col=0, parse_dates=True)
df['BeginDate'] = df.BeginDate.apply(lambda t: pd.to_datetime(t, format='%Y-%m-%d'))
return df
def ttest_1samp(a, popmean, dim):
"""
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`
Inspired here: https://github.com/scipy/scipy/blob/v0.19.0/scipy/stats/stats.py#L3769-L3846
Parameters
----------
a : xarray
sample observation
popmean : float or array_like
expected value in null hypothesis, if array_like than it must have the
same shape as `a` excluding the axis dimension
dim : string
dimension along which to compute test
Returns
-------
mean : xarray
averaged sample along which dimension t-test was computed
pvalue : xarray
two-tailed p-value
"""
n = a[dim].shape[0]
df = n - 1
a_mean = a.mean(dim)
d = a_mean - popmean
v = a.var(dim, ddof=1)
denom = xrf.sqrt(v / float(n))
    t = d / denom
prob = stats.distributions.t.sf(xrf.fabs(t), df) * 2
prob_xa = xr.DataArray(prob, coords=a_mean.coords, name = a_mean.name)
return a_mean, prob_xa
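# Illustrative usage of ttest_1samp (hypothetical data, not part of the original script):
#   comp is an xarray.DataArray of anomalies with a "time" dimension
#   comp_mean, pvals = ttest_1samp(comp, 0.0, dim='time')
#   significant = comp_mean.where(pvals < 0.05)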
var = sys.argv[1]
try:
w_clim = sys.argv[2]
w_clim = '_'+w_clim
print('climatology wo SSW')
except IndexError:
w_clim = ''
print('climatology w SSW')
what = sys.argv[3]
what_ls = ['anomalies', 'absolute', 'percentages']
if what not in what_ls:
raise ValueError('could not find {0} within [{1},{2},{3}]'.format(what, *what_ls))
if var[:3].lower() in ['lwa']:
lev_sys_fo = 'log_coord/'
lev_sys_fi = '_logH'
elif var.lower() == 'acceldivmrho':
lev_sys_fo = ''
lev_sys_fi = '_6hrPlev'
else:
lev_sys_fo = ''
lev_sys_fi = ''
if var in ['lwatend', 'TEM-res2', 'TEM-res', 'TEM-res3', 'TEM-res3-new','TEM-res2-new', 'sink']:
zarr = True
#hourly_index = pd.date_range('1979-01-01-06', '2010-12-31-18', freq='6H')
else:
zarr = False
#hourly_index = pd.date_range('1979-01-01', '2010-12-31-18', freq='6H')
signif = False
DJF_bool = True
cesta = '/mnt/4data/CMAM/0A.daily/'
max_lag = 10
line_width = 5
ch_lev = 70
timescale = int(sys.argv[4])
type_ls = ['himalayas', 'westamer', 'eastasia']
if zarr:
infiles = '{}{}{}_197901-201012.zarr'.format(cesta,var,lev_sys_fi)
print(infiles)
else:
files_path = cesta+var+'/'+lev_sys_fo+var+'_*_CMAM_CMAM30-SD_r1i1p1_??????????-??????????.nc'
print('opening '+files_path)
infiles = sorted(glob.glob(files_path))
print(len(infiles))
def open_infile(infiles):
if isinstance(infiles, str) & ('zarr' in infiles):
ds = xr.open_zarr(infiles)
else:
ds = xr.open_mfdataset(infiles, concat_dim = 'time', parallel = True, combine='nested')
return ds
try:
DJF_bool = sys.argv[5]
print('{} only'.format(DJF_bool))
except IndexError:
DJF_bool = ''
with open_infile(infiles) as ds:
#datetimeindex = ds.indexes['time'].to_datetimeindex()
#ds['time'] = datetimeindex
with xr.open_dataset( cesta+var+'/'+lev_sys_fo+var+'_climatology'+w_clim+'.nc') as clim:
print('composites construction')
if len(DJF_bool) > 0:
w_clim += '_{}only'.format(DJF_bool)
for di, ssw_type in enumerate(type_ls):
print("".ljust(line_width) + ssw_type)
df_dates = open_date_file('accelogw_{}_hotspot@{}hPa_{}dayts_indexes.csv'.format(ssw_type, ch_lev, timescale))
#sys.exit()
xa_ls = []
pv_ls = []
for il, lag in enumerate(range(-max_lag,max_lag+1)):
#print("".ljust(line_width*3)+str(lag)+' lag')
dates = df_dates.set_index('BeginDate')
dates = dates.index +pd.Timedelta(str(lag)+' days')
#filter lags withi 29th February
#dates = dates[dates.apply(lambda x: not (x.day in [29] and x.month in [2]))]
dates = dates[~((dates.month == 2) & (dates.day == 29))]
#filter dates shited to year out of range
#dates = dates[dates.apply(lambda x: not (x.year in [1978,2011]))]
dates = dates[(dates.year != 2011) & (dates.year != 1978)]
if DJF_bool == 'DJF':
dates = dates[(dates.month == 12) | (dates.month == 1) | (dates.month == 2)]
elif DJF_bool == 'JF':
dates = dates[(dates.month == 1) | (dates.month == 2)]
elif DJF_bool == 'J':
dates = dates[(dates.month == 1)]
elif DJF_bool == 'F':
dates = dates[(dates.month == 2)]
elif DJF_bool == 'D':
dates = dates[(dates.month == 12)]
#choose all values within particular day
#hourly_index_temp = hourly_index[hourly_index.floor('D').isin(dates)]
hourly_index_temp = [DatetimeNoLeap(*x, hour) for x, hour in product(zip(dates.year, dates.month, dates.day), range(0,24,6))]
if var in ['lwatend']:
ds['time'] = xr.cftime_range('1979-01-01T06', '2010-12-31T18', freq='6H', calendar='noleap')
#print(lag, dates.shape,)
#print(hourly_index_temp)
ds_sel = ds.sel(time = hourly_index_temp[:])
print(ds_sel[var].shape)
if what == 'percentages':
comp = (ds_sel[var].groupby('time.month') - clim[var]).groupby('time.month')/clim[var]*100.
elif what == 'absolute':
comp = ds_sel[var]
elif what == 'anomalies':
comp = ds_sel[var].groupby('time.month') - clim[var]
if signif:
comp_m, pvalues = ttest_1samp(comp, 0.0, dim = 'time') # calculates stat. signaficance using t-test
pv_ls.append(pvalues)
else:
comp_m = comp.mean('time')
xa_ls.append(comp_m)
print("".ljust(line_width*2) + 'concatenation')
xa_comp = xr.concat(xa_ls, dim = 'lag')
xa_ls = []
xa_comp['lag'] = range(-max_lag, max_lag+1)
outfile = "{}composites{}/{}{}_{}_comp_{}_{}days.nc".format(cesta, w_clim, var, lev_sys_fi, what, type_ls[di], timescale)
print("".ljust(line_width*3) + outfile)
xa_comp.to_netcdf(outfile)
if signif:
xa_pval = xr.concat(pv_ls, dim = 'lag')
pv_ls = []
xa_pval['lag'] = range(-max_lag, max_lag+1)
print("".ljust(line_width*2) + 'saving')
xa_pval.to_netcdf(cesta+'composites'+w_clim+'/'+var+lev_sys_fi+'_pvalues_comp_'+type_ls[di]+'.nc')
print('done')
| 36.065327 | 141 | 0.575031 |
793eb57e295884cb272e62a5347f077241a15dc1 | 3,096 | py | Python | AOC2020/4/1.py | Fapannen/Advent-of-Code | 6cdc4a4582d9e3a1761579f42abd01c7ecaf5561 | [
"MIT"
] | null | null | null | AOC2020/4/1.py | Fapannen/Advent-of-Code | 6cdc4a4582d9e3a1761579f42abd01c7ecaf5561 | [
"MIT"
] | null | null | null | AOC2020/4/1.py | Fapannen/Advent-of-Code | 6cdc4a4582d9e3a1761579f42abd01c7ecaf5561 | [
"MIT"
] | null | null | null | with open("input.txt", 'r') as f:
txt = f.read()
passports = txt.split("\n\n")
fix = [psp.replace("\n", " ") for psp in passports]
fields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
count = 0
for passport in fix:
found = True
for field in fields:
if field not in passport:
found = False
break
if found:
count += 1
print(count)
with open("input.txt", 'r') as f:
txt = f.read()
passports = txt.split("\n\n")
fix = [psp.replace("\n", " ") for psp in passports]
fields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
count = 0
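# Part 2: a passport counts only if every required field is present and each value
# passes its rule (year ranges, height units/ranges, hex hair colour, eye-colour
# whitelist, nine-digit passport id), as checked below.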
for passport in fix:
foundf = True
for field in fields:
if field not in passport:
foundf = False
break
if not foundf:
continue
parse = passport.split(" ")
found = True
for entry in parse:
spl = entry.split(":")
if len(spl) == 1:
continue
attribute = spl[0]
value = spl[1]
if attribute == "byr":
if int(value) < 1920 or int(value) > 2002:
found = False
break
elif attribute == "iyr":
if int(value) < 2010 or int(value) > 2020:
found = False
break
elif attribute == "eyr":
if int(value) < 2020 or int(value) > 2030:
found = False
break
elif attribute == "hgt":
if value[-2:] == "cm":
val = int(value[:-2])
if val < 150 or val > 193:
found = False
break
elif value[-2:] == "in":
val = int(value[:-2])
if val < 59 or val > 76:
found = False
break
else:
found = False
break
elif attribute == "hcl":
if len(value) == 7 and value[0] == "#":
for i in range(6):
                    if value[i+1] not in "0123456789abcdef":  # must be a hex digit (0-9, a-f)
found = False
break
else:
found = False
break
elif attribute == "ecl":
if value not in ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]:
found = False
break
elif attribute == "pid":
            if len(value) != 9 or not value.isdigit():  # nine digits, leading zeros allowed
found = False
break
if found:
print(attribute, " : ", value)
if found:
count += 1
print(count)
| 29.769231 | 83 | 0.350129 |
793eb59f6dc96e9aebbb64db0b122c35d5531a80 | 520 | py | Python | examples/ex24_configuration.py | gramaziokohler/robotic_assembly_workshop | 252d9750175061fd7d4746a4701afd42882773a5 | [
"MIT"
] | 19 | 2019-02-08T13:07:49.000Z | 2020-12-21T12:41:51.000Z | examples/ex24_configuration.py | gramaziokohler/robotic_assembly_workshop | 252d9750175061fd7d4746a4701afd42882773a5 | [
"MIT"
] | 11 | 2019-01-31T09:32:11.000Z | 2020-11-06T14:12:21.000Z | examples/ex24_configuration.py | gramaziokohler/robotic_assembly_workshop | 252d9750175061fd7d4746a4701afd42882773a5 | [
"MIT"
] | 8 | 2019-03-05T13:38:27.000Z | 2021-04-13T07:01:52.000Z | from math import pi
from compas.robots import Joint
from compas_fab.robots import Configuration
print(Joint.REVOLUTE)
print(Joint.PRISMATIC)
print(Joint.FIXED)
values = [0] * 6
types = [Joint.REVOLUTE] * 6
config = Configuration(values, types)
config = Configuration([pi / 2, 3., 0.1], [Joint.REVOLUTE, Joint.PRISMATIC, Joint.PLANAR])
config = Configuration.from_revolute_values([pi / 2, 0., 0., pi / 2, pi, 0])
config = Configuration.from_prismatic_and_revolute_values([8.312], [pi / 2, 0., 0., 0., 2 * pi, 0.8])
| 27.368421 | 101 | 0.715385 |
793eb6a72480a21ae64c4431aaac595eaf17e1e2 | 1,694 | py | Python | Meetup.py | sureshmallakuntla/Project3 | 0f90f5beda9b06b49d6279c2093ef580ccdcc66b | [
"MIT"
] | null | null | null | Meetup.py | sureshmallakuntla/Project3 | 0f90f5beda9b06b49d6279c2093ef580ccdcc66b | [
"MIT"
] | null | null | null | Meetup.py | sureshmallakuntla/Project3 | 0f90f5beda9b06b49d6279c2093ef580ccdcc66b | [
"MIT"
] | null | null | null | from __future__ import print_function
import json
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
f_count = 0
# Function to aggregate the city counts to the alreaddy existing value
def city_count(newValue, oldValue):
if oldValue is None:
oldValue = 0
return sum(newValue)+oldValue
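# For example (illustrative): city_count([1, 2], None) returns 3 and city_count([4], 3)
# returns 7, i.e. the counts from the current batch are added to the running total.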
if __name__ == "__main__":
    #Creating the SparkContext since we are submitting from the command line
sc = SparkContext("local[2]",appName="MeetupStreaming")
sc.setLogLevel("WARN")
#Creating the streaming context
ssc = StreamingContext(sc, 2)
ssc.checkpoint("checkpoint")
    #Getting the ZooKeeper quorum (zkQuorum) and topic from the command line
zkQuorum, topic = sys.argv[1:]
#Assigning the created stream to receive messages from kafka meetup
sstream = KafkaUtils.createStream(ssc, zkQuorum, "1", {topic: 1})
rsvps = sstream.map(lambda x: x[1])
rsvps_json = rsvps.map(lambda x: json.loads(x.encode('ascii','ignore')))
    #Filtering only the messages belonging to the U.S.
us_only = rsvps_json.filter(lambda x: x['group']['group_country']=='us')
#Extracting the city name from messages.
city_pair = us_only.map(lambda x: (x['group']['group_city'],1))
#Calling the function to get the city count
city_count= city_pair.updateStateByKey(city_count)
#Storing the output files in a local file system to get the visualization done.
city_count.saveAsTextFiles('file:/home/cloudera/streamData/output')
#rs.pprint()
ssc.start()
ssc.awaitTermination()
| 32.576923 | 84 | 0.689492 |
793eb6ecb03ece9a9076ec213e957a43184e68c6 | 63,690 | py | Python | great_expectations/expectations/expectation.py | rishabh-bhargava/great_expectations | e3ce2d094536a2bc738f92e5686005390d694105 | [
"Apache-2.0"
] | 2 | 2022-01-28T15:51:32.000Z | 2022-02-02T05:07:58.000Z | great_expectations/expectations/expectation.py | rishabh-bhargava/great_expectations | e3ce2d094536a2bc738f92e5686005390d694105 | [
"Apache-2.0"
] | null | null | null | great_expectations/expectations/expectation.py | rishabh-bhargava/great_expectations | e3ce2d094536a2bc738f92e5686005390d694105 | [
"Apache-2.0"
] | null | null | null | import logging
import re
import traceback
from abc import ABC, ABCMeta, abstractmethod
from collections import Counter
from copy import deepcopy
from inspect import isabstract
from typing import Dict, List, Optional, Tuple
import pandas as pd
from great_expectations import __version__ as ge_version
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import (
ExpectationConfiguration,
parse_result_format,
)
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.exceptions import (
GreatExpectationsError,
InvalidExpectationConfigurationError,
InvalidExpectationKwargsError,
)
from great_expectations.expectations.registry import (
_registered_metrics,
_registered_renderers,
get_metric_kwargs,
register_expectation,
register_renderer,
)
from great_expectations.expectations.util import legacy_method_parameters
from great_expectations.self_check.util import (
evaluate_json_test_cfe,
generate_expectation_tests,
)
from great_expectations.validator.validator import Validator
from ..core.util import convert_to_json_serializable, nested_update
from ..execution_engine import ExecutionEngine, PandasExecutionEngine
from ..render.renderer.renderer import renderer
from ..render.types import (
CollapseContent,
RenderedStringTemplateContent,
RenderedTableContent,
)
from ..render.util import num_to_str
from ..util import is_parseable_date
from ..validator.validation_graph import MetricConfiguration
logger = logging.getLogger(__name__)
p1 = re.compile(r"(.)([A-Z][a-z]+)")
p2 = re.compile(r"([a-z0-9])([A-Z])")
def camel_to_snake(name):
name = p1.sub(r"\1_\2", name)
return p2.sub(r"\1_\2", name).lower()
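# For example: camel_to_snake("ExpectColumnToExist") returns "expect_column_to_exist".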
class MetaExpectation(ABCMeta):
"""MetaExpectation registers Expectations as they are defined, adding them to the Expectation registry.
Any class inheriting from Expectation will be registered based on the value of the "expectation_type" class
attribute, or, if that is not set, by snake-casing the name of the class.
"""
def __new__(cls, clsname, bases, attrs):
newclass = super().__new__(cls, clsname, bases, attrs)
if not newclass.is_abstract():
newclass.expectation_type = camel_to_snake(clsname)
register_expectation(newclass)
newclass._register_renderer_functions()
default_kwarg_values = dict()
for base in reversed(bases):
default_kwargs = getattr(base, "default_kwarg_values", dict())
default_kwarg_values = nested_update(default_kwarg_values, default_kwargs)
newclass.default_kwarg_values = nested_update(
default_kwarg_values, attrs.get("default_kwarg_values", dict())
)
return newclass
class Expectation(metaclass=MetaExpectation):
"""Base class for all Expectations.
Expectation classes *must* have the following attributes set:
1. `domain_keys`: a tuple of the *keys* used to determine the domain of the
expectation
2. `success_keys`: a tuple of the *keys* used to determine the success of
the expectation.
In some cases, subclasses of Expectation (such as TableExpectation) can
inherit these properties from their parent class.
They *may* optionally override `runtime_keys` and `default_kwarg_values`, and
may optionally set an explicit value for expectation_type.
1. runtime_keys lists the keys that can be used to control output but will
not affect the actual success value of the expectation (such as result_format).
2. default_kwarg_values is a dictionary that will be used to fill unspecified
kwargs from the Expectation Configuration.
Expectation classes *must* implement the following:
1. `_validate`
2. `get_validation_dependencies`
    In some cases, subclasses of Expectation, such as ColumnMapExpectation, will already
have correct implementations that may simply be inherited.
Additionally, they *may* provide implementations of:
1. `validate_configuration`, which should raise an error if the configuration
will not be usable for the Expectation
    2. Data Docs rendering methods decorated with the @renderer decorator.
"""
version = ge_version
domain_keys = tuple()
success_keys = tuple()
runtime_keys = (
"include_config",
"catch_exceptions",
"result_format",
)
default_kwarg_values = {
"include_config": True,
"catch_exceptions": False,
"result_format": "BASIC",
}
legacy_method_parameters = legacy_method_parameters
def __init__(self, configuration: Optional[ExpectationConfiguration] = None):
if configuration is not None:
self.validate_configuration(configuration)
self._configuration = configuration
@classmethod
def is_abstract(cls):
return isabstract(cls)
@classmethod
def _register_renderer_functions(cls):
expectation_type = camel_to_snake(cls.__name__)
for candidate_renderer_fn_name in dir(cls):
attr_obj = getattr(cls, candidate_renderer_fn_name)
if not hasattr(attr_obj, "_renderer_type"):
continue
register_renderer(
object_name=expectation_type, parent_class=cls, renderer_fn=attr_obj
)
@abstractmethod
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
raise NotImplementedError
@classmethod
@renderer(renderer_type="renderer.prescriptive")
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"styling": {"parent": {"classes": ["alert", "alert-warning"]}},
"string_template": {
"template": "$expectation_type(**$kwargs)",
"params": {
"expectation_type": configuration.expectation_type,
"kwargs": configuration.kwargs,
},
"styling": {
"params": {
"expectation_type": {
"classes": ["badge", "badge-warning"],
}
}
},
},
}
)
]
@classmethod
@renderer(renderer_type="renderer.diagnostic.status_icon")
def _diagnostic_status_icon_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
assert result, "Must provide a result object."
if result.exception_info["raised_exception"]:
return RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$icon",
"params": {"icon": "", "markdown_status_icon": "❗"},
"styling": {
"params": {
"icon": {
"classes": [
"fas",
"fa-exclamation-triangle",
"text-warning",
],
"tag": "i",
}
}
},
},
}
)
if result.success:
return RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$icon",
"params": {"icon": "", "markdown_status_icon": "✅"},
"styling": {
"params": {
"icon": {
"classes": [
"fas",
"fa-check-circle",
"text-success",
],
"tag": "i",
}
}
},
},
"styling": {
"parent": {
"classes": ["hide-succeeded-validation-target-child"]
}
},
}
)
else:
return RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$icon",
"params": {"icon": "", "markdown_status_icon": "❌"},
"styling": {
"params": {
"icon": {
"tag": "i",
"classes": ["fas", "fa-times", "text-danger"],
}
}
},
},
}
)
@classmethod
@renderer(renderer_type="renderer.diagnostic.unexpected_statement")
def _diagnostic_unexpected_statement_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
assert result, "Must provide a result object."
success = result.success
result_dict = result.result
if result.exception_info["raised_exception"]:
exception_message_template_str = (
"\n\n$expectation_type raised an exception:\n$exception_message"
)
exception_message = RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": exception_message_template_str,
"params": {
"expectation_type": result.expectation_config.expectation_type,
"exception_message": result.exception_info[
"exception_message"
],
},
"tag": "strong",
"styling": {
"classes": ["text-danger"],
"params": {
"exception_message": {"tag": "code"},
"expectation_type": {
"classes": ["badge", "badge-danger", "mb-2"]
},
},
},
},
}
)
exception_traceback_collapse = CollapseContent(
**{
"collapse_toggle_link": "Show exception traceback...",
"collapse": [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": result.exception_info[
"exception_traceback"
],
"tag": "code",
},
}
)
],
}
)
return [exception_message, exception_traceback_collapse]
if success or not result_dict.get("unexpected_count"):
return []
else:
unexpected_count = num_to_str(
result_dict["unexpected_count"], use_locale=True, precision=20
)
unexpected_percent = (
num_to_str(result_dict["unexpected_percent"], precision=4) + "%"
)
element_count = num_to_str(
result_dict["element_count"], use_locale=True, precision=20
)
template_str = (
"\n\n$unexpected_count unexpected values found. "
"$unexpected_percent of $element_count total rows."
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": {
"unexpected_count": unexpected_count,
"unexpected_percent": unexpected_percent,
"element_count": element_count,
},
"tag": "strong",
"styling": {"classes": ["text-danger"]},
},
}
)
]
@classmethod
@renderer(renderer_type="renderer.diagnostic.unexpected_table")
def _diagnostic_unexpected_table_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
try:
result_dict = result.result
except KeyError:
return None
if result_dict is None:
return None
if not result_dict.get("partial_unexpected_list") and not result_dict.get(
"partial_unexpected_counts"
):
return None
table_rows = []
if result_dict.get("partial_unexpected_counts"):
# We will check to see whether we have *all* of the unexpected values
# accounted for in our count, and include counts if we do. If we do not,
# we will use this as simply a better (non-repeating) source of
# "sampled" unexpected values
total_count = 0
for unexpected_count_dict in result_dict.get("partial_unexpected_counts"):
value = unexpected_count_dict.get("value")
count = unexpected_count_dict.get("count")
total_count += count
if value is not None and value != "":
table_rows.append([value, count])
elif value == "":
table_rows.append(["EMPTY", count])
else:
table_rows.append(["null", count])
# Check to see if we have *all* of the unexpected values accounted for. If so,
# we show counts. If not, we only show "sampled" unexpected values.
if total_count == result_dict.get("unexpected_count"):
header_row = ["Unexpected Value", "Count"]
else:
header_row = ["Sampled Unexpected Values"]
table_rows = [[row[0]] for row in table_rows]
else:
header_row = ["Sampled Unexpected Values"]
sampled_values_set = set()
for unexpected_value in result_dict.get("partial_unexpected_list"):
if unexpected_value:
string_unexpected_value = str(unexpected_value)
elif unexpected_value == "":
string_unexpected_value = "EMPTY"
else:
string_unexpected_value = "null"
if string_unexpected_value not in sampled_values_set:
table_rows.append([unexpected_value])
sampled_values_set.add(string_unexpected_value)
unexpected_table_content_block = RenderedTableContent(
**{
"content_block_type": "table",
"table": table_rows,
"header_row": header_row,
"styling": {
"body": {"classes": ["table-bordered", "table-sm", "mt-3"]}
},
}
)
return unexpected_table_content_block
@classmethod
@renderer(renderer_type="renderer.diagnostic.observed_value")
def _diagnostic_observed_value_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
result_dict = result.result
if result_dict is None:
return "--"
if result_dict.get("observed_value") is not None:
observed_value = result_dict.get("observed_value")
if isinstance(observed_value, (int, float)) and not isinstance(
observed_value, bool
):
return num_to_str(observed_value, precision=10, use_locale=True)
return str(observed_value)
elif result_dict.get("unexpected_percent") is not None:
return (
num_to_str(result_dict.get("unexpected_percent"), precision=5)
+ "% unexpected"
)
else:
return "--"
@classmethod
def get_allowed_config_keys(cls):
return cls.domain_keys + cls.success_keys + cls.runtime_keys
def metrics_validate(
self,
metrics: Dict,
configuration: Optional[ExpectationConfiguration] = None,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
) -> "ExpectationValidationResult":
if configuration is None:
configuration = self.configuration
provided_metrics = dict()
requested_metrics = self.get_validation_dependencies(
configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)["metrics"]
for name, metric_edge_key in requested_metrics.items():
provided_metrics[name] = metrics[metric_edge_key.id]
return self._build_evr(
self._validate(
configuration=configuration,
metrics=provided_metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
),
configuration,
)
def _build_evr(self, raw_response, configuration):
"""_build_evr is a lightweight convenience wrapper handling cases where an Expectation implementor
fails to return an EVR but returns the necessary components in a dictionary."""
if not isinstance(raw_response, ExpectationValidationResult):
if isinstance(raw_response, dict):
evr = ExpectationValidationResult(**raw_response)
evr.expectation_config = configuration
else:
raise GreatExpectationsError("Unable to build EVR")
else:
evr = raw_response
evr.expectation_config = configuration
return evr
def get_validation_dependencies(
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""Returns the result format and metrics required to validate this Expectation using the provided result format."""
return {
"result_format": parse_result_format(
self.get_runtime_kwargs(
configuration=configuration,
runtime_configuration=runtime_configuration,
).get("result_format")
),
"metrics": dict(),
}
def get_domain_kwargs(
self, configuration: Optional[ExpectationConfiguration] = None
):
if not configuration:
configuration = self.configuration
domain_kwargs = {
key: configuration.kwargs.get(key, self.default_kwarg_values.get(key))
for key in self.domain_keys
}
# Process evaluation parameter dependencies
missing_kwargs = set(self.domain_keys) - set(domain_kwargs.keys())
if missing_kwargs:
raise InvalidExpectationKwargsError(
f"Missing domain kwargs: {list(missing_kwargs)}"
)
return domain_kwargs
def get_success_kwargs(
self, configuration: Optional[ExpectationConfiguration] = None
):
if not configuration:
configuration = self.configuration
domain_kwargs = self.get_domain_kwargs(configuration)
success_kwargs = {
key: configuration.kwargs.get(key, self.default_kwarg_values.get(key))
for key in self.success_keys
}
success_kwargs.update(domain_kwargs)
return success_kwargs
def get_runtime_kwargs(
self,
configuration: Optional[ExpectationConfiguration] = None,
runtime_configuration: dict = None,
):
if not configuration:
configuration = self.configuration
configuration = deepcopy(configuration)
if runtime_configuration:
configuration.kwargs.update(runtime_configuration)
success_kwargs = self.get_success_kwargs(configuration)
runtime_kwargs = {
key: configuration.kwargs.get(key, self.default_kwarg_values.get(key))
for key in self.runtime_keys
}
runtime_kwargs.update(success_kwargs)
runtime_kwargs["result_format"] = parse_result_format(
runtime_kwargs["result_format"]
)
return runtime_kwargs
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
if configuration is None:
configuration = self.configuration
try:
assert (
configuration.expectation_type == self.expectation_type
), f"expectation configuration type {configuration.expectation_type} does not match expectation type {self.expectation_type}"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
def validate(
self,
validator: "Validator",
configuration: Optional[ExpectationConfiguration] = None,
evaluation_parameters=None,
interactive_evaluation=True,
data_context=None,
runtime_configuration=None,
):
if configuration is None:
configuration = self.configuration
configuration.process_evaluation_parameters(
evaluation_parameters, interactive_evaluation, data_context
)
evr = validator.graph_validate(
configurations=[configuration],
runtime_configuration=runtime_configuration,
)[0]
return evr
@property
def configuration(self):
if self._configuration is None:
raise InvalidExpectationConfigurationError(
"cannot access configuration: expectation has not yet been configured"
)
return self._configuration
@classmethod
def build_configuration(cls, *args, **kwargs):
# Combine all arguments into a single new "all_args" dictionary to name positional parameters
all_args = dict(zip(cls.validation_kwargs, args))
all_args.update(kwargs)
# Unpack display parameters; remove them from all_args if appropriate
if "include_config" in kwargs:
include_config = kwargs["include_config"]
del all_args["include_config"]
else:
include_config = cls.default_expectation_args["include_config"]
if "catch_exceptions" in kwargs:
catch_exceptions = kwargs["catch_exceptions"]
del all_args["catch_exceptions"]
else:
catch_exceptions = cls.default_expectation_args["catch_exceptions"]
if "result_format" in kwargs:
result_format = kwargs["result_format"]
else:
result_format = cls.default_expectation_args["result_format"]
# Extract the meta object for use as a top-level expectation_config holder
if "meta" in kwargs:
meta = kwargs["meta"]
del all_args["meta"]
else:
meta = None
# Construct the expectation_config object
return ExpectationConfiguration(
expectation_type=cls.expectation_type,
kwargs=convert_to_json_serializable(deepcopy(all_args)),
meta=meta,
)
def run_diagnostics(self, pretty_print=True):
"""
Produce a diagnostic report about this expectation.
        The current uses for this method's output are
        using the JSON structure to populate the Public Expectation Gallery
        and enabling a fast dev loop for developing new expectations, where
        contributors can quickly check the completeness of their expectations.
The content of the report:
* name and description
* "library metadata", such as the GitHub usernames of the expectation's authors
* the execution engines the expectation is implemented for
* the implemented renderers
* tests in "examples" member variable
* the tests are executed against the execution engines for which the expectation
is implemented and the output of the test runs is included in the report.
        At least one test case with include_in_gallery=True must be present in the examples to
        produce the metrics, renderers and execution engines parts of the report. This is because
        get_validation_dependencies requires an expectation_config as an argument.
        If errors are encountered in the process of running the diagnostics, they are assumed to be due to
        incompleteness of the Expectation's implementation (e.g., declaring a dependency on Metrics
        that do not exist). These errors are added under the "diagnostics_report" key in the report.
        :param pretty_print: TODO: this argument is not currently used. The intent is to return
        a well-formatted and easily readable text representation instead of the dictionary when the
        argument is set to True
:return: a dictionary view of the report
"""
camel_name = self.__class__.__name__
snake_name = camel_to_snake(self.__class__.__name__)
docstring, short_description = self._get_docstring_and_short_description()
library_metadata = self._get_library_metadata()
report_obj = {
"description": {
"camel_name": camel_name,
"snake_name": snake_name,
"short_description": short_description,
"docstring": docstring,
},
"library_metadata": library_metadata,
"renderers": {},
"examples": [],
"metrics": [],
"execution_engines": {},
"test_report": [],
"diagnostics_report": [],
}
# Generate artifacts from an example case
gallery_examples = self._get_examples()
report_obj.update({"examples": gallery_examples})
if gallery_examples != []:
example_data, example_test = self._choose_example(gallery_examples)
# TODO: this should be creating a Batch using an engine
test_batch = Batch(data=pd.DataFrame(example_data))
expectation_config = ExpectationConfiguration(
**{"expectation_type": snake_name, "kwargs": example_test}
)
validation_result = None
try:
validation_results = self._instantiate_example_validation_results(
test_batch=test_batch,
expectation_config=expectation_config,
)
validation_result = validation_results[0]
except (
GreatExpectationsError,
AttributeError,
ImportError,
LookupError,
ValueError,
SyntaxError,
) as e:
report_obj = self._add_error_to_diagnostics_report(
report_obj, e, traceback.format_exc()
)
if validation_result is not None:
renderers = self._get_renderer_dict(
expectation_name=snake_name,
expectation_config=expectation_config,
validation_result=validation_result,
)
report_obj.update({"renderers": renderers})
upstream_metrics = None
try:
upstream_metrics = self._get_upstream_metrics(expectation_config)
report_obj.update({"metrics": upstream_metrics})
except GreatExpectationsError as e:
report_obj = self._add_error_to_diagnostics_report(
report_obj, e, traceback.format_exc()
)
execution_engines = None
if upstream_metrics is not None:
execution_engines = self._get_execution_engine_dict(
upstream_metrics=upstream_metrics,
)
report_obj.update({"execution_engines": execution_engines})
try:
tests = self._get_examples(return_only_gallery_examples=False)
if len(tests) > 0:
if execution_engines is not None:
test_results = self._get_test_results(
snake_name,
tests,
execution_engines,
)
report_obj.update({"test_report": test_results})
except Exception as e:
report_obj = self._add_error_to_diagnostics_report(
report_obj, e, traceback.format_exc()
)
return report_obj
def _add_error_to_diagnostics_report(
self, report_obj: Dict, error: Exception, stack_trace: str
) -> Dict:
error_entries = report_obj.get("diagnostics_report")
if error_entries is None:
error_entries = []
report_obj["diagnostics_report"] = error_entries
error_entries.append(
{
"error_message": str(error),
"stack_trace": stack_trace,
}
)
return report_obj
def _get_examples(self, return_only_gallery_examples=True) -> List[Dict]:
"""
Get a list of examples from the object's `examples` member variable.
:param return_only_gallery_examples: if True, include only test examples where `include_in_gallery` is true
:return: list of examples or [], if no examples exist
"""
try:
all_examples = self.examples
except AttributeError:
return []
included_examples = []
for example in all_examples:
# print(example)
included_tests = []
for test in example["tests"]:
if (
test.get("include_in_gallery") == True
or return_only_gallery_examples == False
):
included_tests.append(test)
if len(included_tests) > 0:
copied_example = deepcopy(example)
copied_example["tests"] = included_tests
included_examples.append(copied_example)
return included_examples
def _get_docstring_and_short_description(self) -> Tuple[str, str]:
if self.__doc__ is not None:
docstring = self.__doc__
short_description = self.__doc__.split("\n")[0]
else:
docstring = ""
short_description = ""
return docstring, short_description
def _choose_example(self, examples):
example = examples[0]
example_data = example["data"]
example_test = example["tests"][0]["in"]
return example_data, example_test
def _instantiate_example_validation_results(
self,
test_batch: Batch,
expectation_config: ExpectationConfiguration,
) -> List[ExpectationValidationResult]:
validation_results = Validator(
execution_engine=PandasExecutionEngine(), batches=[test_batch]
).graph_validate(configurations=[expectation_config])
return validation_results
def _get_supported_renderers(self, snake_name: str) -> List[str]:
supported_renderers = list(_registered_renderers[snake_name].keys())
supported_renderers.sort()
return supported_renderers
def _get_test_results(
self,
snake_name,
examples,
execution_engines,
):
test_results = []
exp_tests = generate_expectation_tests(
snake_name,
examples,
expectation_execution_engines_dict=execution_engines,
)
for exp_test in exp_tests:
try:
evaluate_json_test_cfe(
validator=exp_test["validator_with_data"],
expectation_type=exp_test["expectation_type"],
test=exp_test["test"],
)
test_results.append(
{
"test title": exp_test["test"]["title"],
"backend": exp_test["backend"],
"test_passed": "true",
}
)
except Exception as e:
test_results.append(
{
"test title": exp_test["test"]["title"],
"backend": exp_test["backend"],
"test_passed": "false",
"error_message": str(e),
"stack_trace": traceback.format_exc(),
}
)
return test_results
from great_expectations.render.types import RenderedStringTemplateContent
# NOTE: Abe 20201228: This method probably belong elsewhere. Putting it here for now.
def _get_rendered_result_as_string(self, rendered_result):
if type(rendered_result) == str:
return rendered_result
elif type(rendered_result) == list:
sub_result_list = []
for sub_result in rendered_result:
sub_result_list.append(self._get_rendered_result_as_string(sub_result))
return "\n".join(sub_result_list)
elif type(rendered_result) == RenderedStringTemplateContent:
return rendered_result.__str__()
else:
pass
# print(type(rendered_result))
def _get_renderer_dict(
self,
expectation_name: str,
expectation_config: ExpectationConfiguration,
validation_result: ExpectationValidationResult,
standard_renderers=[
"renderer.answer",
"renderer.diagnostic.unexpected_statement",
"renderer.diagnostic.observed_value",
"renderer.diagnostic.status_icon",
"renderer.diagnostic.unexpected_table",
"renderer.prescriptive",
"renderer.question",
],
) -> Dict[str, str]:
supported_renderers = self._get_supported_renderers(expectation_name)
standard_renderer_dict = {}
for renderer_name in standard_renderers:
if renderer_name in supported_renderers:
_, renderer = _registered_renderers[expectation_name][renderer_name]
rendered_result = renderer(
configuration=expectation_config,
result=validation_result,
)
standard_renderer_dict[
renderer_name
] = self._get_rendered_result_as_string(rendered_result)
else:
standard_renderer_dict[renderer_name] = None
return {
"standard": standard_renderer_dict,
"custom": [],
}
def _get_execution_engine_dict(
self,
upstream_metrics,
) -> Dict:
expectation_engines = {}
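        # A provider is marked as supported only if every upstream metric is
        # registered for it (logical AND across the expectation's metrics).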
for provider in [
"PandasExecutionEngine",
"SqlAlchemyExecutionEngine",
"SparkDFExecutionEngine",
]:
all_true = True
for metric in upstream_metrics:
                if provider not in _registered_metrics[metric]["providers"]:
all_true = False
break
expectation_engines[provider] = all_true
return expectation_engines
def _get_upstream_metrics(self, expectation_config) -> List[str]:
# NOTE: Abe 20210102: Strictly speaking, identifying upstream metrics shouldn't need to rely on an expectation config.
# There's probably some part of get_validation_dependencies that can be factored out to remove the dependency.
validation_dependencies = self.get_validation_dependencies(
configuration=expectation_config
)
return list(validation_dependencies["metrics"].keys())
def _get_library_metadata(self):
library_metadata = {
"maturity": None,
"package": None,
"tags": [],
"contributors": [],
}
if hasattr(self, "library_metadata"):
library_metadata.update(self.library_metadata)
return library_metadata
class TableExpectation(Expectation, ABC):
domain_keys = (
"batch_id",
"table",
"row_condition",
"condition_parser",
)
metric_dependencies = tuple()
def get_validation_dependencies(
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies = super().get_validation_dependencies(
configuration, execution_engine, runtime_configuration
)
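        # Each declared metric dependency is turned into a MetricConfiguration keyed
        # by its metric name, using domain/value kwargs derived from the configuration.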
for metric_name in self.metric_dependencies:
metric_kwargs = get_metric_kwargs(
metric_name=metric_name,
configuration=configuration,
runtime_configuration=runtime_configuration,
)
dependencies["metrics"][metric_name] = MetricConfiguration(
metric_name=metric_name,
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
return dependencies
def validate_metric_value_between_configuration(
self, configuration: Optional[ExpectationConfiguration]
):
# Validating that Minimum and Maximum values are of the proper format and type
min_val = None
max_val = None
parse_strings_as_datetimes = None
if "min_value" in configuration.kwargs:
min_val = configuration.kwargs["min_value"]
if "max_value" in configuration.kwargs:
max_val = configuration.kwargs["max_value"]
if "parse_strings_as_datetimes" in configuration.kwargs:
parse_strings_as_datetimes = configuration.kwargs[
"parse_strings_as_datetimes"
]
try:
# Ensuring Proper interval has been provided
if parse_strings_as_datetimes:
assert min_val is None or is_parseable_date(
min_val
), "Provided min threshold must be a dateutil-parseable date"
                assert max_val is None or is_parseable_date(
                    max_val
                ), "Provided max threshold must be a dateutil-parseable date"
else:
assert min_val is None or isinstance(
min_val, (float, int, dict)
), "Provided min threshold must be a number"
if isinstance(min_val, dict):
assert (
"$PARAMETER" in min_val
), 'Evaluation Parameter dict for min_value kwarg must have "$PARAMETER" key'
assert max_val is None or isinstance(
max_val, (float, int, dict)
), "Provided max threshold must be a number"
if isinstance(max_val, dict):
assert "$PARAMETER" in max_val, (
"Evaluation Parameter dict for max_value "
"kwarg "
'must have "$PARAMETER" key'
)
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
if min_val is not None and max_val is not None and min_val > max_val:
raise InvalidExpectationConfigurationError(
"Minimum Threshold cannot be larger than Maximum Threshold"
)
return True
def _validate_metric_value_between(
self,
metric_name,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
metric_value = metrics.get(metric_name)
# Obtaining components needed for validation
min_value = self.get_success_kwargs(configuration).get("min_value")
strict_min = self.get_success_kwargs(configuration).get("strict_min")
max_value = self.get_success_kwargs(configuration).get("max_value")
strict_max = self.get_success_kwargs(configuration).get("strict_max")
if metric_value is None:
return {"success": False, "result": {"observed_value": metric_value}}
        # Checking if the metric value lies between the thresholds
if min_value is not None:
if strict_min:
above_min = metric_value > min_value
else:
above_min = metric_value >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = metric_value < max_value
else:
below_max = metric_value <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": metric_value}}
class ColumnExpectation(TableExpectation, ABC):
domain_keys = ("batch_id", "table", "column", "row_condition", "condition_parser")
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
# Ensuring basic configuration parameters are properly set
try:
assert (
"column" in configuration.kwargs
), "'column' parameter is required for column expectations"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
class ColumnMapExpectation(TableExpectation, ABC):
map_metric = None
domain_keys = ("batch_id", "table", "column", "row_condition", "condition_parser")
success_keys = ("mostly",)
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
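    # "mostly" is the minimum fraction of non-null values that must satisfy the
    # map metric for the expectation to succeed (see _validate below).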
@classmethod
def is_abstract(cls):
return cls.map_metric is None or super().is_abstract()
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
if not super().validate_configuration(configuration):
return False
try:
assert (
"column" in configuration.kwargs
), "'column' parameter is required for column map expectations"
if "mostly" in configuration.kwargs:
mostly = configuration.kwargs["mostly"]
assert isinstance(
mostly, (int, float)
), "'mostly' parameter must be an integer or float"
assert 0 <= mostly <= 1, "'mostly' parameter must be between 0 and 1"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
def get_validation_dependencies(
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies = super().get_validation_dependencies(
configuration, execution_engine, runtime_configuration
)
assert isinstance(
self.map_metric, str
), "ColumnMapExpectation must override get_validation_dependencies or declare exactly one map_metric"
assert (
self.metric_dependencies == tuple()
), "ColumnMapExpectation must be configured using map_metric, and cannot have metric_dependencies declared."
# convenient name for updates
metric_dependencies = dependencies["metrics"]
metric_kwargs = get_metric_kwargs(
metric_name="column_values.nonnull.unexpected_count",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
"column_values.nonnull.unexpected_count"
] = MetricConfiguration(
"column_values.nonnull.unexpected_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
metric_kwargs = get_metric_kwargs(
metric_name=self.map_metric + ".unexpected_count",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
self.map_metric + ".unexpected_count"
] = MetricConfiguration(
self.map_metric + ".unexpected_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
result_format_str = dependencies["result_format"].get("result_format")
metric_kwargs = get_metric_kwargs(
metric_name="table.row_count",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies["table.row_count"] = MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
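        # Metrics are added progressively by result format: BOOLEAN_ONLY stops here,
        # BASIC/SUMMARY also need unexpected_values, and anything beyond that
        # additionally needs unexpected_rows (plus unexpected_index_list on Pandas).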
if result_format_str == "BOOLEAN_ONLY":
return dependencies
metric_kwargs = get_metric_kwargs(
self.map_metric + ".unexpected_values",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
self.map_metric + ".unexpected_values"
] = MetricConfiguration(
metric_name=self.map_metric + ".unexpected_values",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
if result_format_str in ["BASIC", "SUMMARY"]:
return dependencies
metric_kwargs = get_metric_kwargs(
self.map_metric + ".unexpected_rows",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[self.map_metric + ".unexpected_rows"] = MetricConfiguration(
metric_name=self.map_metric + ".unexpected_rows",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
if isinstance(execution_engine, PandasExecutionEngine):
metric_kwargs = get_metric_kwargs(
self.map_metric + ".unexpected_index_list",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
self.map_metric + ".unexpected_index_list"
] = MetricConfiguration(
metric_name=self.map_metric + ".unexpected_index_list",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
return dependencies
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
if runtime_configuration:
result_format = runtime_configuration.get(
"result_format",
configuration.kwargs.get(
"result_format", self.default_kwarg_values.get("result_format")
),
)
else:
result_format = configuration.kwargs.get(
"result_format", self.default_kwarg_values.get("result_format")
)
mostly = self.get_success_kwargs().get(
"mostly", self.default_kwarg_values.get("mostly")
)
total_count = metrics.get("table.row_count")
null_count = metrics.get("column_values.nonnull.unexpected_count")
unexpected_count = metrics.get(self.map_metric + ".unexpected_count")
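        # Success criterion: among non-null values, the fraction that is NOT unexpected,
        # i.e. (total - unexpected - null) / (total - null), must be at least `mostly`.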
success = None
if total_count is None or null_count is None:
# Vacuously true
success = True
elif (total_count - null_count) != 0:
success_ratio = (total_count - unexpected_count - null_count) / (
total_count - null_count
)
success = success_ratio >= mostly
elif total_count == 0 or (total_count - null_count) == 0:
success = True
try:
nonnull_count = metrics.get("table.row_count") - metrics.get(
"column_values.nonnull.unexpected_count"
)
except TypeError:
nonnull_count = None
return _format_map_output(
result_format=parse_result_format(result_format),
success=success,
element_count=metrics.get("table.row_count"),
nonnull_count=nonnull_count,
unexpected_count=metrics.get(self.map_metric + ".unexpected_count"),
unexpected_list=metrics.get(self.map_metric + ".unexpected_values"),
unexpected_index_list=metrics.get(
self.map_metric + ".unexpected_index_list"
),
)
class ColumnPairMapExpectation(TableExpectation, ABC):
map_metric = None
domain_keys = (
"batch_id",
"table",
"column_A",
"column_B",
"row_condition",
"condition_parser",
)
success_keys = ("mostly",)
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
@classmethod
def is_abstract(cls):
return cls.map_metric is None or super().is_abstract()
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
if not super().validate_configuration(configuration):
return False
try:
assert (
"column_A" in configuration.kwargs
), "'column_A' parameter is required for column pair map expectations"
assert (
"column_B" in configuration.kwargs
), "'column_B' parameter is required for column pair map expectations"
if "mostly" in configuration.kwargs:
mostly = configuration.kwargs["mostly"]
assert isinstance(
mostly, (int, float)
), "'mostly' parameter must be an integer or float"
assert 0 <= mostly <= 1, "'mostly' parameter must be between 0 and 1"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
def get_validation_dependencies(
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies = super().get_validation_dependencies(
configuration, execution_engine, runtime_configuration
)
assert isinstance(
self.map_metric, str
), "ColumnPairMapExpectation must override get_validation_dependencies or declare exactly one map_metric"
assert (
self.metric_dependencies == tuple()
), "ColumnPairMapExpectation must be configured using map_metric, and cannot have metric_dependencies declared."
# convenient name for updates
metric_dependencies = dependencies["metrics"]
metric_kwargs = get_metric_kwargs(
metric_name="column_values.nonnull.unexpected_count",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
"column_values.nonnull.unexpected_count"
] = MetricConfiguration(
"column_values.nonnull.unexpected_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
metric_kwargs = get_metric_kwargs(
metric_name=self.map_metric + ".unexpected_count",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
self.map_metric + ".unexpected_count"
] = MetricConfiguration(
self.map_metric + ".unexpected_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
result_format_str = dependencies["result_format"].get("result_format")
if result_format_str == "BOOLEAN_ONLY":
return dependencies
metric_kwargs = get_metric_kwargs(
metric_name="table.row_count",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies["table.row_count"] = MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
metric_kwargs = get_metric_kwargs(
self.map_metric + ".unexpected_values",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
self.map_metric + ".unexpected_values"
] = MetricConfiguration(
metric_name=self.map_metric + ".unexpected_values",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
if result_format_str in ["BASIC", "SUMMARY"]:
return dependencies
metric_kwargs = get_metric_kwargs(
self.map_metric + ".unexpected_rows",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[self.map_metric + ".unexpected_rows"] = MetricConfiguration(
metric_name=self.map_metric + ".unexpected_rows",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
if isinstance(execution_engine, PandasExecutionEngine):
metric_kwargs = get_metric_kwargs(
self.map_metric + ".unexpected_index_list",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
metric_dependencies[
self.map_metric + ".unexpected_index_list"
] = MetricConfiguration(
metric_name=self.map_metric + ".unexpected_index_list",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
)
return dependencies
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
if runtime_configuration:
result_format = runtime_configuration.get(
"result_format",
configuration.kwargs.get(
"result_format", self.default_kwarg_values.get("result_format")
),
)
else:
result_format = configuration.kwargs.get(
"result_format", self.default_kwarg_values.get("result_format")
)
mostly = self.get_success_kwargs().get(
"mostly", self.default_kwarg_values.get("mostly")
)
total_count = metrics.get("table.row_count")
null_count = metrics.get("column_values.nonnull.unexpected_count")
unexpected_count = metrics.get(self.map_metric + ".unexpected_count")
success = None
if total_count is None or null_count is None:
# Vacuously true
success = True
elif (total_count - null_count) != 0:
success_ratio = (total_count - unexpected_count - null_count) / (
total_count - null_count
)
success = success_ratio >= mostly
elif total_count == 0 or (total_count - null_count) == 0:
success = True
try:
nonnull_count = metrics.get("table.row_count") - metrics.get(
"column_values.nonnull.unexpected_count"
)
except TypeError:
nonnull_count = None
return _format_map_output(
result_format=parse_result_format(result_format),
success=success,
element_count=metrics.get("table.row_count"),
nonnull_count=nonnull_count,
unexpected_count=metrics.get(self.map_metric + ".unexpected_count"),
unexpected_list=metrics.get(self.map_metric + ".unexpected_values"),
unexpected_index_list=metrics.get(
self.map_metric + ".unexpected_index_list"
),
)
def _format_map_output(
result_format,
success,
element_count,
nonnull_count,
unexpected_count,
unexpected_list,
unexpected_index_list,
):
"""Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations.
"""
    # NB: unexpected_count parameter is explicit because some implementing classes may limit the length of unexpected_list
# Incrementally add to result and return when all values for the specified level are present
return_obj = {"success": success}
if result_format["result_format"] == "BOOLEAN_ONLY":
return return_obj
skip_missing = False
if nonnull_count is None:
missing_count = None
        skip_missing = True
else:
missing_count = element_count - nonnull_count
if element_count > 0:
unexpected_percent_total = unexpected_count / element_count * 100
if not skip_missing:
missing_percent = missing_count / element_count * 100
if nonnull_count > 0:
unexpected_percent_nonmissing = unexpected_count / nonnull_count * 100
else:
unexpected_percent_nonmissing = None
else:
unexpected_percent_nonmissing = unexpected_percent_total
else:
missing_percent = None
unexpected_percent_total = None
unexpected_percent_nonmissing = None
return_obj["result"] = {
"element_count": element_count,
"unexpected_count": unexpected_count,
"unexpected_percent": unexpected_percent_nonmissing,
"partial_unexpected_list": unexpected_list[
: result_format["partial_unexpected_count"]
],
}
if not skip_missing:
return_obj["result"]["missing_count"] = missing_count
return_obj["result"]["missing_percent"] = missing_percent
return_obj["result"]["unexpected_percent_total"] = unexpected_percent_total
return_obj["result"][
"unexpected_percent_nonmissing"
] = unexpected_percent_nonmissing
if result_format["result_format"] == "BASIC":
return return_obj
# Try to return the most common values, if possible.
    if result_format.get("partial_unexpected_count") > 0:
try:
partial_unexpected_counts = [
{"value": key, "count": value}
for key, value in sorted(
Counter(unexpected_list).most_common(
result_format["partial_unexpected_count"]
),
key=lambda x: (-x[1], x[0]),
)
]
except TypeError:
partial_unexpected_counts = [
"partial_exception_counts requires a hashable type"
]
finally:
return_obj["result"].update(
{
"partial_unexpected_index_list": unexpected_index_list[
: result_format["partial_unexpected_count"]
]
if unexpected_index_list is not None
else None,
"partial_unexpected_counts": partial_unexpected_counts,
}
)
if result_format["result_format"] == "SUMMARY":
return return_obj
return_obj["result"].update(
{
"unexpected_list": unexpected_list,
"unexpected_index_list": unexpected_index_list,
}
)
if result_format["result_format"] == "COMPLETE":
return return_obj
raise ValueError("Unknown result_format {}.".format(result_format["result_format"]))
| 37.708703 | 137 | 0.58959 |
793eb739df12fa7a3dcb33f038bbffa2f3f10bde | 1,149 | py | Python | min_coin2.py | zmiddle/Coin_Change.py | 5dd14d6ab2a9310d30ea6a004dd652e0c25f5127 | [
"MIT"
] | null | null | null | min_coin2.py | zmiddle/Coin_Change.py | 5dd14d6ab2a9310d30ea6a004dd652e0c25f5127 | [
"MIT"
] | null | null | null | min_coin2.py | zmiddle/Coin_Change.py | 5dd14d6ab2a9310d30ea6a004dd652e0c25f5127 | [
"MIT"
] | null | null | null | #2. Given a value and a set of coins, return the optimal count of coins to achieve the value.
# Example: Can you rewrite this so that it will work when your machine has run out of nickels?
# (and minimize the number of coins given [a reasonable amount])
def min_coin2(cents):
try:
cents == int(cents)
except ValueError:
return "Not an integer!"
else:
if cents < 1:
return 0
coins = [25, 10, 5, 1]
q = True
d = True
n = False
p = True
supply = [q,d,n,p]
i = 0
num_coins = 0
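        # Greedy change-making: walk the denominations from largest to smallest,
        # skipping any coin whose supply flag is False (here nickels are out of stock).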
while cents >= 1:
#This can be simplified
'''
if supply[i] == False:
i += 1
elif cents < coins[i]:
i += 1
'''
            if not supply[i] or cents < coins[i]:
i += 1
            elif supply[i] and cents >= coins[i]:
cents -= coins[i]
num_coins += 1
return num_coins
print (min_coin2(31))
print (min_coin2(70))
print (min_coin2('hi'))
| 27.357143 | 96 | 0.466493 |
793eb83464ec233e8c1df61541772c3de5a7e935 | 3,003 | py | Python | Queue/DesignCircularQueue.py | karan2808/Python-Data-Structures-and-Algorithms | a4b39ddf7297541d90dc4efcaab883f928281abd | [
"MIT"
] | 2 | 2021-01-31T03:42:01.000Z | 2021-01-31T03:43:08.000Z | Queue/DesignCircularQueue.py | karan2808/Python-Data-Structures-and-Algorithms | a4b39ddf7297541d90dc4efcaab883f928281abd | [
"MIT"
] | null | null | null | Queue/DesignCircularQueue.py | karan2808/Python-Data-Structures-and-Algorithms | a4b39ddf7297541d90dc4efcaab883f928281abd | [
"MIT"
] | 1 | 2021-01-31T03:42:02.000Z | 2021-01-31T03:42:02.000Z | class MyCircularQueue:
def __init__(self, k: int):
self.q = [None] * k
self.front_idx = -1
self.back_idx = -1
self.capacity = k
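        # front_idx/back_idx of -1 mean the queue is empty; both indices advance
        # modulo `capacity`, which is what makes the fixed-size buffer circular.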
def display_elements(self):
print("The elements in the Queue are ")
print_str = ""
# add the elements to the print string
for i in range(self.capacity):
print_str += str(self.q[i]) + " "
print(print_str)
def enQueue(self, value: int) -> bool:
# if the queue is full return false
if self.isFull():
print("The queue is full..")
return False
# if the front index is negative, update its value to 0
if self.front_idx == -1:
self.front_idx = 0
# increment the back index
self.back_idx = (self.back_idx + 1) % self.capacity
# update the queue value
self.q[self.back_idx] = value
return True
def deQueue(self) -> bool:
# if the queue is empty return false
if self.front_idx == -1:
print("The queue is empty..")
return False
self.q[self.front_idx] = None
# if the front and back indices are the same reset the queue indices
if self.front_idx == self.back_idx:
self.front_idx = -1
self.back_idx = -1
else:
# increment the front idx
self.front_idx = (self.front_idx + 1) % self.capacity
return True
def Front(self) -> int:
# if the front idx is -1 return -1 else the front value
return -1 if self.front_idx == -1 else self.q[self.front_idx]
def Rear(self) -> int:
# if the rear idx is -1 return -1 else the back value
return -1 if self.back_idx == -1 else self.q[self.back_idx]
# check if queue is empty
def isEmpty(self) -> bool:
return self.front_idx == -1
# check if queue is full
def isFull(self) -> bool:
return (self.back_idx + 1) % self.capacity == self.front_idx
def main():
Queue = MyCircularQueue(10)
Queue.enQueue(20)
Queue.enQueue(10)
Queue.display_elements()
Queue.deQueue()
Queue.enQueue(20)
Queue.enQueue(10)
Queue.enQueue(20)
Queue.enQueue(10)
Queue.enQueue(20)
Queue.enQueue(10)
Queue.enQueue(20)
Queue.enQueue(10)
Queue.enQueue(20)
Queue.enQueue(10)
Queue.enQueue(20)
Queue.enQueue(10)
Queue.enQueue(20)
Queue.enQueue(10)
Queue.display_elements()
print("The front element of the queue is " + str(Queue.Front()))
print("The rear element of the queue is " + str(Queue.Rear()))
Queue.deQueue()
Queue.deQueue()
Queue.deQueue()
Queue.deQueue()
Queue.deQueue()
Queue.deQueue()
Queue.deQueue()
Queue.deQueue()
Queue.deQueue()
print("The front element of the queue is " + str(Queue.Front()))
print("The rear element of the queue is " + str(Queue.Rear()))
Queue.display_elements()
if __name__ == "__main__":
main() | 30.333333 | 76 | 0.590077 |
793eb870bf620d1046e4865ddf3c8b3e73e00a81 | 13,502 | py | Python | neutron/services/firewall/fwaas_plugin.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | [
"Apache-2.0"
] | 1 | 2016-04-19T08:20:19.000Z | 2016-04-19T08:20:19.000Z | neutron/services/firewall/fwaas_plugin.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | [
"Apache-2.0"
] | null | null | null | neutron/services/firewall/fwaas_plugin.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc.
from oslo.config import cfg
from neutron.common import exceptions as n_exception
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron import context as neutron_context
from neutron.db import api as qdbapi
from neutron.db.firewall import firewall_db
from neutron.extensions import firewall as fw_ext
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants as const
LOG = logging.getLogger(__name__)
class FirewallCallbacks(object):
RPC_API_VERSION = '1.0'
def __init__(self, plugin):
self.plugin = plugin
def create_rpc_dispatcher(self):
return q_rpc.PluginRpcDispatcher([self])
def set_firewall_status(self, context, firewall_id, status, **kwargs):
"""Agent uses this to set a firewall's status."""
LOG.debug(_("set_firewall_status() called"))
with context.session.begin(subtransactions=True):
fw_db = self.plugin._get_firewall(context, firewall_id)
#TODO(xuhanp): Remove INACTIVE status and use DOWN to
# be consistent with other network resources
if status in (const.ACTIVE, const.INACTIVE, const.DOWN):
fw_db.status = status
return True
else:
fw_db.status = const.ERROR
return False
def firewall_deleted(self, context, firewall_id, **kwargs):
"""Agent uses this to indicate firewall is deleted."""
LOG.debug(_("firewall_deleted() called"))
with context.session.begin(subtransactions=True):
fw_db = self.plugin._get_firewall(context, firewall_id)
if fw_db.status == const.PENDING_DELETE:
self.plugin.delete_db_firewall_object(context, firewall_id)
return True
else:
fw_db.status = const.ERROR
LOG.warn(_('Firewall %s unexpectedly deleted by agent.'),
firewall_id)
return False
def get_firewalls_for_tenant(self, context, **kwargs):
"""Agent uses this to get all firewalls and rules for a tenant."""
LOG.debug(_("get_firewalls_for_tenant() called"))
fw_list = [
self.plugin._make_firewall_dict_with_rules(context, fw['id'])
for fw in self.plugin.get_firewalls(context)
]
return fw_list
def get_firewalls_for_tenant_without_rules(self, context, **kwargs):
"""Agent uses this to get all firewalls for a tenant."""
LOG.debug(_("get_firewalls_for_tenant_without_rules() called"))
fw_list = [fw for fw in self.plugin.get_firewalls(context)]
return fw_list
def get_tenants_with_firewalls(self, context, **kwargs):
"""Agent uses this to get all tenants that have firewalls."""
LOG.debug(_("get_tenants_with_firewalls() called"))
ctx = neutron_context.get_admin_context()
fw_list = self.plugin.get_firewalls(ctx)
fw_tenant_list = list(set(fw['tenant_id'] for fw in fw_list))
return fw_tenant_list
class FirewallAgentApi(proxy.RpcProxy):
"""Plugin side of plugin to agent RPC API."""
API_VERSION = '1.0'
def __init__(self, topic, host):
super(FirewallAgentApi, self).__init__(topic, self.API_VERSION)
self.host = host
def create_firewall(self, context, firewall):
return self.fanout_cast(
context,
self.make_msg('create_firewall', firewall=firewall,
host=self.host),
topic=self.topic
)
def update_firewall(self, context, firewall):
return self.fanout_cast(
context,
self.make_msg('update_firewall', firewall=firewall,
host=self.host),
topic=self.topic
)
def delete_firewall(self, context, firewall):
return self.fanout_cast(
context,
self.make_msg('delete_firewall', firewall=firewall,
host=self.host),
topic=self.topic
)
class FirewallCountExceeded(n_exception.Conflict):
"""Reference implementation specific exception for firewall count.
Only one firewall is supported per tenant. When a second
firewall is tried to be created, this exception will be raised.
"""
message = _("Exceeded allowed count of firewalls for tenant "
"%(tenant_id)s. Only one firewall is supported per tenant.")
class FirewallPlugin(firewall_db.Firewall_db_mixin):
"""Implementation of the Neutron Firewall Service Plugin.
This class manages the workflow of FWaaS request/response.
Most DB related works are implemented in class
firewall_db.Firewall_db_mixin.
"""
supported_extension_aliases = ["fwaas"]
def __init__(self):
"""Do the initialization for the firewall service plugin here."""
qdbapi.register_models()
self.callbacks = FirewallCallbacks(self)
self.conn = rpc.create_connection(new=True)
self.conn.create_consumer(
topics.FIREWALL_PLUGIN,
self.callbacks.create_rpc_dispatcher(),
fanout=False)
self.conn.consume_in_thread()
self.agent_rpc = FirewallAgentApi(
topics.L3_AGENT,
cfg.CONF.host
)
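        # The plugin consumes agent callbacks on the FIREWALL_PLUGIN topic and
        # fanout-casts create/update/delete notifications to L3 agents via agent_rpc.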
def _make_firewall_dict_with_rules(self, context, firewall_id):
firewall = self.get_firewall(context, firewall_id)
fw_policy_id = firewall['firewall_policy_id']
if fw_policy_id:
fw_policy = self.get_firewall_policy(context, fw_policy_id)
fw_rules_list = [self.get_firewall_rule(
context, rule_id) for rule_id in fw_policy['firewall_rules']]
firewall['firewall_rule_list'] = fw_rules_list
else:
firewall['firewall_rule_list'] = []
# FIXME(Sumit): If the size of the firewall object we are creating
# here exceeds the largest message size supported by rabbit/qpid
# then we will have a problem.
return firewall
def _rpc_update_firewall(self, context, firewall_id):
status_update = {"firewall": {"status": const.PENDING_UPDATE}}
fw = super(FirewallPlugin, self).update_firewall(context, firewall_id,
status_update)
if fw:
fw_with_rules = (
self._make_firewall_dict_with_rules(context,
firewall_id))
self.agent_rpc.update_firewall(context, fw_with_rules)
def _rpc_update_firewall_policy(self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
if firewall_policy:
for firewall_id in firewall_policy['firewall_list']:
self._rpc_update_firewall(context, firewall_id)
def _ensure_update_firewall(self, context, firewall_id):
fwall = self.get_firewall(context, firewall_id)
if fwall['status'] in [const.PENDING_CREATE,
const.PENDING_UPDATE,
const.PENDING_DELETE]:
raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
pending_state=fwall['status'])
def _ensure_update_firewall_policy(self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
if firewall_policy and 'firewall_list' in firewall_policy:
for firewall_id in firewall_policy['firewall_list']:
self._ensure_update_firewall(context, firewall_id)
def _ensure_update_or_delete_firewall_rule(self, context,
firewall_rule_id):
fw_rule = self.get_firewall_rule(context, firewall_rule_id)
if 'firewall_policy_id' in fw_rule and fw_rule['firewall_policy_id']:
self._ensure_update_firewall_policy(context,
fw_rule['firewall_policy_id'])
def create_firewall(self, context, firewall):
LOG.debug(_("create_firewall() called"))
tenant_id = self._get_tenant_id_for_create(context,
firewall['firewall'])
fw_count = self.get_firewalls_count(context,
filters={'tenant_id': [tenant_id]})
if fw_count:
raise FirewallCountExceeded(tenant_id=tenant_id)
firewall['firewall']['status'] = const.PENDING_CREATE
fw = super(FirewallPlugin, self).create_firewall(context, firewall)
fw_with_rules = (
self._make_firewall_dict_with_rules(context, fw['id']))
self.agent_rpc.create_firewall(context, fw_with_rules)
return fw
def update_firewall(self, context, id, firewall):
LOG.debug(_("update_firewall() called"))
self._ensure_update_firewall(context, id)
firewall['firewall']['status'] = const.PENDING_UPDATE
fw = super(FirewallPlugin, self).update_firewall(context, id, firewall)
fw_with_rules = (
self._make_firewall_dict_with_rules(context, fw['id']))
self.agent_rpc.update_firewall(context, fw_with_rules)
return fw
def delete_db_firewall_object(self, context, id):
firewall = self.get_firewall(context, id)
if firewall['status'] in [const.PENDING_DELETE]:
super(FirewallPlugin, self).delete_firewall(context, id)
def delete_firewall(self, context, id):
LOG.debug(_("delete_firewall() called"))
status_update = {"firewall": {"status": const.PENDING_DELETE}}
fw = super(FirewallPlugin, self).update_firewall(context, id,
status_update)
fw_with_rules = (
self._make_firewall_dict_with_rules(context, fw['id']))
self.agent_rpc.delete_firewall(context, fw_with_rules)
def update_firewall_policy(self, context, id, firewall_policy):
LOG.debug(_("update_firewall_policy() called"))
self._ensure_update_firewall_policy(context, id)
fwp = super(FirewallPlugin,
self).update_firewall_policy(context, id, firewall_policy)
self._rpc_update_firewall_policy(context, id)
return fwp
def update_firewall_rule(self, context, id, firewall_rule):
LOG.debug(_("update_firewall_rule() called"))
self._ensure_update_or_delete_firewall_rule(context, id)
fwr = super(FirewallPlugin,
self).update_firewall_rule(context, id, firewall_rule)
firewall_policy_id = fwr['firewall_policy_id']
if firewall_policy_id:
self._rpc_update_firewall_policy(context, firewall_policy_id)
return fwr
def delete_firewall_rule(self, context, id):
LOG.debug(_("delete_firewall_rule() called"))
self._ensure_update_or_delete_firewall_rule(context, id)
fwr = self.get_firewall_rule(context, id)
firewall_policy_id = fwr['firewall_policy_id']
super(FirewallPlugin, self).delete_firewall_rule(context, id)
# At this point we have already deleted the rule in the DB,
# however it's still not deleted on the backend firewall.
# Until it gets deleted on the backend we will be setting
# the firewall in PENDING_UPDATE state. The backend firewall
# implementation is responsible for setting the appropriate
# configuration (e.g. do not allow any traffic) until the rule
# is deleted. Once the rule is deleted, the backend should put
# the firewall back in ACTIVE state. While the firewall is in
# PENDING_UPDATE state, the firewall behavior might differ based
# on the backend implementation.
if firewall_policy_id:
self._rpc_update_firewall_policy(context, firewall_policy_id)
def insert_rule(self, context, id, rule_info):
LOG.debug(_("insert_rule() called"))
self._ensure_update_firewall_policy(context, id)
fwp = super(FirewallPlugin,
self).insert_rule(context, id, rule_info)
self._rpc_update_firewall_policy(context, id)
return fwp
def remove_rule(self, context, id, rule_info):
LOG.debug(_("remove_rule() called"))
self._ensure_update_firewall_policy(context, id)
fwp = super(FirewallPlugin,
self).remove_rule(context, id, rule_info)
self._rpc_update_firewall_policy(context, id)
return fwp
| 42.863492 | 79 | 0.654273 |
793eb87b822a1075998491a87eecd690fa751f43 | 5,280 | py | Python | Script_PLSDA_RF_SVM.py | Gustoaxel/Statistical-autoencoder | f3328f9c2a45ef0f7fe4adf98af4a64d02d34afc | [
"MIT"
] | 1 | 2021-06-22T13:28:06.000Z | 2021-06-22T13:28:06.000Z | Script_PLSDA_RF_SVM.py | Gustoaxel/Statistical-autoencoder | f3328f9c2a45ef0f7fe4adf98af4a64d02d34afc | [
"MIT"
] | null | null | null | Script_PLSDA_RF_SVM.py | Gustoaxel/Statistical-autoencoder | f3328f9c2a45ef0f7fe4adf98af4a64d02d34afc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright I3S CNRS UCA
This code is an implementation of the other methods used for comparison in the article :
An efficient diagnostic that uses the latent space of a Non-Parametric Supervised Autoencoder
for metabolomic datasets of clinical studies.
Parameters :
- Seed (line 42)
- Database (line 41)
- Scaling(line 56)
- Algorithm to compare (line 43)
- Features extraction (line 45)
Results :
- Accuracy (accTestCompare_final)
- Top features (df_featureList_final)
- Metrics (aucTestCompare_final)
"""
import sys
if '../functions/' not in sys.path:
sys.path.append('../functions/')
import functions.functions_compare as ff
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale as scale
from sklearn.cross_decomposition import PLSRegression
import matplotlib.pyplot as plt
from random import randrange
import random
if __name__ == '__main__':
# Set params :
filename = 'LUNG.csv'
Seed = [6, 7]
alglist = ['plsda', 'RF' ] # Other ML algorithm to compare
# alglist = ['plsda', 'RF', 'svm' ] # SVM could be slow
doTopgenes = True # Features selection
# Load data
X, Yr, nbr_clusters, feature_names = ff.readData(filename)
    # Data Preprocessing
#X=normalize(X,norm='l1',axis=1)
X=np.log(abs(X+1))
X=X-np.mean(X,axis=0)
X=scale(X,axis=0)
#X=scale(X,axis=1)
X=X/ff.normest(X)
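    # Preprocessing sketch: log-transform, mean-center, scale each feature, then
    # divide by ff.normest(X) (presumably a MATLAB-style estimate of the matrix
    # 2-norm; that interpretation is an assumption, not verified here).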
######## Main #######
print("Starts trainning")
for i in Seed:
# Processing
print("------ Seed {} ------".format(i))
accTestCompare,df_timeElapsed, aucTestCompare =\
ff.basic_run_other(
X,Yr,nbr_clusters,alglist,
genenames=None,
clusternames=None,
nfold=4,
rng=6,
outputPath='../results_compare/')
if doTopgenes :
df_featureList = ff.rankFeatures(X,Yr,alglist,feature_names)
if i == Seed[0] :
accTestCompare_final = accTestCompare.iloc[:4, :]
aucTestCompare_final = aucTestCompare.iloc[:4, :]
if doTopgenes:
df_featureList_final = df_featureList
else :
accTestCompare_final= pd.concat([accTestCompare_final , accTestCompare.iloc[:4, :]])
aucTestCompare_final= pd.concat([aucTestCompare_final , aucTestCompare.iloc[:4, :]])
if doTopgenes:
for met in range(len(df_featureList_final)):
df_featureList_final[met] = df_featureList_final[met].join(df_featureList[met]["weights"], rsuffix=" {}".format(i))
mean = pd.DataFrame(accTestCompare_final.mean(axis = 0))
if doTopgenes:
for met in range(len(df_featureList_final)) :
mean_met = pd.DataFrame(df_featureList_final[met].iloc[:,1:].mean(axis = 1))
std_met = pd.DataFrame(df_featureList_final[met].iloc[:,1:].std(axis = 1))
mean_met.columns= ["Mean"]
df_featureList_final[met] = df_featureList_final[met].join(mean_met)
std_met.columns= ["Std"]
df_featureList_final[met] = df_featureList_final[met].join(std_met)
std = pd.DataFrame(accTestCompare_final.std(axis = 0))
mean.columns= ["Mean"]
accTestCompare_final = accTestCompare_final.T.join(mean).T
std.columns= ["Std"]
accTestCompare_final = accTestCompare_final.T.join(std).T
mean = pd.DataFrame(aucTestCompare_final.mean(axis = 0))
std = pd.DataFrame(aucTestCompare_final.std(axis = 0))
mean.columns= ["Mean"]
aucTestCompare_final = aucTestCompare_final.T.join(mean).T
std.columns= ["Std"]
aucTestCompare_final = aucTestCompare_final.T.join(std).T
color = ['#1F77B4', '#FF7F0E', '#2CA02C', '#D62728', '#9467BD','#8C564B', '#E377C2', '#BCBD22', '#17BECF', '#40004B','#762A83',\
'#9970AB', '#C2A5CF', '#E7D4E8', '#F7F7F7','#D9F0D3', '#A6DBA0', '#5AAE61', '#1B7837', '#00441B','#8DD3C7', '#FFFFB3',\
'#BEBADA', '#FB8072', '#80B1D3','#FDB462', '#B3DE69', '#FCCDE5', '#D9D9D9', '#BC80BD','#CCEBC5', '#FFED6F']
random.seed(Seed[0])
lab = np.unique(Yr)
index = [[] for l in lab ]
test_index = []
train_index = []
for i in range(len(Yr)) :
for l in lab :
if l == Yr[i] :
index[l-1].append(i)
for l in index :
test_index.append( l.pop(randrange(len(l))))
train_index += l
print(" test index = ", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Yr[train_index], Yr[test_index]
clf = PLSRegression(n_components=4,scale=False)
model = clf.fit(X_train,y_train.ravel())
col = [color[i] for i in y_train ]
plt.figure()
plt.scatter(model.x_scores_[:,0], model.x_scores_[:,1], c=col )
plt.xlabel("Component 1")
plt.ylabel("Component 2")
plt.title("Score plot PLSDA on BRAIN")
testm = model.transform(X_test)
col = [color[i+4] for i in y_test ]
plt.scatter(testm[:,0], testm[:,1], c=col , s=120, marker='s')
plt.show()
| 33.417722 | 135 | 0.598295 |
793eb8baf4bf182cd10adb297506a7f2e49242ca | 4,070 | py | Python | sdk/python/kulado_azure/apimanagement/product_policy.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/kulado_azure/apimanagement/product_policy.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/kulado_azure/apimanagement/product_policy.py | kulado/kulado-azure | f3a408fa0405fe6ae93e0049b2ae0f0e266f1cf6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Kulado Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class ProductPolicy(kulado.CustomResource):
api_management_name: kulado.Output[str]
"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""
product_id: kulado.Output[str]
"""
The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
"""
resource_group_name: kulado.Output[str]
"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
xml_content: kulado.Output[str]
"""
The XML Content for this Policy.
"""
xml_link: kulado.Output[str]
"""
A link to a Policy XML Document, which must be publicly available.
"""
def __init__(__self__, resource_name, opts=None, api_management_name=None, product_id=None, resource_group_name=None, xml_content=None, xml_link=None, __name__=None, __opts__=None):
"""
Manages an API Management Product Policy
:param str resource_name: The name of the resource.
:param kulado.ResourceOptions opts: Options for the resource.
:param kulado.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param kulado.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param kulado.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
:param kulado.Input[str] xml_content: The XML Content for this Policy.
:param kulado.Input[str] xml_link: A link to a Policy XML Document, which must be publicly available.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/api_management_product_policy.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, kulado.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if api_management_name is None:
raise TypeError("Missing required property 'api_management_name'")
__props__['api_management_name'] = api_management_name
if product_id is None:
raise TypeError("Missing required property 'product_id'")
__props__['product_id'] = product_id
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['xml_content'] = xml_content
__props__['xml_link'] = xml_link
super(ProductPolicy, __self__).__init__(
'azure:apimanagement/productPolicy:ProductPolicy',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.222222 | 185 | 0.700246 |
793eb917b99db1a86686b1a2c599c0d20566212a | 2,837 | py | Python | zenodo_uploader.py | JunAishima/conda-pack-template | d40390606ed3af80c1fc3d731124728431a25788 | [
"BSD-3-Clause"
] | null | null | null | zenodo_uploader.py | JunAishima/conda-pack-template | d40390606ed3af80c1fc3d731124728431a25788 | [
"BSD-3-Clause"
] | null | null | null | zenodo_uploader.py | JunAishima/conda-pack-template | d40390606ed3af80c1fc3d731124728431a25788 | [
"BSD-3-Clause"
] | null | null | null | import os
import argparse
import requests
import yaml
import json
def upload_to_zenodo(
file_name_to_upload,
config_file,
zenodo_server="https://sandbox.zenodo.org/api/deposit/depositions",
):
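    # Flow: create an empty deposition, PUT the file into its bucket, PUT the
    # metadata from the YAML config ("zenodo_metadata" key), then publish.
    # Requires the ZENODO_ACCESS_TOKEN environment variable to be set.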
filename = os.path.abspath(file_name_to_upload)
if not os.path.isfile(filename):
raise FileNotFoundError(
f"The file, specified for uploading does not exist: {filename}"
)
config_name = os.path.abspath(config_file)
if not os.path.isfile(config_name):
raise FileNotFoundError(
f"The file with metadata, specified for uploading does not exist: {config_name}"
)
headers = {"Content-Type": "application/json"}
params = {"access_token": os.getenv("ZENODO_ACCESS_TOKEN", "")}
r = requests.post(
zenodo_server,
params=params,
json={},
headers=headers,
)
if r.status_code != 201:
raise RuntimeError(f"The status code for the request is {r.status_code}.\nMessage: {r.text}")
return_json = r.json()
deposition_id = return_json["id"]
bucket_url = return_json["links"]["bucket"]
filebase = os.path.basename(file_name_to_upload)
file_url = return_json["links"]["html"].replace('deposit', 'record')
print(f"Uploading {filename} to Zenodo. This may take some time...")
with open(filename, "rb") as fp:
r = requests.put(f"{bucket_url}/{filebase}", data=fp, params=params)
if r.status_code != 200:
raise RuntimeError(f"The status code for the request is {r.status_code}.\nMessage: {r.text}")
print(f"\nFile Uploaded successfully!\nFile link: {file_url}")
print(f"Uploading metadata for {filename} ...")
with open(config_file) as fp:
data = yaml.safe_load(fp)
r = requests.put(
f"{zenodo_server}/{deposition_id}",
params=params,
data=json.dumps(data["zenodo_metadata"]),
headers=headers,
)
if r.status_code != 200:
raise RuntimeError(f"The status code for the request is {r.status_code}.\nMessage: {r.text}")
print(f"Publishing {filebase}...")
r = requests.post(f"{zenodo_server}/{deposition_id}/actions/publish", params=params)
if r.status_code != 202:
raise RuntimeError(f"The status code for the request is {r.status_code}.\nMessage: {r.text}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=("Upload files to Zenodo."))
parser.add_argument(
"-f", "--file", dest="file_name_to_upload", help="path to the file to be uploaded",
)
parser.add_argument(
"-c", "--config-file", dest="config_file", help="config file with metadata information"
)
args = parser.parse_args()
upload_to_zenodo(args.file_name_to_upload, args.config_file)
| 34.597561 | 105 | 0.645753 |
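A short usage sketch for zenodo_uploader.py above. The archive name, config path, and metadata keys here are assumptions; the script itself only requires that ZENODO_ACCESS_TOKEN is exported and that the YAML config supplies the "zenodo_metadata" mapping it PUTs to the deposition.

# Sketch only: file names and metadata values are invented.
# config.yml might look like:
#   zenodo_metadata:
#     metadata:
#       title: "Packed conda environment"
#       upload_type: "dataset"
# Command-line use: python zenodo_uploader.py -f env.tar.gz -c config.yml
# Or directly from Python:
from zenodo_uploader import upload_to_zenodo
upload_to_zenodo("env.tar.gz", "config.yml")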
793eb937d5e94b56a88d5977fb15c0e113593dc4 | 382 | py | Python | markdownme/fields.py | cbidici/cbsite | 594e327ad0c0bfa5015461eb243176c3aec7b68d | ["MIT"] | null | null | null | markdownme/fields.py | cbidici/cbsite | 594e327ad0c0bfa5015461eb243176c3aec7b68d | ["MIT"] | 1 | 2020-05-10T14:45:22.000Z | 2020-05-10T15:06:50.000Z | markdownme/fields.py | cbidici/cbsite | 594e327ad0c0bfa5015461eb243176c3aec7b68d | ["MIT"] | null | null | null |
from django.db.models import TextField
from django.forms import fields
from .widgets import MarkdownWidget
class MarkdownField(TextField):
def __init__(self, rows=64, **kwargs):
super().__init__(**kwargs)
self.rows = rows
def formfield(self, **kwargs):
kwargs['widget'] = MarkdownWidget(rows=self.rows)
return super().formfield(**kwargs)
| 25.466667 | 57 | 0.685864 |
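A hedged example of attaching the MarkdownField above to a model; the Post model and the field options are assumptions, not taken from the repository, while the widget behaviour follows from the formfield() override shown above.

from django.db import models
from markdownme.fields import MarkdownField

class Post(models.Model):
    title = models.CharField(max_length=200)
    body = MarkdownField(rows=32, blank=True)  # form field renders with MarkdownWidget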
793eb9489d4faf2a9b1b5a1d28ee724f8b239c78 | 5,294 | py | Python | plugins/modules/cp4s/cp4s_delete_case.py | IBM/cp4s-ansible-collection | c80a9f6869cd2f141befb744a516568cf9fb5b61 | ["MIT"] | null | null | null | plugins/modules/cp4s/cp4s_delete_case.py | IBM/cp4s-ansible-collection | c80a9f6869cd2f141befb744a516568cf9fb5b61 | ["MIT"] | 3 | 2021-02-17T15:23:32.000Z | 2021-02-22T13:40:38.000Z | plugins/modules/cp4s/cp4s_delete_case.py | IBM/cp4s-ansible-collection | c80a9f6869cd2f141befb744a516568cf9fb5b61 | ["MIT"] | 1 | 2021-01-29T21:43:08.000Z | 2021-01-29T21:43:08.000Z |
# Copyright: (c) 2021, Dara Meaney
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cp4s_delete_case
short_description: A Module used to delete a case
# If this is part of a collection, you need to use semantic versioning,
# i.e. the version is of the form "2.5.0" and not "2.4".
version_added: "1.0.0"
author:
- Dara Meaney
'''
def run_module():
# define available arguments/parameters a user can pass to the module
# ansible module_args cannot accept a dict for custom modules so use a json str for input
module_args = dict(
incidentId=dict(type='str', required=True)
)
# seed the result dict in the object
# we primarily care about changed and state
# changed is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
response=''
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
module.exit_json(**result)
# during the execution of the module, if there is an exception or a
# conditional state that effectively causes a failure, run
# AnsibleModule.fail_json() to pass in the message and the result
if module.params['incidentId'] == 'fail me':
module.fail_json(msg='You requested this to fail', **result)
# TODO: Review if we can make the exception less bare, or if we can use a conditional for the changed property instead
try: # Try to make the API call
incident = delete_case(incident_id=module.params.get(
'incidentId', {}))
result.update({"case": incident})
except Exception as e: # we need to except in order to do else; use bare except and just raise the exception as normal
# raise # raises the exact error that would have otherwise been raised.
module.fail_json(msg=u'An exception occurred when deleting the case: {}'.format(e), **result)
    else: # if no exceptions are raised we can assume the API call is successful and has changed state
result['changed'] = True
# in the event of a successful module execution, you will want to
    # simply call AnsibleModule.exit_json(), passing the key/value results
module.exit_json(**result)
def delete_case(incident_id: str):
"""delete_cases is a helper function which
will get a handle on an instance of the REST API client.
:param incident_id: The incident/case id used when making the request
:type incident_id: str
"""
client = create_authenticated_client()
return client.delete("/incidents/{}".format(incident_id))
def create_authenticated_client():
"""create_authenticated_client uses the resilient package
to gather values from a standard app.config file; the configuration file
used for an Integration Server or App Host App.
This means all credentials needed to run this module can be kept
separate and we can also avoid var prompts.
    Note: If you're running this module on a host other than localhost,
that host needs to have an app.config file or you need to copy one over.
:return: An authenticated rest client to CP4S or Resilient
:rtype: SimpleClient
"""
import resilient
# Create Resilient API Client
resilient_parser = resilient.ArgumentParser(
config_file=resilient.get_config_file())
resilient_opts = resilient_parser.parse_known_args()
# Instantiate a client using the gathered opts
return resilient.get_client(resilient_opts[0])
def main():
run_module()
if __name__ == '__main__':
main()
| 43.752066 | 462 | 0.729316 |
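The client bootstrap that cp4s_delete_case.py performs can also be exercised outside Ansible. The sketch below repeats the module's own resilient calls; it assumes a discoverable app.config and uses a hypothetical case ID.

import resilient

parser = resilient.ArgumentParser(config_file=resilient.get_config_file())
opts = parser.parse_known_args()
client = resilient.get_client(opts[0])
client.delete("/incidents/2095")  # the same call delete_case() issues for case 2095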
793eb964e0d3b2f7d10e7ed423165598b7541e4d | 4,691 | py | Python | goalsettrack/goalsettrack/settings.py | ingsoft-famaf/the-late-ones | 4641413890ae3713d415afca1d42462cc07405bf | ["MIT"] | null | null | null | goalsettrack/goalsettrack/settings.py | ingsoft-famaf/the-late-ones | 4641413890ae3713d415afca1d42462cc07405bf | ["MIT"] | null | null | null | goalsettrack/goalsettrack/settings.py | ingsoft-famaf/the-late-ones | 4641413890ae3713d415afca1d42462cc07405bf | ["MIT"] | null | null | null |
"""
Django settings for goalsettrack project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x4czc95g)epvvp54y@!vil$=_)a&+9$tcy5m68f1t=wv=j$$qm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# the following two fields are for sending emails
#EMAIL_BACKEND = "sgbackend.SendGridBackend"
#SENDGRID_API_KEY = "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"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = "apikey"
EMAIL_HOST_PASSWORD = "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"
EMAIL_PORT = 587
# Application definition
INSTALLED_APPS = [
'archivoadjunto.apps.ArchivoadjuntoConfig',
'recordatorio.apps.RecordatorioConfig',
'categoria.apps.CategoriaConfig',
'comentario.apps.ComentarioConfig',
'usuario.apps.UsuarioConfig',
'meta.apps.MetaConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'goalsettrack.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'templates',
os.path.join(BASE_DIR, "../templates/"),
os.path.join(BASE_DIR, "./templates/"),
os.path.join(
os.path.join(BASE_DIR, 'templates'), 'archivoadjunto'),
os.path.join(os.path.join(BASE_DIR, 'templates'), 'categoria'),
os.path.join(os.path.join(BASE_DIR, 'templates'), 'comentario'),
os.path.join(os.path.join(BASE_DIR, 'templates'), 'meta'),
os.path.join(os.path.join(BASE_DIR, 'templates'), 'usuario'),
os.path.join(os.path.join(BASE_DIR, 'templates'), 'recordatorio'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'goalsettrack.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'es-AR'
TIME_ZONE = 'America/Argentina/Cordoba'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'templates'), os.path.join(os.path.join(BASE_DIR, 'templates'), 'meta'),
os.path.join(BASE_DIR, 'templates'), os.path.join(os.path.join(BASE_DIR, 'templates'), 'recordatorio'),
)
| 30.461039 | 107 | 0.690471 |
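The settings module above hard-codes SECRET_KEY and the SendGrid credentials. A hedged alternative, reading them from the environment via the os module the file already imports (the variable names below are assumptions):

SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "insecure-dev-key")
EMAIL_HOST_PASSWORD = os.environ.get("SENDGRID_API_KEY", "")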
793eba7fa4539fe09f3ac5ceace84011ad6a730a | 1,389 | py | Python | msrest/version.py | bgklein/msrest-for-python | d5103970de1ec7f56375ae4da1b5738a71196c3f | ["MIT"] | null | null | null | msrest/version.py | bgklein/msrest-for-python | d5103970de1ec7f56375ae4da1b5738a71196c3f | ["MIT"] | null | null | null | msrest/version.py | bgklein/msrest-for-python | d5103970de1ec7f56375ae4da1b5738a71196c3f | ["MIT"] | null | null | null |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
#: version of this package. Use msrest.__version__ instead
msrest_version = "0.6.6"
| 47.896552 | 78 | 0.686105 |
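As the trailing comment in the file notes, consumers are expected to read the value through msrest.__version__ rather than importing the constant; a two-line check, assuming the package is installed:

import msrest
print(msrest.__version__)  # "0.6.6" at the revision shown above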
793eba9b80ffa0c3e142db14dc245c198d8aabcf | 961 | py | Python | marqeta/response_models/identification_response_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | ["MIT"] | 21 | 2019-04-12T09:02:17.000Z | 2022-02-18T11:39:06.000Z | marqeta/response_models/identification_response_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | ["MIT"] | 1 | 2020-07-22T21:27:40.000Z | 2020-07-23T17:38:43.000Z | marqeta/response_models/identification_response_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | ["MIT"] | 10 | 2019-05-08T14:20:37.000Z | 2021-09-20T18:09:26.000Z |
from datetime import datetime, date
from marqeta.response_models import datetime_object
import json
import re
class IdentificationResponseModel(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def type(self):
return self.json_response.get('type', None)
@property
def value(self):
return self.json_response.get('value', None)
@property
def expiration_date(self):
if 'expiration_date' in self.json_response:
return datetime_object('expiration_date', self.json_response)
def __repr__(self):
return '<Marqeta.response_models.identification_response_model.IdentificationResponseModel>' + self.__str__()
| 25.972973 | 118 | 0.702393 |
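A small illustration of the response-model wrapper above; the payload values are invented, and the properties simply read back the dict passed to the constructor.

record = IdentificationResponseModel({"type": "SSN", "value": "***-**-1234"})
print(record.type, record.value)  # -> SSN ***-**-1234
print(record)                     # JSON rendering via __str__ / json_serial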