ext | sha | content
---|---|---|
py | b413a190ce9ae7df74ed42c5915cf39c0e453f62 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import functools
from cryptography import utils, x509
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends.openssl.decode_asn1 import (
_CRL_ENTRY_REASON_CODE_TO_ENUM, _OCSP_BASICRESP_EXT_PARSER,
_OCSP_REQ_EXT_PARSER, _OCSP_SINGLERESP_EXT_PARSER,
_OCSP_SINGLERESP_EXT_PARSER_NO_SCT,
_asn1_integer_to_int,
_asn1_string_to_bytes, _decode_x509_name, _obj2txt,
_parse_asn1_generalized_time,
)
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from cryptography.hazmat.primitives import serialization
from cryptography.x509.ocsp import (
OCSPCertStatus, OCSPRequest, OCSPResponse, OCSPResponseStatus,
_CERT_STATUS_TO_ENUM, _OIDS_TO_HASH, _RESPONSE_STATUS_TO_ENUM,
)
def _requires_successful_response(func):
@functools.wraps(func)
def wrapper(self, *args):
if self.response_status != OCSPResponseStatus.SUCCESSFUL:
raise ValueError(
"OCSP response status is not successful so the property "
"has no value"
)
else:
return func(self, *args)
return wrapper
def _issuer_key_hash(backend, cert_id):
key_hash = backend._ffi.new("ASN1_OCTET_STRING **")
res = backend._lib.OCSP_id_get0_info(
backend._ffi.NULL, backend._ffi.NULL,
key_hash, backend._ffi.NULL, cert_id
)
backend.openssl_assert(res == 1)
backend.openssl_assert(key_hash[0] != backend._ffi.NULL)
return _asn1_string_to_bytes(backend, key_hash[0])
def _issuer_name_hash(backend, cert_id):
name_hash = backend._ffi.new("ASN1_OCTET_STRING **")
res = backend._lib.OCSP_id_get0_info(
name_hash, backend._ffi.NULL,
backend._ffi.NULL, backend._ffi.NULL, cert_id
)
backend.openssl_assert(res == 1)
backend.openssl_assert(name_hash[0] != backend._ffi.NULL)
return _asn1_string_to_bytes(backend, name_hash[0])
def _serial_number(backend, cert_id):
num = backend._ffi.new("ASN1_INTEGER **")
res = backend._lib.OCSP_id_get0_info(
backend._ffi.NULL, backend._ffi.NULL,
backend._ffi.NULL, num, cert_id
)
backend.openssl_assert(res == 1)
backend.openssl_assert(num[0] != backend._ffi.NULL)
return _asn1_integer_to_int(backend, num[0])
def _hash_algorithm(backend, cert_id):
asn1obj = backend._ffi.new("ASN1_OBJECT **")
res = backend._lib.OCSP_id_get0_info(
backend._ffi.NULL, asn1obj,
backend._ffi.NULL, backend._ffi.NULL, cert_id
)
backend.openssl_assert(res == 1)
backend.openssl_assert(asn1obj[0] != backend._ffi.NULL)
oid = _obj2txt(backend, asn1obj[0])
try:
return _OIDS_TO_HASH[oid]
except KeyError:
raise UnsupportedAlgorithm(
"Signature algorithm OID: {} not recognized".format(oid)
)
@utils.register_interface(OCSPResponse)
class _OCSPResponse(object):
def __init__(self, backend, ocsp_response):
self._backend = backend
self._ocsp_response = ocsp_response
status = self._backend._lib.OCSP_response_status(self._ocsp_response)
self._backend.openssl_assert(status in _RESPONSE_STATUS_TO_ENUM)
self._status = _RESPONSE_STATUS_TO_ENUM[status]
if self._status is OCSPResponseStatus.SUCCESSFUL:
basic = self._backend._lib.OCSP_response_get1_basic(
self._ocsp_response
)
self._backend.openssl_assert(basic != self._backend._ffi.NULL)
self._basic = self._backend._ffi.gc(
basic, self._backend._lib.OCSP_BASICRESP_free
)
self._backend.openssl_assert(
self._backend._lib.OCSP_resp_count(self._basic) == 1
)
self._single = self._backend._lib.OCSP_resp_get0(self._basic, 0)
self._backend.openssl_assert(
self._single != self._backend._ffi.NULL
)
self._cert_id = self._backend._lib.OCSP_SINGLERESP_get0_id(
self._single
)
self._backend.openssl_assert(
self._cert_id != self._backend._ffi.NULL
)
response_status = utils.read_only_property("_status")
@property
@_requires_successful_response
def signature_algorithm_oid(self):
alg = self._backend._lib.OCSP_resp_get0_tbs_sigalg(self._basic)
self._backend.openssl_assert(alg != self._backend._ffi.NULL)
oid = _obj2txt(self._backend, alg.algorithm)
return x509.ObjectIdentifier(oid)
@property
@_requires_successful_response
def signature_hash_algorithm(self):
oid = self.signature_algorithm_oid
try:
return x509._SIG_OIDS_TO_HASH[oid]
except KeyError:
raise UnsupportedAlgorithm(
"Signature algorithm OID:{} not recognized".format(oid)
)
@property
@_requires_successful_response
def signature(self):
sig = self._backend._lib.OCSP_resp_get0_signature(self._basic)
self._backend.openssl_assert(sig != self._backend._ffi.NULL)
return _asn1_string_to_bytes(self._backend, sig)
@property
@_requires_successful_response
def tbs_response_bytes(self):
respdata = self._backend._lib.OCSP_resp_get0_respdata(self._basic)
self._backend.openssl_assert(respdata != self._backend._ffi.NULL)
pp = self._backend._ffi.new("unsigned char **")
res = self._backend._lib.i2d_OCSP_RESPDATA(respdata, pp)
self._backend.openssl_assert(pp[0] != self._backend._ffi.NULL)
pp = self._backend._ffi.gc(
pp, lambda pointer: self._backend._lib.OPENSSL_free(pointer[0])
)
self._backend.openssl_assert(res > 0)
return self._backend._ffi.buffer(pp[0], res)[:]
@property
@_requires_successful_response
def certificates(self):
sk_x509 = self._backend._lib.OCSP_resp_get0_certs(self._basic)
num = self._backend._lib.sk_X509_num(sk_x509)
certs = []
for i in range(num):
x509 = self._backend._lib.sk_X509_value(sk_x509, i)
self._backend.openssl_assert(x509 != self._backend._ffi.NULL)
cert = _Certificate(self._backend, x509)
# We need to keep the OCSP response that the certificate came from
# alive until the Certificate object itself goes out of scope, so
# we give it a private reference.
cert._ocsp_resp = self
certs.append(cert)
return certs
@property
@_requires_successful_response
def responder_key_hash(self):
_, asn1_string = self._responder_key_name()
if asn1_string == self._backend._ffi.NULL:
return None
else:
return _asn1_string_to_bytes(self._backend, asn1_string)
@property
@_requires_successful_response
def responder_name(self):
x509_name, _ = self._responder_key_name()
if x509_name == self._backend._ffi.NULL:
return None
else:
return _decode_x509_name(self._backend, x509_name)
def _responder_key_name(self):
asn1_string = self._backend._ffi.new("ASN1_OCTET_STRING **")
x509_name = self._backend._ffi.new("X509_NAME **")
res = self._backend._lib.OCSP_resp_get0_id(
self._basic, asn1_string, x509_name
)
self._backend.openssl_assert(res == 1)
return x509_name[0], asn1_string[0]
@property
@_requires_successful_response
def produced_at(self):
produced_at = self._backend._lib.OCSP_resp_get0_produced_at(
self._basic
)
return _parse_asn1_generalized_time(self._backend, produced_at)
@property
@_requires_successful_response
def certificate_status(self):
status = self._backend._lib.OCSP_single_get0_status(
self._single,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
)
self._backend.openssl_assert(status in _CERT_STATUS_TO_ENUM)
return _CERT_STATUS_TO_ENUM[status]
@property
@_requires_successful_response
def revocation_time(self):
if self.certificate_status is not OCSPCertStatus.REVOKED:
return None
asn1_time = self._backend._ffi.new("ASN1_GENERALIZEDTIME **")
self._backend._lib.OCSP_single_get0_status(
self._single,
self._backend._ffi.NULL,
asn1_time,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
)
self._backend.openssl_assert(asn1_time[0] != self._backend._ffi.NULL)
return _parse_asn1_generalized_time(self._backend, asn1_time[0])
@property
@_requires_successful_response
def revocation_reason(self):
if self.certificate_status is not OCSPCertStatus.REVOKED:
return None
reason_ptr = self._backend._ffi.new("int *")
self._backend._lib.OCSP_single_get0_status(
self._single,
reason_ptr,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
)
# If no reason is encoded OpenSSL returns -1
if reason_ptr[0] == -1:
return None
else:
self._backend.openssl_assert(
reason_ptr[0] in _CRL_ENTRY_REASON_CODE_TO_ENUM
)
return _CRL_ENTRY_REASON_CODE_TO_ENUM[reason_ptr[0]]
@property
@_requires_successful_response
def this_update(self):
asn1_time = self._backend._ffi.new("ASN1_GENERALIZEDTIME **")
self._backend._lib.OCSP_single_get0_status(
self._single,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
asn1_time,
self._backend._ffi.NULL,
)
self._backend.openssl_assert(asn1_time[0] != self._backend._ffi.NULL)
return _parse_asn1_generalized_time(self._backend, asn1_time[0])
@property
@_requires_successful_response
def next_update(self):
asn1_time = self._backend._ffi.new("ASN1_GENERALIZEDTIME **")
self._backend._lib.OCSP_single_get0_status(
self._single,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
self._backend._ffi.NULL,
asn1_time,
)
if asn1_time[0] != self._backend._ffi.NULL:
return _parse_asn1_generalized_time(self._backend, asn1_time[0])
else:
return None
@property
@_requires_successful_response
def issuer_key_hash(self):
return _issuer_key_hash(self._backend, self._cert_id)
@property
@_requires_successful_response
def issuer_name_hash(self):
return _issuer_name_hash(self._backend, self._cert_id)
@property
@_requires_successful_response
def hash_algorithm(self):
return _hash_algorithm(self._backend, self._cert_id)
@property
@_requires_successful_response
def serial_number(self):
return _serial_number(self._backend, self._cert_id)
@utils.cached_property
@_requires_successful_response
def extensions(self):
return _OCSP_BASICRESP_EXT_PARSER.parse(self._backend, self._basic)
@utils.cached_property
@_requires_successful_response
def single_extensions(self):
if self._backend._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER:
return _OCSP_SINGLERESP_EXT_PARSER.parse(
self._backend, self._single
)
else:
return _OCSP_SINGLERESP_EXT_PARSER_NO_SCT.parse(
self._backend, self._single
)
def public_bytes(self, encoding):
if encoding is not serialization.Encoding.DER:
raise ValueError(
"The only allowed encoding value is Encoding.DER"
)
bio = self._backend._create_mem_bio_gc()
res = self._backend._lib.i2d_OCSP_RESPONSE_bio(
bio, self._ocsp_response
)
self._backend.openssl_assert(res > 0)
return self._backend._read_mem_bio(bio)
@utils.register_interface(OCSPRequest)
class _OCSPRequest(object):
def __init__(self, backend, ocsp_request):
if backend._lib.OCSP_request_onereq_count(ocsp_request) > 1:
raise NotImplementedError(
'OCSP request contains more than one request'
)
self._backend = backend
self._ocsp_request = ocsp_request
self._request = self._backend._lib.OCSP_request_onereq_get0(
self._ocsp_request, 0
)
self._backend.openssl_assert(self._request != self._backend._ffi.NULL)
self._cert_id = self._backend._lib.OCSP_onereq_get0_id(self._request)
self._backend.openssl_assert(self._cert_id != self._backend._ffi.NULL)
@property
def issuer_key_hash(self):
return _issuer_key_hash(self._backend, self._cert_id)
@property
def issuer_name_hash(self):
return _issuer_name_hash(self._backend, self._cert_id)
@property
def serial_number(self):
return _serial_number(self._backend, self._cert_id)
@property
def hash_algorithm(self):
return _hash_algorithm(self._backend, self._cert_id)
@utils.cached_property
def extensions(self):
return _OCSP_REQ_EXT_PARSER.parse(self._backend, self._ocsp_request)
def public_bytes(self, encoding):
if encoding is not serialization.Encoding.DER:
raise ValueError(
"The only allowed encoding value is Encoding.DER"
)
bio = self._backend._create_mem_bio_gc()
res = self._backend._lib.i2d_OCSP_REQUEST_bio(bio, self._ocsp_request)
self._backend.openssl_assert(res > 0)
return self._backend._read_mem_bio(bio)
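# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the public API backed by this backend is typically used:
# parse a DER-encoded OCSP response and read a few of the properties implemented by
# _OCSPResponse above. The `der_bytes` argument is a hypothetical placeholder.
def _example_inspect_ocsp_response(der_bytes):
    from cryptography.x509 import ocsp as x509_ocsp
    resp = x509_ocsp.load_der_ocsp_response(der_bytes)
    if resp.response_status != OCSPResponseStatus.SUCCESSFUL:
        return None
    # The properties below raise ValueError unless the response status is SUCCESSFUL.
    return (resp.certificate_status, resp.serial_number, resp.this_update, resp.next_update)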
|
py | b413a1cb0a6c0baf11218cecd739e29b45b505f1 | class Person:
def __init__(self, name, surname):
self.name = name
self.surname = surname
def __add__(self, other):
return Person(self.name, other.surname)
def __repr__(self):
return f'{self.name} {self.surname}'
class Group:
def __init__(self, name, people):
self.name = name
self.people = people
def __add__(self, other):
return Group(self.name, self.people + other.people)
def __len__(self):
return len(self.people)
def __getitem__(self, index):
# return f'Person {self.people.index(self.people[item])}: {self.people[item]}'
return f'Person {index}: {self.people[index]}'
# The index can be implemented as a class attribute in the Person class
# which will lead to changes to both __repr__ methods
def __repr__(self):
return f'Group {self.name} with members {", ".join(p.name + " " + p.surname for p in self.people)}'
p0 = Person('Aliko', 'Dangote')
p1 = Person('Bill', 'Gates')
p2 = Person('Warren', 'Buffet')
p3 = Person('Elon', 'Musk')
p4 = p2 + p3
first_group = Group('__VIP__', [p0, p1, p2])
second_group = Group('Special', [p3, p4])
third_group = first_group + second_group
print(len(first_group))
print(second_group)
print(third_group[0])
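# Note: the loop below works without __iter__ because Python falls back to __getitem__
# with indices 0, 1, 2, ... and stops at the IndexError raised once the index runs past self.people.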
for person in third_group:
print(person)
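# --- Illustrative sketch (an assumption, not part of the original exercise) ---
# The comment inside Group mentions that the index could instead live on Person as a
# class-level counter; one hypothetical version of that idea is shown below.
class IndexedPerson(Person):
    _next_index = 0  # class attribute shared by all instances

    def __init__(self, name, surname):
        super().__init__(name, surname)
        self.index = IndexedPerson._next_index
        IndexedPerson._next_index += 1

    def __repr__(self):
        return f'Person {self.index}: {self.name} {self.surname}'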
|
py | b413a3d4e14451d460e0596f8aa839ec5373dbc6 | """ MXNet neural networks for tabular data containing numerical, categorical, and text fields.
First performs neural network specific pre-processing of the data.
Contains separate input modules which are applied to different columns of the data depending on the type of values they contain:
- Numeric columns are passed through a single Dense layer (binary categorical variables are treated as numeric)
- Categorical columns are passed through separate Embedding layers
- Text columns are passed through separate LanguageModel layers
Vectors produced by different input layers are then concatenated and passed to a multi-layer MLP with an output layer determined by problem_type.
Hyperparameters are passed as dict params, including options for preprocessing stages.
"""
import random, json, time, os, logging, warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
import mxnet as mx
from mxnet import nd, autograd, gluon
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, QuantileTransformer # PowerTransformer
from ......core import Space
from ......utils import try_import_mxboard
from ......task.base import BasePredictor
from ....utils.loaders import load_pkl
from ..abstract.abstract_model import AbstractModel, fixedvals_from_searchspaces
from ....utils.savers import save_pkl
from ...constants import BINARY, MULTICLASS, REGRESSION
from .categorical_encoders import OneHotMergeRaresHandleUnknownEncoder, OrdinalMergeRaresHandleUnknownEncoder
from .tabular_nn_dataset import TabularNNDataset
from .embednet import EmbedNet
from .tabular_nn_trial import tabular_nn_trial
from .hyperparameters.parameters import get_default_param
from .hyperparameters.searchspaces import get_default_searchspace
# __all__ = ['TabularNeuralNetModel', 'EPS']
warnings.filterwarnings("ignore", module='sklearn.preprocessing') # sklearn processing n_quantiles warning
logger = logging.getLogger(__name__)
EPS = 10e-8 # small number
# TODO: Gets stuck after inferring feature types near infinitely in nyc-jiashenliu-515k-hotel-reviews-data-in-europe dataset, 70 GB of memory, c5.9xlarge
# Suspect issue is coming from embeddings due to text features with extremely large categorical counts.
class TabularNeuralNetModel(AbstractModel):
""" Class for neural network models that operate on tabular data.
These networks use different types of input layers to process different types of data in various columns.
Attributes:
types_of_features (dict): keys = 'continuous', 'skewed', 'onehot', 'embed', 'language'; values = column-names of Dataframe corresponding to the features of this type
feature_arraycol_map (OrderedDict): maps feature-name -> list of column-indices in processed_array corresponding to this feature
self.feature_type_map (OrderedDict): maps feature-name -> feature_type string (options: 'vector', 'embed', 'language')
processor (sklearn.ColumnTransformer): scikit-learn preprocessor object.
Note: This model always assumes higher values of self.objective_func indicate better performance.
"""
# Constants used throughout this class:
# model_internals_file_name = 'model-internals.pkl' # store model internals here
unique_category_str = '!missing!' # string used to represent missing values and unknown categories for categorical features. Should not appear in the dataset
# TODO: remove: metric_map = {REGRESSION: 'Rsquared', BINARY: 'accuracy', MULTICLASS: 'accuracy'} # string used to represent different evaluation metrics. metric_map[self.problem_type] produces str corresponding to metric used here.
# TODO: should be using self.objective_func as the metric of interest. Should have method: get_metric_name(self.objective_func)
rescale_losses = {gluon.loss.L1Loss:'std', gluon.loss.HuberLoss:'std', gluon.loss.L2Loss:'var'} # dict of loss names where we should rescale loss, value indicates how to rescale. Call self.loss_func.name
model_file_name = 'tabularNN.pkl'
params_file_name = 'net.params' # Stores parameters of final network
temp_file_name = 'temp_net.params' # Stores temporary network parameters (eg. during the course of training)
def __init__(self, path, name, problem_type, objective_func, hyperparameters=None, features=None):
super().__init__(path=path, name=name, model=None, problem_type=problem_type, objective_func=objective_func, hyperparameters=hyperparameters, features=features)
"""
TabularNeuralNetModel object.
Parameters
----------
path (str): file-path to directory where to save files associated with this model
name (str): name used to refer to this model
problem_type (str): what type of prediction problem is this model used for
objective_func (func): function used to evaluate performance (Note: we assume higher = better)
hyperparameters (dict): various hyperparameters for neural network and the NN-specific data processing
features (list): List of predictive features to use, other features are ignored by the model.
"""
self.problem_type = problem_type
self.objective_func = objective_func
self.eval_metric_name = self.objective_func.name
self.feature_types_metadata = None
self.types_of_features = None
self.feature_arraycol_map = None
self.feature_type_map = None
self.processor = None # data processor
self.summary_writer = None
self.ctx = mx.cpu()
# TODO: Remove this, add generic unfit_copy func or fix model to not have tabNN in params
def create_unfit_copy(self):
new_model = TabularNeuralNetModel(path=self.path, name=self.name, problem_type=self.problem_type, objective_func=self.objective_func, features=self.features, hyperparameters=self.params)
new_model.path = self.path
new_model.params['tabNN'] = None
return new_model
def _set_default_params(self):
""" Specifies hyperparameter values to use by default """
default_params = get_default_param(self.problem_type)
for param, val in default_params.items():
self._set_default_param_value(param, val)
def set_net_defaults(self, train_dataset):
""" Sets dataset-adaptive default values to use for our neural network """
if self.problem_type == MULTICLASS:
self.num_classes = train_dataset.num_classes
self.num_net_outputs = self.num_classes
elif self.problem_type == REGRESSION:
self.num_net_outputs = 1
if self.params['y_range'] is None: # Infer default y-range
y_vals = train_dataset.dataset._data[train_dataset.label_index].asnumpy()
min_y = float(min(y_vals))
max_y = float(max(y_vals))
std_y = np.std(y_vals)
y_ext = self.params['y_range_extend']*std_y
if min_y >= 0: # infer y must be nonnegative
min_y = max(0, min_y-y_ext)
else:
min_y = min_y-y_ext
if max_y <= 0: # infer y must be non-positive
max_y = min(0, max_y+y_ext)
else:
max_y = max_y+y_ext
self.params['y_range'] = (min_y, max_y)
elif self.problem_type == BINARY:
self.num_classes = 2
self.num_net_outputs = 2
if self.params['layers'] is None: # Use default choices for MLP architecture
if self.problem_type == REGRESSION:
default_layer_sizes = [256, 128] # overall network will have 4 layers. Input layer, 256-unit hidden layer, 128-unit hidden layer, output layer.
elif self.problem_type == BINARY or self.problem_type == MULTICLASS:
default_sizes = [256, 128] # will be scaled adaptively
# base_size = max(1, min(self.num_net_outputs, 20)/2.0) # scale layer width based on number of classes
base_size = max(1, min(self.num_net_outputs, 100) / 50) # TODO: Updated because it improved model quality and made training far faster
default_layer_sizes = [defaultsize*base_size for defaultsize in default_sizes]
# TODO: This gets really large on 100K+ rows... It takes hours on gpu for nyc-albert: 78 float/int features which get expanded to 1734, it also overfits and maxes accuracy on epoch
# LGBM takes 120 seconds on 4 cpu's and gets far better accuracy
# Perhaps we should add an order of magnitude to the pre-req with -3, or else scale based on feature count instead of row count.
# layer_expansion_factor = np.log10(max(train_dataset.num_examples, 1000)) - 2 # scale layers based on num_training_examples
layer_expansion_factor = 1 # TODO: Hardcoded to 1 because it results in both better model quality and far faster training time
max_layer_width = self.params['max_layer_width']
self.params['layers'] = [int(min(max_layer_width, layer_expansion_factor*defaultsize))
for defaultsize in default_layer_sizes]
if train_dataset.has_vector_features() and self.params['numeric_embed_dim'] is None:
# Use default choices for numeric embedding size
vector_dim = train_dataset.dataset._data[train_dataset.vectordata_index].shape[1] # total dimensionality of vector features
prop_vector_features = train_dataset.num_vector_features() / float(train_dataset.num_features) # Fraction of features that are numeric
min_numeric_embed_dim = 32
max_numeric_embed_dim = self.params['max_layer_width']
self.params['numeric_embed_dim'] = int(min(max_numeric_embed_dim, max(min_numeric_embed_dim,
self.params['layers'][0]*prop_vector_features*np.log10(vector_dim+10) )))
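# Illustrative (hypothetical) numbers: with layers[0]=256, half the features numeric (prop_vector_features=0.5)
# and vector_dim=90, this gives 256 * 0.5 * log10(100) = 256, which is then clamped into
# [min_numeric_embed_dim, max_numeric_embed_dim].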
return
def fit(self, X_train, Y_train, X_test=None, Y_test=None, **kwargs):
""" X_train (pd.DataFrame): training data features (not necessarily preprocessed yet)
X_test (pd.DataFrame): test data features (should have same column names as Xtrain)
Y_train (pd.Series):
Y_test (pd.Series): are pandas Series
kwargs: Can specify amount of compute resources to utilize (num_cpus, num_gpus).
"""
self.verbosity = kwargs.get('verbosity', 2)
self.params = fixedvals_from_searchspaces(self.params)
if self.feature_types_metadata is None:
raise ValueError("Trainer class must set feature_types_metadata for this model")
X_train = self.preprocess(X_train)
if self.features is None:
self.features = list(X_train.columns)
# print('features: ', self.features)
if 'num_cpus' in kwargs:
self.params['num_dataloading_workers'] = max(1, int(kwargs['num_cpus']/2.0))
else:
self.params['num_dataloading_workers'] = 1
if 'num_gpus' in kwargs and kwargs['num_gpus'] >= 1: # Currently cannot use >1 GPU
self.params['ctx'] = mx.gpu() # Currently cannot use more than 1 GPU
else:
self.params['ctx'] = mx.cpu()
train_dataset = self.process_data(X_train, Y_train, is_test=False) # Dataset object
if X_test is not None:
X_test = self.preprocess(X_test)
test_dataset = self.process_data(X_test, Y_test, is_test=True) # Dataset object to use for validation
else:
test_dataset = None
logger.log(15, "Training data for neural network has: %d examples, %d features (%d vector, %d embedding, %d language)" %
(train_dataset.num_examples, train_dataset.num_features,
len(train_dataset.feature_groups['vector']), len(train_dataset.feature_groups['embed']),
len(train_dataset.feature_groups['language']) ))
# train_dataset.save()
# test_dataset.save()
# self._save_preprocessor() # TODO: should save these things for hyperparam tuning. Need one HP tuner for network-specific HPs, another for preprocessing HPs.
self.get_net(train_dataset)
self.train_net(params=self.params, train_dataset=train_dataset, test_dataset=test_dataset, initialize=True, setup_trainer=True)
"""
# TODO: if we don't want to save intermediate network parameters, need to do something like saving in temp directory to clean up after training:
with make_temp_directory() as temp_dir:
save_callback = SaveModelCallback(self.model, monitor=self.metric, mode=save_callback_mode, name=self.name)
with progress_disabled_ctx(self.model) as model:
original_path = model.path
model.path = Path(temp_dir)
model.fit_one_cycle(self.epochs, self.lr, callbacks=save_callback)
# Load the best one and export it
model.load(self.name)
print(f'Model validation metrics: {model.validate()}')
model.path = original_path\
"""
def get_net(self, train_dataset):
""" Creates a Gluon neural net and context for this dataset.
Also sets up trainer/optimizer as necessary.
"""
self.set_net_defaults(train_dataset)
self.ctx = self.params['ctx']
net = EmbedNet(train_dataset=train_dataset, params=self.params,
num_net_outputs=self.num_net_outputs, ctx=self.ctx)
self.architecture_desc = net.architecture_desc # Description of network architecture
self.net_filename = self.path + self.temp_file_name
self.model = net
if not os.path.exists(self.path):
os.makedirs(self.path)
return
def train_net(self, params, train_dataset, test_dataset=None,
initialize=True, setup_trainer=True, file_prefix=""):
""" Trains neural net on given train dataset, early stops based on test_dataset.
Args:
params (dict): various hyperparameter values
train_dataset (TabularNNDataset): training data used to learn network weights
test_dataset (TabularNNDataset): validation data used for hyperparameter tuning
initialize (bool): set = False to continue training of a previously trained model, otherwise initializes network weights randomly
setup_trainer (bool): set = False to reuse the same trainer from a previous training run, otherwise creates new trainer from scratch
file_prefix (str): prefix to append to all file-names created here. Can use to make sure different trials create different files
"""
logger.log(15, "Training neural network for up to %s epochs..." % self.params['num_epochs'])
seed_value = self.params.get('seed_value')
if seed_value is not None: # Set seed
random.seed(seed_value)
np.random.seed(seed_value)
mx.random.seed(seed_value)
if initialize: # Initialize the weights of network
logging.debug("initializing neural network...")
self.model.collect_params().initialize(ctx=self.ctx)
self.model.hybridize()
logging.debug("initialized")
if setup_trainer:
# Also setup mxboard if visualizer has been specified:
visualizer = self.params.get('visualizer', 'none')
if visualizer == 'tensorboard' or visualizer == 'mxboard':
try_import_mxboard()
from mxboard import SummaryWriter
self.summary_writer = SummaryWriter(logdir=self.path, flush_secs=5, verbose=False)
self.setup_trainer()
best_val_metric = -np.inf # higher = better
val_metric = None
best_val_epoch = 0
best_train_epoch = 0 # epoch with best training loss so far
best_train_loss = np.inf # smaller = better
num_epochs = self.params['num_epochs']
if test_dataset is not None:
y_test = test_dataset.get_labels()
else:
y_test = None
loss_scaling_factor = 1.0 # we divide loss by this quantity to stabilize gradients
loss_torescale = [key for key in self.rescale_losses if isinstance(self.loss_func, key)]
if len(loss_torescale) > 0:
loss_torescale = loss_torescale[0]
if self.rescale_losses[loss_torescale] == 'std':
loss_scaling_factor = np.std(train_dataset.get_labels())/5.0 + EPS # std-dev of labels
elif self.rescale_losses[loss_torescale] == 'var':
loss_scaling_factor = np.var(train_dataset.get_labels())/5.0 + EPS # variance of labels
else:
raise ValueError("Unknown loss-rescaling type %s specified for loss_func==%s" % (self.rescale_losses[loss_torescale],self.loss_func))
if self.verbosity <= 1:
verbose_eval = -1 # Print losses every verbose_eval epochs; never print if -1
elif self.verbosity == 2:
verbose_eval = 50
elif self.verbosity == 3:
verbose_eval = 10
else:
verbose_eval = 1
# Training Loop:
for e in range(num_epochs):
if e == 0: # special actions during first epoch:
logger.log(15, "Neural network architecture:")
logger.log(15, str(self.model)) # TODO: remove?
cumulative_loss = 0
for batch_idx, data_batch in enumerate(train_dataset.dataloader):
data_batch = train_dataset.format_batch_data(data_batch, self.ctx)
with autograd.record():
output = self.model(data_batch)
labels = data_batch['label']
loss = self.loss_func(output, labels) / loss_scaling_factor
# print(str(nd.mean(loss).asscalar()), end="\r") # prints per-batch losses
loss.backward()
self.optimizer.step(labels.shape[0])
cumulative_loss += nd.sum(loss).asscalar()
train_loss = cumulative_loss/float(train_dataset.num_examples) # training loss this epoch
if test_dataset is not None:
# val_metric = self.evaluate_metric(test_dataset) # Evaluate after each epoch
val_metric = self.score(X=test_dataset, y=y_test)
if test_dataset is None or val_metric >= best_val_metric: # keep training if score has improved
best_val_metric = val_metric
best_val_epoch = e
self.model.save_parameters(self.net_filename)
if test_dataset is not None:
if verbose_eval > 0 and e % verbose_eval == 0:
logger.log(15, "Epoch %s. Train loss: %s, Val %s: %s" %
(e, train_loss, self.eval_metric_name, val_metric))
if self.summary_writer is not None:
self.summary_writer.add_scalar(tag='val_'+self.eval_metric_name,
value=val_metric, global_step=e)
else:
if verbose_eval > 0 and e % verbose_eval == 0:
logger.log(15, "Epoch %s. Train loss: %s" % (e, train_loss))
if self.summary_writer is not None:
self.summary_writer.add_scalar(tag='train_loss', value=train_loss, global_step=e) # TODO: do we want to keep mxboard support?
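# Early stopping: when a validation set is provided, halt once the validation metric
# has not improved for more than epochs_wo_improve epochs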
if e - best_val_epoch > self.params['epochs_wo_improve']:
break
self.model.load_parameters(self.net_filename) # Revert back to best model
if test_dataset is None: # evaluate one final time:
logger.log(15, "Best model found in epoch %d" % best_val_epoch)
else:
final_val_metric = self.score(X=test_dataset, y=y_test)
logger.log(15, "Best model found in epoch %d. Val %s: %s" %
(best_val_epoch, self.eval_metric_name, final_val_metric))
return
def evaluate_metric(self, dataset, mx_metric=None):
""" Evaluates metric on the given dataset (TabularNNDataset object), used for early stopping and to tune hyperparameters.
If provided, mx_metric must be a function that follows the mxnet.metric API. Higher values = better!
By default, returns accuracy in the case of classification, R^2 for regression.
TODO: currently hard-coded metrics used only. Does not respect user-supplied metrics...
"""
if mx_metric is None:
if self.problem_type == REGRESSION:
mx_metric = mx.metric.MSE()
else:
mx_metric = mx.metric.Accuracy()
for batch_idx, data_batch in enumerate(dataset.dataloader):
data_batch = dataset.format_batch_data(data_batch, self.ctx)
preds = self.model(data_batch)
mx_metric.update(preds=preds, labels=data_batch['label']) # argmax not needed, even for classification
if self.problem_type == REGRESSION:
y_var = np.var(dataset.dataset._data[dataset.label_index].asnumpy()) + EPS
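# 1 - MSE / Var(y) is the (in-sample) R^2, matching the "R^2 for regression" noted in the docstring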
return 1.0 - mx_metric.get()[1] / y_var
else:
return mx_metric.get()[1] # accuracy
def predict_proba(self, X, preprocess=True):
""" To align predict wiht abstract_model API.
Preprocess here only refers to feature processing stesp done by all AbstractModel objects,
not tabularNN-specific preprocessing steps.
If X is not DataFrame but instead TabularNNDataset object, we can still produce predictions,
but cannot use preprocess in this case (needs to be already processed).
"""
if isinstance(X, TabularNNDataset):
return self._predict_tabular_data(new_data=X, process=False, predict_proba=True)
elif isinstance(X, pd.DataFrame):
if preprocess:
X = self.preprocess(X)
return self._predict_tabular_data(new_data=X, process=True, predict_proba=True)
else:
raise ValueError("X must be of type pd.DataFrame or TabularNNDataset, not type: %s" % type(X))
def _predict_tabular_data(self, new_data, process=True, predict_proba=True): # TODO ensure API lines up with tabular.Model class.
""" Specific TabularNN method to produce predictions on new (unprocessed) data.
Returns 1D numpy array unless predict_proba=True and task is multi-class classification (not binary).
Args:
new_data (pd.Dataframe or TabularNNDataset): new data to make predictions on.
If you want to make prediction for just a single row of new_data, pass in: new_data.iloc[[row_index]]
process (bool): should new data be processed (if False, new_data must be TabularNNDataset)
predict_proba (bool): should we output class-probabilities (not used for regression)
"""
if process:
new_data = self.process_data(new_data, labels=None, is_test=True)
if not isinstance(new_data, TabularNNDataset):
raise ValueError("new_data must of of type TabularNNDataset if process=False")
if self.problem_type == REGRESSION or not predict_proba:
preds = nd.zeros((new_data.num_examples,1))
else:
preds = nd.zeros((new_data.num_examples, self.num_net_outputs))
i = 0
for batch_idx, data_batch in enumerate(new_data.dataloader):
data_batch = new_data.format_batch_data(data_batch, self.ctx)
preds_batch = self.model(data_batch)
batch_size = len(preds_batch)
if self.problem_type != REGRESSION:
if not predict_proba: # need to take argmax
preds_batch = nd.argmax(preds_batch, axis=1, keepdims=True)
else: # need to take softmax
preds_batch = nd.softmax(preds_batch, axis=1)
preds[i:(i+batch_size)] = preds_batch
i = i+batch_size
if self.problem_type == REGRESSION or not predict_proba:
return preds.asnumpy().flatten() # return 1D numpy array
elif self.problem_type == BINARY and predict_proba:
return preds[:,1].asnumpy() # for binary problems, only return P(Y==1)
return preds.asnumpy() # return 2D numpy array
def process_data(self, df, labels = None, is_test=True):
""" Process train or test DataFrame into a form fit for neural network models.
Args:
df (pd.DataFrame): Data to be processed (X)
labels (pd.Series): labels to be processed (y)
is_test (bool): Is this test data, where each datapoint should be processed separately using predetermined preprocessing steps.
Otherwise preprocessor uses all data to determine properties like best scaling factors, number of categories, etc.
Returns:
Dataset object
"""
warnings.filterwarnings("ignore", module='sklearn.preprocessing') # sklearn processing n_quantiles warning
if set(df.columns) != set(self.features):
raise ValueError("Column names in provided Dataframe do not match self.features")
if labels is not None and len(labels) != len(df):
raise ValueError("Number of examples in Dataframe does not match number of labels")
if not is_test:
return self.process_train_data(df, labels)
# Otherwise we are processing test data:
if (self.processor is None or self.types_of_features is None
or self.feature_arraycol_map is None or self.feature_type_map is None):
raise ValueError("Need to process training data before test data")
df = self.ensure_onehot_object(df)
processed_array = self.processor.transform(df) # 2D numpy array. self.feature_arraycol_map, self.feature_type_map have been previously set while processing training data.
return TabularNNDataset(processed_array, self.feature_arraycol_map, self.feature_type_map,
self.params, self.problem_type, labels=labels, is_test=True)
def process_train_data(self, df, labels):
""" Preprocess training data and create self.processor object that can be used to process future data.
This method should only be used once per TabularNeuralNetModel object, otherwise will produce Warning.
# TODO no label processing for now
# TODO: language features are ignored for now
# TODO: how to add new features such as time features and remember to do the same for test data?
# TODO: no filtering of data-frame columns based on statistics, e.g. categorical columns with all unique variables or zero-variance features.
This should be done in default_learner class for all models not just TabularNeuralNetModel...
Here is old Grail code for column-filtering of data-frame Xtrain based on statistics:
try:
X_train_stats = X_train.describe(include='all').T.reset_index()
cols_to_drop = X_train_stats[(X_train_stats['unique'] > self.max_unique_categorical_values) | (X_train_stats['unique'].isna())]['index'].values
except:
cols_to_drop = []
cols_to_keep = [col for col in list(X_train.columns) if col not in cols_to_drop]
cols_to_use = [col for col in self.cat_names if col in cols_to_keep]
print(f'Using {len(cols_to_use)}/{len(self.cat_names)} categorical features')
self.cat_names = cols_to_use
print(f'Using {len(self.cont_names)} cont features')
"""
if labels is None:
raise ValueError("Attempting process training data without labels")
self.types_of_features = self._get_types_of_features(df) # dict with keys: 'continuous', 'skewed', 'onehot', 'embed', 'language', values = column-names of df
df = df[self.features]
logger.log(15, "AutoGluon Neural Network infers features are of the following types:")
logger.log(15, json.dumps(self.types_of_features, indent=4))
logger.log(15, "\n")
df = self.ensure_onehot_object(df)
self.processor = self._create_preprocessor()
processed_array = self.processor.fit_transform(df) # 2D numpy array
self.feature_arraycol_map = self._get_feature_arraycol_map() # OrderedDict of feature-name -> list of column-indices in processed_array corresponding to this feature
# print(self.feature_arraycol_map)
self.feature_type_map = self._get_feature_type_map() # OrderedDict of feature-name -> feature_type string (options: 'vector', 'embed', 'language')
# print(self.feature_type_map)
return TabularNNDataset(processed_array, self.feature_arraycol_map, self.feature_type_map,
self.params, self.problem_type, labels=labels, is_test=False)
def setup_trainer(self):
""" Set up stuff needed for training:
optimizer, loss, and summary writer (for mxboard).
Network must first be initialized before this.
"""
optimizer_opts = {'learning_rate': self.params['learning_rate'],
'wd': self.params['weight_decay'], 'clip_gradient': self.params['clip_gradient']}
if self.params['optimizer'] == 'sgd':
optimizer_opts['momentum'] = self.params['momentum']
self.optimizer = gluon.Trainer(self.model.collect_params(), 'sgd', optimizer_opts)
elif self.params['optimizer'] == 'adam': # TODO: Can we try AdamW?
self.optimizer = gluon.Trainer(self.model.collect_params(), 'adam', optimizer_opts)
else:
raise ValueError("Unknown optimizer specified: %s" % self.params['optimizer'])
if self.params['loss_function'] is None:
if self.problem_type == REGRESSION:
self.params['loss_function'] = gluon.loss.L1Loss()
else:
self.params['loss_function'] = gluon.loss.SoftmaxCrossEntropyLoss(from_logits=self.model.from_logits)
self.loss_func = self.params['loss_function']
# Helper functions for tabular NN:
def ensure_onehot_object(self, df):
""" Converts all numerical one-hot columns to object-dtype.
Note: self.types_of_features must already exist!
"""
new_df = df.copy() # To avoid SettingWithCopyWarning
for feature in self.types_of_features['onehot']:
if df[feature].dtype != 'object':
new_df.loc[:,feature] = df.loc[:,feature].astype(str)
return new_df
def __get_feature_type_if_present(self, feature_type):
""" Returns crude categorization of feature types """
return self.feature_types_metadata[feature_type] if feature_type in self.feature_types_metadata else []
def _get_types_of_features(self, df):
""" Returns dict with keys: : 'continuous', 'skewed', 'onehot', 'embed', 'language', values = ordered list of feature-names falling into each category.
Each value is a list of feature-names corresponding to columns in original dataframe.
TODO: ensure features with zero variance have already been removed before this function is called.
"""
if self.types_of_features is not None:
Warning("Attempting to _get_types_of_features for TabularNeuralNetModel, but previously already did this.")
categorical_featnames = self.__get_feature_type_if_present('object') + self.__get_feature_type_if_present('bool')
continuous_featnames = self.__get_feature_type_if_present('float') + self.__get_feature_type_if_present('int') + self.__get_feature_type_if_present('datetime')
# print("categorical_featnames:", categorical_featnames)
# print("continuous_featnames:", continuous_featnames)
language_featnames = [] # TODO: not implemented. This should fetch text features present in the data
valid_features = categorical_featnames + continuous_featnames + language_featnames
if len(categorical_featnames) + len(continuous_featnames)\
+ len(language_featnames)\
!= df.shape[1]:
unknown_features = [feature for feature in df.columns if feature not in valid_features]
# print('unknown features:', unknown_features)
df = df.drop(columns=unknown_features)
self.features = list(df.columns)
# raise ValueError("unknown feature types present in DataFrame")
types_of_features = {'continuous': [], 'skewed': [], 'onehot': [], 'embed': [], 'language': []}
# continuous = numeric features to rescale
# skewed = features to which we will apply power (ie. log / box-cox) transform before normalization
# onehot = features to one-hot encode (unknown categories for these features encountered at test-time are encoded as all zeros). We one-hot encode any features encountered that only have two unique values.
for feature in self.features:
feature_data = df[feature] # pd.Series
num_unique_vals = len(feature_data.unique())
if num_unique_vals == 2: # will be onehot encoded regardless of proc.embed_min_categories value
types_of_features['onehot'].append(feature)
elif feature in continuous_featnames:
if np.abs(feature_data.skew()) > self.params['proc.skew_threshold']:
types_of_features['skewed'].append(feature)
else:
types_of_features['continuous'].append(feature)
elif feature in categorical_featnames:
if num_unique_vals >= self.params['proc.embed_min_categories']: # sufficiently many categories to warrant learned embedding dedicated to this feature
types_of_features['embed'].append(feature)
else:
types_of_features['onehot'].append(feature)
elif feature in language_featnames:
types_of_features['language'].append(feature)
return types_of_features
def _get_feature_arraycol_map(self):
""" Returns OrderedDict of feature-name -> list of column-indices in processed data array corresponding to this feature """
feature_preserving_transforms = set(['continuous','skewed', 'ordinal', 'language']) # these transforms do not alter dimensionality of feature
feature_arraycol_map = {} # unordered version
current_colindex = 0
for transformer in self.processor.transformers_:
transformer_name = transformer[0]
transformed_features = transformer[2]
if transformer_name in feature_preserving_transforms:
for feature in transformed_features:
if feature in feature_arraycol_map:
raise ValueError("same feature is processed by two different column transformers: %s" % feature)
feature_arraycol_map[feature] = [current_colindex]
current_colindex += 1
elif transformer_name == 'onehot':
oh_encoder = [step for (name, step) in transformer[1].steps if name == 'onehot'][0]
for i in range(len(transformed_features)):
feature = transformed_features[i]
if feature in feature_arraycol_map:
raise ValueError("same feature is processed by two different column transformers: %s" % feature)
oh_dimensionality = len(oh_encoder.categories_[i])
feature_arraycol_map[feature] = list(range(current_colindex, current_colindex+oh_dimensionality))
current_colindex += oh_dimensionality
else:
raise ValueError("unknown transformer encountered: %s" % transformer_name)
if set(feature_arraycol_map.keys()) != set(self.features):
raise ValueError("failed to account for all features when determining column indices in processed array")
return OrderedDict([(key, feature_arraycol_map[key]) for key in feature_arraycol_map])
def _get_feature_type_map(self):
""" Returns OrderedDict of feature-name -> feature_type string (options: 'vector', 'embed', 'language') """
if self.feature_arraycol_map is None:
raise ValueError("must first call _get_feature_arraycol_map() before _get_feature_type_map()")
vector_features = self.types_of_features['continuous'] + self.types_of_features['skewed'] + self.types_of_features['onehot']
feature_type_map = OrderedDict()
for feature_name in self.feature_arraycol_map:
if feature_name in vector_features:
feature_type_map[feature_name] = 'vector'
elif feature_name in self.types_of_features['embed']:
feature_type_map[feature_name] = 'embed'
elif feature_name in self.types_of_features['language']:
feature_type_map[feature_name] = 'language'
else:
raise ValueError("unknown feature type encountered")
return feature_type_map
def _create_preprocessor(self):
""" Defines data encoders used to preprocess different data types and creates instance variable which is sklearn ColumnTransformer object """
if self.processor is not None:
Warning("Attempting to process training data for TabularNeuralNetModel, but previously already did this.")
continuous_features = self.types_of_features['continuous']
skewed_features = self.types_of_features['skewed']
onehot_features = self.types_of_features['onehot']
embed_features = self.types_of_features['embed']
language_features = self.types_of_features['language']
transformers = [] # order of various column transformers in this list is important!
if len(continuous_features) > 0:
continuous_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy=self.params['proc.impute_strategy'])),
('scaler', StandardScaler())])
transformers.append( ('continuous', continuous_transformer, continuous_features) )
if len(skewed_features) > 0:
power_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy=self.params['proc.impute_strategy'])),
('quantile', QuantileTransformer(output_distribution='normal')) ]) # Or output_distribution = 'uniform'
# TODO: remove old code: ('power', PowerTransformer(method=self.params['proc.power_transform_method'])) ])
transformers.append( ('skewed', power_transformer, skewed_features) )
if len(onehot_features) > 0:
onehot_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),
('onehot', OneHotMergeRaresHandleUnknownEncoder(max_levels=self.params['proc.max_category_levels'],sparse=False)) ]) # test-time unknown values will be encoded as all zeros vector
transformers.append( ('onehot', onehot_transformer, onehot_features) )
if len(embed_features) > 0: # Ordinal transformer applied to convert to-be-embedded categorical features to integer levels
ordinal_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value=self.unique_category_str)),
('ordinal', OrdinalMergeRaresHandleUnknownEncoder(max_levels=self.params['proc.max_category_levels'])) ]) # returns 0-n when max_category_levels = n-1. category n is reserved for unknown test-time categories.
transformers.append( ('ordinal', ordinal_transformer, embed_features) )
if len(language_features) > 0:
raise NotImplementedError("language_features cannot be used at the moment")
return ColumnTransformer(transformers=transformers) # numeric features are processed in the same order as in numeric_features vector, so feature-names remain the same.
def save(self, file_prefix="", directory = None, return_name=False, verbose=None):
""" file_prefix (str): Appended to beginning of file-name (does not affect directory in file-path).
directory (str): if unspecified, use self.path as directory
return_name (bool): return the file-names corresponding to this save as tuple (model_obj_file, net_params_file)
"""
if verbose is None:
verbose = self.verbosity >= 3
if directory is not None:
path = directory + file_prefix
else:
path = self.path + file_prefix
params_filepath = path + self.params_file_name
modelobj_filepath = path + self.model_file_name
if self.model is not None:
self.model.save_parameters(params_filepath)
temp_model = self.model
temp_sw = self.summary_writer
self.model = None
self.summary_writer = None
save_pkl.save(path=modelobj_filepath, object=self, verbose=verbose)
self.model = temp_model
self.summary_writer = temp_sw
if return_name:
return (modelobj_filepath, params_filepath)
@classmethod
def load(cls, path, file_prefix="", reset_paths=False, verbose=True):
""" file_prefix (str): Appended to beginning of file-name.
If you want to load files with given prefix, can also pass arg: path = directory+file_prefix
"""
path = path + file_prefix
obj = load_pkl.load(path = path + cls.model_file_name, verbose=verbose)
if reset_paths:
obj.set_contexts(path)
obj.model = EmbedNet(architecture_desc=obj.architecture_desc, ctx=obj.ctx) # recreate network from architecture description
# TODO: maybe need to initialize/hybridize??
obj.model.load_parameters(path + cls.params_file_name, ctx=obj.ctx)
obj.summary_writer = None
return obj
def hyperparameter_tune(self, X_train, X_test, Y_train, Y_test, scheduler_options, **kwargs):
""" Performs HPO and sets self.params to best hyperparameter values """
self.verbosity = kwargs.get('verbosity', 2)
logger.log(15, "Beginning hyperparameter tuning for Neural Network...")
self._set_default_searchspace() # changes non-specified default hyperparams from fixed values to search-spaces.
if self.feature_types_metadata is None:
raise ValueError("Trainer class must set feature_types_metadata for this model")
scheduler_func = scheduler_options[0] # Unpack tuple
scheduler_options = scheduler_options[1]
if scheduler_func is None or scheduler_options is None:
raise ValueError("scheduler_func and scheduler_options cannot be None for hyperparameter tuning")
num_cpus = scheduler_options['resource']['num_cpus']
num_gpus = scheduler_options['resource']['num_gpus']
self.params['num_dataloading_workers'] = max(1, int(num_cpus/2.0))
if num_gpus >= 1:
self.params['ctx'] = mx.gpu() # Currently cannot use more than 1 GPU until scheduler works
else:
self.params['ctx'] = mx.cpu()
# self.params['ctx'] = mx.cpu() # use this in case embedding layer complains during predict() for HPO with GPU
start_time = time.time()
X_train = self.preprocess(X_train)
if self.features is None:
self.features = list(X_train.columns)
params_copy = self.params.copy()
if not np.any([isinstance(params_copy[hyperparam], Space) for hyperparam in params_copy]):
logger.warning("Warning: Attempting to do hyperparameter optimization without any search space (all hyperparameters are already fixed values)")
else:
logger.log(15, "Hyperparameter search space for Neural Network: ")
for hyperparam in params_copy:
if isinstance(params_copy[hyperparam], Space):
logger.log(15, str(hyperparam)+ ": "+str(params_copy[hyperparam]))
directory = self.path # path to directory where all remote workers store things
train_dataset = self.process_data(X_train, Y_train, is_test=False) # Dataset object
X_test = self.preprocess(X_test)
test_dataset = self.process_data(X_test, Y_test, is_test=True) # Dataset object to use for validation
train_fileprefix = self.path + "train"
test_fileprefix = self.path + "validation"
train_dataset.save(file_prefix=train_fileprefix) # TODO: cleanup after HPO?
test_dataset.save(file_prefix=test_fileprefix)
tabular_nn_trial.register_args(train_fileprefix=train_fileprefix, test_fileprefix=test_fileprefix,
directory=directory, tabNN=self, **params_copy)
scheduler = scheduler_func(tabular_nn_trial, **scheduler_options)
if ('dist_ip_addrs' in scheduler_options) and (len(scheduler_options['dist_ip_addrs']) > 0):
# This is multi-machine setting, so need to copy dataset to workers:
logger.log(15, "Uploading preprocessed data to remote workers...")
scheduler.upload_files([train_fileprefix+TabularNNDataset.DATAOBJ_SUFFIX,
train_fileprefix+TabularNNDataset.DATAVALUES_SUFFIX,
test_fileprefix+TabularNNDataset.DATAOBJ_SUFFIX,
test_fileprefix+TabularNNDataset.DATAVALUES_SUFFIX]) # TODO: currently does not work.
train_fileprefix = "train"
test_fileprefix = "validation"
directory = self.path # TODO: need to change to path to working directory on every remote machine
tabular_nn_trial.update(train_fileprefix=train_fileprefix, test_fileprefix=test_fileprefix,
directory=directory)
logger.log(15, "uploaded")
scheduler.run()
scheduler.join_jobs()
scheduler.get_training_curves(plot=False, use_legend=False)
# Store results / models from this HPO run:
best_hp = scheduler.get_best_config() # best_hp only contains searchable stuff
hpo_results = {'best_reward': scheduler.get_best_reward(),
'best_config': best_hp,
'total_time': time.time() - start_time,
'metadata': scheduler.metadata,
'training_history': scheduler.training_history,
'config_history': scheduler.config_history,
'reward_attr': scheduler._reward_attr,
'args': tabular_nn_trial.args
}
hpo_results = BasePredictor._format_results(hpo_results) # store results summarizing HPO for this model
if ('dist_ip_addrs' in scheduler_options) and (len(scheduler_options['dist_ip_addrs']) > 0):
raise NotImplementedError("need to fetch model files from remote Workers")
# TODO: need to handle locations carefully: fetch these 2 files and put into self.path:
# 1) hpo_results['trial_info'][trial]['metadata']['modelobj_file']
# 2) hpo_results['trial_info'][trial]['metadata']['netparams_file']
hpo_models = {} # stores all the model names and file paths to model objects created during this HPO run.
hpo_model_performances = {}
for trial in sorted(hpo_results['trial_info'].keys()):
# TODO: ignore models which were killed early by scheduler (eg. in Hyperband)
file_id = "trial_"+str(trial) # unique identifier to files from this trial
file_prefix = file_id + "_"
trial_model_name = self.name+"_"+file_id
trial_model_path = self.path + file_prefix
hpo_models[trial_model_name] = trial_model_path
hpo_model_performances[trial_model_name] = hpo_results['trial_info'][trial][scheduler._reward_attr]
logger.log(15, "Time for Neural Network hyperparameter optimization: %s" % str(hpo_results['total_time']))
self.params.update(best_hp)
# TODO: reload model params from best trial? Do we want to save this under cls.model_file as the "optimal model"
logger.log(15, "Best hyperparameter configuration for Tabular Neural Network: ")
logger.log(15, str(best_hp))
return (hpo_models, hpo_model_performances, hpo_results)
"""
# TODO: do final fit here?
args.final_fit = True
model_weights = scheduler.run_with_config(best_config)
save(model_weights)
"""
def _set_default_searchspace(self):
""" Sets up default search space for HPO. Each hyperparameter which user did not specify is converted from
default fixed value to default search space.
"""
search_space = get_default_searchspace(self.problem_type)
for key in self.nondefault_params: # delete all user-specified hyperparams from the default search space
_ = search_space.pop(key, None)
self.params.update(search_space)
""" General TODOs:
- Automatically decrease batch-size if memory issue arises
- Retrain final NN on full dataset (train+val). How to ensure stability here?
- OrdinalEncoder class in sklearn currently cannot handle rare categories or unknown ones at test-time, so we have created our own Encoder in categorical_encoders.py
There is open PR in sklearn to address this: https://github.com/scikit-learn/scikit-learn/pull/13833/files
Currently, our code uses category_encoders package (BSD license) instead: https://github.com/scikit-learn-contrib/categorical-encoding
Once PR is merged into sklearn, may want to switch: category_encoders.Ordinal -> sklearn.preprocessing.OrdinalEncoder in preprocess_train_data()
- Save preprocessed data so that we can do HPO of neural net hyperparameters more efficiently, while also doing HPO of preprocessing hyperparameters?
Naive full HPO method requires redoing preprocessing in each trial even if we did not change preprocessing hyperparameters.
Alternative is we save each preprocessed dataset & corresponding TabularNeuralNetModel object with its unique param names in the file. Then when we try a new HP-config, we first try loading from file if one exists.
"""
|
py | b413a48aeaeadb7379b9c71de32ba0ef478d88f4 | #!/usr/bin/env python
# (C) 2001 by Argonne National Laboratory.
# See COPYRIGHT in top-level directory.
#
# This program does installs, etc and needs to run start to finish.
# Presently, it does NOT use the 'testing' MPD_CON_EXT.
import os, sys
USERDIR = "/tmp/rmbmpd"
ROOTDIR = "/tmp/rootmpd"
# install as user
print "install as user ---------------------------------------------------"
if not os.access("./mpdroot",os.X_OK):
os.system("./configure") # use prefix on makes below
os.system("make")
os.system("make prefix=%s install" % (USERDIR) )
# test:
print "TEST mpd as user ; mpdtrace as user"
PYEXT = ''
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.system("%s/bin/mpdallexit%s 1> /dev/null 2> /dev/null" % (USERDIR,PYEXT) )
os.system("%s/bin/mpdboot%s -n %d" % (USERDIR,PYEXT,NMPDS) )
expout = ['%s' % (socket.gethostname())]
rv = mpdtest.run(cmd="%s/bin/mpdtrace%s -l" % (USERDIR,PYEXT), grepOut=1, expOut=expout )
os.system("%s/bin/mpdallexit%s 1> /dev/null 2> /dev/null" % (USERDIR,PYEXT) )
# install as root
print "install as root ---------------------------------------------------"
if not os.access("./mpdroot",os.X_OK):
os.system("./configure") # use prefix on makes below
os.system("make")
os.system("sudo make prefix=%s install" % (ROOTDIR) ) # sudo did not work here
# test:
print "TEST mpd as root ; mpdtrace as user"
PYEXT = ''
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.system("sudo %s/bin/mpdallexit%s 1> /dev/null 2> /dev/null" % (ROOTDIR,PYEXT) )
os.system("sudo %s/bin/mpd%s -d" % (ROOTDIR,PYEXT) ) # not using boot here
import time
time.sleep(2)
os.environ['MPD_USE_ROOT_MPD'] = '1'
# os.system("%s/bin/mpdtrace%s -l" % (ROOTDIR,PYEXT))
expout = ['%s' % (socket.gethostname())]
rv = mpdtest.run(cmd="%s/bin/mpdtrace%s -l" % (ROOTDIR,PYEXT), grepOut=1, expOut=expout )
print "TEST that user cannot remove files owned by root"
os.system("sudo touch /tmp/testroot")
expout = ['cannot remove']
rv = mpdtest.run(cmd="%s/bin/mpiexec%s -n 1 rm -f /tmp/testroot" % (ROOTDIR,PYEXT),
grepOut=1, expOut=expout )
os.system("sudo rm -f /tmp/testroot")
os.system("sudo %s/bin/mpdallexit%s 1> /dev/null 2> /dev/null" % (ROOTDIR,PYEXT) )
|
py | b413a56c07a9ce3afbe15baffbffaf92a3d42129 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
class TestFunctionalConv3D(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight = np.random.uniform(
-1, 1, (self.out_channels, self.in_channels // self.groups
) + filter_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(-1, 1, (
self.out_channels, )).astype(self.dtype)
self.channel_last = (self.data_format == "NDHWC")
if self.channel_last:
self.input_shape = (self.batch_size, ) + self.spatial_shape + (
self.in_channels, )
else:
self.input_shape = (self.batch_size, self.in_channels
) + self.spatial_shape
self.input = np.random.uniform(-1, 1,
self.input_shape).astype(self.dtype)
def static_graph_case_1(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
y = fluid.layers.conv3d(
x,
self.out_channels,
self.filter_shape,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def static_graph_case_2(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv3d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
if not self.no_bias:
feed_dict["bias"] = self.bias
out, = exe.run(main, feed=feed_dict, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard(self.place):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv3d(
x,
weight,
bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
out = y.numpy()
return out
def _test_identity(self):
self.prepare()
out1 = self.static_graph_case_1()
out2 = self.static_graph_case_2()
out3 = self.dygraph_case()
np.testing.assert_array_almost_equal(out1, out2)
np.testing.assert_array_almost_equal(out2, out3)
def test_identity_cpu(self):
self.place = fluid.CPUPlace()
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
class TestFunctionalConv3DError(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
def test_exception(self):
self.prepare()
with self.assertRaises(ValueError):
self.static_graph_case()
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight_shape = (self.out_channels, self.in_channels // self.groups
) + filter_shape
self.bias_shape = (self.out_channels, )
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv3d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
class TestFunctionalConv3DCase2(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase3(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 3, 1, 2, 3]
self.stride = 2
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase4(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 1, 2, 2, 3, 3]
self.stride = 1
self.dilation = 2
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase5(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 1], [2, 2], [1, 1], [0, 0]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase6(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 1], [2, 2], [2, 2]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DCase7(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 6
self.out_channels = 8
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DCase8(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 6
self.out_channels = 12
self.filter_shape = 3
self.padding = "valid"
self.stride = 1
self.dilation = 1
self.groups = 6
self.no_bias = True
self.act = None
self.use_cudnn = False
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase2(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 1], [1, 2], [3, 4], [5, 6]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase3(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "not_valid"
class TestFunctionalConv3DErrorCase4(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 3
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase7(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "not_valid"
class TestFunctionalConv3DErrorCase8(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase9(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = -5
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [3, 2], [1, 2], [1, 1]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase10(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
if __name__ == "__main__":
unittest.main()
|
py | b413a646a171500eca9587aadbf59e952f553aba | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import random
import shutil
import tempfile
import unittest
from logging import DEBUG
from mock import patch, call, DEFAULT
from contextlib import nested
from swift.account import reaper
from swift.account.backend import DATADIR
from swift.common.exceptions import ClientException
from swift.common.utils import normalize_timestamp
from test import unit
from swift.common.storage_policy import StoragePolicy, POLICIES
class FakeLogger(object):
def __init__(self, *args, **kwargs):
self.inc = {'return_codes.4': 0,
'return_codes.2': 0,
'objects_failures': 0,
'objects_deleted': 0,
'objects_remaining': 0,
'objects_possibly_remaining': 0,
'containers_failures': 0,
'containers_deleted': 0,
'containers_remaining': 0,
'containers_possibly_remaining': 0}
self.exp = []
def info(self, msg, *args):
self.msg = msg
def error(self, msg, *args):
self.msg = msg
def timing_since(*args, **kwargs):
pass
def getEffectiveLevel(self):
return DEBUG
def exception(self, *args):
self.exp.append(args)
def increment(self, key):
self.inc[key] += 1
class FakeBroker(object):
def __init__(self):
self.info = {}
def get_info(self):
return self.info
class FakeAccountBroker(object):
def __init__(self, containers):
self.containers = containers
def get_info(self):
info = {'account': 'a',
'delete_timestamp': time.time() - 10}
return info
def list_containers_iter(self, *args):
for cont in self.containers:
yield cont, None, None, None
def is_status_deleted(self):
return True
def empty(self):
return False
class FakeRing(object):
def __init__(self):
self.nodes = [{'id': '1',
'ip': '10.10.10.1',
'port': 6002,
'device': None},
{'id': '2',
'ip': '10.10.10.1',
'port': 6002,
'device': None},
{'id': '3',
'ip': '10.10.10.1',
'port': 6002,
'device': None},
]
def get_nodes(self, *args, **kwargs):
return ('partition', self.nodes)
def get_part_nodes(self, *args, **kwargs):
return self.nodes
acc_nodes = [{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
cont_nodes = [{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
@unit.patch_policies([StoragePolicy(0, 'zero', False,
object_ring=unit.FakeRing()),
StoragePolicy(1, 'one', True,
object_ring=unit.FakeRing(replicas=4))])
class TestReaper(unittest.TestCase):
def setUp(self):
self.to_delete = []
self.myexp = ClientException("", http_host=None,
http_port=None,
http_device=None,
http_status=404,
http_reason=None
)
def tearDown(self):
for todel in self.to_delete:
shutil.rmtree(todel)
def fake_direct_delete_object(self, *args, **kwargs):
if self.amount_fail < self.max_fail:
self.amount_fail += 1
raise self.myexp
def fake_direct_delete_container(self, *args, **kwargs):
if self.amount_delete_fail < self.max_delete_fail:
self.amount_delete_fail += 1
raise self.myexp
def fake_direct_get_container(self, *args, **kwargs):
if self.get_fail:
raise self.myexp
objects = [{'name': 'o1'},
{'name': 'o2'},
{'name': unicode('o3')},
{'name': ''}]
return None, objects
def fake_container_ring(self):
return FakeRing()
def fake_reap_object(self, *args, **kwargs):
if self.reap_obj_fail:
raise Exception
def prepare_data_dir(self, ts=False):
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
os.makedirs(path)
path = os.path.join(path, '100',
'a86', 'a8c682d2472e1720f2d81ff8993aba6')
os.makedirs(path)
suffix = 'db'
if ts:
suffix = 'ts'
with open(os.path.join(path, 'a8c682203aba6.%s' % suffix), 'w') as fd:
fd.write('')
return devices_path
def init_reaper(self, conf=None, myips=None, fakelogger=False):
if conf is None:
conf = {}
if myips is None:
myips = ['10.10.10.1']
r = reaper.AccountReaper(conf)
r.stats_return_codes = {}
r.stats_containers_deleted = 0
r.stats_containers_remaining = 0
r.stats_containers_possibly_remaining = 0
r.stats_objects_deleted = 0
r.stats_objects_remaining = 0
r.stats_objects_possibly_remaining = 0
r.myips = myips
if fakelogger:
r.logger = unit.debug_logger('test-reaper')
return r
def fake_reap_account(self, *args, **kwargs):
self.called_amount += 1
def fake_account_ring(self):
return FakeRing()
def test_delay_reaping_conf_default(self):
r = reaper.AccountReaper({})
self.assertEqual(r.delay_reaping, 0)
r = reaper.AccountReaper({'delay_reaping': ''})
self.assertEqual(r.delay_reaping, 0)
def test_delay_reaping_conf_set(self):
r = reaper.AccountReaper({'delay_reaping': '123'})
self.assertEqual(r.delay_reaping, 123)
def test_delay_reaping_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'delay_reaping': 'abc'})
def test_reap_warn_after_conf_set(self):
conf = {'delay_reaping': '2', 'reap_warn_after': '3'}
r = reaper.AccountReaper(conf)
self.assertEqual(r.reap_not_done_after, 5)
def test_reap_warn_after_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'reap_warn_after': 'abc'})
def test_reap_delay(self):
time_value = [100]
def _time():
return time_value[0]
time_orig = reaper.time
try:
reaper.time = _time
r = reaper.AccountReaper({'delay_reaping': '10'})
b = FakeBroker()
b.info['delete_timestamp'] = normalize_timestamp(110)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(100)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(90)
self.assertFalse(r.reap_account(b, 0, None))
# KeyError raised immediately as reap_account tries to get the
# account's name to do the reaping.
b.info['delete_timestamp'] = normalize_timestamp(89)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
b.info['delete_timestamp'] = normalize_timestamp(1)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
finally:
reaper.time = time_orig
def test_reap_object(self):
conf = {
'mount_check': 'false',
}
r = reaper.AccountReaper(conf, logger=unit.debug_logger())
mock_path = 'swift.account.reaper.direct_delete_object'
for policy in POLICIES:
r.reset_stats()
with patch(mock_path) as fake_direct_delete:
with patch('swift.account.reaper.time') as mock_time:
mock_time.return_value = 1429117638.86767
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
mock_time.assert_called_once_with()
for i, call_args in enumerate(
fake_direct_delete.call_args_list):
cnode = cont_nodes[i % len(cont_nodes)]
host = '%(ip)s:%(port)s' % cnode
device = cnode['device']
headers = {
'X-Container-Host': host,
'X-Container-Partition': 'partition',
'X-Container-Device': device,
'X-Backend-Storage-Policy-Index': policy.idx,
'X-Timestamp': '1429117638.86767'
}
ring = r.get_object_ring(policy.idx)
expected = call(dict(ring.devs[i], index=i), 0,
'a', 'c', 'o',
headers=headers, conn_timeout=0.5,
response_timeout=10)
self.assertEqual(call_args, expected)
self.assertEqual(policy.object_ring.replicas - 1, i)
self.assertEqual(r.stats_objects_deleted,
policy.object_ring.replicas)
def test_reap_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.amount_fail = 0
self.max_fail = 1
policy = random.choice(list(POLICIES))
with patch('swift.account.reaper.direct_delete_object',
self.fake_direct_delete_object):
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
# IMHO, the stat handling in the node loop of reap object is
# over indented, but no one has complained, so I'm not inclined
# to move it. However it's worth noting we're currently keeping
# stats on deletes per *replica* - which is rather obvious from
        # these tests, but this result is surprising because of some
# funny logic to *skip* increments on successful deletes of
# replicas until we have more successful responses than
# failures. This means that while the first replica doesn't
# increment deleted because of the failure, the second one
# *does* get successfully deleted, but *also does not* increment
# the counter (!?).
#
# In the three replica case this leaves only the last deleted
# object incrementing the counter - in the four replica case
# this leaves the last two.
#
# Basically this test will always result in:
# deleted == num_replicas - 2
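        # Worked example of the above: with 3 replicas and max_fail = 1, the first
        # delete raises (one failure, no increment), the second succeeds but is
        # skipped because successes (1) are not yet greater than failures (1), and
        # only the third delete increments the counter, giving 1 == 3 - 2.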
self.assertEqual(r.stats_objects_deleted,
policy.object_ring.replicas - 2)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 1)
def test_reap_object_non_exist_policy_index(self):
r = self.init_reaper({}, fakelogger=True)
r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 2)
self.assertEqual(r.stats_objects_deleted, 0)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 0)
@patch('swift.account.reaper.Ring',
lambda *args, **kwargs: unit.FakeRing())
def test_reap_container(self):
policy = random.choice(list(POLICIES))
r = self.init_reaper({}, fakelogger=True)
with patch.multiple('swift.account.reaper',
direct_get_container=DEFAULT,
direct_delete_object=DEFAULT,
direct_delete_container=DEFAULT) as mocks:
headers = {'X-Backend-Storage-Policy-Index': policy.idx}
obj_listing = [{'name': 'o'}]
def fake_get_container(*args, **kwargs):
try:
obj = obj_listing.pop(0)
except IndexError:
obj_list = []
else:
obj_list = [obj]
return headers, obj_list
mocks['direct_get_container'].side_effect = fake_get_container
with patch('swift.account.reaper.time') as mock_time:
mock_time.side_effect = [1429117638.86767, 1429117639.67676]
r.reap_container('a', 'partition', acc_nodes, 'c')
# verify calls to direct_delete_object
mock_calls = mocks['direct_delete_object'].call_args_list
self.assertEqual(policy.object_ring.replicas, len(mock_calls))
for call_args in mock_calls:
_args, kwargs = call_args
self.assertEqual(kwargs['headers']
['X-Backend-Storage-Policy-Index'],
policy.idx)
self.assertEqual(kwargs['headers']
['X-Timestamp'],
'1429117638.86767')
# verify calls to direct_delete_container
            self.assertEqual(mocks['direct_delete_container'].call_count, 3)
for i, call_args in enumerate(
mocks['direct_delete_container'].call_args_list):
anode = acc_nodes[i % len(acc_nodes)]
host = '%(ip)s:%(port)s' % anode
device = anode['device']
headers = {
'X-Account-Host': host,
'X-Account-Partition': 'partition',
'X-Account-Device': device,
'X-Account-Override-Deleted': 'yes',
'X-Timestamp': '1429117639.67676'
}
ring = r.get_object_ring(policy.idx)
expected = call(dict(ring.devs[i], index=i), 0, 'a', 'c',
headers=headers, conn_timeout=0.5,
response_timeout=10)
self.assertEqual(call_args, expected)
self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas)
def test_reap_container_get_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = True
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 0
ctx = [patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container),
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container),
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring),
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 1)
self.assertEqual(r.stats_containers_deleted, 1)
def test_reap_container_partial_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 2
ctx = [patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container),
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container),
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring),
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 2)
self.assertEqual(r.stats_containers_possibly_remaining, 1)
def test_reap_container_full_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 3
ctx = [patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container),
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container),
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring),
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object)]
with nested(*ctx):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 3)
self.assertEqual(r.stats_containers_remaining, 1)
@patch('swift.account.reaper.Ring',
lambda *args, **kwargs: unit.FakeRing())
def test_reap_container_non_exist_policy_index(self):
r = self.init_reaper({}, fakelogger=True)
with patch.multiple('swift.account.reaper',
direct_get_container=DEFAULT,
direct_delete_object=DEFAULT,
direct_delete_container=DEFAULT) as mocks:
headers = {'X-Backend-Storage-Policy-Index': 2}
obj_listing = [{'name': 'o'}]
def fake_get_container(*args, **kwargs):
try:
obj = obj_listing.pop(0)
except IndexError:
obj_list = []
else:
obj_list = [obj]
return headers, obj_list
mocks['direct_get_container'].side_effect = fake_get_container
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_lines_for_level('error'), [
'ERROR: invalid storage policy index: 2'])
def fake_reap_container(self, *args, **kwargs):
self.called_amount += 1
self.r.stats_containers_deleted = 1
self.r.stats_objects_deleted = 1
self.r.stats_containers_remaining = 1
self.r.stats_objects_remaining = 1
self.r.stats_containers_possibly_remaining = 1
self.r.stats_objects_possibly_remaining = 1
def test_reap_account(self):
containers = ('c1', 'c2', 'c3', '')
broker = FakeAccountBroker(containers)
self.called_amount = 0
self.r = r = self.init_reaper({}, fakelogger=True)
r.start_time = time.time()
ctx = [patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring)]
with nested(*ctx):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
self.assertEqual(self.called_amount, 4)
info_lines = r.logger.get_lines_for_level('info')
self.assertEqual(len(info_lines), 2)
start_line, stat_line = info_lines
self.assertEqual(start_line, 'Beginning pass on account a')
        self.assertIn('1 containers deleted', stat_line)
        self.assertIn('1 objects deleted', stat_line)
        self.assertIn('1 containers remaining', stat_line)
        self.assertIn('1 objects remaining', stat_line)
        self.assertIn('1 containers possibly remaining', stat_line)
        self.assertIn('1 objects possibly remaining', stat_line)
def test_reap_account_no_container(self):
broker = FakeAccountBroker(tuple())
self.r = r = self.init_reaper({}, fakelogger=True)
self.called_amount = 0
r.start_time = time.time()
ctx = [patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring)]
with nested(*ctx):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
self.assertTrue(r.logger.get_lines_for_level(
'info')[-1].startswith('Completed pass'))
self.assertEqual(self.called_amount, 0)
def test_reap_device(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf)
ctx = [patch('swift.account.reaper.AccountBroker',
FakeAccountBroker),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring),
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account)]
with nested(*ctx):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 1)
def test_reap_device_with_ts(self):
devices = self.prepare_data_dir(ts=True)
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf=conf)
ctx = [patch('swift.account.reaper.AccountBroker',
FakeAccountBroker),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring),
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account)]
with nested(*ctx):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
def test_reap_device_with_not_my_ip(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.1.2'])
ctx = [patch('swift.account.reaper.AccountBroker',
FakeAccountBroker),
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring),
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account)]
with nested(*ctx):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
def test_run_once(self):
def prepare_data_dir():
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
os.makedirs(path)
return devices_path
def init_reaper(devices):
r = reaper.AccountReaper({'devices': devices})
return r
devices = prepare_data_dir()
r = init_reaper(devices)
with patch('swift.account.reaper.ismount', lambda x: True):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
self.assertEqual(foo.called, 1)
with patch('swift.account.reaper.ismount', lambda x: False):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
self.assertFalse(foo.called)
def test_run_forever(self):
def fake_sleep(val):
self.val = val
def fake_random():
return 1
def fake_run_once():
raise Exception('exit')
def init_reaper():
r = reaper.AccountReaper({'interval': 1})
r.run_once = fake_run_once
return r
r = init_reaper()
with patch('swift.account.reaper.sleep', fake_sleep):
with patch('swift.account.reaper.random.random', fake_random):
try:
r.run_forever()
except Exception as err:
pass
self.assertEqual(self.val, 1)
self.assertEqual(str(err), 'exit')
if __name__ == '__main__':
unittest.main()
|
py | b413a7ebe21e850e85f4e13adeaf5ab27568d92e |
def insertion_sort(collection):
"""Pure implementation of the insertion sort algorithm in Python
    :param collection: some mutable ordered collection of comparable items
    :return: the same collection sorted in ascending order
Examples:
>>> insertion_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> insertion_sort([])
[]
>>> insertion_sort([-2, -5, -45])
[-45, -5, -2]
"""
for index in range(1, len(collection)):
while index > 0 and collection[index - 1] > collection[index]:
collection[index], collection[index - 1] = collection[index - 1], collection[index]
index -= 1
return collection
if __name__ == '__main__':
user_input = input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(insertion_sort(unsorted))
|
py | b413a9160cbe6992a450cbf7b3e31ed1c54e625e | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware vCenter platform.
"""
import os
import re
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import versionutils as v_utils
from oslo_vmware import api
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
from oslo_vmware import vim
from oslo_vmware import vim_util
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
import nova.privsep.path
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim_util as nova_vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
TIME_BETWEEN_API_CALL_RETRIES = 1.0
MAX_CONSOLE_BYTES = 100 * units.Ki
class VMwareVCDriver(driver.ComputeDriver):
"""The VC host connection object."""
capabilities = {
"has_imagecache": True,
"supports_evacuate": False,
"supports_migrate_to_same_host": True,
"supports_attach_interface": True,
"supports_multiattach": False,
"supports_trusted_certs": False,
"supports_pcpus": False,
"supports_accelerators": False,
# Image type support flags
"supports_image_type_aki": False,
"supports_image_type_ami": False,
"supports_image_type_ari": False,
"supports_image_type_iso": True,
"supports_image_type_qcow2": False,
"supports_image_type_raw": False,
"supports_image_type_vdi": False,
"supports_image_type_vhd": False,
"supports_image_type_vhdx": False,
"supports_image_type_vmdk": True,
"supports_image_type_ploop": False,
}
# Legacy nodename is of the form: <mo id>(<cluster name>)
# e.g. domain-26(TestCluster)
# We assume <mo id> consists of alphanumeric, _ and -.
# We assume cluster name is everything between the first ( and the last ).
# We pull out <mo id> for re-use.
LEGACY_NODENAME = re.compile(r'([\w-]+)\(.+\)')
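    # For example, LEGACY_NODENAME.match('domain-26(TestCluster)').group(1)
    # returns 'domain-26'.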
# The vCenter driver includes API that acts on ESX hosts or groups
# of ESX hosts in clusters or non-cluster logical-groupings.
#
# vCenter is not a hypervisor itself, it works with multiple
# hypervisor host machines and their guests. This fact can
# subtly alter how vSphere and OpenStack interoperate.
def __init__(self, virtapi, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
if (CONF.vmware.host_ip is None or
CONF.vmware.host_username is None or
CONF.vmware.host_password is None):
raise Exception(_("Must specify host_ip, host_username and "
"host_password to use vmwareapi.VMwareVCDriver"))
self._datastore_regex = None
if CONF.vmware.datastore_regex:
try:
self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
except re.error:
raise exception.InvalidInput(reason=
_("Invalid Regular Expression %s")
% CONF.vmware.datastore_regex)
self._session = VMwareAPISession(scheme=scheme)
self._check_min_version()
# Update the PBM location if necessary
if CONF.vmware.pbm_enabled:
self._update_pbm_location()
self._validate_configuration()
self._cluster_name = CONF.vmware.cluster_name
self._cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
self._cluster_name)
if self._cluster_ref is None:
raise exception.NotFound(_("The specified cluster '%s' was not "
"found in vCenter")
% self._cluster_name)
self._vcenter_uuid = self._get_vcenter_uuid()
self._nodename = self._create_nodename(self._cluster_ref.value)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self._cluster_ref)
self._vmops = vmops.VMwareVMOps(self._session,
virtapi,
self._volumeops,
self._cluster_ref,
datastore_regex=self._datastore_regex)
self._vc_state = host.VCState(self._session,
self._nodename,
self._cluster_ref,
self._datastore_regex)
# Register the OpenStack extension
self._register_openstack_extension()
def _check_min_version(self):
min_version = v_utils.convert_version_to_int(constants.MIN_VC_VERSION)
next_min_ver = v_utils.convert_version_to_int(
constants.NEXT_MIN_VC_VERSION)
vc_version = vim_util.get_vc_version(self._session)
LOG.info("VMware vCenter version: %s", vc_version)
if v_utils.convert_version_to_int(vc_version) < min_version:
raise exception.NovaException(
_('Detected vCenter version %(version)s. Nova requires VMware '
'vCenter version %(min_version)s or greater.') % {
'version': vc_version,
'min_version': constants.MIN_VC_VERSION})
elif v_utils.convert_version_to_int(vc_version) < next_min_ver:
LOG.warning('Running Nova with a VMware vCenter version less '
'than %(version)s is deprecated. The required '
'minimum version of vCenter will be raised to '
'%(version)s in the 16.0.0 release.',
{'version': constants.NEXT_MIN_VC_VERSION})
@property
def need_legacy_block_device_info(self):
return False
def _update_pbm_location(self):
if CONF.vmware.pbm_wsdl_location:
pbm_wsdl_loc = CONF.vmware.pbm_wsdl_location
else:
version = vim_util.get_vc_version(self._session)
pbm_wsdl_loc = pbm.get_pbm_wsdl_location(version)
self._session.pbm_wsdl_loc_set(pbm_wsdl_loc)
def _validate_configuration(self):
if CONF.vmware.pbm_enabled:
if not CONF.vmware.pbm_default_policy:
raise error_util.PbmDefaultPolicyUnspecified()
if not pbm.get_profile_id_by_name(
self._session,
CONF.vmware.pbm_default_policy):
raise error_util.PbmDefaultPolicyDoesNotExist()
if CONF.vmware.datastore_regex:
LOG.warning("datastore_regex is ignored when PBM is enabled")
self._datastore_regex = None
def init_host(self, host):
LOG.warning('The vmwareapi driver is deprecated and may be removed in '
'a future release. The driver is not tested by the '
'OpenStack project nor does it have clear maintainer(s) '
'and thus its quality can not be ensured. If you are '
'using the driver in production please let us know in '
'freenode IRC and/or the openstack-discuss mailing list.')
vim = self._session.vim
if vim is None:
self._session._create_session()
def cleanup_host(self, host):
self._session.logout()
def _register_openstack_extension(self):
# Register an 'OpenStack' extension in vCenter
os_extension = self._session._call_method(vim_util, 'find_extension',
constants.EXTENSION_KEY)
if os_extension is None:
try:
self._session._call_method(vim_util, 'register_extension',
constants.EXTENSION_KEY,
constants.EXTENSION_TYPE_INSTANCE)
LOG.info('Registered extension %s with vCenter',
constants.EXTENSION_KEY)
except vexc.VimFaultException as e:
with excutils.save_and_reraise_exception() as ctx:
if 'InvalidArgument' in e.fault_list:
LOG.debug('Extension %s already exists.',
constants.EXTENSION_KEY)
ctx.reraise = False
else:
LOG.debug('Extension %s already exists.', constants.EXTENSION_KEY)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
state = vm_util.get_vm_state(self._session, instance)
ignored_states = [power_state.RUNNING, power_state.SUSPENDED]
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
block_device_info)
def list_instance_uuids(self):
"""List VM instance UUIDs."""
return self._vmops.list_instances()
def list_instances(self):
"""List VM instances from the single compute node."""
return self._vmops.list_instances()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# TODO(PhilDay): Add support for timeout (clean shutdown)
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, flavor)
def confirm_migration(self, context, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
migration, block_device_info=None,
power_on=True):
"""Finish reverting a resize, powering back on the instance."""
self._vmops.finish_revert_migration(context, instance, network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
allocations, block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data):
return migrate_data
def post_live_migration_at_source(self, context, instance, network_info):
pass
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
pass
def cleanup_live_migration_destination_check(self, context,
dest_check_data):
pass
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host."""
self._vmops.live_migration(context, instance, dest, post_method,
recover_method, block_migration,
migrate_data)
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
cluster_name = dest_check_data.cluster_name
cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
cluster_name)
if cluster_ref is None:
msg = (_("Cannot find destination cluster %s for live migration") %
cluster_name)
raise exception.MigrationPreCheckError(reason=msg)
res_pool_ref = vm_util.get_res_pool_ref(self._session, cluster_ref)
if res_pool_ref is None:
msg = _("Cannot find destination resource pool for live migration")
raise exception.MigrationPreCheckError(reason=msg)
return dest_check_data
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
# the information that we need for the destination compute node
# is the name of its cluster and datastore regex
data = objects.VMwareLiveMigrateData()
data.cluster_name = CONF.vmware.cluster_name
data.datastore_regex = CONF.vmware.datastore_regex
return data
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
self.destroy(context, instance, network_info, block_device_info)
def get_instance_disk_info(self, instance, block_device_info=None):
pass
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console using vCenter logic."""
# vCenter does not actually run the VNC service
# itself. You must talk to the VNC host underneath vCenter.
return self._vmops.get_vnc_console(instance)
def get_mks_console(self, context, instance):
return self._vmops.get_mks_console(instance)
def get_console_output(self, context, instance):
if not CONF.vmware.serial_log_dir:
LOG.error("The 'serial_log_dir' config option is not set!")
return
fname = instance.uuid.replace('-', '')
path = os.path.join(CONF.vmware.serial_log_dir, fname)
if not os.path.exists(path):
LOG.warning('The console log is missing. Check your VSPC '
'configuration', instance=instance)
return b""
read_log_data, remaining = nova.privsep.path.last_bytes(
path, MAX_CONSOLE_BYTES)
return read_log_data
def _get_vcenter_uuid(self):
"""Retrieves the vCenter UUID."""
about = self._session._call_method(nova_vim_util, 'get_about_info')
return about.instanceUuid
def _create_nodename(self, mo_id):
"""Return a nodename which uniquely describes a cluster.
The name will be of the form:
<mo id>.<vcenter uuid>
e.g.
domain-26.9d51f082-58a4-4449-beed-6fd205a5726b
"""
return '%s.%s' % (mo_id, self._vcenter_uuid)
def _get_available_resources(self, host_stats):
return {'vcpus': host_stats['vcpus'],
'memory_mb': host_stats['host_memory_total'],
'local_gb': host_stats['disk_total'],
'vcpus_used': 0,
'memory_mb_used': host_stats['host_memory_total'] -
host_stats['host_memory_free'],
'local_gb_used': host_stats['disk_used'],
'hypervisor_type': host_stats['hypervisor_type'],
'hypervisor_version': host_stats['hypervisor_version'],
'hypervisor_hostname': host_stats['hypervisor_hostname'],
# The VMWare driver manages multiple hosts, so there are
# likely many different CPU models in use. As such it is
# impossible to provide any meaningful info on the CPU
# model of the "host"
'cpu_info': None,
'supported_instances': host_stats['supported_instances'],
'numa_topology': None,
}
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:returns: dictionary describing resources
"""
host_stats = self._vc_state.get_host_stats(refresh=True)
stats_dict = self._get_available_resources(host_stats)
return stats_dict
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This driver supports only one compute node.
"""
return [self._nodename]
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider,
inventory information and CPU traits.
:param nova.compute.provider_tree.ProviderTree provider_tree:
A nova.compute.provider_tree.ProviderTree object representing all
the providers in the tree associated with the compute node, and any
sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
trait) associated via aggregate with any of those providers (but
not *their* tree- or aggregate-associated providers), as currently
known by placement.
:param nodename:
String name of the compute node (i.e.
ComputeNode.hypervisor_hostname) for which the caller is requesting
updated provider information.
:param allocations:
Dict of allocation data of the form:
{ $CONSUMER_UUID: {
# The shape of each "allocations" dict below is identical
# to the return from GET /allocations/{consumer_uuid}
"allocations": {
$RP_UUID: {
"generation": $RP_GEN,
"resources": {
$RESOURCE_CLASS: $AMOUNT,
...
},
},
...
},
"project_id": $PROJ_ID,
"user_id": $USER_ID,
"consumer_generation": $CONSUMER_GEN,
},
...
}
If None, and the method determines that any inventory needs to be
moved (from one provider to another and/or to a different resource
class), the ReshapeNeeded exception must be raised. Otherwise, this
dict must be edited in place to indicate the desired final state of
allocations.
:raises ReshapeNeeded: If allocations is None and any inventory needs
to be moved from one provider to another and/or to a different
resource class. At this time the VMware driver does not reshape.
:raises: ReshapeFailed if the requested tree reshape fails for
whatever reason.
"""
        # NOTE(cdent): This is a side-effecty method, we are changing
# the provider tree in place (on purpose).
inv = provider_tree.data(nodename).inventory
ratios = self._get_allocation_ratios(inv)
stats = vm_util.get_stats_from_cluster(self._session,
self._cluster_ref)
datastores = ds_util.get_available_datastores(self._session,
self._cluster_ref,
self._datastore_regex)
total_disk_capacity = sum([ds.capacity for ds in datastores])
max_free_space = max([ds.freespace for ds in datastores])
reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb)
result = {
orc.VCPU: {
'total': stats['cpu']['vcpus'],
'reserved': CONF.reserved_host_cpus,
'min_unit': 1,
'max_unit': stats['cpu']['max_vcpus_per_host'],
'step_size': 1,
'allocation_ratio': ratios[orc.VCPU],
},
orc.MEMORY_MB: {
'total': stats['mem']['total'],
'reserved': CONF.reserved_host_memory_mb,
'min_unit': 1,
'max_unit': stats['mem']['max_mem_mb_per_host'],
'step_size': 1,
'allocation_ratio': ratios[orc.MEMORY_MB],
},
}
# If a sharing DISK_GB provider exists in the provider tree, then our
# storage is shared, and we should not report the DISK_GB inventory in
# the compute node provider.
# TODO(cdent): We don't do this yet, in part because of the issues
# in bug #1784020, but also because we can represent all datastores
# as shared providers and should do once update_provider_tree is
# working well.
if provider_tree.has_sharing_provider(orc.DISK_GB):
LOG.debug('Ignoring sharing provider - see bug #1784020')
result[orc.DISK_GB] = {
'total': total_disk_capacity // units.Gi,
'reserved': reserved_disk_gb,
'min_unit': 1,
'max_unit': max_free_space // units.Gi,
'step_size': 1,
'allocation_ratio': ratios[orc.DISK_GB],
}
provider_tree.update_inventory(nodename, result)
# TODO(cdent): Here is where additional functionality would be added.
# In the libvirt driver this is where nested GPUs are reported and
# where cpu traits are added. In the vmware world, this is where we
# would add nested providers representing tenant VDC and similar.
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None, power_on=True, accel_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info, instance)
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage to VM instance."""
# NOTE(claudiub): if context parameter is to be used in the future,
# the _detach_instance_volumes method will have to be updated as well.
return self._volumeops.detach_volume(connection_info, instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
def get_host_ip_addr(self):
"""Returns the IP address of the vCenter host."""
return CONF.vmware.host_ip
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None,
accel_info=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info, reboot_type)
def _detach_instance_volumes(self, instance, block_device_info):
# We need to detach attached volumes
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if block_device_mapping:
# Certain disk types, for example 'IDE' do not support hot
# plugging. Hence we need to power off the instance and update
# the instance state.
self._vmops.power_off(instance)
for disk in block_device_mapping:
connection_info = disk['connection_info']
try:
# NOTE(claudiub): Passing None as the context, as it is
# not currently used.
self.detach_volume(None, connection_info, instance,
disk.get('device_name'))
except exception.DiskNotFound:
LOG.warning('The volume %s does not exist!',
disk.get('device_name'),
instance=instance)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Failed to detach %(device_name)s. "
"Exception: %(exc)s",
{'device_name': disk.get('device_name'),
'exc': e},
instance=instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance."""
# Destroy gets triggered when Resource Claim in resource_tracker
# is not successful. When resource claim is not successful,
# node is not set in instance. Perform destroy only if node is set
if not instance.node:
return
# A resize uses the same instance on the VC. We do not delete that
# VM in the event of a revert
if instance.task_state == task_states.RESIZE_REVERTING:
return
# We need to detach attached volumes
if block_device_info is not None:
try:
self._detach_instance_volumes(instance, block_device_info)
except (vexc.ManagedObjectNotFoundException,
exception.InstanceNotFound):
                LOG.warning('Instance does not exist. Proceeding to '
'delete instance properties on datastore',
instance=instance)
self._vmops.destroy(instance, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, context, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password, block_device_info):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(
self,
context: nova_context.RequestContext,
instance: 'objects.Instance',
):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
self._vmops.power_off(instance, timeout, retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None, accel_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def get_info(self, instance, use_cache=True):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_instance_diagnostics(instance)
def host_power_action(self, action):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def set_host_enabled(self, enabled):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def get_host_uptime(self):
"""Host uptime operation not supported by VC driver."""
msg = _("Multiple hosts may be managed by the VMWare "
"vCenter driver; therefore we do not return "
"uptime for just one host.")
raise NotImplementedError(msg)
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, nw_info)
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self._vmops.manage_image_cache(context, all_instances)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
return self._vmops.instance_exists(instance)
def attach_interface(self, context, instance, image_meta, vif):
"""Attach an interface to the instance."""
self._vmops.attach_interface(context, instance, image_meta, vif)
def detach_interface(self, context, instance, vif):
"""Detach an interface from the instance."""
self._vmops.detach_interface(context, instance, vif)
class VMwareAPISession(api.VMwareAPISession):
"""Sets up a session with the VC/ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip=CONF.vmware.host_ip,
host_port=CONF.vmware.host_port,
username=CONF.vmware.host_username,
password=CONF.vmware.host_password,
retry_count=CONF.vmware.api_retry_count,
scheme="https",
cacert=CONF.vmware.ca_file,
insecure=CONF.vmware.insecure,
pool_size=CONF.vmware.connection_pool_size):
super(VMwareAPISession, self).__init__(
host=host_ip,
port=host_port,
server_username=username,
server_password=password,
api_retry_count=retry_count,
task_poll_interval=CONF.vmware.task_poll_interval,
scheme=scheme,
create_session=True,
cacert=cacert,
insecure=insecure,
pool_size=pool_size)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""Calls a method within the module specified with
args provided.
"""
if not self._is_vim_object(module):
return self.invoke_api(module, method, self.vim, *args, **kwargs)
else:
return self.invoke_api(module, method, *args, **kwargs)
def _wait_for_task(self, task_ref):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes.
"""
return self.wait_for_task(task_ref)
|
py | b413a91d51e494d4e3b0ee067908095cb02ef531 | import os
## TODO: create list for listDir and update when needed
class Explorer:
@staticmethod
def listDir(path):
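        """Return the entries of path, with sub-directories listed before files."""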
items = os.listdir(path)
dirs, files = [], []
        for item in items:
            if os.path.isdir(os.path.join(path, item)):
                dirs.append(item)
            else:
                files.append(item)
        # Directories are listed first, followed by regular files
        return dirs + files
def __init__(self, path):
self.path = os.path.abspath(path)
self.listdir = Explorer.listDir(self.path)
def pathUp(self):
self.path = os.path.abspath( os.path.join(self.path, "..") )
self.listdir = Explorer.listDir(self.path)
def pathIn(self, ind):
self.path = os.path.join(self.path, Explorer.listDir(self.path)[ind])
self.listdir = Explorer.listDir(self.path)
def getItemCount(self):
return len(self.listdir)
def isItemDir(self, ind):
return os.path.isdir( os.path.join( self.path, self.listdir[ind] ) )
def getItemPath(self, ind):
return os.path.join( self.path, self.listdir[ind] )
def getItemName(self, ind):
return self.listdir[ind]
def getPath(self):
return self.path
def setPath(self, path):
if os.path.isdir(path):
self.path = os.path.abspath(path)
def reload(self):
self.listdir = Explorer.listDir(self.path)
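# Minimal usage sketch (a sketch only, run when this module is executed
# directly): list the current directory, enter the first subdirectory if one
# exists, then go back up.
if __name__ == "__main__":
    ex = Explorer(".")
    print(ex.getPath(), "contains", ex.getItemCount(), "items")
    for i in range(ex.getItemCount()):
        marker = "<DIR> " if ex.isItemDir(i) else "      "
        print(marker + ex.getItemName(i))
    if ex.getItemCount() > 0 and ex.isItemDir(0):
        ex.pathIn(0)   # directories are listed first, so index 0 is a directory
        print("entered:", ex.getPath())
        ex.pathUp()
        print("back to:", ex.getPath())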
|
py | b413a9b451aa063276455e65ec47cde2ec3b0179 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUInfrastructurePortProfilesFetcher(NURESTFetcher):
""" Represents a NUInfrastructurePortProfiles fetcher
Notes:
        This fetcher enables fetching NUInfrastructurePortProfile objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUInfrastructurePortProfile class that is managed.
Returns:
.NUInfrastructurePortProfile: the managed class
"""
from .. import NUInfrastructurePortProfile
return NUInfrastructurePortProfile
|
py | b413aae020e8d864afd5bc9e1e562fcf6e01c367 | '''
Copyright 2014-2018 Biogen, Celgene Corporation, EMBL - European Bioinformatics Institute, GlaxoSmithKline, Takeda Pharmaceutical Company and Wellcome Sanger Institute
This software was developed as part of the Open Targets project. For more information please see: http://www.opentargets.org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import re
import sys
import iso8601
import types
import json
import logging
import six
import collections
__author__ = "Gautier Koscielny"
__copyright__ = "Copyright 2014-2018 Biogen, Celgene Corporation, EMBL - European Bioinformatics Institute, GlaxoSmithKline, Takeda Pharmaceutical Company and Wellcome Sanger Institute"
__credits__ = ["Gautier Koscielny", "Samiul Hasan"]
__license__ = "Apache 2.0"
__version__ = "1.2.8"
__maintainer__ = "Gautier Koscielny"
__email__ = "[email protected]"
__status__ = "Production"
logger = logging.getLogger(__name__)
"""
https://raw.githubusercontent.com/opentargets/json_schema/master/src/evidence/linkout/linkout.json
"""
class Linkout(object):
"""
Constructor using all fields with default values
Arguments:
:param nice_name = None
:param url = None
"""
def __init__(self, nice_name = None, url = None):
"""
Name: nice_name
Type: string
Can be null: False
Required: {True}
"""
self.nice_name = nice_name
"""
Name: url
Type: string
Can be null: False
Required: {True}
String format: uri
"""
self.url = url
@classmethod
def cloneObject(cls, clone):
obj = cls()
if clone.nice_name:
obj.nice_name = clone.nice_name
if clone.url:
obj.url = clone.url
return obj
@classmethod
def fromDict(cls, dict_obj):
cls_keys = ['nice_name','url']
obj = cls()
if not isinstance(dict_obj, dict):
logger.warn("Linkout - DictType expected - {0} found\n".format(type(dict_obj)))
return
if 'nice_name' in dict_obj:
obj.nice_name = dict_obj['nice_name']
if 'url' in dict_obj:
obj.url = dict_obj['url']
return obj
def validate(self, logger, path = "root"):
"""
Validate class Linkout
:returns: number of errors found during validation
"""
error = 0
# nice_name is mandatory
        if self.nice_name is None:
logger.error("Linkout - {0}.nice_name is required".format(path))
error = error + 1
if self.nice_name is not None and not isinstance(self.nice_name, six.string_types):
logger.error("Linkout - {0}.nice_name type should be a string".format(path))
error = error + 1
# url is mandatory
        if self.url is None:
logger.error("Linkout - {0}.url is required".format(path))
error = error + 1
if self.url is not None and not isinstance(self.url, six.string_types):
logger.error("Linkout - {0}.url type should be a string".format(path))
error = error + 1
return error
def serialize(self):
classDict = collections.OrderedDict()
if not self.nice_name is None: classDict['nice_name'] = self.nice_name
if not self.url is None: classDict['url'] = self.url
return classDict
def to_JSON(self, indentation=4):
if sys.version_info[0] == 3:
return json.dumps(self.serialize(), sort_keys=True, check_circular=False, indent=indentation)
elif sys.version_info[0] == 2:
return json.dumps(self, default=lambda o: o.serialize(), sort_keys=True, check_circular=False, indent=indentation)
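# Minimal usage sketch (a sketch only, assuming the module's own imports such
# as six and iso8601 resolve; guarded so it only runs when executed directly):
# build a Linkout, validate it against the rules above and round-trip it
# through serialize()/fromDict().
if __name__ == "__main__":
    link = Linkout(nice_name="Open Targets", url="https://www.opentargets.org")
    print("validation errors:", link.validate(logger))
    print(link.to_JSON())
    clone = Linkout.fromDict(link.serialize())
    print(clone.nice_name, clone.url)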
|
py | b413ab4c2c7134c6b0bf2ecf60d39ea76ddbf576 | #!/usr/bin/env python2.7
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build and upload docker images to Google Container Registry per matrix."""
from __future__ import print_function
import argparse
import atexit
import multiprocessing
import os
import shutil
import subprocess
import sys
import tempfile
# Language Runtime Matrix
import client_matrix
python_util_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../run_tests/python_utils'))
sys.path.append(python_util_dir)
import dockerjob
import jobset
_IMAGE_BUILDER = 'tools/run_tests/dockerize/build_interop_image.sh'
_LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(
list(
set(release
for release_dict in client_matrix.LANG_RELEASE_MATRIX.values()
for release in release_dict.keys())))
# Destination directory inside docker image to keep extra info from build time.
_BUILD_INFO = '/var/local/build_info'
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('--gcr_path',
default='gcr.io/grpc-testing',
help='Path of docker images in Google Container Registry')
argp.add_argument('--release',
default='master',
choices=['all', 'master'] + _RELEASES,
help='github commit tag to checkout. When building all '
'releases defined in client_matrix.py, use "all". Valid only '
'with --git_checkout.')
argp.add_argument('-l',
'--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Test languages to build docker images for.')
argp.add_argument('--git_checkout',
action='store_true',
help='Use a separate git clone tree for building grpc stack. '
                  'Required when using --release flag. By default, the current '
                  'tree and the sibling will be used for building grpc stack.')
argp.add_argument('--git_checkout_root',
default='/export/hda3/tmp/grpc_matrix',
help='Directory under which grpc-go/java/main repo will be '
'cloned. Valid only with --git_checkout.')
argp.add_argument('--keep',
action='store_true',
help='keep the created local images after uploading to GCR')
argp.add_argument('--reuse_git_root',
default=False,
action='store_const',
const=True,
help='reuse the repo dir. If False, the existing git root '
                  'directory will be removed before a clean checkout, because '
'reusing the repo can cause git checkout error if you switch '
'between releases.')
argp.add_argument(
'--upload_images',
action='store_true',
help='If set, images will be uploaded to container registry after building.'
)
args = argp.parse_args()
def add_files_to_image(image, with_files, label=None):
"""Add files to a docker image.
image: docker image name, i.e. grpc_interop_java:26328ad8
with_files: additional files to include in the docker image.
label: label string to attach to the image.
"""
tag_idx = image.find(':')
if tag_idx == -1:
jobset.message('FAILED',
'invalid docker image %s' % image,
do_newline=True)
sys.exit(1)
orig_tag = '%s_' % image
subprocess.check_output(['docker', 'tag', image, orig_tag])
lines = ['FROM ' + orig_tag]
if label:
lines.append('LABEL %s' % label)
temp_dir = tempfile.mkdtemp()
atexit.register(lambda: subprocess.call(['rm', '-rf', temp_dir]))
# Copy with_files inside the tmp directory, which will be the docker build
# context.
for f in with_files:
shutil.copy(f, temp_dir)
lines.append('COPY %s %s/' % (os.path.basename(f), _BUILD_INFO))
# Create a Dockerfile.
with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as f:
f.write('\n'.join(lines))
jobset.message('START', 'Repackaging %s' % image, do_newline=True)
build_cmd = ['docker', 'build', '--rm', '--tag', image, temp_dir]
subprocess.check_output(build_cmd)
dockerjob.remove_image(orig_tag, skip_nonexistent=True)
def build_image_jobspec(runtime, env, gcr_tag, stack_base):
"""Build interop docker image for a language with runtime.
runtime: a <lang><version> string, for example go1.8.
env: dictionary of env to passed to the build script.
gcr_tag: the tag for the docker image (i.e. v1.3.0).
stack_base: the local gRPC repo path.
"""
basename = 'grpc_interop_%s' % runtime
tag = '%s/%s:%s' % (args.gcr_path, basename, gcr_tag)
build_env = {'INTEROP_IMAGE': tag, 'BASE_NAME': basename, 'TTY_FLAG': '-t'}
build_env.update(env)
image_builder_path = _IMAGE_BUILDER
if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
build_job = jobset.JobSpec(cmdline=[image_builder_path],
environ=build_env,
shortname='build_docker_%s' % runtime,
timeout_seconds=30 * 60)
build_job.tag = tag
return build_job
def build_all_images_for_lang(lang):
"""Build all docker images for a language across releases and runtimes."""
if not args.git_checkout:
if args.release != 'master':
print(
'Cannot use --release without also enabling --git_checkout.\n')
sys.exit(1)
releases = [args.release]
else:
if args.release == 'all':
releases = client_matrix.get_release_tags(lang)
else:
# Build a particular release.
if args.release not in ['master'
] + client_matrix.get_release_tags(lang):
jobset.message('SKIPPED',
'%s for %s is not defined' %
(args.release, lang),
do_newline=True)
return []
releases = [args.release]
images = []
for release in releases:
images += build_all_images_for_release(lang, release)
jobset.message('SUCCESS',
'All docker images built for %s at %s.' % (lang, releases),
do_newline=True)
return images
def build_all_images_for_release(lang, release):
"""Build all docker images for a release across all runtimes."""
docker_images = []
build_jobs = []
env = {}
    # If we are not using the current tree or the sibling for the grpc stack, do a checkout.
stack_base = ''
if args.git_checkout:
stack_base = checkout_grpc_stack(lang, release)
var = {
'go': 'GRPC_GO_ROOT',
'java': 'GRPC_JAVA_ROOT',
'node': 'GRPC_NODE_ROOT'
}.get(lang, 'GRPC_ROOT')
env[var] = stack_base
for runtime in client_matrix.get_runtimes_for_lang_release(lang, release):
job = build_image_jobspec(runtime, env, release, stack_base)
docker_images.append(job.tag)
build_jobs.append(job)
jobset.message('START', 'Building interop docker images.', do_newline=True)
print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
num_failures, _ = jobset.run(build_jobs,
newline_on_success=True,
maxjobs=multiprocessing.cpu_count())
if num_failures:
jobset.message('FAILED',
'Failed to build interop docker images.',
do_newline=True)
docker_images_cleanup.extend(docker_images)
sys.exit(1)
jobset.message('SUCCESS',
'All docker images built for %s at %s.' % (lang, release),
do_newline=True)
if release != 'master':
commit_log = os.path.join(stack_base, 'commit_log')
if os.path.exists(commit_log):
for image in docker_images:
add_files_to_image(image, [commit_log], 'release=%s' % release)
return docker_images
def cleanup():
if not args.keep:
for image in docker_images_cleanup:
dockerjob.remove_image(image, skip_nonexistent=True)
docker_images_cleanup = []
atexit.register(cleanup)
def maybe_apply_patches_on_git_tag(stack_base, lang, release):
files_to_patch = []
release_info = client_matrix.LANG_RELEASE_MATRIX[lang].get(release)
if release_info:
files_to_patch = release_info.patch
if not files_to_patch:
return
patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release)
patch_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), patch_file_relative_path))
if not os.path.exists(patch_file):
jobset.message('FAILED',
'expected patch file |%s| to exist' % patch_file)
sys.exit(1)
subprocess.check_output(['git', 'apply', patch_file],
cwd=stack_base,
stderr=subprocess.STDOUT)
# TODO(jtattermusch): this really would need simplification and refactoring
# - "git add" and "git commit" can easily be done in a single command
# - it looks like the only reason for the existence of the "files_to_patch"
# entry is to perform "git add" - which is clumsy and fragile.
# - we only allow a single patch with name "git_repo.patch". A better design
# would be to allow multiple patches that can have more descriptive names.
for repo_relative_path in files_to_patch:
subprocess.check_output(['git', 'add', repo_relative_path],
cwd=stack_base,
stderr=subprocess.STDOUT)
subprocess.check_output([
'git', 'commit', '-m',
('Hack performed on top of %s git '
'tag in order to build and run the %s '
'interop tests on that tag.' % (lang, release))
],
cwd=stack_base,
stderr=subprocess.STDOUT)
def checkout_grpc_stack(lang, release):
"""Invokes 'git check' for the lang/release and returns directory created."""
assert args.git_checkout and args.git_checkout_root
if not os.path.exists(args.git_checkout_root):
os.makedirs(args.git_checkout_root)
repo = client_matrix.get_github_repo(lang)
# Get the subdir name part of repo
# For example, '[email protected]:grpc/grpc-go.git' should use 'grpc-go'.
repo_dir = os.path.splitext(os.path.basename(repo))[0]
stack_base = os.path.join(args.git_checkout_root, repo_dir)
# Clean up leftover repo dir if necessary.
if not args.reuse_git_root and os.path.exists(stack_base):
jobset.message('START', 'Removing git checkout root.', do_newline=True)
shutil.rmtree(stack_base)
if not os.path.exists(stack_base):
subprocess.check_call(['git', 'clone', '--recursive', repo],
cwd=os.path.dirname(stack_base))
# git checkout.
jobset.message('START',
'git checkout %s from %s' % (release, stack_base),
do_newline=True)
# We should NEVER do checkout on current tree !!!
assert not os.path.dirname(__file__).startswith(stack_base)
output = subprocess.check_output(['git', 'checkout', release],
cwd=stack_base,
stderr=subprocess.STDOUT)
maybe_apply_patches_on_git_tag(stack_base, lang, release)
commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
jobset.message('SUCCESS',
'git checkout',
'%s: %s' % (str(output), commit_log),
do_newline=True)
# git submodule update
jobset.message('START',
'git submodule update --init at %s from %s' %
(release, stack_base),
do_newline=True)
subprocess.check_call(['git', 'submodule', 'update', '--init'],
cwd=stack_base,
stderr=subprocess.STDOUT)
jobset.message('SUCCESS',
'git submodule update --init',
'%s: %s' % (str(output), commit_log),
do_newline=True)
# Write git log to commit_log so it can be packaged with the docker image.
with open(os.path.join(stack_base, 'commit_log'), 'w') as f:
f.write(commit_log)
return stack_base
languages = args.language if args.language != ['all'] else _LANGUAGES
for lang in languages:
docker_images = build_all_images_for_lang(lang)
for image in docker_images:
if args.upload_images:
jobset.message('START', 'Uploading %s' % image, do_newline=True)
# docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
assert image.startswith(args.gcr_path) and image.find(':') != -1
subprocess.call(['gcloud', 'docker', '--', 'push', image])
else:
# Uploading (and overwriting images) by default can easily break things.
print(
'Not uploading image %s, run with --upload_images to upload.' %
image)
|
py | b413ac40b1ff3a2626f1a1f77916f7ead15d5523 | from cli_rack.modular import ExtensionUnavailableError
def is_available() -> bool:
try:
import jinja2
return True
except ImportError:
raise ExtensionUnavailableError(
"modular_app.feature2", "Jinja2 is required but it is not installed"
).hint_install_python_package("jinja2")
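# Minimal usage sketch (assumes cli_rack is installed): the module loader would
# normally call is_available() itself, but it can also be probed directly.
if __name__ == "__main__":
    try:
        print("feature2 available:", is_available())
    except ExtensionUnavailableError as err:
        print("feature2 unavailable:", err)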
|
py | b413ac64fd651cc7245c3ebe8159f1a57c0f0778 | # coding:utf-8
import os
import cv2
import face_recognition
from PIL import Image, ImageDraw
def face_square():
"""
    Draw a bounding box around the detected face and save the result.
"""
face_image = face_recognition.load_image_file('../origin_face/face2.jpg')
face_location = face_recognition.face_locations(face_image, model='cnn')
print(face_location)
pil_image = Image.fromarray(face_image)
pos = face_location[0]
d = ImageDraw.Draw(pil_image, 'RGBA')
d.rectangle((pos[3], pos[0], pos[1], pos[2]))
pil_image.show()
pil_image.save('result.jpg')
def show_face():
"""
    Crop the detected face region and display/save it.
"""
image = face_recognition.load_image_file("../origin_face/face2.jpg")
face_locations = face_recognition.face_locations(image, model="cnn")
top, right, bottom, left = face_locations[0]
print(
"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
face_image = image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
pil_image.show()
pil_image.save("result.jpg")
pass
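# A small self-contained sketch of the (top, right, bottom, left) box
# convention used above; purely synthetic data, no image files or camera needed.
def crop_convention_demo():
    """
    Minimal sketch: face_locations() returns (top, right, bottom, left) boxes,
    and cropping a face is plain numpy slicing image[top:bottom, left:right].
    """
    import numpy as np
    fake_image = np.zeros((100, 100, 3), dtype=np.uint8)
    top, right, bottom, left = 20, 80, 70, 30
    crop = fake_image[top:bottom, left:right]
    print(crop.shape)  # -> (50, 50, 3)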
def face_lipstick():
"""
    Apply digital lipstick by drawing over the lip landmarks.
"""
face_image = face_recognition.load_image_file('../origin_face/face2.jpg')
face_landmarks_list = face_recognition.face_landmarks(face_image)
print(face_landmarks_list)
for face_landmarks in face_landmarks_list:
pil_image = Image.fromarray(face_image)
d = ImageDraw.Draw(pil_image, 'RGBA')
d.polygon(face_landmarks['top_lip'], fill=(150, 0, 0, 128))
d.polygon(face_landmarks['bottom_lip'], fill=(150, 0, 0, 128))
d.line(face_landmarks['top_lip'], fill=(150, 0, 0, 64), width=3)
d.line(face_landmarks['bottom_lip'], fill=(150, 0, 0, 64), width=3)
pil_image.show()
pil_image.save('result.jpg')
def dynamic_recognition():
video_capture = cv2.VideoCapture('rtsp://admin:[email protected]:554/25')
face_image = face_recognition.load_image_file("../origin_face/face2.jpg")
face_image_encoding = face_recognition.face_encodings(face_image)[0]
face_locations = []
face_names = []
process_this_frame = True
while True:
ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # face_recognition expects RGB input, while OpenCV delivers BGR frames
        rgb_small_frame = small_frame[:, :, ::-1]
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
match = face_recognition.compare_faces([face_image_encoding], face_encoding)
if match[0]:
name = "Barack Obama"
else:
name = "Unknown"
face_names.append(name)
process_this_frame = not process_this_frame
for (top, right, bottom, left), name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
cv2.namedWindow("Video", 0)
cv2.resizeWindow("Video", 800, 600)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
class FaceRecognition:
def __init__(self, path):
self.path = path
def recognition(self):
picture_face = {}
for root, dirs, files in os.walk(self.path):
for file in files:
file_info = os.path.splitext(file)
if file_info[1] == '.jpg':
picture_face[os.path.join(root, file)] = file_info[0]
print(picture_face)
know_face_encodings = []
known_face_names = []
for path, name in picture_face.items():
face_image = face_recognition.load_image_file(path)
encoding = face_recognition.face_encodings(face_image)
if len(encoding) > 0:
                know_face_encodings.append(encoding[0])  # store the 128-d encoding itself, not the surrounding list
known_face_names.append(name)
print('len', len(known_face_names))
face_locations = []
face_names = []
process_this_frame = True
video_capture = cv2.VideoCapture('rtsp://admin:[email protected]:554/25')
while True:
ret, frame = video_capture.read()
            # Resize the camera frame; a smaller image means less computation
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # Convert OpenCV's BGR frame to RGB
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(know_face_encodings, face_encoding)
name = "Unknown"
print(matches)
# if all(matches):
# # first_match_index = matches.index(True)
# first_match_index = 0
# name = known_face_names[first_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Show the square outline of the face
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
                # Draw the face bounding box
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a filled label box with the name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Show video
cv2.imshow('video', frame)
# Quit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
# face_square()
# face_lipstick()
dynamic_recognition()
# fr = FaceRecognition(os.getcwd()[0:-4] + os.sep + 'origin_face')
# fr.recognition()
|
py | b413ac99e65cb5de7d77feb694c303ed3235ff26 |
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from os.path import join
import numpy as np
from datetime import datetime
import os
import glob
from MCEq.misc import theta_rad
from MCEq.misc import info
import mceq_config as config
class EarthsAtmosphere(with_metaclass(ABCMeta)):
"""
Abstract class containing common methods on atmosphere.
You have to inherit from this class and implement the virtual method
:func:`get_density`.
Note:
Do not instantiate this class directly.
Attributes:
thrad (float): current zenith angle :math:`\\theta` in radiants
theta_deg (float): current zenith angle :math:`\\theta` in degrees
max_X (float): Slant depth at the surface according to the geometry
defined in the :mod:`MCEq.geometry`
geometry (object): Can be a custom instance of EarthGeometry
"""
def __init__(self, *args, **kwargs):
from MCEq.geometry.geometry import EarthGeometry
self.geom = kwargs.pop('geometry', EarthGeometry())
self.thrad = None
self.theta_deg = None
self._max_den = config.max_density
self.max_theta = 90.
self.location = None
self.season = None
@abstractmethod
def get_density(self, h_cm):
"""Abstract method which implementation should return the density in g/cm**3.
Args:
h_cm (float): height in cm
Returns:
float: density in g/cm**3
Raises:
NotImplementedError:
"""
raise NotImplementedError("Base class called.")
def calculate_density_spline(self, n_steps=2000):
"""Calculates and stores a spline of :math:`\\rho(X)`.
Args:
n_steps (int, optional): number of :math:`X` values
to use for interpolation
Raises:
Exception: if :func:`set_theta` was not called before.
"""
from scipy.integrate import cumtrapz
from time import time
from scipy.interpolate import UnivariateSpline
if self.theta_deg is None:
raise Exception('zenith angle not set')
else:
info(
5, 'Calculating spline of rho(X) for zenith {0:4.1f} degrees.'.
format(self.theta_deg))
thrad = self.thrad
path_length = self.geom.l(thrad)
vec_rho_l = np.vectorize(
lambda delta_l: self.get_density(self.geom.h(delta_l, thrad)))
dl_vec = np.linspace(0, path_length, n_steps)
now = time()
rho_l = vec_rho_l(dl_vec)
# Calculate integral for each depth point
X_int = cumtrapz(rho_l[np.isfinite(rho_l)], dl_vec[np.isfinite(rho_l)]) #
dl_vec = dl_vec[np.isfinite(rho_l)][1:]
info(5, '.. took {0:1.2f}s'.format(time() - now))
# Save depth value at h_obs
self._max_X = X_int[-1]
self._max_den = self.get_density(self.geom.h(0, thrad))
# Interpolate with bi-splines without smoothing
h_intp = [self.geom.h(dl, thrad) for dl in reversed(dl_vec[1:])]
X_intp = [X for X in reversed(X_int[1:])]
self._s_h2X = UnivariateSpline(h_intp, np.log(X_intp), k=2, s=0.0)
self._s_X2rho = UnivariateSpline(X_int, vec_rho_l(dl_vec), k=2, s=0.0)
self._s_lX2h = UnivariateSpline(np.log(X_intp)[::-1],
h_intp[::-1],
k=2,
s=0.0)
@property
def max_X(self):
"""Depth at altitude 0."""
if not hasattr(self, '_max_X'):
self.set_theta(0)
return self._max_X
@property
def max_den(self):
"""Density at altitude 0."""
if not hasattr(self, '_max_den'):
self.set_theta(0)
return self._max_den
@property
def s_h2X(self):
"""Spline for conversion from altitude to depth."""
if not hasattr(self, '_s_h2X'):
self.set_theta(0)
return self._s_h2X
@property
def s_X2rho(self):
"""Spline for conversion from depth to density."""
if not hasattr(self, '_s_X2rho'):
self.set_theta(0)
return self._s_X2rho
@property
def s_lX2h(self):
"""Spline for conversion from depth to altitude."""
if not hasattr(self, '_s_lX2h'):
self.set_theta(0)
return self._s_lX2h
def set_theta(self, theta_deg, force_spline_calc=False):
"""Configures geometry and initiates spline calculation for
:math:`\\rho(X)`.
If the option 'use_atm_cache' is enabled in the config, the
function will check, if a corresponding spline is available
in the cache and use it. Otherwise it will call
:func:`calculate_density_spline`, make the function
:func:`r_X2rho` available to the core code and store the spline
in the cache.
Args:
theta_deg (float): zenith angle :math:`\\theta` at detector
force_spline_calc (bool): forces (re-)calculation of the
spline for each call
"""
if theta_deg < 0. or theta_deg > self.max_theta:
raise Exception('Zenith angle not in allowed range.')
self.thrad = theta_rad(theta_deg)
self.theta_deg = theta_deg
self.calculate_density_spline()
def r_X2rho(self, X):
"""Returns the inverse density :math:`\\frac{1}{\\rho}(X)`.
The spline `s_X2rho` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
X (float): slant depth in g/cm**2
Returns:
float: :math:`1/\\rho` in cm**3/g
"""
return 1. / self.s_X2rho(X)
def h2X(self, h):
"""Returns the depth along path as function of height above
surface.
The spline `s_X2rho` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
h (float): vertical height above surface in cm
Returns:
float: X slant depth in g/cm**2
"""
return np.exp(self.s_h2X(h))
def X2h(self, X):
"""Returns the height above surface as a function of slant depth
for currently selected zenith angle.
The spline `s_lX2h` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
X (float): slant depth in g/cm**2
Returns:
float h: height above surface in cm
"""
return self.s_lX2h(np.log(X))
def X2rho(self, X):
"""Returns the density :math:`\\rho(X)`.
The spline `s_X2rho` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
X (float): slant depth in g/cm**2
Returns:
float: :math:`\\rho` in cm**3/g
"""
return self.s_X2rho(X)
def moliere_air(self, h_cm):
"""Returns the Moliere unit of air for US standard atmosphere. """
return 9.3 / (self.get_density(h_cm) * 100.)
def nref_rel_air(self, h_cm):
"""Returns the refractive index - 1 in air (density parametrization
as in CORSIKA).
"""
return 0.000283 * self.get_density(h_cm) / self.get_density(0)
def gamma_cherenkov_air(self, h_cm):
"""Returns the Lorentz factor gamma of Cherenkov threshold in air (MeV).
"""
nrel = self.nref_rel_air(h_cm)
return (1. + nrel) / np.sqrt(2. * nrel + nrel**2)
def theta_cherenkov_air(self, h_cm):
"""Returns the Cherenkov angle in air (degrees).
"""
return np.arccos(1. / (1. + self.nref_rel_air(h_cm))) * 180. / np.pi
class CorsikaAtmosphere(EarthsAtmosphere):
"""Class, holding the parameters of a Linsley type parameterization
similar to the Air-Shower Monte Carlo
`CORSIKA <https://web.ikp.kit.edu/corsika/>`_.
The parameters pre-defined parameters are taken from the CORSIKA
manual. If new sets of parameters are added to :func:`init_parameters`,
the array _thickl can be calculated using :func:`calc_thickl` .
Attributes:
_atm_param (numpy.array): (5x5) Stores 5 atmospheric parameters
_aatm, _batm, _catm, _thickl, _hlay
for each of the 5 layers
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
_atm_param = None
def __init__(self, location, season=None):
cka_atmospheres = [
("USStd", None),
("BK_USStd", None),
("Karlsruhe", None),
("ANTARES/KM3NeT-ORCA", 'Summer'),
("ANTARES/KM3NeT-ORCA", 'Winter'),
("KM3NeT-ARCA", 'Summer'),
("KM3NeT-ARCA", 'Winter'),
("KM3NeT",None),
('SouthPole','December'),
('PL_SouthPole','January'),
('PL_SouthPole','August'),
]
assert (location, season) in cka_atmospheres, \
            '{0}/{1} not available for CorsikaAtmosphere'.format(
location, season
)
self.init_parameters(location, season)
import MCEq.geometry.corsikaatm.corsikaatm as corsika_acc
self.corsika_acc = corsika_acc
EarthsAtmosphere.__init__(self)
def init_parameters(self, location, season):
"""Initializes :attr:`_atm_param`. Parameters from ANTARES/KM3NET
are based on the work of T. Heid
(`see this issue <https://github.com/afedynitch/MCEq/issues/12>`_)
+---------------------+-------------------+------------------------------+
| location | CORSIKA Table | Description/season |
+=====================+===================+==============================+
| "USStd" | 23 | US Standard atmosphere |
+---------------------+-------------------+------------------------------+
| "BK_USStd" | 37 | Bianca Keilhauer's USStd |
+---------------------+-------------------+------------------------------+
| "Karlsruhe" | 24 | AT115 / Karlsruhe |
+---------------------+-------------------+------------------------------+
| "SouthPole" | 26 and 28 | MSIS-90-E for Dec and June |
+---------------------+-------------------+------------------------------+
|"PL_SouthPole" | 29 and 30 | P. Lipari's Jan and Aug |
+---------------------+-------------------+------------------------------+
|"ANTARES/KM3NeT-ORCA"| NA | PhD T. Heid |
+---------------------+-------------------+------------------------------+
| "KM3NeT-ARCA" | NA | PhD T. Heid |
+---------------------+-------------------+------------------------------+
Args:
location (str): see table
season (str, optional): choice of season for supported locations
Raises:
Exception: if parameter set not available
"""
_aatm, _batm, _catm, _thickl, _hlay = None, None, None, None, None
if location == "USStd":
_aatm = np.array([-186.5562, -94.919, 0.61289, 0.0, 0.01128292])
_batm = np.array([1222.6562, 1144.9069, 1305.5948, 540.1778, 1.0])
_catm = np.array([994186.38, 878153.55, 636143.04, 772170., 1.0e9])
_thickl = np.array(
[1036.102549, 631.100309, 271.700230, 3.039494, 0.001280])
_hlay = np.array([0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
elif location == "BK_USStd":
_aatm = np.array([
-149.801663, -57.932486, 0.63631894, 4.3545369e-4, 0.01128292
])
_batm = np.array([1183.6071, 1143.0425, 1322.9748, 655.69307, 1.0])
_catm = np.array(
[954248.34, 800005.34, 629568.93, 737521.77, 1.0e9])
_thickl = np.array(
[1033.804941, 418.557770, 216.981635, 4.344861, 0.001280])
_hlay = np.array([0.0, 7.0e5, 1.14e6, 3.7e6, 1.0e7])
elif location == "Karlsruhe":
_aatm = np.array(
[-118.1277, -154.258, 0.4191499, 5.4094056e-4, 0.01128292])
_batm = np.array([1173.9861, 1205.7625, 1386.7807, 555.8935, 1.0])
_catm = np.array([919546., 963267.92, 614315., 739059.6, 1.0e9])
_thickl = np.array(
[1055.858707, 641.755364, 272.720974, 2.480633, 0.001280])
_hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
elif location == "KM3NeT": # averaged over detector and season
_aatm = np.array([-141.31449999999998, -8.256029999999999, 0.6132505, -0.025998975, 0.4024275])
_batm = np.array([1153.0349999999999, 1263.3325, 1257.0724999999998, 404.85974999999996, 1.0])
_catm = np.array([967990.75, 668591.75, 636790.0, 814070.75, 21426175.0])
_thickl = np.array([1011.8521512499999, 275.84507575000003, 51.0230705, 2.983134, 0.21927724999999998])
_hlay = np.array([0.0, 993750.0, 2081250.0, 4150000.0, 6877500.0])
elif location == "ANTARES/KM3NeT-ORCA":
if season == 'Summer':
_aatm = np.array([-158.85, -5.38682, 0.889893, -0.0286665, 0.50035])
_batm = np.array([1145.62, 1176.79, 1248.92, 415.543, 1.0])
_catm = np.array([998469.0, 677398.0, 636790.0, 823489.0, 16090500.0])
_thickl = np.array([986.951713, 306.4668, 40.546793, 4.288721, 0.277182])
_hlay = np.array([0, 9.0e5, 22.0e5, 38.0e5, 68.2e5])
elif season == 'Winter':
_aatm = np.array([-132.16, -2.4787, 0.298031, -0.0220264, 0.348021])
_batm = np.array([1120.45, 1203.97, 1163.28, 360.027, 1.0])
_catm = np.array([933697.0, 643957.0, 636790.0, 804486.0, 23109000.0])
_thickl = np.array([988.431172, 273.033464, 37.185105, 1.162987, 0.192998])
_hlay = np.array([0, 9.5e5, 22.0e5, 47.0e5, 68.2e5])
elif location == "KM3NeT-ARCA":
if season == 'Summer':
_aatm = np.array([-157.857, -28.7524, 0.790275, -0.0286999, 0.481114])
_batm = np.array([1190.44, 1171.0, 1344.78, 445.357, 1.0])
_catm = np.array([1006100.0, 758614.0, 636790.0, 817384.0, 16886800.0])
_thickl = np.array([1032.679434, 328.978681, 80.601135, 4.420745, 0.264112])
_hlay = np.array([0, 9.0e5, 18.0e5, 38.0e5, 68.2e5])
elif season == 'Winter':
_aatm = np.array([-116.391, 3.5938, 0.474803, -0.0246031, 0.280225])
_batm = np.array([1155.63, 1501.57, 1271.31, 398.512, 1.0])
_catm = np.array([933697.0, 594398.0, 636790.0, 810924.0, 29618400.0])
_thickl = np.array([1039.346286, 194.901358, 45.759249, 2.060083, 0.142817])
_hlay = np.array([0, 12.25e5, 21.25e5, 43.0e5, 70.5e5])
elif location == 'SouthPole':
if season == 'December':
_aatm = np.array(
[-128.601, -39.5548, 1.13088, -0.00264960, 0.00192534])
_batm = np.array([1139.99, 1073.82, 1052.96, 492.503, 1.0])
_catm = np.array(
[861913., 744955., 675928., 829627., 5.8587010e9])
_thickl = np.array(
[1011.398804, 588.128367, 240.955360, 3.964546, 0.000218])
_hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
elif season == "June":
_aatm = np.array(
[-163.331, -65.3713, 0.402903, -0.000479198, 0.00188667])
_batm = np.array([1183.70, 1108.06, 1424.02, 207.595, 1.0])
_catm = np.array(
[875221., 753213., 545846., 793043., 5.9787908e9])
_thickl = np.array(
[1020.370363, 586.143464, 228.374393, 1.338258, 0.000214])
_hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
else:
raise Exception('CorsikaAtmosphere(): Season "' + season +
'" not parameterized for location SouthPole.')
elif location == 'PL_SouthPole':
if season == 'January':
_aatm = np.array(
[-113.139, -7930635, -54.3888, -0.0, 0.00421033])
_batm = np.array([1133.10, 1101.20, 1085.00, 1098.00, 1.0])
_catm = np.array(
[861730., 826340., 790950., 682800., 2.6798156e9])
_thickl = np.array([
1019.966898, 718.071682, 498.659703, 340.222344, 0.000478
])
_hlay = np.array([0.0, 2.67e5, 5.33e5, 8.0e5, 1.0e7])
elif season == "August":
_aatm = np.array(
[-59.0293, -21.5794, -7.14839, 0.0, 0.000190175])
_batm = np.array([1079.0, 1071.90, 1182.0, 1647.1, 1.0])
_catm = np.array(
[764170., 699910., 635650., 551010., 59.329575e9])
_thickl = np.array(
[1019.946057, 391.739652, 138.023515, 43.687992, 0.000022])
_hlay = np.array([0.0, 6.67e5, 13.33e5, 2.0e6, 1.0e7])
else:
raise Exception('CorsikaAtmosphere(): Season "' + season +
'" not parameterized for location SouthPole.')
else:
raise Exception("CorsikaAtmosphere:init_parameters(): Location " +
str(location) + " not parameterized.")
self._atm_param = np.array([_aatm, _batm, _catm, _thickl, _hlay])
self.location, self.season = location, season
# Clear cached theta value to force spline recalculation
self.theta_deg = None
def depth2height(self, x_v):
"""Converts column/vertical depth to height.
Args:
x_v (float): column depth :math:`X_v` in g/cm**2
Returns:
float: height in cm
"""
_aatm, _batm, _catm, _thickl, _hlay = self._atm_param
if x_v >= _thickl[1]:
height = _catm[0] * np.log(_batm[0] / (x_v - _aatm[0]))
elif x_v >= _thickl[2]:
height = _catm[1] * np.log(_batm[1] / (x_v - _aatm[1]))
elif x_v >= _thickl[3]:
height = _catm[2] * np.log(_batm[2] / (x_v - _aatm[2]))
elif x_v >= _thickl[4]:
height = _catm[3] * np.log(_batm[3] / (x_v - _aatm[3]))
else:
height = (_aatm[4] - x_v) * _catm[4]
return height
def get_density(self, h_cm):
""" Returns the density of air in g/cm**3.
Uses the optimized module function :func:`corsika_get_density_jit`.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
return self.corsika_acc.corsika_get_density(h_cm, *self._atm_param)
# return corsika_get_density_jit(h_cm, self._atm_param)
def get_mass_overburden(self, h_cm):
""" Returns the mass overburden in atmosphere in g/cm**2.
Uses the optimized module function :func:`corsika_get_m_overburden_jit`
Args:
h_cm (float): height in cm
Returns:
float: column depth :math:`T(h_{cm})` in g/cm**2
"""
return self.corsika_acc.corsika_get_m_overburden(h_cm, *self._atm_param)
# return corsika_get_m_overburden_jit(h_cm, self._atm_param)
def rho_inv(self, X, cos_theta):
"""Returns reciprocal density in cm**3/g using planar approximation.
This function uses the optimized function :func:`planar_rho_inv_jit`
Args:
h_cm (float): height in cm
Returns:
float: :math:`\\frac{1}{\\rho}(X,\\cos{\\theta})` cm**3/g
"""
return self.corsika_acc.planar_rho_inv(X, cos_theta, *self._atm_param)
# return planar_rho_inv_jit(X, cos_theta, self._atm_param)
def calc_thickl(self):
"""Calculates thickness layers for :func:`depth2height`
The analytical inversion of the CORSIKA parameterization
relies on the knowledge about the depth :math:`X`, where
        transitions between layers/exponentials occur.
Example:
Create a new set of parameters in :func:`init_parameters`
            inserting arbitrary values in the _thickl array::
$ cor_atm = CorsikaAtmosphere(new_location, new_season)
$ cor_atm.calc_thickl()
Replace _thickl values with printout.
"""
from scipy.integrate import quad
thickl = []
for h in self._atm_param[4]:
thickl.append('{0:4.6f}'.format(
quad(self.get_density, h, 112.8e5, epsrel=1e-4)[0]))
info(5, '_thickl = np.array([' + ', '.join(thickl) + '])')
return thickl
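# Minimal usage sketch (a sketch only; assumes a full MCEq installation,
# including the compiled corsikaatm helpers). The printed numbers are whatever
# the chosen parameterization yields, not reference values.
if __name__ == "__main__":
    _cka = CorsikaAtmosphere("USStd", None)
    _cka.set_theta(0.0)
    print("X at the surface [g/cm**2]:", _cka.max_X)
    print("rho at 10 km [g/cm**3]:", _cka.get_density(1e6))
    print("h at X = 100 g/cm**2 [cm]:", _cka.X2h(100.0))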
class IsothermalAtmosphere(EarthsAtmosphere):
"""Isothermal model of the atmosphere.
This model is widely used in semi-analytical calculations. The isothermal
approximation is valid in a certain range of altitudes and usually
    one adjusts the parameters to match a more realistic density profile
at altitudes between 10 - 30 km, where the high energy muon production
rate peaks. Such parametrizations are given in the book "Cosmic Rays and
Particle Physics", Gaisser, Engel and Resconi (2016). The default values
are from M. Thunman, G. Ingelman, and P. Gondolo, Astropart. Physics 5,
309 (1996).
Args:
location (str): no effect
season (str): no effect
hiso_km (float): isothermal scale height in km
X0 (float): Ground level overburden
"""
def __init__(self, location, season, hiso_km=6.3, X0=1300.):
self.hiso_cm = hiso_km * 1e5
self.X0 = X0
self.location = location
self.season = season
EarthsAtmosphere.__init__(self)
def get_density(self, h_cm):
""" Returns the density of air in g/cm**3.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
return self.X0 / self.hiso_cm * np.exp(-h_cm / self.hiso_cm)
def get_mass_overburden(self, h_cm):
""" Returns the mass overburden in atmosphere in g/cm**2.
Args:
h_cm (float): height in cm
Returns:
float: column depth :math:`T(h_{cm})` in g/cm**2
"""
return self.X0 * np.exp(-h_cm / self.hiso_cm)
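# Worked numerical check of the isothermal relations above (a sketch using the
# default X0 = 1300 g/cm**2 and hiso_km = 6.3): at one scale height the
# overburden is X0/e, and rho(h) * hiso_cm reproduces get_mass_overburden(h).
if __name__ == "__main__":
    _iso = IsothermalAtmosphere(None, None)
    _h = _iso.hiso_cm  # one scale height in cm
    print("X(h_iso) [g/cm**2]:", _iso.get_mass_overburden(_h))        # ~ 1300 / e
    print("rho(h_iso) * h_iso :", _iso.get_density(_h) * _iso.hiso_cm)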
class MSIS00Atmosphere(EarthsAtmosphere):
"""Wrapper class for a python interface to the NRLMSISE-00 model.
`NRLMSISE-00 <http://ccmc.gsfc.nasa.gov/modelweb/atmos/nrlmsise00.html>`_
is an empirical model of the Earth's atmosphere. It is available as
    a FORTRAN 77 code or as a version translated into
`C by Dominik Borodowski <http://www.brodo.de/english/pub/nrlmsise/>`_.
Here a PYTHON wrapper has been used.
Attributes:
_msis : NRLMSISE-00 python wrapper object handler
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
def __init__(self,
location,
season=None,
doy=None,
use_loc_altitudes=False):
from MCEq.geometry.nrlmsise00_mceq import cNRLMSISE00
msis_atmospheres = [
'SouthPole',
'Karlsruhe',
'Geneva',
'Tokyo',
'SanGrasso',
'TelAviv',
'KSC',
'SoudanMine',
'Tsukuba',
'LynnLake',
'PeaceRiver',
'FtSumner'
]
assert location in msis_atmospheres, \
'{0} not available for MSIS00Atmosphere'.format(
location
)
self._msis = cNRLMSISE00()
self.init_parameters(location, season, doy, use_loc_altitudes)
EarthsAtmosphere.__init__(self)
def init_parameters(self, location, season, doy, use_loc_altitudes):
"""Sets location and season in :class:`NRLMSISE-00`.
Translates location and season into day of year
and geo coordinates.
Args:
location (str): Supported are "SouthPole" and "Karlsruhe"
season (str): months of the year: January, February, etc.
use_loc_altitudes (bool): If to use default altitudes from location
"""
self._msis.set_location(location)
if season is not None:
self._msis.set_season(season)
else:
self._msis.set_doy(doy)
self.location, self.season = location, season
# Clear cached value to force spline recalculation
self.theta_deg = None
if use_loc_altitudes:
info(0, 'Using loc altitude', self._msis.alt_surface, 'cm')
self.geom.h_obs = self._msis.alt_surface
def get_density(self, h_cm):
""" Returns the density of air in g/cm**3.
Wraps around ctypes calls to the NRLMSISE-00 C library.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
return self._msis.get_density(h_cm)
def set_location(self, location):
""" Changes MSIS location by strings defined in _msis_wrapper.
Args:
location (str): location as defined in :class:`NRLMSISE-00.`
"""
self._msis.set_location(location)
def set_season(self, month):
""" Changes MSIS location by month strings defined in _msis_wrapper.
Args:
location (str): month as defined in :class:`NRLMSISE-00.`
"""
self._msis.set_season(month)
def set_doy(self, day_of_year):
""" Changes MSIS season by day of year.
Args:
day_of_year (int): 1. Jan.=0, 1.Feb=32
"""
self._msis.set_doy(day_of_year)
def get_temperature(self, h_cm):
""" Returns the temperature of air in K.
Wraps around ctypes calls to the NRLMSISE-00 C library.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`T(h_{cm})` in K
"""
return self._msis.get_temperature(h_cm)
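# Minimal usage sketch (a sketch only; assumes MCEq's bundled NRLMSISE-00
# wrapper is importable). Values depend on the model tables and are not
# reference numbers.
if __name__ == "__main__":
    _msis_atm = MSIS00Atmosphere("SouthPole", "January")
    print("rho at 20 km [g/cm**3]:", _msis_atm.get_density(2e6))
    print("T at 20 km [K]:", _msis_atm.get_temperature(2e6))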
class AIRSAtmosphereLatLong(EarthsAtmosphere):
"""Class for downloading AIRS data from website and using as for atmosphere data.
This class can only be used with a working internet connection or already downloaded AIRS data.
For this to work a .netcdf file has to be created containing the Username password combination for eosdis.nasa.gov.
The files are downloaded with wget in AIRS_Download and further processed by the read_in function of AIRS_T_eff_functions(credit to Phillip Fuerst and Simon Hauser).
Additionally netcdf4+HDF4 are required to read the downloaded AIRS files.
(https://unidata.github.io/netcdf4-python/netCDF4/index.html,https://www.unidata.ucar.edu/software/netcdf/, https://portal.hdfgroup.org/display/HDF4/HDF4)
HDF4 has to be installed with the --prefix=H4DIR --enable-shared --disable-fortran --disable-netcdf options.
HDF5 has to be installed with the --prefix=H5DIR --enable-zlib --with-zlib= --with-szlib= options.
Next the NETCDF4 C package has to be installed:
Env variables:
H5DIR=
H4DIR=
LIBS="-ldl"
CPPFLAGS="-I${H5DIR}/include -I${H4DIR}/include"
LDFLAGS="-L${H5DIR}/lib -L${H4DIR}/lib"
Configure Flags:
--prefix= --enable-hdf4 --disble-dap
    Before installing python netcdf, environment variables have to be set:
NETCDF4_DIR=
USE_SETUPCFG=0
HDF5_INCDIR=
HDF5_LIBDIR=
Same for HDF4 and NETCDF4!
Then python setup.py install --user
Args:
date (str): Date to probe for analysis (YearMonthDay)
location ((int,int)): (lat,long) of location. Latitude in degrees (0 to 180) 0=North Pole, 180=South Pole
airs_dir (str): directory to save downloaded AIRS data or which contains already downloaded AIRS data
        keep_airs (bool): Whether to keep the downloaded AIRS data
        asc_desc (str): Either "asc", "desc" or "mean". Defines whether the ascending pass, the descending pass or the average of the two is used for temperature and height.
"""
def __init__(self, date, location,airs_dir = '',keep_airs=False,dataset = 'AIRS',asc_desc='mean' ,*args,**kwargs):
self.airs_dir = airs_dir
self.keep_airs = keep_airs
self.lat,self.long = location
self.asc_desc = asc_desc
self.dataset = dataset
self.init_parameters(date,dataset=dataset, **kwargs)
self.set_location(location)
EarthsAtmosphere.__init__(self)
def init_parameters(self, date, **kwargs):
from MCEq.geometry.T_downloader import AIRS_Download, AIRS_read_in
from MCEq.geometry.ECMWF_Download import Download_ERA5, read_grib_data, read_gribTwo_Data
#from matplotlib.dates import strpdate2num, num2date
if self.dataset == 'AIRS':
airs_filenames = AIRS_Download(self.airs_dir, date, date)
self.Pressure, self.Lat_rad_bins, self.Long_rad_bins, Temp_asc ,Height_asc,Temp_desc,Height_desc = AIRS_read_in(airs_filenames[0]) # hPa, rad,rad,K,m
if not self.keep_airs:
for file in airs_filenames:
os.remove(glob.glob(file)[0] )
if self.asc_desc == 'asc':
Temp = Temp_asc
Height = Height_asc
elif self.asc_desc == 'desc':
Temp = Temp_desc
Height = Height_desc
elif self.asc_desc == 'mean':
Temp = np.nanmean(np.stack((Temp_asc[...,np.newaxis],Temp_desc[...,np.newaxis]), axis=3),3)
Height = np.nanmean(np.stack((Height_asc,Height_desc), axis=3),3)
else:
raise Exception("Variable asc_desc should be asc, desc or mean not " +asc_desc )
del Temp_asc,Height_asc,Height_desc, Temp_desc
elif self.dataset == 'ERA5':
Filenames = Download_ERA5(self.airs_dir, date, date,'12:00')
Temp,Height,lat_bins,long_bins,self.Pressure = read_grib_data(Filenames[0])
self.Lat_rad_bins,self.Long_rad_bins = lat_bins[:,0]/180*np.pi,long_bins[0,:]/180*np.pi
#print(self.Lat_rad_bins.shape,self.Long_rad_bins.shape)
Temp,Height,self.Pressure = Temp[::-1],Height[::-1],self.Pressure[::-1]
elif self.dataset == 'ERA5_model':
Filenames = Download_ERA5(self.airs_dir, date, date,'12:00',model_lvl=True)
if 'single_day' in Filenames[0]:
Temp,Height,lat_bins,long_bins,self.Pressure = read_gribTwo_Data(Filenames[0])
else:
Temp,Height,lat_bins,long_bins,Pressure = read_gribTwo_Data(Filenames[0],datetime.strptime(date,"%Y%m%d"))
self.Lat_rad_bins,self.Long_rad_bins = lat_bins/180*np.pi,long_bins/180*np.pi
#print(self.Lat_rad_bins.shape,self.Long_rad_bins.shape)
Temp,Height,Pressure = Temp[::-1],Height[::-1],Pressure[::-1]
else:
raise Exception("Dataset must be either 'AIRS' or 'ERA5'")
self.date_obj = datetime.strptime(date , '%Y%m%d')
#self.T_splines,self.H_splines,self.D_splines = [],[],[]
self.msis = MSIS00Atmosphere("SouthPole", 'January')
self.msis._msis.set_doy(self._get_y_doy(self.date_obj)[1] - 1)
if self.dataset == 'AIRS':
self.Temp,self.Height = Temp,Height
elif self.dataset == 'ERA5':
from scipy.interpolate import RectBivariateSpline
self.Temp,self.Height = [],[]
for p_idx,p in enumerate(self.Pressure):
T,H = RectBivariateSpline(self.Lat_rad_bins[::-1],self.Long_rad_bins,Temp[p_idx,::-1], kx=1,ky=1 ),RectBivariateSpline(self.Lat_rad_bins[::-1],self.Long_rad_bins,Height[p_idx,::-1], kx=1,ky=1 )
self.Temp.append(T)
self.Height.append(H)
elif self.dataset == 'ERA5_model':
from scipy.interpolate import RectBivariateSpline
self.Temp,self.Height,self.Pressure = [],[],[]
for p_idx,p in enumerate(Pressure):
T,H = RectBivariateSpline(self.Lat_rad_bins[::-1],self.Long_rad_bins,Temp[p_idx,::-1], kx=1,ky=1 ),RectBivariateSpline(self.Lat_rad_bins[::-1],self.Long_rad_bins,Height[p_idx,::-1], kx=1,ky=1 )
self.Pressure.append(RectBivariateSpline(self.Lat_rad_bins[::-1],self.Long_rad_bins,Pressure[p_idx,::-1], kx=1,ky=1 ))
self.Temp.append(T)
self.Height.append(H)
del Height,Temp
self.theta_deg = None
def splines(self,p_idx, latitude, longitude):
from scipy.interpolate import griddata
if self.dataset=='AIRS':
lo,la = np.meshgrid(self.Long_rad_bins,self.Lat_rad_bins)
la,lo = la.ravel(),lo.ravel()
T = self.Temp[p_idx,:,:].ravel()
H = self.Height[p_idx,:,:].ravel()
mask_T = np.isfinite(T)
mask_H = np.isfinite(H)
T,H = T[mask_T],H[mask_H]
la_T,lo_T,la_H,lo_H = la[mask_T],lo[mask_T] ,la[mask_H],lo[mask_H]
if self.theta_deg != None:
#print('doing angle correction')
theta_deg = 180 - self.theta_deg
H_pre = griddata((la_H,lo_H),H,(latitude,longitude),method='nearest')
length = config.r_E*(np.cos(theta_deg/180.*np.pi)+np.sqrt((1+H_pre/config.r_E )**2-np.sin(theta_deg*np.pi/180.)**2 ))
dlat = np.arccos(((config.r_E+H_pre)**2+config.r_E**2-length**2)/(2*config.r_E*(config.r_E+H_pre) ))
#print(dlat,latitude)
else:
dlat = 0
lat_height = min(90.,(self.lat+dlat))
#if self.dataset == 'AIRS':
return griddata((la_T,lo_T),T,(lat_height,longitude),method='nearest'),griddata((la_H,lo_H),H,(lat_height,longitude),method='nearest')
elif self.dataset == 'ERA5':
return self.Temp[p_idx](latitude,longitude%(2*np.pi),grid=False),self.Height[p_idx](latitude,longitude%(2*np.pi),grid=False)
elif self.dataset == 'ERA5_model':
return self.Temp[p_idx](latitude,longitude%(2*np.pi),grid=False),self.Height[p_idx](latitude,longitude%(2*np.pi),grid=False),self.Pressure[p_idx](latitude,longitude%(2*np.pi),grid=False)
def set_location(self, location):
from scipy.interpolate import interp1d
self.lat,self.long = location[0]*np.pi/180.,location[1]*np.pi/180.
R = 8.314*1e6*1e-2 #cm^3 hPa/K/mol (Ideal gas constant)
M = 28.964 #g/mol (molar density of Air)
T_H = np.array([self.splines(p_i,self.lat,self.long) for p_i in range(len(self.Pressure)) ])
h_vec = T_H[:,1]*1e2 #cm
#h_er = np.array([10,20,30,40,50,60,])*1e3*1e2 #cm
#sy_e = np.array([1.1,0.6,1.1,1.0,2.1,5.5])
#st_e = np.array([2.9,1.5,1.4,1.6,2.1,3.6])
#indx = np.digitize(h_vec,h_er)
t_vec = T_H[:,0]#- sy_e[indx-1] + np.random.normal(0,st_e[indx-1]) #K
if self.dataset == 'ERA5' or self.dataset =='AIRS':
d_vec = self.Pressure/t_vec*M/R #g/cm^3
elif self.dataset == 'ERA5_model':
mask =( t_vec > 0 )&(h_vec > 0)
d_vec = (T_H[:,2]/t_vec*M/R) #g/cm^3
t_vec = t_vec[mask]
h_vec = h_vec[mask]
d_vec = d_vec[mask]
if (len(h_vec)==0 or h_vec[-1] < config.h_atm*1e2) and self.dataset != 'ERA5_model':
self.msis._msis.set_location_coord(longitude = self.long*180./np.pi ,latitude = self.lat*180./np.pi)
            if len(h_vec) > 0:
h_extra = np.linspace(h_vec[-1], config.h_atm * 1e2, 10)
#print(h_vec,h_extra)
else:
self.dens = lambda h: np.log(self.msis.get_density(h))
self.temp = lambda t: self.msis.get_temperature(t)
return
msis_extra_d,msis_extra_t = np.zeros(len(h_extra)),np.zeros(len(h_extra))
for h_i,h in enumerate(h_extra):
if self.theta_deg != None :
#print('doing angle correction')
theta_deg = 180 - self.theta_deg
length = config.r_E*(np.cos(theta_deg/180.*np.pi)+np.sqrt((1+h/1e2/config.r_E )**2-np.sin(theta_deg*np.pi/180.)**2 ))
dlat = np.arccos(((config.r_E+h/1e2)**2+config.r_E**2-length**2)/(2*config.r_E*(config.r_E+h/1e2) ))
#print(dlat,latitude)
else:
dlat = 0
#print(self.lat+dlat)
lat_height = min(90.,(self.lat+dlat)*180/np.pi)
self.msis._msis.set_location_coord(longitude = self.long*180./np.pi ,latitude = lat_height)
msis_extra_d[h_i] = self.msis.get_density(h)
msis_extra_t[h_i] = self.msis.get_temperature(h)
# Merge the two datasets
h_vec = np.hstack([h_vec[:-1], h_extra])
d_vec = np.hstack([d_vec[:-1], msis_extra_d])
t_vec = np.hstack([t_vec[:-1], msis_extra_t])
self.dens = interp1d(h_vec, np.log(d_vec),assume_sorted=True,bounds_error=False)
self.temp = interp1d(h_vec, t_vec,assume_sorted=True, bounds_error=False)
def set_date(self, date):
if self.date_obj ==datetime.strptime(date , '%Y%m%d'):
return
self.init_parameters(date)
self.set_location((self.lat,self.long))
def _get_y_doy(self, date):
return date.timetuple().tm_year, date.timetuple().tm_yday
def get_density(self, h_cm):
""" Returns the density of air in g/cm**3.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
ret = np.exp(self.dens(h_cm))
try:
ret[h_cm > config.h_atm*1e2] = np.nan
except TypeError:
if h_cm > config.h_atm*1e2:
return np.nan
return ret
def get_temperature(self, h_cm):
""" Returns the temperature in K.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: temperature :math:`T(h_{cm})` in K
"""
ret = self.temp(h_cm)
try:
ret[h_cm > config.h_atm*1e2] = np.nan
except TypeError:
if h_cm > config.h_atm*1e2:
return np.nan
return ret
class AIRSAtmosphereNorth(EarthsAtmosphere):
"""Interpolation class for tabulated atmospheres.
This class is intended to read preprocessed AIRS Satellite data.
Args:
location (int): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
def __init__(self, location, season, extrapolate=True, *args, **kwargs):
if location.isdigit():
location = int(location)
else:
raise Exception(self.__class__.__name__ +
"(): location should be a latitude between 0 and 180. " +
location)
if location > 180 or location <0:
raise Exception(self.__class__.__name__ +
"(): location should be a latitude between 0 and 180. " +
str(location))
self.extrapolate = extrapolate
self.month2doy = {
'January': 1,
'February': 32,
'March': 60,
'April': 91,
'May': 121,
'June': 152,
'July': 182,
'August': 213,
'September': 244,
'October': 274,
'November': 305,
'December': 335
}
self.season = season
self.init_parameters(location, **kwargs)
EarthsAtmosphere.__init__(self)
def init_parameters(self, location, **kwargs):
"""Loads tables and prepares interpolation.
Args:
location (int): Latitude in degrees (0 to 180) 0=North Pole, 180=South Pole
doy (int): Day Of Year
"""
from scipy.interpolate import interp1d
from matplotlib.dates import strpdate2num, num2date
from os import path
        # This is a brute-force path to takao's AIRS data on the IceCube Madison server. This should be made more general in the future.
data_path = "/data/user/takao/analysis/airs/airx3std_v6_lat_daily/"
if 'table_path' in kwargs:
data_path = kwargs['table_path']
files = [('temp', 'airs_amsu_temp_%03i_daily.txt'%(location)),
('alti', 'airs_amsu_alti_%03i_daily.txt'%(location))]
data_collection = {}
# limit SouthPole pressure to <= 600
min_press_idx = 1
IC79_idx_1 = None
IC79_idx_2 = None
for d_key, fname in files:
fname = data_path + fname
tab = np.loadtxt(fname,
converters={0: strpdate2num('%Y/%m/%d')},
usecols=[0] + list(range(2, 27)))
with open(fname, 'r') as f:
comline = f.readline()
p_levels = [float(s.strip()) for s in comline.split(' ')[3:] if s != '' ][min_press_idx:]#hPa
dates = num2date(tab[:, 0])
for di, date in enumerate(dates):
if date.month == 6 and date.day == 1:
if date.year == 2010:
IC79_idx_1 = di
elif date.year == 2011:
IC79_idx_2 = di
surf_val = tab[:, 1]
cols = tab[:, min_press_idx + 2:]
data_collection[d_key] = (dates, surf_val, cols)
self.interp_tab_d = {}
self.interp_tab_t = {}
self.dates = {}
dates = data_collection['alti'][0]
msis = MSIS00Atmosphere("SouthPole", 'January')
msis._msis.set_location_coord(longitude=0., latitude=90. - float(location))
for didx, date in enumerate(dates):
R = 8.314 * 1e6 * 1e-2  # cm^3 hPa / (K mol), ideal gas constant
M = 28.964  # g/mol, molar mass of air
h_vec = np.array(data_collection['alti'][2][didx, :] * 1e2) #cm
t_vec = np.array(data_collection['temp'][2][didx, :]) #K
d_vec = p_levels/t_vec*M/R #g/cm^3 (from ideal Gas Law)
#if all(np.isfinite(t_vec)&(t_vec>0)==0):
# print t_vec
valid = np.isfinite(t_vec) & (t_vec > 0)
h_vec = h_vec[valid]
d_vec = d_vec[valid]
t_vec = t_vec[valid]
if self.extrapolate:
# Extrapolate using msis
msis._msis.set_doy(self._get_y_doy(date)[1] - 1)
if len(h_vec) > 0:
h_extra = np.linspace(h_vec[-1], config.h_atm * 1e2, 250)
else:
self.interp_tab_d[self._get_y_doy(date)] = lambda h: np.log(msis.get_density(h))
self.interp_tab_t[self._get_y_doy(date)] = lambda h: np.log(msis.get_temperature(h))
self.dates[self._get_y_doy(date)] = date
continue
msis_extra_d = np.array([msis.get_density(h) for h in h_extra])
msis_extra_t = np.array([msis.get_temperature(h) for h in h_extra])
# Interpolate last few altitude bins
#ninterp = 5
#for ni in range(ninterp):
# cl = (1 - np.exp(-ninterp + ni + 1))
# ch = (1 - np.exp(-ni))
# norm = 1. / (cl + ch)
# d_vec[-ni -
# 1] = (d_vec[-ni - 1] * cl * norm +
# msis.get_density(h_vec[-ni - 1]) * ch * norm)
# t_vec[-ni - 1] = (
# t_vec[-ni - 1] * cl * norm +
# msis.get_temperature(h_vec[-ni - 1]) * ch * norm)
# Merge the two datasets
h_vec = np.hstack([h_vec[:-1], h_extra])
d_vec = np.hstack([d_vec[:-1], msis_extra_d])
t_vec = np.hstack([t_vec[:-1], msis_extra_t])
self.interp_tab_d[self._get_y_doy(date)] = interp1d(h_vec, np.log(d_vec),kind='cubic', fill_value = 'extrapolate')
self.interp_tab_t[self._get_y_doy(date)] = interp1d(h_vec, np.log(t_vec),kind='cubic', fill_value = 'extrapolate')
self.dates[self._get_y_doy(date)] = date
self.IC79_start = self._get_y_doy(dates[IC79_idx_1])
self.IC79_end = self._get_y_doy(dates[IC79_idx_2])
self.IC79_days = (dates[IC79_idx_2] - dates[IC79_idx_1]).days
self.location = location
if self.season is None:
self.set_IC79_day(0)
else:
self.set_season(self.season)
# Clear cached value to force spline recalculation
self.theta_deg = None
def doy2month(self, doy):
oldseason = None
for season in self.month2doy:
if oldseason is not None and doy < self.month2doy[season]:
return oldseason
elif doy >= 335:
return 'December'
oldseason = season
def set_location(self, location):
if location.isdigit():
location = int(location)
else:
raise Exception(self.__class__.__name__ +
"(): location should be a latitude between 0 and 180. " +
location)
if location > 180 or location <0:
raise Exception(self.__class__.__name__ +
"(): location should be a latitude between 0 and 180. " +
str(location))
self.init_parameters(location)
#self.calculate_density_spline()
def set_date(self, year, doy):
self.dens = self.interp_tab_d[(year, doy)]
self.temp = self.interp_tab_t[(year, doy)]
self.date = self.dates[(year, doy)]
# Compatibility with caching
self.season = self.doy2month(doy)
self.calculate_density_spline()
def _set_doy(self, doy, year=2010):
self.dens = self.interp_tab_d[(year, doy)]
self.temp = self.interp_tab_t[(year, doy)]
self.date = self.dates[(year, doy)]
def set_season(self, month):
self.season = month
self._set_doy(self.month2doy[month])
self.season = month
def set_IC79_day(self, IC79_day):
import datetime
if IC79_day > self.IC79_days:
raise Exception(self.__class__.__name__ +
"::set_IC79_day(): IC79_day above range.")
target_day = self._get_y_doy(self.dates[self.IC79_start] +
datetime.timedelta(days=IC79_day))
info(2, 'setting IC79_day', IC79_day)
self.dens = self.interp_tab_d[target_day]
self.temp = self.interp_tab_t[target_day]
self.date = self.dates[target_day]
# Compatibility with caching
self.season = self.date
def _get_y_doy(self, date):
return date.timetuple().tm_year, date.timetuple().tm_yday
def get_density(self, h_cm):
""" Returns the density of air in g/cm**3.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
ret = np.exp(self.dens(h_cm))#np.exp(np.interp(h_cm, self.h, np.log(self.dens)))
try:
ret[h_cm > config.h_atm*1e2] = np.nan
except TypeError:
if h_cm > config.h_atm*1e2:
return np.nan
return ret
def get_temperature(self, h_cm):
""" Returns the temperature in K.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: temperature :math:`T(h_{cm})` in K
"""
ret = np.exp(self.temp(h_cm))#np.exp(interp1d( self.h, np.log(self.temp))(h_cm))
try:
ret[h_cm > config.h_atm*1e2] = np.nan
except TypeError:
if h_cm > config.h_atm*1e2:
return np.nan
return ret
class AIRSAtmosphere(EarthsAtmosphere):
"""Interpolation class for tabulated atmospheres.
This class is intended to read preprocessed AIRS Satellite data.
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
def __init__(self, location, season, extrapolate=True, *args, **kwargs):
if location != 'SouthPole':
raise Exception(self.__class__.__name__ +
"(): Only South Pole location supported. " +
location)
self.extrapolate = extrapolate
self.month2doy = {
'January': 1,
'February': 32,
'March': 60,
'April': 91,
'May': 121,
'June': 152,
'July': 182,
'August': 213,
'September': 244,
'October': 274,
'November': 305,
'December': 335
}
self.season = season
self.init_parameters(location, **kwargs)
EarthsAtmosphere.__init__(self)
def init_parameters(self, location, **kwargs):
"""Loads tables and prepares interpolation.
Args:
location (str): supported is only "SouthPole"
doy (int): Day Of Year
"""
from scipy.interpolate import interp1d
#from matplotlib.dates import strpdate2num, num2date
# from time import strptime
from matplotlib.dates import datestr2num, num2date
from os import path
def bytespdate2num(b):
return datestr2num(b.decode('utf-8'))
data_path = path.join(
path.expanduser('~'),
'OneDrive/Dokumente/projects/atmospheric_variations/')
if 'table_path' in kwargs:
data_path = kwargs['table_path']
files = [('dens', 'airs_amsu_dens_180_daily.txt'),
('temp', 'airs_amsu_temp_180_daily.txt'),
('alti', 'airs_amsu_alti_180_daily.txt')]
data_collection = {}
# limit SouthPole pressure to <= 600
min_press_idx = 4
IC79_idx_1 = None
IC79_idx_2 = None
for d_key, fname in files:
fname = data_path + 'tables/' + fname
# tabf = open(fname).read()
tab = np.loadtxt(fname,
converters={0: bytespdate2num},
usecols=[0] + list(range(2, 27)))
# with open(fname, 'r') as f:
# comline = f.readline()
# p_levels = [
# float(s.strip()) for s in comline.split(' ')[3:] if s != ''
# ][min_press_idx:]
dates = num2date(tab[:, 0])
for di, date in enumerate(dates):
if date.month == 6 and date.day == 1:
if date.year == 2010:
IC79_idx_1 = di
elif date.year == 2011:
IC79_idx_2 = di
surf_val = tab[:, 1]
cols = tab[:, min_press_idx + 2:]
data_collection[d_key] = (dates, surf_val, cols)
self.interp_tab_d = {}
self.interp_tab_t = {}
self.dates = {}
dates = data_collection['alti'][0]
msis = MSIS00Atmosphere(location, 'January')
for didx, date in enumerate(dates):
h_vec = np.array(data_collection['alti'][2][didx, :] * 1e2)
d_vec = np.array(data_collection['dens'][2][didx, :])
t_vec = np.array(data_collection['temp'][2][didx, :])
if self.extrapolate:
# Extrapolate using msis
h_extra = np.linspace(h_vec[-1], self.geom.h_atm * 1e2, 250)
msis._msis.set_doy(self._get_y_doy(date)[1] - 1)
msis_extra_d = np.array([msis.get_density(h) for h in h_extra])
msis_extra_t = np.array(
[msis.get_temperature(h) for h in h_extra])
# Interpolate last few altitude bins
ninterp = 5
for ni in range(ninterp):
cl = (1 - np.exp(-ninterp + ni + 1))
ch = (1 - np.exp(-ni))
norm = 1. / (cl + ch)
d_vec[-ni -
1] = (d_vec[-ni - 1] * cl * norm +
msis.get_density(h_vec[-ni - 1]) * ch * norm)
t_vec[-ni - 1] = (
t_vec[-ni - 1] * cl * norm +
msis.get_temperature(h_vec[-ni - 1]) * ch * norm)
# Merge the two datasets
h_vec = np.hstack([h_vec[:-1], h_extra])
d_vec = np.hstack([d_vec[:-1], msis_extra_d])
t_vec = np.hstack([t_vec[:-1], msis_extra_t])
self.interp_tab_d[self._get_y_doy(date)] = (h_vec, d_vec)
self.interp_tab_t[self._get_y_doy(date)] = (h_vec, t_vec)
self.dates[self._get_y_doy(date)] = date
self.IC79_start = self._get_y_doy(dates[IC79_idx_1])
self.IC79_end = self._get_y_doy(dates[IC79_idx_2])
self.IC79_days = (dates[IC79_idx_2] - dates[IC79_idx_1]).days
self.location = location
if self.season is None:
self.set_IC79_day(0)
else:
self.set_season(self.season)
# Clear cached value to force spline recalculation
self.theta_deg = None
def set_date(self, year, doy):
self.h, self.dens = self.interp_tab_d[(year, doy)]
_, self.temp = self.interp_tab_t[(year, doy)]
self.date = self.dates[(year, doy)]
# Compatibility with caching
self.season = self.date
def _set_doy(self, doy, year=2010):
self.h, self.dens = self.interp_tab_d[(year, doy)]
_, self.temp = self.interp_tab_t[(year, doy)]
self.date = self.dates[(year, doy)]
def set_season(self, month):
self.season = month
self._set_doy(self.month2doy[month])
self.season = month
def set_IC79_day(self, IC79_day):
import datetime
if IC79_day > self.IC79_days:
raise Exception(self.__class__.__name__ +
"::set_IC79_day(): IC79_day above range.")
target_day = self._get_y_doy(self.dates[self.IC79_start] +
datetime.timedelta(days=IC79_day))
info(2, 'setting IC79_day', IC79_day)
self.h, self.dens = self.interp_tab_d[target_day]
_, self.temp = self.interp_tab_t[target_day]
self.date = self.dates[target_day]
# Compatibility with caching
self.season = self.date
def _get_y_doy(self, date):
return date.timetuple().tm_year, date.timetuple().tm_yday
def get_density(self, h_cm):
""" Returns the density of air in g/cm**3.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
ret = np.exp(np.interp(h_cm, self.h, np.log(self.dens)))
try:
ret[h_cm > self.h[-1]] = np.nan
except TypeError:
if h_cm > self.h[-1]:
return np.nan
return ret
def get_temperature(self, h_cm):
""" Returns the temperature in K.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: temperature :math:`T(h_{cm})` in K
"""
ret = np.exp(np.interp(h_cm, self.h, np.log(self.temp)))
try:
ret[h_cm > self.h[-1]] = np.nan
except TypeError:
if h_cm > self.h[-1]:
return np.nan
return ret
class MSIS00IceCubeCentered(MSIS00Atmosphere):
"""Extension of :class:`MSIS00Atmosphere` which couples the latitude
setting with the zenith angle of the detector.
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
def __init__(self, location, season):
if location != 'SouthPole':
info(2, 'location forced to the South Pole')
location = 'SouthPole'
MSIS00Atmosphere.__init__(self, location, season)
# Allow for upgoing zenith angles
self.max_theta = 180.
def latitude(self, det_zenith_deg):
""" Returns the geographic latitude of the shower impact point.
Assumes a spherical earth. The detector is 1948m under the
surface.
Credits: geometry formulae by Jakob van Santen, DESY Zeuthen.
Args:
det_zenith_deg (float): zenith angle at detector in degrees
Returns:
float: latitude of the impact point in degrees
"""
r = self.geom.r_E
d = 1948 # m
theta_rad = det_zenith_deg / 180. * np.pi
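# x is the distance from the detector to the Earth's surface along the line
# of sight, obtained from the law of cosines for a sphere of radius r with
# the detector buried at depth d.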
x = (np.sqrt(2. * r * d + ((r - d) * np.cos(theta_rad))**2 - d**2) -
(r - d) * np.cos(theta_rad))
return -90. + np.arctan2(x * np.sin(theta_rad),
r - d + x * np.cos(theta_rad)) / np.pi * 180.
def set_theta(self, theta_deg, force_spline_calc=True):
self._msis.set_location_coord(longitude=0.,
latitude=self.latitude(theta_deg))
info(
1, 'latitude = {0:5.2f} for zenith angle = {1:5.2f}'.format(
self.latitude(theta_deg), theta_deg))
if theta_deg > 90.:
info(
1, 'theta = {0:5.2f} below horizon. using theta = {1:5.2f}'.
format(theta_deg, 180. - theta_deg))
theta_deg = 180. - theta_deg
MSIS00Atmosphere.set_theta(self,
theta_deg,
force_spline_calc=force_spline_calc)
class GeneralizedTarget(object):
"""This class provides a way to run MCEq on piece-wise constant
one-dimensional density profiles.
The default values for the average density are taken from
config file variables `len_target`, `env_density` and `env_name`.
The density profile has to be built by calling subsequently
:func:`add_material`. The current composition of the target
can be checked with :func:`draw_materials` or :func:`print_table`.
Note:
If the target is not air or hydrogen, the result is approximate,
since secondary particle yields are provided for nucleon-air or
proton-proton collisions. Depending on this choice one has to
adjust the nuclear mass in :mod:`mceq_config`.
Args:
len_target (float): total length of the target in meters
env_density (float): density of the default material in g/cm**3
env_name (str): title for this environment
"""
def __init__(
self,
len_target=config.len_target * 1e2, # cm
env_density=config.env_density, # g/cm3
env_name=config.env_name):
self.len_target = len_target
self.env_density = env_density
self.env_name = env_name
self.reset()
@property
def max_den(self):
return self._max_den
def reset(self):
"""Resets material list to defaults."""
self.mat_list = [[
0., self.len_target, self.env_density, self.env_name
]]
self._update_variables()
def _update_variables(self):
"""Updates internal variables. Not needed to call by user."""
self.start_bounds, self.end_bounds, \
self.densities = list(zip(*self.mat_list))[:-1]
self.densities = np.array(self.densities)
self.start_bounds = np.array(self.start_bounds)
self.end_bounds = np.array(self.end_bounds)
self._max_den = np.max(self.densities)
self._integrate()
def set_length(self, new_length_cm):
"""Updates the total length of the target.
Usually the length is set
"""
if new_length_cm < self.mat_list[-1][0]:
raise Exception(
"GeneralizedTarget::set_length(): " +
"can not set length below lower boundary of last " +
"material.")
self.len_target = new_length_cm
self.mat_list[-1][1] = new_length_cm
self._update_variables()
def add_material(self, start_position_cm, density, name):
"""Adds one additional material to a composite target.
Args:
start_position_cm (float): position where the material starts
counted from target origin l|X = 0 in cm
density (float): density of material in g/cm**3
name (str): any user defined name
Raises:
Exception: If requested start_position_cm is not properly defined.
"""
if start_position_cm < 0. or start_position_cm > self.len_target:
raise Exception("GeneralizedTarget::add_material(): " +
"distance exceeds target dimensions.")
elif (start_position_cm == self.mat_list[-1][0]
and self.mat_list[-1][-1] == self.env_name):
self.mat_list[-1] = [
start_position_cm, self.len_target, density, name
]
elif start_position_cm <= self.mat_list[-1][0]:
raise Exception("GeneralizedTarget::add_material(): " +
"start_position_cm is ahead of previous material.")
else:
self.mat_list[-1][1] = start_position_cm
self.mat_list.append(
[start_position_cm, self.len_target, density, name])
info(2,
("{0}::add_material(): Material '{1}' added. " +
"location on path {2} to {3} m").format(self.__class__.__name__,
name,
self.mat_list[-1][0],
self.mat_list[-1][1]))
self._update_variables()
def set_theta(self, *args):
"""This method is not defined for the generalized target. The purpose
is to catch usage errors.
Raises:
NotImplementedError: always
"""
raise NotImplementedError('GeneralizedTarget::set_theta(): Method '
'not defined for this target class.')
def _integrate(self):
"""Walks through material list and computes the depth along the
position (path). Computes the spline for the position-depth relation
and determines the maximum depth for the material selection.
Method does not need to be called by the user, instead the class
calls it when necessary.
"""
from scipy.interpolate import UnivariateSpline
self.density_depth = None
self.knots = [0.]
self.X_int = [0.]
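# Accumulate the grammage: each material segment adds density * length to
# the running depth X at its end boundary.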
for start, end, density, _ in self.mat_list:
self.knots.append(end)
self.X_int.append(density * (end - start) + self.X_int[-1])
self._s_X2h = UnivariateSpline(self.X_int, self.knots, k=1, s=0.)
self._s_h2X = UnivariateSpline(self.knots, self.X_int, k=1, s=0.)
self._max_X = self.X_int[-1]
@property
def s_X2h(self):
"""Spline for depth at distance."""
if not hasattr(self, '_s_X2h'):
self._integrate()
return self._s_X2h
@property
def s_h2X(self):
"""Spline for distance at depth."""
if not hasattr(self, '_s_h2X'):
self._integrate()
return self._s_h2X
@property
def max_X(self):
"""Maximal depth of target."""
if not hasattr(self, '_max_X'):
self._integrate()
return self._max_X
def get_density_X(self, X):
"""Returns the density in g/cm**3 as a function of depth X.
Args:
X (float): depth in g/cm**2
Returns:
float: density in g/cm**3
Raises:
Exception: If requested depth exceeds target.
"""
X = np.atleast_1d(X)
# allow for some small constant extrapolation for odepack solvers
if X[-1] > self.max_X and X[-1] < self.max_X * 1.003:
X[-1] = self.max_X
if np.min(X) < 0. or np.max(X) > self.max_X:
# return self.get_density(self.s_X2h(self.max_X))
info(0, 'Depth {0:4.3f} exceeds target dimensions {1:4.3f}'.format(
np.max(X), self.max_X
))
raise Exception('Invalid input')
return self.get_density(self.s_X2h(X))
def r_X2rho(self, X):
"""Returns the inverse density :math:`\\frac{1}{\\rho}(X)`.
Args:
X (float): slant depth in g/cm**2
Returns:
float: :math:`1/\\rho` in cm**3/g
"""
return 1. / self.get_density_X(X)
def get_density(self, l_cm):
"""Returns the density in g/cm**3 as a function of position l in cm.
Args:
l (float): position in target in cm
Returns:
float: density in g/cm**3
Raises:
Exception: If requested position exceeds target length.
"""
l_cm = np.atleast_1d(l_cm)
res = np.zeros_like(l_cm)
if np.min(l_cm) < 0 or np.max(l_cm) > self.len_target:
raise Exception("GeneralizedTarget::get_density(): " +
"requested position exceeds target legth.")
for i, li in enumerate(l_cm):
bi = 0
while not (li >= self.start_bounds[bi]
and li <= self.end_bounds[bi]):
bi += 1
res[i] = self.densities[bi]
return res
def draw_materials(self, axes=None, logx=False):
"""Makes a plot of depth and density profile as a function
of the target length. The list of materials is printed out, too.
Args:
axes (plt.axes, optional): handle for matplotlib axes
"""
import matplotlib.pyplot as plt
if not axes:
plt.figure(figsize=(5, 2.5))
axes = plt.gca()
ymax = np.max(self.X_int) * 1.01
for _, mat in enumerate(self.mat_list):
xstart = mat[0]
xend = mat[1]
alpha = 0.188 * mat[2] / max(self.densities) + 0.248
if alpha > 1:
alpha = 1.
elif alpha < 0.:
alpha = 0.
axes.fill_between((xstart, xend), (ymax, ymax), (0., 0.),
label=mat[2],
facecolor='grey',
alpha=alpha)
# axes.text(0.5e-2 * (xstart + xend), 0.5 * ymax, str(nm))
axes.plot([xl for xl in self.knots], self.X_int, lw=1.7, color='r')
if logx:
axes.set_xscale('log', nonposx='clip')
axes.set_ylim(0., ymax)
axes.set_xlabel('distance in target (cm)')
axes.set_ylabel(r'depth X (g/cm$^2)$')
self.print_table(min_dbg_lev=2)
def print_table(self, min_dbg_lev=0):
"""Prints table of materials to standard output.
"""
templ = '{0:^3} | {1:15} | {2:^9.3g} | {3:^9.3g} | {4:^8.5g}'
info(
min_dbg_lev,
'********************* List of materials ***********************',
no_caller=True)
head = '{0:3} | {1:15} | {2:9} | {3:9} | {4:9}'.format(
'no', 'name', 'start [cm]', 'end [cm]', 'density [g/cm**3]')
info(min_dbg_lev, '-' * len(head), no_caller=True)
info(min_dbg_lev, head, no_caller=True)
info(min_dbg_lev, '-' * len(head), no_caller=True)
for nm, mat in enumerate(self.mat_list):
info(min_dbg_lev,
templ.format(nm, mat[3], mat[0], mat[1], mat[2]),
no_caller=True)
if __name__ == '__main__':
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 4))
plt.title('CORSIKA atmospheres')
cka_atmospheres = [
("USStd", None),
("BK_USStd", None),
("Karlsruhe", None),
("ANTARES/KM3NeT-ORCA", 'Summer'),
("ANTARES/KM3NeT-ORCA", 'Winter'),
("KM3NeT-ARCA", 'Summer'),
("KM3NeT-ARCA", 'Winter'),
("KM3NeT", None),
('SouthPole','December'),
('PL_SouthPole','January'),
('PL_SouthPole','August'),
]
cka_surf_100 = []
for loc, season in cka_atmospheres:
cka_obj = CorsikaAtmosphere(loc, season)
cka_obj.set_theta(0.0)
x_vec = np.linspace(0, cka_obj.max_X, 5000)
plt.plot(x_vec,
1 / cka_obj.r_X2rho(x_vec),
lw=1.5,
label='{0}/{1}'.format(loc, season) if season is not None
else '{0}'.format(loc))
cka_surf_100.append((cka_obj.max_X, 1. / cka_obj.r_X2rho(100.)))
print(cka_surf_100)
plt.ylabel(r'Density $\rho$ (g/cm$^3$)')
plt.xlabel(r'Depth (g/cm$^2$)')
plt.legend(loc='upper left')
plt.tight_layout()
plt.figure(figsize=(5, 4))
plt.title('NRLMSISE-00 atmospheres')
msis_atmospheres = [
('SouthPole', "January"),
('Karlsruhe', "January"),
('Geneva', "January"),
('Tokyo', "January"),
('SanGrasso', "January"),
('TelAviv', "January"),
('KSC', "January"),
('SoudanMine', "January"),
('Tsukuba', "January"),
('LynnLake', "January"),
('PeaceRiver', "January"),
('FtSumner', "January")
]
msis_surf_100 = []
for loc, season in msis_atmospheres:
msis_obj = MSIS00Atmosphere(loc, season)
msis_obj.set_theta(0.0)
x_vec = np.linspace(0, msis_obj.max_X, 5000)
plt.plot(x_vec,
1 / msis_obj.r_X2rho(x_vec),
lw=1.5,
label='{0}'.format(loc))
msis_surf_100.append((msis_obj.max_X, 1. / msis_obj.r_X2rho(100.)))
print(msis_surf_100)
plt.ylabel(r'Density $\rho$ (g/cm$^3$)')
plt.xlabel(r'Depth (g/cm$^2$)')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
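# Minimal GeneralizedTarget sketch (illustrative only; the numbers below are
# hypothetical and not taken from mceq_config): build a two-material target
# and inspect its depth profile.
gen_target = GeneralizedTarget(len_target=1000e2, env_density=1.2e-3, env_name='air')
gen_target.add_material(500e2, 1.0, 'water')
gen_target.print_table(min_dbg_lev=0)
print('max depth X =', gen_target.max_X, 'g/cm**2')
print('density at X = 10 g/cm**2:', gen_target.get_density_X(10.), 'g/cm**3')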
|
py | b413acd0b61f8a5c7a35622c3c717d5940f96ec0 | from __future__ import print_function
import os
import sys
# Needed to pass
# http://www.w3.org/2009/sparql/docs/tests/data-sparql11/
# syntax-update-2/manifest#syntax-update-other-01
from test import TEST_DIR
from test.earl import report, add_test
from test.manifest import nose_tests, UP, MF
sys.setrecursionlimit(6000) # default is 1000
from collections import Counter
import datetime
import isodate
import typing
from rdflib import Dataset, Graph, URIRef, BNode
from rdflib.query import Result
from rdflib.compare import isomorphic
from rdflib.plugins import sparql as rdflib_sparql_module
from rdflib.plugins.sparql.algebra import pprintAlgebra, translateQuery, translateUpdate
from rdflib.plugins.sparql.parser import parseQuery, parseUpdate
from rdflib.plugins.sparql.results.rdfresults import RDFResultParser
from rdflib.plugins.sparql.update import evalUpdate
from rdflib.compat import decodeStringEscape, bopen
from urllib.parse import urljoin
from io import BytesIO
from nose.tools import nottest, eq_
from nose import SkipTest
def eq(a, b, msg):
return eq_(a, b, msg + ": (%r!=%r)" % (a, b))
def setFlags():
import rdflib
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = False
# we need an explicit default graph
rdflib_sparql_module.SPARQL_DEFAULT_GRAPH_UNION = False
# we obviously need this
rdflib.DAWG_LITERAL_COLLATION = True
def resetFlags():
import rdflib
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = True
# we need an explicit default graph
rdflib_sparql_module.SPARQL_DEFAULT_GRAPH_UNION = True
# we obviously need this
rdflib.DAWG_LITERAL_COLLATION = False
DEBUG_FAIL = True
DEBUG_FAIL = False
DEBUG_ERROR = True
DEBUG_ERROR = False
SPARQL10Tests = True
# SPARQL10Tests = False
SPARQL11Tests = True
# SPARQL11Tests=False
RDFLibTests = True
DETAILEDASSERT = True
# DETAILEDASSERT=False
NAME = None
fails: typing.Counter[str] = Counter()
errors: typing.Counter[str] = Counter()
failed_tests = []
error_tests = []
def bopen_read_close(fn):
with bopen(fn) as f:
return f.read()
try:
with open("skiptests.list") as skip_tests_f:
skiptests = dict(
[
(URIRef(x.strip().split("\t")[0]), x.strip().split("\t")[1])
for x in skip_tests_f
]
)
except IOError:
skiptests = dict()
def _fmt(f):
if f.endswith(".rdf"):
return "xml"
return "turtle"
def bindingsCompatible(a, b):
"""
Are two binding-sets compatible.
From the spec: http://www.w3.org/2009/sparql/docs/tests/#queryevaltests
A SPARQL implementation passes a query evaluation test if the
graph produced by evaluating the query against the RDF dataset
(and encoding in the DAWG result set vocabulary, if necessary) is
equivalent [RDF-CONCEPTS] to the graph named in the result (after
encoding in the DAWG result set vocabulary, if necessary). Note
that, solution order only is considered relevant, if the result is
expressed in the test suite in the DAWG result set vocabulary,
with explicit rs:index triples; otherwise solution order is
considered irrelevant for passing. Equivalence can be tested by
checking that the graphs are isomorphic and have identical IRI and
literal nodes. Note that testing whether two result sets are
isomorphic is simpler than full graph isomorphism. Iterating over
rows in one set, finding a match with the other set, removing this
pair, then making sure all rows are accounted for, achieves the
same effect.
"""
def rowCompatible(x, y):
m = {}
y = y.asdict()
for v1, b1 in x.asdict().items():
if v1 not in y:
return False
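# Blank nodes are matched via a consistent mapping (m) from bnodes in x to
# the corresponding terms in y, rather than by comparing their labels.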
if isinstance(b1, BNode):
if b1 in m:
if y[v1] != m[b1]:
return False
else:
m[b1] = y[v1]
else:
# if y[v1]!=b1:
# return False
try:
if y[v1].neq(b1):
return False
except TypeError:
return False
return True
if not a:
if b:
return False
return True
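# Backtracking search, as described in the docstring: pick one row of a, try
# every row of b it is compatible with, and recurse on the remaining rows.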
x = next(iter(a))
for y in b:
if rowCompatible(x, y):
if bindingsCompatible(a - set((x,)), b - set((y,))):
return True
return False
def pp_binding(solutions):
"""
Pretty print a single binding - for less eye-strain when debugging
"""
return (
"\n["
+ ",\n\t".join(
"{" + ", ".join("%s:%s" % (x[0], x[1].n3()) for x in bindings.items()) + "}"
for bindings in solutions
)
+ "]\n"
)
@nottest
def update_test(t):
# the update-eval tests refer to graphs on http://example.org
rdflib_sparql_module.SPARQL_LOAD_GRAPHS = False
uri, name, comment, data, graphdata, query, res, syntax = t
if uri in skiptests:
raise SkipTest()
try:
g = Dataset()
if not res:
if syntax:
with bopen(query[7:]) as f:
translateUpdate(parseUpdate(f))
else:
try:
with bopen(query[7:]) as f:
translateUpdate(parseUpdate(f))
raise AssertionError("Query shouldn't have parsed!")
except:
pass # negative syntax test
return
resdata, resgraphdata = res
# read input graphs
if data:
g.default_context.load(data, format=_fmt(data))
if graphdata:
for x, l in graphdata:
g.load(x, publicID=URIRef(l), format=_fmt(x))
with bopen(query[7:]) as f:
req = translateUpdate(parseUpdate(f))
evalUpdate(g, req)
# read expected results
resg = Dataset()
if resdata:
resg.default_context.load(resdata, format=_fmt(resdata))
if resgraphdata:
for x, l in resgraphdata:
resg.load(x, publicID=URIRef(l), format=_fmt(x))
eq(
set(x.identifier for x in g.contexts() if x != g.default_context),
set(x.identifier for x in resg.contexts() if x != resg.default_context),
"named graphs in datasets do not match",
)
assert isomorphic(
g.default_context, resg.default_context
), "Default graphs are not isomorphic"
for x in g.contexts():
if x == g.default_context:
continue
assert isomorphic(x, resg.get_context(x.identifier)), (
"Graphs with ID %s are not isomorphic" % x.identifier
)
except Exception as e:
if isinstance(e, AssertionError):
failed_tests.append(uri)
fails[str(e)] += 1
else:
error_tests.append(uri)
errors[str(e)] += 1
if DEBUG_ERROR and not isinstance(e, AssertionError) or DEBUG_FAIL:
print("======================================")
print(uri)
print(name)
print(comment)
if not res:
if syntax:
print("Positive syntax test")
else:
print("Negative syntax test")
if data:
print("----------------- DATA --------------------")
print(">>>", data)
print(bopen_read_close(data[7:]))
if graphdata:
print("----------------- GRAPHDATA --------------------")
for x, l in graphdata:
print(">>>", x, l)
print(bopen_read_close(x[7:]))
print("----------------- Request -------------------")
print(">>>", query)
print(bopen_read_close(query[7:]))
if res:
if resdata:
print("----------------- RES DATA --------------------")
print(">>>", resdata)
print(bopen_read_close(resdata[7:]))
if resgraphdata:
print("----------------- RES GRAPHDATA -------------------")
for x, l in resgraphdata:
print(">>>", x, l)
print(bopen_read_close(x[7:]))
print("------------- MY RESULT ----------")
print(g.serialize(format="trig"))
try:
pq = translateUpdate(parseUpdate(bopen_read_close(query[7:])))
print("----------------- Parsed ------------------")
pprintAlgebra(pq)
# print pq
except:
print("(parser error)")
print(decodeStringEscape(str(e)))
import pdb
pdb.post_mortem(sys.exc_info()[2])
raise
@nottest # gets called by generator
def query_test(t):
uri, name, comment, data, graphdata, query, resfile, syntax = t
# the query-eval tests refer to graphs to load by resolvable filenames
rdflib_sparql_module.SPARQL_LOAD_GRAPHS = True
if uri in skiptests:
raise SkipTest()
def skip(reason="(none)"):
print("Skipping %s from now on." % uri)
with bopen("skiptests.list", "a") as f:
f.write("%s\t%s\n" % (uri, reason))
try:
g = Dataset()
if data:
g.default_context.load(data, format=_fmt(data))
if graphdata:
for x in graphdata:
g.load(x, format=_fmt(x))
if not resfile:
# no result - syntax test
if syntax:
translateQuery(
parseQuery(bopen_read_close(query[7:])), base=urljoin(query, ".")
)
else:
# negative syntax test
try:
translateQuery(
parseQuery(bopen_read_close(query[7:])),
base=urljoin(query, "."),
)
assert False, "Query should not have parsed!"
except:
pass # it's fine - the query should not parse
return
# eval test - carry out query
res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, "."))
if resfile.endswith("ttl"):
resg = Graph()
resg.load(resfile, format="turtle", publicID=resfile)
res = RDFResultParser().parse(resg)
elif resfile.endswith("rdf"):
resg = Graph()
resg.load(resfile, publicID=resfile)
res = RDFResultParser().parse(resg)
else:
with bopen(resfile[7:]) as f:
if resfile.endswith("srj"):
res = Result.parse(f, format="json")
elif resfile.endswith("tsv"):
res = Result.parse(f, format="tsv")
elif resfile.endswith("csv"):
res = Result.parse(f, format="csv")
# CSV is lossy, round-trip our own resultset to
# lose the same info :)
# write bytes, read strings...
s = BytesIO()
res2.serialize(s, format="csv")
s.seek(0)
res2 = Result.parse(s, format="csv")
s.close()
else:
res = Result.parse(f, format="xml")
if not DETAILEDASSERT:
eq(res.type, res2.type, "Types do not match")
if res.type == "SELECT":
eq(set(res.vars), set(res2.vars), "Vars do not match")
comp = bindingsCompatible(set(res), set(res2))
assert comp, "Bindings do not match"
elif res.type == "ASK":
eq(res.askAnswer, res2.askAnswer, "Ask answer does not match")
elif res.type in ("DESCRIBE", "CONSTRUCT"):
assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
raise Exception("Unknown result type: %s" % res.type)
else:
eq(
res.type,
res2.type,
"Types do not match: %r != %r" % (res.type, res2.type),
)
if res.type == "SELECT":
eq(
set(res.vars),
set(res2.vars),
"Vars do not match: %r != %r" % (set(res.vars), set(res2.vars)),
)
assert bindingsCompatible(set(res), set(res2)), (
"Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s"
% (
res.serialize(
format="txt", namespace_manager=g.namespace_manager
),
res2.serialize(
format="txt", namespace_manager=g.namespace_manager
),
)
)
elif res.type == "ASK":
eq(
res.askAnswer,
res2.askAnswer,
"Ask answer does not match: %r != %r"
% (res.askAnswer, res2.askAnswer),
)
elif res.type in ("DESCRIBE", "CONSTRUCT"):
assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
raise Exception("Unknown result type: %s" % res.type)
except Exception as e:
if isinstance(e, AssertionError):
failed_tests.append(uri)
fails[str(e)] += 1
else:
error_tests.append(uri)
errors[str(e)] += 1
if DEBUG_ERROR and not isinstance(e, AssertionError) or DEBUG_FAIL:
print("======================================")
print(uri)
print(name)
print(comment)
if not resfile:
if syntax:
print("Positive syntax test")
else:
print("Negative syntax test")
if data:
print("----------------- DATA --------------------")
print(">>>", data)
print(bopen_read_close(data[7:]))
if graphdata:
print("----------------- GRAPHDATA --------------------")
for x in graphdata:
print(">>>", x)
print(bopen_read_close(x[7:]))
print("----------------- Query -------------------")
print(">>>", query)
print(bopen_read_close(query[7:]))
if resfile:
print("----------------- Res -------------------")
print(">>>", resfile)
print(bopen_read_close(resfile[7:]))
try:
pq = parseQuery(bopen_read_close(query[7:]))
print("----------------- Parsed ------------------")
pprintAlgebra(translateQuery(pq, base=urljoin(query, ".")))
except:
print("(parser error)")
print(decodeStringEscape(str(e)))
import pdb
pdb.post_mortem(sys.exc_info()[2])
# pdb.set_trace()
# nose.tools.set_trace()
raise
testers = {
UP.UpdateEvaluationTest: update_test,
MF.UpdateEvaluationTest: update_test,
MF.PositiveUpdateSyntaxTest11: update_test,
MF.NegativeUpdateSyntaxTest11: update_test,
MF.QueryEvaluationTest: query_test,
MF.NegativeSyntaxTest11: query_test,
MF.PositiveSyntaxTest11: query_test,
MF.CSVResultFormatTest: query_test,
}
def test_dawg():
setFlags()
if SPARQL10Tests:
for t in nose_tests(testers, "test/DAWG/data-r2/manifest-evaluation.ttl"):
yield t
if SPARQL11Tests:
for t in nose_tests(testers, "test/DAWG/data-sparql11/manifest-all.ttl"):
yield t
if RDFLibTests:
for t in nose_tests(testers, "test/DAWG/rdflib/manifest.ttl"):
yield t
resetFlags()
if __name__ == "__main__":
import sys
import time
start = time.time()
if len(sys.argv) > 1:
NAME = sys.argv[1]
DEBUG_FAIL = True
i = 0
success = 0
skip = 0
for _type, t in test_dawg():
if NAME and not str(t[0]).startswith(NAME):
continue
i += 1
try:
_type(t)
add_test(t[0], "passed")
success += 1
except SkipTest as e:
msg = skiptests.get(t[0], e.args)
add_test(t[0], "untested", msg)
print("skipping %s - %s" % (t[0], msg))
skip += 1
except KeyboardInterrupt:
raise
except AssertionError:
add_test(t[0], "failed")
except:
add_test(t[0], "failed", "error")
import traceback
traceback.print_exc()
sys.stderr.write("%s\n" % t[0])
print("\n----------------------------------------------------\n")
print("Failed tests:")
for failed in failed_tests:
print(failed)
print("\n----------------------------------------------------\n")
print("Error tests:")
for error in error_tests:
print(error)
print("\n----------------------------------------------------\n")
print("Most common fails:")
for failed in fails.most_common(10):
failed_str = str(failed)
print(failed_str[:450] + (failed_str[450:] and "..."))
print("\n----------------------------------------------------\n")
if errors:
print("Most common errors:")
for error in errors.most_common(10):
print(error)
else:
print("(no errors!)")
f_sum = sum(fails.values())
e_sum = sum(errors.values())
if success + f_sum + e_sum + skip != i:
print("(Something is wrong, %d!=%d)" % (success + f_sum + e_sum + skip, i))
print(
"\n%d tests, %d passed, %d failed, %d errors, \
%d skipped (%.2f%% success)"
% (i, success, f_sum, e_sum, skip, 100.0 * success / i)
)
print("Took %.2fs" % (time.time() - start))
if not NAME:
now = isodate.datetime_isoformat(datetime.datetime.utcnow())
with open("testruns.txt", "a") as tf:
tf.write(
"%s\n%d tests, %d passed, %d failed, %d errors, %d "
"skipped (%.2f%% success)\n\n"
% (now, i, success, f_sum, e_sum, skip, 100.0 * success / i)
)
earl_report = os.path.join(TEST_DIR, "../test_reports/rdflib_sparql-%s.ttl" % now.replace(":", ""))
report.serialize(earl_report, format="n3")
report.serialize(os.path.join(TEST_DIR, "../test_reports/rdflib_sparql-latest.ttl"), format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
|
py | b413ad6f8c08f474212bc5973b8fc53dc57d01dc | from django.contrib import admin
from django.apps import apps
models = apps.get_models()
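# Register every discovered model with the default admin site; models that an
# app has already registered explicitly raise AlreadyRegistered and are skipped.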
for model in models:
try:
admin.site.register(model)
except admin.sites.AlreadyRegistered:
pass |
py | b413ade7a3ff4ca4026f90a84110d3aff9721933 | import pytest # noqa
from actions.homeassistant_action import HomeAssistantAction
def test_homeassistant_action(card, actions, monkeypatch, mocker):
requests_post = mocker.MagicMock()
monkeypatch.setattr("requests.post", requests_post)
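# requests.post is replaced with a MagicMock so no real HTTP request is sent;
# the assertion below inspects the URL, payload and headers the action used.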
card["uri"] = "uri"
action = actions["Test Action"]
action["token"] = "token"
action["host"] = "hostname"
action["port"] = "1337"
action["verify_ssl"] = False
processor = HomeAssistantAction(card, action)
processor.process()
requests_post.assert_called_with(
"http://hostname:1337/api/events/magic_card_scanned",
data={
"card_code": "123456789",
"card_type": "test-type",
"card_arturl": "test-URL",
"card_title": "test-Title",
"card_subtitle": "test-Subtitle",
"card_uri": "uri",
"magic_cards_room": "Living Room",
},
headers={"Authorization": "Bearer token", "Content-Type": "application/json"},
verify=False,
)
|
py | b413ae4205defeebcf0e650927f3bf9eb9ef21ec | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Geopm(AutotoolsPackage):
"""GEOPM is an extensible power management framework targeting HPC.
The GEOPM package provides libgeopm, libgeopmpolicy and applications
geopmctl and geopmpolicy, as well as tools for postprocessing.
GEOPM is designed to be extended for new control algorithms and new
hardware power management features via its plugin infrastructure.
Note: GEOPM interfaces with hardware using Model Specific Registers (MSRs).
For proper usage make sure MSRs are made available directly or via the
msr-safe kernel module by your administrator."""
homepage = "https://geopm.github.io"
url = "https://github.com/geopm/geopm/releases/download/v0.4.0/geopm-0.4.0.tar.gz"
# Add additional proper versions and checksums here. "spack checksum geopm"
version('0.5.0', '61b454bc74d4606fe84818aef16c1be4')
version('0.4.0', 'd4cc8fffe521296dab379857d7e2064d')
version('0.3.0', '568fd37234396fff134f8d57b60f2b83')
version('master', git='https://github.com/geopm/geopm.git', branch='master')
version('develop', git='https://github.com/geopm/geopm.git', branch='dev')
# Variants reflecting most ./configure --help options
variant('debug', default=False, description='Enable debug.')
variant('coverage', default=False, description='Enable test coverage support, enables debug by default.')
variant('overhead', default=False, description='Enable GEOPM to calculate and display time spent in GEOPM API calls.')
variant('procfs', default=True, description='Enable procfs (disable for OSes not using procfs).')
variant('mpi', default=True, description='Enable MPI dependent components.')
variant('fortran', default=True, description='Build fortran interface.')
variant('doc', default=True, description='Create man pages with ruby-ronn.')
variant('openmp', default=True, description='Build with OpenMP.')
variant('ompt', default=False, description='Use OpenMP Tools Interface.')
variant('hwloc', default=True, description='Build with hwloc.')
variant('gnu-ld', default=False, description='Assume C compiler uses gnu-ld.')
# Added dependencies.
depends_on('m4', type='build')
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('ruby-ronn', type='build', when='+doc')
depends_on('doxygen', type='build', when='+doc')
depends_on('numactl')
depends_on('mpi', when='+mpi')
# TODO: check if hwloc@specific-version still required with future openmpi
depends_on('[email protected]', when='+hwloc')
depends_on('json-c')
depends_on('py-pandas', type='run')
depends_on('py-numpy', type='run')
depends_on('py-natsort', type='run')
depends_on('py-matplotlib', type='run')
parallel = False
def configure_args(self):
args = []
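# Each variant maps onto the matching ./configure switch: enable_or_disable('x')
# emits --enable-x or --disable-x depending on the spec, and
# with_or_without('hwloc', activation_value='prefix') emits
# --with-hwloc=<hwloc prefix> or --without-hwloc.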
args.extend(self.enable_or_disable('debug'))
args.extend(self.enable_or_disable('coverage'))
args.extend(self.enable_or_disable('overhead'))
args.extend(self.enable_or_disable('procfs'))
args.extend(self.enable_or_disable('mpi'))
args.extend(self.enable_or_disable('fortran'))
args.extend(self.enable_or_disable('doc'))
args.extend(self.enable_or_disable('openmp'))
args.extend(self.enable_or_disable('ompt'))
args.extend(self.with_or_without('hwloc', activation_value='prefix'))
args.extend(self.with_or_without('gnu-ld'))
return args
|
py | b413ae5ae1a0791dc7ee069aca0c8b6d7b13d6f2 | """
Clean and validate a DataFrame column containing Indian Permanent Account numbers (PANs).
"""
# pylint: disable=too-many-lines, too-many-arguments, too-many-branches
from typing import Any, Union
from operator import itemgetter
import dask.dataframe as dd
import numpy as np
import pandas as pd
from stdnum.in_ import pan
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, to_dask
def clean_in_pan(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
output_format: str = "standard",
inplace: bool = False,
errors: str = "coerce",
progress: bool = True,
) -> pd.DataFrame:
"""
Clean Indian Permanent Account numbers (PANs) type data in a DataFrame column.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
col
The name of the column containing data of PAN type.
output_format
The output format of standardized number string.
If output_format = 'compact', return string without any separators or whitespace.
If output_format = 'standard', return string with proper separators and whitespace.
If output_format = 'info', return a dictionary containing information
that can be decoded from the PAN.
If output_format = 'mask', mask the PAN as per CBDT masking standard.
Note: in the case of PAN, the compact format is the same as the standard one.
(default: "standard")
inplace
If True, delete the column containing the data that was cleaned.
Otherwise, keep the original column.
(default: False)
errors
How to handle parsing errors.
- ‘coerce’: invalid parsing will be set to NaN.
- ‘ignore’: invalid parsing will return the input.
- ‘raise’: invalid parsing will raise an exception.
(default: 'coerce')
progress
If True, display a progress bar.
(default: True)
Examples
--------
Clean a column of PAN data.
>>> df = pd.DataFrame({
"pan": [
'ACUPA7085R',
'234123412347',
]
})
>>> clean_in_pan(df, 'pan')
pan pan_clean
0 ACUPA7085R ACUPA7085R
1 234123412347 NaN
"""
if output_format not in {"compact", "standard", "info", "mask"}:
raise ValueError(
f"output_format {output_format} is invalid. "
'It needs to be "compact", "standard", "info", "mask".'
)
# convert to dask
df = to_dask(df)
# To clean, create a new column "clean_code_tup" which contains
# the cleaned values and code indicating how the initial value was
# changed in a tuple. Then split the column of tuples and count the
# amount of different codes to produce the report
df["clean_code_tup"] = df[column].map_partitions(
lambda srs: [_format(x, output_format, errors) for x in srs],
meta=object,
)
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0)),
)
df = df.rename(columns={"_temp_": f"{column}_clean"})
df = df.drop(columns=["clean_code_tup"])
if inplace:
df[column] = df[f"{column}_clean"]
df = df.drop(columns=f"{column}_clean")
df = df.rename(columns={column: f"{column}_clean"})
with ProgressBar(minimum=1, disable=not progress):
df = df.compute()
return df
def validate_in_pan(
df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
"""
Validate if a data cell is PAN in a DataFrame column. For each cell, return True or False.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be validated.
col
The name of the column to be validated.
"""
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(pan.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if column != "":
return df[column].apply(pan.is_valid)
else:
return df.applymap(pan.is_valid)
return pan.is_valid(df)
def _format(val: Any, output_format: str = "standard", errors: str = "coerce") -> Any:
"""
Reformat a number string with proper separators and whitespace.
Parameters
----------
val
The value of number string.
output_format
If output_format = 'compact', return string without any separators or whitespace.
If output_format = 'standard', return string with proper separators and whitespace.
If output_format = 'info', return a dictionary containing information
that can be decoded from the PAN.
If output_format = 'mask', mask the PAN as per CBDT masking standard.
Note: in the case of PAN, the compact format is the same as the standard one.
"""
val = str(val)
result: Any = []
if val in NULL_VALUES:
return [np.nan]
if not validate_in_pan(val):
if errors == "raise":
raise ValueError(f"Unable to parse value {val}")
error_result = val if errors == "ignore" else np.nan
return [error_result]
if output_format in {"compact", "standard"}:
result = [pan.compact(val)] + result
elif output_format == "info":
result = [pan.info(val)] + result
elif output_format == "mask":
result = [pan.mask(val)] + result
return result
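# Illustrative usage sketch (values reused from the clean_in_pan docstring
# above): validate_in_pan works on scalars as well as element-wise on
# Series/DataFrames, e.g.
# validate_in_pan('ACUPA7085R') -> True
# validate_in_pan(pd.Series(['ACUPA7085R', '234123412347'])) -> [True, False]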
|
py | b413ae91c639d1229334e2070db43cda2f3a75d8 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import gettext
from django.views.decorators.http import require_http_methods
from django.http import HttpResponse
from django.core import serializers
from main_app.forms import AddIceForm
from main_app.models import IceSlot
@require_http_methods(["GET"])
@login_required()
def list_ices(request):
"""
List all booked ice slots
:param request: client request
:return: a page with a table of all ice slots
"""
ice_slots = IceSlot.objects.all()
return render(request, 'ice_slots/list_ices.html', {
'ice_slots': ice_slots
})
@require_http_methods(["GET", "POST"])
@login_required()
def add_ice(request):
"""
Provides a form so the user can schedule ice times
:param request: client request
:return: a page with add ice form
"""
if request.method == 'POST':
form = AddIceForm(request.POST)
if form.is_valid():
ice_slot = IceSlot(
start_time=form.cleaned_data['start_time'],
end_time=form.cleaned_data['end_time'],
club=request.user.member.club
)
ice_slot.save()
ice_slot.refresh_from_db()
messages.add_message(request, messages.SUCCESS,
gettext('Added ice-slot at {start} for club {club}').format(start=ice_slot.start_time,
club=ice_slot.club.name))
return redirect('list_ices')
else:
form = AddIceForm()
return render(request, 'ice_slots/add_ice.html', {
'form': form
})
@require_http_methods(["GET", "POST"])
@login_required()
def edit_ice(request, ice_slot_id):
"""
View to edit a booked ice slot, e.g. its time.
:param request: client request
:param ice_slot_id: id of to be modified ice slot
:return: a page with a edit ice form
"""
ice_slot = get_object_or_404(IceSlot, pk=ice_slot_id)
if request.method == 'POST':
form = AddIceForm(request.POST, instance=ice_slot)
if form.is_valid():
old_start = ice_slot.start_time
old_end = ice_slot.end_time
form.save()
messages.add_message(request, messages.SUCCESS,
gettext(
'Changed time from {old_start}-{old_end} to {start}-{end} for slot {id}').format(
old_start=old_start.time(), old_end=old_end.time(), start=ice_slot.start_time.time(),
end=ice_slot.end_time.time(), id=ice_slot_id))
return redirect('list_ices')
else:
form = AddIceForm(instance=ice_slot)
return render(request, 'ice_slots/add_ice.html', {
'form': form,
})
@require_http_methods(["POST"])
@login_required()
def delete_ice(request, ice_slot_id):
"""
View to delete the ice slot with the given id and redirect to the ice slot list.
:param request: client request
:param ice_slot_id: id of the ice slot to be deleted
:return: success message and redirect to the ice slot list, or a 404 error if the slot does not exist
"""
if request.method == 'POST':
ice_slot = get_object_or_404(IceSlot, pk=ice_slot_id)
ice_slot.delete()
messages.add_message(request, messages.SUCCESS, gettext('Deleted ice_slot {id}').format(id=ice_slot_id))
return redirect('list_ices')
@require_http_methods(["GET"])
@login_required()
def view_ice(request, ice_slot_id):
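"""
Return the trainings scheduled on the given ice slot as JSON, limited to
the title, start_time and end_time fields.
:param request: client request
:param ice_slot_id: id of the ice slot to inspect
:return: HttpResponse with the JSON-serialized trainings
"""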
ice_slot = get_object_or_404(IceSlot, pk=ice_slot_id)
trainings = ice_slot.trainings.all()
data = serializers.serialize('json', trainings, fields=('title', 'start_time', 'end_time'))
return HttpResponse(data, content_type="application/json")
|
py | b413b04562c6bd38552d27a597cb39b2c081aff7 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Data generators for the Mathematical Language Understanding dataset.
The training and test data were generated by assigning symbolic variables
either positive or negative decimal integers and then describing the algebraic
operation to perform. We restrict our variable assignments to the range
x,y->[-1000,1000) and the operations to the set {+,-,*}. To ensure that the
model embraces symbolic variables, the order in which x and y appears in the
expression is randomly chosen. For instance, an input string contrasting from
the example shown above might be y=129,x=531,x-y. Each input string is
accompanied by its target string, which is the evaluation of the mathematical
expression. For this study, all targets considered are decimal integers
represented at the character level. About 12 million unique samples were thus
generated and randomly split into training and test sets at an approximate
ratio of 9:1, respectively.
For more information check the following paper:
Artit Wangperawong. Attending to Mathematical Language with Transformers,
arXiv:1812.02825 (https://arxiv.org/abs/1812.02825).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_problem
class MathematicalLanguageUnderstanding(text_problems.Text2TextProblem):
"""Mathematical language understanding, see arxiv.org/abs/1812.02825."""
URL = ("https://art.wangperawong.com/mathematical_language_understanding"
"_train.tar.gz")
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
@property
def dataset_splits(self):
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 10,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}]
@property
def is_generate_per_split(self):
return False
def generate_samples(self, data_dir, tmp_dir, dataset_split):
"""Downloads and extracts the dataset and generates examples.
Args:
data_dir: The base directory where data and vocab files are stored.
tmp_dir: temp directory to download and extract the dataset.
dataset_split: split of the data-set.
Yields:
The data examples.
"""
if not tf.gfile.Exists(tmp_dir):
tf.gfile.MakeDirs(tmp_dir)
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
# Download and extract.
compressed_filename = os.path.basename(self.URL)
download_path = generator_utils.maybe_download(
tmp_dir, compressed_filename, self.URL)
with tarfile.open(download_path, "r:gz") as tar:
tar.extractall(tmp_dir)
filepath = os.path.join(tmp_dir,
"mathematical_language_understanding_train.txt")
with open(filepath, "r") as fp:
for l in fp:
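# Each line stores one sample as 'expression:answer'; the docstring's example
# 'y=129,x=531,x-y' would appear here as 'y=129,x=531,x-y:402'.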
prob, ans = l.strip().split(":")
yield {"inputs": prob, "targets": ans}
|
py | b413b0e7e37b101783db669eefb48e9f2f8443a9 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import OhonetworkTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
'''
SendHeadersTest -- test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
'''
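# Short timeout (in seconds) used further down when waiting for the getdata
# requests triggered by direct fetch.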
direct_fetch_response_time = 0.05
class BaseNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, conn, message):
self.last_headers = message
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_close(self, conn):
self.disconnected = True
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
assert(wait_until(test_function, timeout=60))
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(OhonetworkTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
for x in self.p2p_connections:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print("Part 1: success!")
print("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getdata([tip], timeout=5)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
inv_node.send_block_inv(tip)
# Should have received a getheaders as well!
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
[ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print("Part 2: success!")
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print("Part 3: success!")
print("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
print("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_getdata = None
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders(timeout=1)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders(timeout=1)
# Eventually this stops working.
with mininode_lock:
            test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
with mininode_lock:
            test_node.last_getheaders = True
print("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
|
py | b413b13ae8ec39e38f93ca07cc81908391dafe27 | import pytest
from concurrent.futures import ThreadPoolExecutor
import time
import threading
def test_thread_id(kivy_clock):
import asynckivy as ak
async def job(executer):
before = threading.get_ident()
await ak.run_in_executer(lambda: None, executer)
after = threading.get_ident()
assert before == after
with ThreadPoolExecutor() as executer:
task = ak.start(job(executer))
time.sleep(.01)
assert not task.done
kivy_clock.tick()
assert task.done
def test_propagate_exception(kivy_clock):
import asynckivy as ak
async def job(executer):
with pytest.raises(ZeroDivisionError):
await ak.run_in_executer(lambda: 1 / 0, executer)
with ThreadPoolExecutor() as executer:
task = ak.start(job(executer))
time.sleep(.01)
assert not task.done
kivy_clock.tick()
assert task.done
def test_no_exception(kivy_clock):
import asynckivy as ak
async def job(executer):
assert 'A' == await ak.run_in_executer(lambda: 'A', executer)
with ThreadPoolExecutor() as executer:
task = ak.start(job(executer))
time.sleep(.01)
assert not task.done
kivy_clock.tick()
assert task.done
def test_cancel_before_getting_executed(kivy_clock):
import time
import asynckivy as ak
flag = ak.Event()
async def job(executer):
await ak.run_in_executer(flag.set, executer)
with ThreadPoolExecutor(max_workers=1) as executer:
executer.submit(time.sleep, .1)
task = ak.start(job(executer))
time.sleep(.02)
assert not task.done
assert not flag.is_set()
kivy_clock.tick()
task.cancel()
assert task.cancelled
assert not flag.is_set()
time.sleep(.2)
assert not flag.is_set()
|
py | b413b1ce732e77ea40d9743eb334adec98aec85b | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-06-19 06:40
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0051_auto_20180619_1256'),
]
operations = [
migrations.RemoveField(
model_name='applicationdeclineddetails',
name='activity_type',
),
migrations.AddField(
model_name='applicationdeclineddetails',
name='activity_type',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
]
|
py | b413b2817bf514ac01874e5377f22d20dfb75d29 | from pymongo import MongoClient
from progressbar import ProgressBar, Bar, Percentage, FormatLabel, ETA
import numpy as np
np.set_printoptions(threshold=np.nan)
client = MongoClient()
db = client.dotabot
matches = db.matches
# We're going to create a training matrix, X, where each
# row is a different match and each column is a feature
# The features are bit vectors indicating whether heroes
# were picked (1) or not picked (0). The first N features
# correspond to radiant, and the last N features are
# for dire.
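# Illustrative example (hypothetical hero ids): if radiant picked hero id 5 and
# dire picked hero id 12 in match i, then X[i, 4] = 1 (5 - 1) and
# X[i, NUM_HEROES + 11] = 1 (12 - 1, shifted into the dire half of the row).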
NUM_HEROES = 108
NUM_FEATURES = NUM_HEROES * 2
# Our training label vector, Y, is a bit vector indicating
# whether radiant won (1) or lost (0)
NUM_MATCHES = matches.count()
# Initialize training matrix
X = np.zeros((NUM_MATCHES, NUM_FEATURES), dtype=np.int8)
# Initialize training label vector
Y = np.zeros(NUM_MATCHES, dtype=np.int8)
widgets = [FormatLabel('Processed: %(value)d/%(max)d matches. '), ETA(), Percentage(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets, maxval=NUM_MATCHES).start()
for i, record in enumerate(matches.find()):
pbar.update(i)
Y[i] = 1 if record['radiant_win'] else 0
players = record['players']
for player in players:
hero_id = player['hero_id'] - 1
# If the left-most bit of player_slot is set,
# this player is on dire, so push the index accordingly
player_slot = player['player_slot']
if player_slot >= 128:
hero_id += NUM_HEROES
X[i, hero_id] = 1
pbar.finish()
print "Permuting, generating train and test sets."
indices = np.random.permutation(NUM_MATCHES)
test_indices = indices[0:NUM_MATCHES/10]
train_indices = indices[NUM_MATCHES/10:NUM_MATCHES]
X_test = X[test_indices]
Y_test = Y[test_indices]
X_train = X[train_indices]
Y_train = Y[train_indices]
print "Saving output file now..."
np.savez_compressed('test_%d.npz' % len(test_indices), X=X_test, Y=Y_test)
np.savez_compressed('train_%d.npz' % len(train_indices), X=X_train, Y=Y_train)
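# To reuse these files later (illustrative): d = np.load('train_XXXX.npz') gives
# access to the arrays as d['X'] and d['Y'], where XXXX is the train set size.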
|
py | b413b2eb665223f588393065dc6737851f859f5d | import json
def select(order):
'''
0: brief
1: tips
2: detail
3: info
'''
filename = [
'data/brief.txt',
'data/tips.json',
'data/detail.json',
'data/info.json'
]
    # Both branches read the selected file the same way, so do it once and
    # return the first line as a raw string (JSON files are not parsed here).
    with open(filename[order], "r") as f:
        return f.readline()
def update(order, data):
'''
0: brief
1: tips
2: detail
3: info
'''
filename = [
'data/brief.txt',
'data/tips.json',
'data/detail.json',
'data/info.json'
]
    with open(filename[order], "w+") as f:
        if order == 0:
            f.write(data)
        else:
            f.write(json.dumps(data, ensure_ascii=False))
    return data
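# Illustrative usage (assumes the data/ directory and its files exist):
#   update(0, 'hello')       # writes 'hello' to data/brief.txt
#   select(0)                # -> 'hello'
#   update(1, {'tip': 'x'})  # writes the dict as JSON to data/tips.json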
|
py | b413b2ed912e3e3655d19c860e8e5d9db14c356b | import atexit
from distutils.version import StrictVersion
import glob
import uuid
import numpy as np
import os
import subprocess
from typing import Dict, List, Optional, Any, Tuple
import mlagents_envs
from mlagents_envs.logging_util import get_logger
from mlagents_envs.side_channel.side_channel import SideChannel, IncomingMessage
from mlagents_envs.base_env import (
BaseEnv,
DecisionSteps,
TerminalSteps,
BehaviorSpec,
BehaviorName,
AgentId,
)
from mlagents_envs.timers import timed, hierarchical_timer
from mlagents_envs.exception import (
UnityEnvironmentException,
UnityCommunicationException,
UnityActionException,
UnityTimeOutException,
)
from mlagents_envs.communicator_objects.command_pb2 import STEP, RESET
from mlagents_envs.rpc_utils import behavior_spec_from_proto, steps_from_proto
from mlagents_envs.communicator_objects.unity_rl_input_pb2 import UnityRLInputProto
from mlagents_envs.communicator_objects.unity_rl_output_pb2 import UnityRLOutputProto
from mlagents_envs.communicator_objects.agent_action_pb2 import AgentActionProto
from mlagents_envs.communicator_objects.unity_output_pb2 import UnityOutputProto
from mlagents_envs.communicator_objects.capabilities_pb2 import UnityRLCapabilitiesProto
from mlagents_envs.communicator_objects.unity_rl_initialization_input_pb2 import (
UnityRLInitializationInputProto,
)
from mlagents_envs.communicator_objects.unity_input_pb2 import UnityInputProto
from .rpc_communicator import RpcCommunicator
from sys import platform
import signal
import struct
logger = get_logger(__name__)
class UnityEnvironment(BaseEnv):
SCALAR_ACTION_TYPES = (int, np.int32, np.int64, float, np.float32, np.float64)
SINGLE_BRAIN_ACTION_TYPES = SCALAR_ACTION_TYPES + (list, np.ndarray)
# Communication protocol version.
# When connecting to C#, this must match Academy.k_ApiVersion
# Currently we require strict equality between the communication protocol
# on each side, although we may allow some flexibility in the future.
# This should be incremented whenever a change is made to the communication protocol.
API_VERSION = "0.17.0"
# Default port that the editor listens on. If an environment executable
# isn't specified, this port will be used.
DEFAULT_EDITOR_PORT = 5004
# Default base port for environments. Each environment will be offset from this
    # by its worker_id.
BASE_ENVIRONMENT_PORT = 5005
# Command line argument used to pass the port to the executable environment.
PORT_COMMAND_LINE_ARG = "--mlagents-port"
@staticmethod
def _raise_version_exception(unity_com_ver: str) -> None:
raise UnityEnvironmentException(
f"The communication API version is not compatible between Unity and python. "
f"Python API: {UnityEnvironment.API_VERSION}, Unity API: {unity_com_ver}.\n "
f"Please go to https://github.com/Unity-Technologies/ml-agents/releases/tag/latest_release "
f"to download the latest version of ML-Agents."
)
@staticmethod
def check_communication_compatibility(
unity_com_ver: str, python_api_version: str, unity_package_version: str
) -> bool:
unity_communicator_version = StrictVersion(unity_com_ver)
api_version = StrictVersion(python_api_version)
if unity_communicator_version.version[0] == 0:
if (
unity_communicator_version.version[0] != api_version.version[0]
or unity_communicator_version.version[1] != api_version.version[1]
):
# Minor beta versions differ.
return False
elif unity_communicator_version.version[0] != api_version.version[0]:
# Major versions mismatch.
return False
elif unity_communicator_version.version[1] != api_version.version[1]:
# Non-beta minor versions mismatch. Log a warning but allow execution to continue.
logger.warning(
f"WARNING: The communication API versions between Unity and python differ at the minor version level. "
f"Python API: {python_api_version}, Unity API: {unity_communicator_version}.\n"
f"This means that some features may not work unless you upgrade the package with the lower version."
f"Please find the versions that work best together from our release page.\n"
"https://github.com/Unity-Technologies/ml-agents/releases"
)
else:
logger.info(
f"Connected to Unity environment with package version {unity_package_version} "
f"and communication version {unity_com_ver}"
)
return True
@staticmethod
def get_capabilities_proto() -> UnityRLCapabilitiesProto:
capabilities = UnityRLCapabilitiesProto()
capabilities.baseRLCapabilities = True
return capabilities
@staticmethod
def warn_csharp_base_capabitlities(
caps: UnityRLCapabilitiesProto, unity_package_ver: str, python_package_ver: str
) -> None:
if not caps.baseRLCapabilities:
logger.warning(
"WARNING: The Unity process is not running with the expected base Reinforcement Learning"
" capabilities. Please be sure upgrade the Unity Package to a version that is compatible with this "
"python package.\n"
f"Python package version: {python_package_ver}, C# package version: {unity_package_ver}"
f"Please find the versions that work best together from our release page.\n"
"https://github.com/Unity-Technologies/ml-agents/releases"
)
def __init__(
self,
file_name: Optional[str] = None,
worker_id: int = 0,
base_port: Optional[int] = None,
seed: int = 0,
no_graphics: bool = False,
timeout_wait: int = 60,
additional_args: Optional[List[str]] = None,
side_channels: Optional[List[SideChannel]] = None,
log_folder: Optional[str] = None,
):
"""
Starts a new unity environment and establishes a connection with the environment.
Notice: Currently communication between Unity and Python takes place over an open socket without authentication.
Ensure that the network where training takes place is secure.
:string file_name: Name of Unity environment binary.
:int base_port: Baseline port number to connect to Unity environment over. worker_id increments over this.
If no environment is specified (i.e. file_name is None), the DEFAULT_EDITOR_PORT will be used.
:int worker_id: Offset from base_port. Used for training multiple environments simultaneously.
:bool no_graphics: Whether to run the Unity simulator in no-graphics mode
:int timeout_wait: Time (in seconds) to wait for connection from environment.
        :list args: Additional Unity command line arguments
        :list side_channels: Additional side channels for non-RL communication with Unity
:str log_folder: Optional folder to write the Unity Player log file into. Requires absolute path.
"""
atexit.register(self._close)
self.additional_args = additional_args or []
self.no_graphics = no_graphics
# If base port is not specified, use BASE_ENVIRONMENT_PORT if we have
# an environment, otherwise DEFAULT_EDITOR_PORT
if base_port is None:
base_port = (
self.BASE_ENVIRONMENT_PORT if file_name else self.DEFAULT_EDITOR_PORT
)
self.port = base_port + worker_id
self._buffer_size = 12000
# If true, this means the environment was successfully loaded
self._loaded = False
# The process that is started. If None, no process was started
self.proc1 = None
self.timeout_wait: int = timeout_wait
self.communicator = self.get_communicator(worker_id, base_port, timeout_wait)
self.worker_id = worker_id
self.side_channels: Dict[uuid.UUID, SideChannel] = {}
if side_channels is not None:
for _sc in side_channels:
if _sc.channel_id in self.side_channels:
raise UnityEnvironmentException(
"There cannot be two side channels with the same channel id {0}.".format(
_sc.channel_id
)
)
self.side_channels[_sc.channel_id] = _sc
self.log_folder = log_folder
# If the environment name is None, a new environment will not be launched
# and the communicator will directly try to connect to an existing unity environment.
# If the worker-id is not 0 and the environment name is None, an error is thrown
if file_name is None and worker_id != 0:
raise UnityEnvironmentException(
"If the environment name is None, "
"the worker-id must be 0 in order to connect with the Editor."
)
if file_name is not None:
self.executable_launcher(file_name, no_graphics, additional_args)
else:
logger.info(
f"Listening on port {self.port}. "
f"Start training by pressing the Play button in the Unity Editor."
)
self._loaded = True
rl_init_parameters_in = UnityRLInitializationInputProto(
seed=seed,
communication_version=self.API_VERSION,
package_version=mlagents_envs.__version__,
capabilities=UnityEnvironment.get_capabilities_proto(),
)
try:
aca_output = self.send_academy_parameters(rl_init_parameters_in)
aca_params = aca_output.rl_initialization_output
except UnityTimeOutException:
self._close(0)
raise
if not UnityEnvironment.check_communication_compatibility(
aca_params.communication_version,
UnityEnvironment.API_VERSION,
aca_params.package_version,
):
self._close(0)
UnityEnvironment._raise_version_exception(aca_params.communication_version)
UnityEnvironment.warn_csharp_base_capabitlities(
aca_params.capabilities,
aca_params.package_version,
UnityEnvironment.API_VERSION,
)
self._env_state: Dict[str, Tuple[DecisionSteps, TerminalSteps]] = {}
self._env_specs: Dict[str, BehaviorSpec] = {}
self._env_actions: Dict[str, np.ndarray] = {}
self._is_first_message = True
self._update_behavior_specs(aca_output)
@staticmethod
def get_communicator(worker_id, base_port, timeout_wait):
return RpcCommunicator(worker_id, base_port, timeout_wait)
@staticmethod
def validate_environment_path(env_path: str) -> Optional[str]:
# Strip out executable extensions if passed
env_path = (
env_path.strip()
.replace(".app", "")
.replace(".exe", "")
.replace(".x86_64", "")
.replace(".x86", "")
)
true_filename = os.path.basename(os.path.normpath(env_path))
logger.debug("The true file name is {}".format(true_filename))
if not (glob.glob(env_path) or glob.glob(env_path + ".*")):
return None
cwd = os.getcwd()
launch_string = None
true_filename = os.path.basename(os.path.normpath(env_path))
if platform == "linux" or platform == "linux2":
candidates = glob.glob(os.path.join(cwd, env_path) + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(os.path.join(cwd, env_path) + ".x86")
if len(candidates) == 0:
candidates = glob.glob(env_path + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(env_path + ".x86")
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "darwin":
candidates = glob.glob(
os.path.join(cwd, env_path + ".app", "Contents", "MacOS", true_filename)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(env_path + ".app", "Contents", "MacOS", true_filename)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(cwd, env_path + ".app", "Contents", "MacOS", "*")
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(env_path + ".app", "Contents", "MacOS", "*")
)
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "win32":
candidates = glob.glob(os.path.join(cwd, env_path + ".exe"))
if len(candidates) == 0:
candidates = glob.glob(env_path + ".exe")
if len(candidates) > 0:
launch_string = candidates[0]
return launch_string
def executable_args(self) -> List[str]:
args: List[str] = []
if self.no_graphics:
args += ["-nographics", "-batchmode"]
args += [UnityEnvironment.PORT_COMMAND_LINE_ARG, str(self.port)]
if self.log_folder:
log_file_path = os.path.join(
self.log_folder, f"Player-{self.worker_id}.log"
)
args += ["-logFile", log_file_path]
# Add in arguments passed explicitly by the user.
args += self.additional_args
return args
def executable_launcher(self, file_name, no_graphics, args):
launch_string = self.validate_environment_path(file_name)
if launch_string is None:
self._close(0)
raise UnityEnvironmentException(
f"Couldn't launch the {file_name} environment. Provided filename does not match any environments."
)
else:
logger.debug("This is the launch string {}".format(launch_string))
# Launch Unity environment
subprocess_args = [launch_string] + self.executable_args()
try:
self.proc1 = subprocess.Popen(
subprocess_args,
# start_new_session=True means that signals to the parent python process
# (e.g. SIGINT from keyboard interrupt) will not be sent to the new process on POSIX platforms.
# This is generally good since we want the environment to have a chance to shutdown,
                    # but may be undesirable in some cases; if so, we'll add a command-line toggle.
# Note that on Windows, the CTRL_C signal will still be sent.
start_new_session=True,
)
except PermissionError as perm:
# This is likely due to missing read or execute permissions on file.
raise UnityEnvironmentException(
f"Error when trying to launch environment - make sure "
f"permissions are set correctly. For example "
f'"chmod -R 755 {launch_string}"'
) from perm
def _update_behavior_specs(self, output: UnityOutputProto) -> None:
init_output = output.rl_initialization_output
for brain_param in init_output.brain_parameters:
# Each BrainParameter in the rl_initialization_output should have at least one AgentInfo
# Get that agent, because we need some of its observations.
agent_infos = output.rl_output.agentInfos[brain_param.brain_name]
if agent_infos.value:
agent = agent_infos.value[0]
new_spec = behavior_spec_from_proto(brain_param, agent)
self._env_specs[brain_param.brain_name] = new_spec
logger.info(f"Connected new brain:\n{brain_param.brain_name}")
def _update_state(self, output: UnityRLOutputProto) -> None:
"""
Collects experience information from all external brains in environment at current step.
"""
for brain_name in self._env_specs.keys():
if brain_name in output.agentInfos:
agent_info_list = output.agentInfos[brain_name].value
self._env_state[brain_name] = steps_from_proto(
agent_info_list, self._env_specs[brain_name]
)
else:
self._env_state[brain_name] = (
DecisionSteps.empty(self._env_specs[brain_name]),
TerminalSteps.empty(self._env_specs[brain_name]),
)
self._parse_side_channel_message(self.side_channels, output.side_channel)
def reset(self) -> None:
if self._loaded:
outputs = self.communicator.exchange(self._generate_reset_input())
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_behavior_specs(outputs)
rl_output = outputs.rl_output
self._update_state(rl_output)
self._is_first_message = False
self._env_actions.clear()
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
@timed
def step(self) -> None:
if self._is_first_message:
return self.reset()
if not self._loaded:
raise UnityEnvironmentException("No Unity environment is loaded.")
# fill the blanks for missing actions
for group_name in self._env_specs:
if group_name not in self._env_actions:
n_agents = 0
if group_name in self._env_state:
n_agents = len(self._env_state[group_name][0])
self._env_actions[group_name] = self._env_specs[
group_name
].create_empty_action(n_agents)
step_input = self._generate_step_input(self._env_actions)
with hierarchical_timer("communicator.exchange"):
outputs = self.communicator.exchange(step_input)
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_behavior_specs(outputs)
rl_output = outputs.rl_output
self._update_state(rl_output)
self._env_actions.clear()
def get_behavior_names(self):
return list(self._env_specs.keys())
def _assert_behavior_exists(self, behavior_name: str) -> None:
if behavior_name not in self._env_specs:
raise UnityActionException(
"The group {0} does not correspond to an existing agent group "
"in the environment".format(behavior_name)
)
def set_actions(self, behavior_name: BehaviorName, action: np.ndarray) -> None:
self._assert_behavior_exists(behavior_name)
if behavior_name not in self._env_state:
return
spec = self._env_specs[behavior_name]
expected_type = np.float32 if spec.is_action_continuous() else np.int32
expected_shape = (len(self._env_state[behavior_name][0]), spec.action_size)
if action.shape != expected_shape:
raise UnityActionException(
"The behavior {0} needs an input of dimension {1} but received input of dimension {2}".format(
behavior_name, expected_shape, action.shape
)
)
if action.dtype != expected_type:
action = action.astype(expected_type)
self._env_actions[behavior_name] = action
def set_action_for_agent(
self, behavior_name: BehaviorName, agent_id: AgentId, action: np.ndarray
) -> None:
self._assert_behavior_exists(behavior_name)
if behavior_name not in self._env_state:
return
spec = self._env_specs[behavior_name]
expected_shape = (spec.action_size,)
if action.shape != expected_shape:
raise UnityActionException(
f"The Agent {0} with BehaviorName {1} needs an input of dimension "
f"{2} but received input of dimension {3}".format(
agent_id, behavior_name, expected_shape, action.shape
)
)
expected_type = np.float32 if spec.is_action_continuous() else np.int32
if action.dtype != expected_type:
action = action.astype(expected_type)
if behavior_name not in self._env_actions:
self._env_actions[behavior_name] = spec.create_empty_action(
len(self._env_state[behavior_name][0])
)
try:
index = np.where(self._env_state[behavior_name][0].agent_id == agent_id)[0][
0
]
except IndexError as ie:
raise IndexError(
"agent_id {} is did not request a decision at the previous step".format(
agent_id
)
) from ie
self._env_actions[behavior_name][index] = action
def get_steps(
self, behavior_name: BehaviorName
) -> Tuple[DecisionSteps, TerminalSteps]:
self._assert_behavior_exists(behavior_name)
return self._env_state[behavior_name]
def get_behavior_spec(self, behavior_name: BehaviorName) -> BehaviorSpec:
self._assert_behavior_exists(behavior_name)
return self._env_specs[behavior_name]
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the socket connection.
"""
if self._loaded:
self._close()
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
def _close(self, timeout: Optional[int] = None) -> None:
"""
Close the communicator and environment subprocess (if necessary).
:int timeout: [Optional] Number of seconds to wait for the environment to shut down before
force-killing it. Defaults to `self.timeout_wait`.
"""
if timeout is None:
timeout = self.timeout_wait
self._loaded = False
self.communicator.close()
if self.proc1 is not None:
# Wait a bit for the process to shutdown, but kill it if it takes too long
try:
self.proc1.wait(timeout=timeout)
signal_name = self.returncode_to_signal_name(self.proc1.returncode)
signal_name = f" ({signal_name})" if signal_name else ""
return_info = f"Environment shut down with return code {self.proc1.returncode}{signal_name}."
logger.info(return_info)
except subprocess.TimeoutExpired:
logger.info("Environment timed out shutting down. Killing...")
self.proc1.kill()
# Set to None so we don't try to close multiple times.
self.proc1 = None
@classmethod
def _flatten(cls, arr: Any) -> List[float]:
"""
Converts arrays to list.
:param arr: numpy vector.
:return: flattened list.
"""
if isinstance(arr, cls.SCALAR_ACTION_TYPES):
arr = [float(arr)]
if isinstance(arr, np.ndarray):
arr = arr.tolist()
if len(arr) == 0:
return arr
if isinstance(arr[0], np.ndarray):
# pylint: disable=no-member
arr = [item for sublist in arr for item in sublist.tolist()]
if isinstance(arr[0], list):
# pylint: disable=not-an-iterable
arr = [item for sublist in arr for item in sublist]
arr = [float(x) for x in arr]
return arr
@staticmethod
def _parse_side_channel_message(
side_channels: Dict[uuid.UUID, SideChannel], data: bytes
) -> None:
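        # Each message in the buffer is framed as: a 16-byte channel UUID
        # (little-endian byte order, matching uuid.bytes_le), a 4-byte
        # little-endian signed int with the payload length, then the payload.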
offset = 0
while offset < len(data):
try:
channel_id = uuid.UUID(bytes_le=bytes(data[offset : offset + 16]))
offset += 16
message_len, = struct.unpack_from("<i", data, offset)
offset = offset + 4
message_data = data[offset : offset + message_len]
offset = offset + message_len
except Exception:
raise UnityEnvironmentException(
"There was a problem reading a message in a SideChannel. "
"Please make sure the version of MLAgents in Unity is "
"compatible with the Python version."
)
if len(message_data) != message_len:
raise UnityEnvironmentException(
"The message received by the side channel {0} was "
"unexpectedly short. Make sure your Unity Environment "
"sending side channel data properly.".format(channel_id)
)
if channel_id in side_channels:
incoming_message = IncomingMessage(message_data)
side_channels[channel_id].on_message_received(incoming_message)
else:
logger.warning(
"Unknown side channel data received. Channel type "
": {0}.".format(channel_id)
)
@staticmethod
def _generate_side_channel_data(
side_channels: Dict[uuid.UUID, SideChannel]
) -> bytearray:
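        # Serialize every queued outgoing message using the same framing that
        # _parse_side_channel_message expects: [16-byte channel UUID][4-byte
        # little-endian length][payload], then clear each channel's queue.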
result = bytearray()
for channel_id, channel in side_channels.items():
for message in channel.message_queue:
result += channel_id.bytes_le
result += struct.pack("<i", len(message))
result += message
channel.message_queue = []
return result
@timed
def _generate_step_input(
self, vector_action: Dict[str, np.ndarray]
) -> UnityInputProto:
rl_in = UnityRLInputProto()
for b in vector_action:
n_agents = len(self._env_state[b][0])
if n_agents == 0:
continue
for i in range(n_agents):
action = AgentActionProto(vector_actions=vector_action[b][i])
rl_in.agent_actions[b].value.extend([action])
rl_in.command = STEP
rl_in.side_channel = bytes(self._generate_side_channel_data(self.side_channels))
return self.wrap_unity_input(rl_in)
def _generate_reset_input(self) -> UnityInputProto:
rl_in = UnityRLInputProto()
rl_in.command = RESET
rl_in.side_channel = bytes(self._generate_side_channel_data(self.side_channels))
return self.wrap_unity_input(rl_in)
def send_academy_parameters(
self, init_parameters: UnityRLInitializationInputProto
) -> UnityOutputProto:
inputs = UnityInputProto()
inputs.rl_initialization_input.CopyFrom(init_parameters)
return self.communicator.initialize(inputs)
@staticmethod
def wrap_unity_input(rl_input: UnityRLInputProto) -> UnityInputProto:
result = UnityInputProto()
result.rl_input.CopyFrom(rl_input)
return result
@staticmethod
def returncode_to_signal_name(returncode: int) -> Optional[str]:
"""
Try to convert return codes into their corresponding signal name.
E.g. returncode_to_signal_name(-2) -> "SIGINT"
"""
try:
# A negative value -N indicates that the child was terminated by signal N (POSIX only).
s = signal.Signals(-returncode) # pylint: disable=no-member
return s.name
except Exception:
# Should generally be a ValueError, but catch everything just in case.
return None
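# Minimal usage sketch (illustrative; connects to the Unity Editor when
# file_name is None, and the behavior name depends on the Unity scene):
#
#   env = UnityEnvironment(file_name=None)
#   env.reset()
#   behavior_name = env.get_behavior_names()[0]
#   spec = env.get_behavior_spec(behavior_name)
#   decision_steps, terminal_steps = env.get_steps(behavior_name)
#   env.set_actions(behavior_name, spec.create_empty_action(len(decision_steps)))
#   env.step()
#   env.close()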
|
py | b413b395a70cde5a30c60487d30bb6e89cd26c71 | #!/usr/bin/env python
#
# Created by: Pearu Peterson, September 2002
#
import numpy as np
from numpy.testing import *
from common import FUNCS_TP, FUNCS_CLAPACK, FUNCS_FLAPACK, FLAPACK_IS_EMPTY, \
CLAPACK_IS_EMPTY
class TestLapack(TestCase):
def _test_gebal_base(self, func, lang):
tp = FUNCS_TP[func]
a = np.array([[1,2,3],[4,5,6],[7,8,9]]).astype(tp)
a1 = np.array([[1,0,0,3e-4],
[4,0,0,2e-3],
[7,1,0,0],
[0,1,0,0]]).astype(tp)
if lang == 'C':
f = FUNCS_CLAPACK[func]
elif lang == 'F':
f = FUNCS_FLAPACK[func]
else:
raise ValueError("Lang %s ??" % lang)
ba, lo, hi, pivscale, info = f(a)
assert_(not info, msg=repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo,hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1,permute=1,scale=1)
assert_(not info, msg=repr(info))
def _test_gehrd_base(self, func, lang):
tp = FUNCS_TP[func]
a = np.array([[-149, -50,-154],
[ 537, 180, 546],
[ -27, -9, -25]]).astype(tp)
if lang == 'C':
f = FUNCS_CLAPACK[func]
elif lang == 'F':
f = FUNCS_FLAPACK[func]
else:
raise ValueError("Lang %s ??" % lang)
ht, tau, info = f(a)
assert_(not info, msg=repr(info))
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_sgebal(self):
self._test_gebal_base('sgebal', 'F')
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
def test_dgebal(self):
self._test_gebal_base('dgebal', 'F')
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip clapack test")
def test_sgehrd(self):
self._test_gehrd_base('sgehrd', 'F')
@dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip clapack test")
def test_dgehrd(self):
self._test_gehrd_base('dgehrd', 'F')
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgebal"],
"Clapack empty, skip flapack test")
def test_clapack_sgebal(self):
self._test_gebal_base('sgebal', 'C')
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgebal"],
"Clapack empty, skip flapack test")
def test_clapack_dgebal(self):
self._test_gebal_base('dgebal', 'C')
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgehrd"],
"Clapack empty, skip flapack test")
def test_clapack_sgehrd(self):
self._test_gehrd_base('sgehrd', 'C')
@dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgehrd"],
"Clapack empty, skip flapack test")
def test_clapack_dgehrd(self):
self._test_gehrd_base('dgehrd', 'C')
|
py | b413b3fa455b03656f0a6def41eeec79c9b4d5b6 | """
Django settings for polished_queen_31605 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'polished_queen_31605.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'polished_queen_31605.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
py | b413b51d39aefbc5ca03f00e1856885a0c401bb9 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
app.config['SECRET_KEY'] = '7b8549b703e554ac883b29c2ad68df22'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
from flaskblog import routes |
py | b413b5b0020df13b44b4a97550a4f15f4156b16a | # -*- coding: utf-8 -*-
"""
Models are modeled after schema.org.
When a model is going to be serialized as JSON(-LD), the model name must match
the schema.org schema name; the model name is automatically published in the
@type JSON-LD field.
Note: the jsonld_type attribute value can be used to override the @type
definition in the rendering phase.
Schema definitions: http://schema.org/<ModelName>
(e.g. http://schema.org/Event)
Some models have custom fields not found in schema.org. Decide whether there is
a need for custom extension types (e.g. Event/MyCustomEvent), as the schema.org
documentation suggests: http://schema.org/docs/extension.html. Overriding
schema_org_type can be used to define custom types. Override the jsonld_context
attribute to change @context when schemas need to be defined for custom fields.
"""
import datetime
import logging
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
import pytz
from django.contrib.gis.db import models
from rest_framework.exceptions import ValidationError
from reversion import revisions as reversion
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from mptt.querysets import TreeQuerySet
from django.contrib.contenttypes.models import ContentType
from events import translation_utils
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.postgres.fields import HStoreField
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.db import transaction
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from image_cropping import ImageRatioField
from munigeo.models import AdministrativeDivision
from notifications.models import render_notification_template, NotificationType, NotificationTemplateException
from smtplib import SMTPException
logger = logging.getLogger(__name__)
User = settings.AUTH_USER_MODEL
class PublicationStatus:
PUBLIC = 1
DRAFT = 2
PUBLICATION_STATUSES = (
(PublicationStatus.PUBLIC, "public"),
(PublicationStatus.DRAFT, "draft"),
)
class SchemalessFieldMixin(models.Model):
custom_data = HStoreField(null=True, blank=True)
class Meta:
abstract = True
@python_2_unicode_compatible
class DataSource(models.Model):
id = models.CharField(max_length=100, primary_key=True)
name = models.CharField(verbose_name=_('Name'), max_length=255)
api_key = models.CharField(max_length=128, blank=True, default='')
owner = models.ForeignKey(
'django_orghierarchy.Organization', on_delete=models.SET_NULL,
related_name='owned_systems', null=True, blank=True)
user_editable = models.BooleanField(default=False, verbose_name=_('Objects may be edited by users'))
def __str__(self):
return self.id
class SimpleValueMixin(object):
"""
Used for models which are simple one-to-many fields
and can be compared by value when importing as part
of their related object. These models have no existence
outside their related object.
"""
def value_fields(self):
return []
def simple_value(self):
field_names = translation_utils.expand_model_fields(self, self.value_fields())
return tuple((f, getattr(self, f)) for f in field_names)
def value_equals(self, other):
return self.simple_value() == other.simple_value()
class BaseQuerySet(models.QuerySet):
def is_user_editable(self):
return not bool(self.filter(data_source__isnull=True) and
self.filter(data_source__user_editable=False))
def can_be_edited_by(self, user):
"""Check if the whole queryset can be edited by the given user"""
if user.is_superuser:
return True
for event in self:
if not user.can_edit_event(event.publisher, event.publication_status):
return False
return True
class BaseTreeQuerySet(TreeQuerySet, BaseQuerySet):
pass
class ReplacedByMixin():
def _has_circular_replacement(self):
replaced_by = self.replaced_by
while replaced_by is not None:
replaced_by = replaced_by.replaced_by
if replaced_by == self:
return True
return False
def get_replacement(self):
replacement = self.replaced_by
while replacement.replaced_by is not None:
replacement = replacement.replaced_by
return replacement
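# Hedged illustration (duck-typed stand-ins, not part of the original code):
# ReplacedByMixin simply walks the replaced_by chain, so for a -> b -> c,
# get_replacement() returns c, while a circular chain would be reported by
# _has_circular_replacement().
def _example_replacement_chain():
    class _Node(ReplacedByMixin):
        replaced_by = None

    a, b, c = _Node(), _Node(), _Node()
    a.replaced_by, b.replaced_by = b, c
    # Expected result: (c, False)
    return a.get_replacement(), a._has_circular_replacement()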
class License(models.Model):
id = models.CharField(max_length=50, primary_key=True)
name = models.CharField(verbose_name=_('Name'), max_length=255)
url = models.URLField(verbose_name=_('Url'), blank=True)
class Meta:
verbose_name = _('License')
verbose_name_plural = _('Licenses')
def __str__(self):
return self.name
class Image(models.Model):
jsonld_type = 'ImageObject'
objects = BaseQuerySet.as_manager()
# Properties from schema.org/Thing
name = models.CharField(verbose_name=_('Name'), max_length=255, db_index=True, default='')
data_source = models.ForeignKey(
DataSource, on_delete=models.CASCADE, related_name='provided_%(class)s_data', db_index=True, null=True)
publisher = models.ForeignKey(
'django_orghierarchy.Organization', on_delete=models.CASCADE, verbose_name=_('Publisher'),
db_index=True, null=True, blank=True, related_name='Published_images')
created_time = models.DateTimeField(auto_now_add=True)
last_modified_time = models.DateTimeField(auto_now=True, db_index=True)
created_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, blank=True, related_name='EventImage_created_by')
last_modified_by = models.ForeignKey(
User, on_delete=models.SET_NULL, related_name='EventImage_last_modified_by', null=True, blank=True)
image = models.ImageField(upload_to='images', null=True, blank=True)
url = models.URLField(verbose_name=_('Image'), max_length=400, null=True, blank=True)
cropping = ImageRatioField('image', '800x800', verbose_name=_('Cropping'))
license = models.ForeignKey(
License, on_delete=models.SET_NULL, verbose_name=_('License'), related_name='images', default='cc_by',
null=True)
photographer_name = models.CharField(verbose_name=_('Photographer name'), max_length=255, null=True, blank=True)
alt_text = models.CharField(verbose_name=_('Alt text'), max_length=320, null=True, blank=True)
def save(self, *args, **kwargs):
if not self.publisher:
try:
self.publisher = self.created_by.get_default_organization()
except AttributeError:
pass
# ensure that either image or url is provided
if not self.url and not self.image:
raise ValidationError(_('You must provide either image or url.'))
if self.url and self.image:
raise ValidationError(_('You can only provide image or url, not both.'))
self.last_modified_time = BaseModel.now()
super(Image, self).save(*args, **kwargs)
def is_user_editable(self):
return bool(self.data_source and self.data_source.user_editable)
def is_user_edited(self):
return bool(self.is_user_editable() and self.last_modified_by)
def can_be_edited_by(self, user):
"""Check if current image can be edited by the given user"""
if user.is_superuser:
return True
return user.is_admin(self.publisher)
class ImageMixin(models.Model):
image = models.ForeignKey(Image, verbose_name=_('Image'), on_delete=models.SET_NULL,
null=True, blank=True)
class Meta:
abstract = True
@python_2_unicode_compatible
class BaseModel(models.Model):
objects = BaseQuerySet.as_manager()
id = models.CharField(max_length=100, primary_key=True)
data_source = models.ForeignKey(
DataSource, on_delete=models.CASCADE, related_name='provided_%(class)s_data', db_index=True)
# Properties from schema.org/Thing
name = models.CharField(verbose_name=_('Name'), max_length=255, db_index=True)
origin_id = models.CharField(verbose_name=_('Origin ID'), max_length=100, db_index=True, null=True,
blank=True)
created_time = models.DateTimeField(null=True, blank=True, auto_now_add=True)
last_modified_time = models.DateTimeField(null=True, blank=True, auto_now=True, db_index=True)
created_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, blank=True,
related_name="%(app_label)s_%(class)s_created_by")
last_modified_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, blank=True,
related_name="%(app_label)s_%(class)s_modified_by")
@staticmethod
def now():
return datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
def __str__(self):
return self.name
class Meta:
abstract = True
def is_user_editable(self):
return self.data_source.user_editable
def is_user_edited(self):
return bool(self.data_source.user_editable and self.last_modified_by)
class Language(models.Model):
id = models.CharField(max_length=10, primary_key=True)
name = models.CharField(verbose_name=_('Name'), max_length=20)
def __str__(self):
return self.name
class Meta:
verbose_name = _('language')
verbose_name_plural = _('languages')
class KeywordLabel(models.Model):
name = models.CharField(verbose_name=_('Name'), max_length=255, db_index=True)
language = models.ForeignKey(Language, on_delete=models.CASCADE, blank=False, null=False)
def __str__(self):
return self.name + ' (' + str(self.language) + ')'
class Meta:
unique_together = (('name', 'language'),)
class Keyword(BaseModel, ImageMixin, ReplacedByMixin):
publisher = models.ForeignKey(
'django_orghierarchy.Organization', on_delete=models.CASCADE, verbose_name=_('Publisher'),
db_index=True, null=True, blank=True,
related_name='Published_keywords')
alt_labels = models.ManyToManyField(KeywordLabel, blank=True, related_name='keywords')
aggregate = models.BooleanField(default=False)
deprecated = models.BooleanField(default=False, db_index=True)
n_events = models.IntegerField(
verbose_name=_('event count'),
help_text=_('number of events with this keyword'),
default=0,
editable=False,
db_index=True
)
n_events_changed = models.BooleanField(default=False, db_index=True)
replaced_by = models.ForeignKey(
'Keyword', on_delete=models.SET_NULL, related_name='aliases', null=True, blank=True)
schema_org_type = "Thing/LinkedEventKeyword"
def __str__(self):
return self.name
def deprecate(self):
self.deprecated = True
self.save(update_fields=['deprecated'])
return True
def replace(self, replaced_by):
self.replaced_by = replaced_by
self.save(update_fields=['replaced_by'])
return True
@transaction.atomic
def save(self, *args, **kwargs):
if self._has_circular_replacement():
            raise Exception("Trying to replace this keyword with a keyword that is replaced by this keyword. "
                            "Please refrain from creating circular replacements and "
                            "remove one of the replacements.")
if self.replaced_by and not self.deprecated:
self.deprecated = True
logger.warning("Keyword replaced without deprecating. Deprecating automatically", extra={'keyword': self})
old_replaced_by = None
if self.id:
try:
old_replaced_by = Keyword.objects.get(id=self.id).replaced_by
except Keyword.DoesNotExist:
pass
super().save(*args, **kwargs)
if not old_replaced_by == self.replaced_by:
# Remap keyword sets
qs = KeywordSet.objects.filter(keywords__id__exact=self.id)
for kw_set in qs:
kw_set.keywords.remove(self)
kw_set.keywords.add(self.replaced_by)
kw_set.save()
# Remap events
qs = Event.objects.filter(keywords__id__exact=self.id) \
| Event.objects.filter(audience__id__exact=self.id)
for event in qs:
if self in event.keywords.all():
event.keywords.remove(self)
event.keywords.add(self.replaced_by)
if self in event.audience.all():
event.audience.remove(self)
event.audience.add(self.replaced_by)
class Meta:
verbose_name = _('keyword')
verbose_name_plural = _('keywords')
class KeywordSet(BaseModel, ImageMixin):
"""
    Sets of pre-chosen keywords intended for specific uses and/or organizations,
    for example the set of possible audiences for an event in a specific client.
"""
ANY = 1
KEYWORD = 2
AUDIENCE = 3
USAGES = (
(ANY, "any"),
(KEYWORD, "keyword"),
(AUDIENCE, "audience"),
)
usage = models.SmallIntegerField(verbose_name=_('Intended keyword usage'), choices=USAGES, default=ANY)
organization = models.ForeignKey('django_orghierarchy.Organization', on_delete=models.CASCADE,
verbose_name=_('Organization which uses this set'), null=True)
keywords = models.ManyToManyField(Keyword, blank=False, related_name='sets')
def save(self, *args, **kwargs):
if any([keyword.deprecated for keyword in self.keywords.all()]):
raise ValidationError(_("KeywordSet can't have deprecated keywords"))
super().save(*args, **kwargs)
class Place(MPTTModel, BaseModel, SchemalessFieldMixin, ImageMixin, ReplacedByMixin):
objects = BaseTreeQuerySet.as_manager()
geo_objects = objects
publisher = models.ForeignKey(
'django_orghierarchy.Organization', on_delete=models.CASCADE, verbose_name=_('Publisher'), db_index=True)
info_url = models.URLField(verbose_name=_('Place home page'), null=True, blank=True, max_length=1000)
description = models.TextField(verbose_name=_('Description'), null=True, blank=True)
parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True,
related_name='children')
position = models.PointField(srid=settings.PROJECTION_SRID, null=True,
blank=True)
email = models.EmailField(verbose_name=_('E-mail'), null=True, blank=True)
telephone = models.CharField(verbose_name=_('Telephone'), max_length=128, null=True, blank=True)
contact_type = models.CharField(verbose_name=_('Contact type'), max_length=255, null=True, blank=True)
street_address = models.CharField(verbose_name=_('Street address'), max_length=255, null=True, blank=True)
address_locality = models.CharField(verbose_name=_('Address locality'), max_length=255, null=True, blank=True)
address_region = models.CharField(verbose_name=_('Address region'), max_length=255, null=True, blank=True)
postal_code = models.CharField(verbose_name=_('Postal code'), max_length=128, null=True, blank=True)
post_office_box_num = models.CharField(verbose_name=_('PO BOX'), max_length=128, null=True,
blank=True)
address_country = models.CharField(verbose_name=_('Country'), max_length=2, null=True, blank=True)
deleted = models.BooleanField(verbose_name=_('Deleted'), default=False)
replaced_by = models.ForeignKey('Place', on_delete=models.SET_NULL, related_name='aliases', null=True, blank=True)
divisions = models.ManyToManyField(AdministrativeDivision, verbose_name=_('Divisions'), related_name='places',
blank=True)
n_events = models.IntegerField(
verbose_name=_('event count'),
help_text=_('number of events in this location'),
default=0,
editable=False,
db_index=True
)
n_events_changed = models.BooleanField(default=False, db_index=True)
class Meta:
verbose_name = _('place')
verbose_name_plural = _('places')
unique_together = (('data_source', 'origin_id'),)
def __unicode__(self):
values = filter(lambda x: x, [
self.street_address, self.postal_code, self.address_locality
])
return u', '.join(values)
@transaction.atomic
def save(self, *args, **kwargs):
if self._has_circular_replacement():
            raise Exception("Trying to replace this place with a place that is replaced by this place. "
                            "Please refrain from creating circular replacements and "
                            "remove one of the replacements. "
                            "We don't want homeless events.")
if self.replaced_by and not self.deleted:
self.deleted = True
logger.warning("Place replaced without soft deleting. Soft deleting automatically", extra={'place': self})
# needed to remap events to replaced location
old_replaced_by = None
if self.id:
try:
old_replaced_by = Place.objects.get(id=self.id).replaced_by
except Place.DoesNotExist:
pass
super().save(*args, **kwargs)
# needed to remap events to replaced location
if not old_replaced_by == self.replaced_by:
Event.objects.filter(location=self).update(location=self.replaced_by)
# Update doesn't call save so we update event numbers manually.
# Not all of the below are necessarily present.
ids_to_update = [event.id for event in (self, self.replaced_by, old_replaced_by) if event]
Place.objects.filter(id__in=ids_to_update).update(n_events_changed=True)
if self.position:
self.divisions.set(AdministrativeDivision.objects.filter(
type__type__in=('district', 'sub_district', 'neighborhood', 'muni'),
geometry__boundary__contains=self.position))
else:
self.divisions.clear()
reversion.register(Place)
class OpeningHoursSpecification(models.Model):
GR_BASE_URL = "http://purl.org/goodrelations/v1#"
WEEK_DAYS = (
(1, "Monday"), (2, "Tuesday"), (3, "Wednesday"), (4, "Thursday"),
(5, "Friday"), (6, "Saturday"), (7, "Sunday"), (8, "PublicHolidays")
)
place = models.ForeignKey(Place, on_delete=models.CASCADE, db_index=True,
related_name='opening_hours')
opens = models.TimeField(null=True, blank=True)
closes = models.TimeField(null=True, blank=True)
days_of_week = models.SmallIntegerField(choices=WEEK_DAYS, null=True,
blank=True)
valid_from = models.DateTimeField(null=True, blank=True)
valid_through = models.DateTimeField(null=True, blank=True)
class Meta:
verbose_name = _('opening hour specification')
verbose_name_plural = _('opening hour specifications')
class Event(MPTTModel, BaseModel, SchemalessFieldMixin, ReplacedByMixin):
jsonld_type = "Event/LinkedEvent"
objects = BaseTreeQuerySet.as_manager()
"""
eventStatus enumeration is based on http://schema.org/EventStatusType
"""
class Status:
SCHEDULED = 1
CANCELLED = 2
POSTPONED = 3
RESCHEDULED = 4
# Properties from schema.org/Event
STATUSES = (
(Status.SCHEDULED, "EventScheduled"),
(Status.CANCELLED, "EventCancelled"),
(Status.POSTPONED, "EventPostponed"),
(Status.RESCHEDULED, "EventRescheduled"),
)
class SuperEventType:
RECURRING = 'recurring'
UMBRELLA = 'umbrella'
SUPER_EVENT_TYPES = (
(SuperEventType.RECURRING, _('Recurring')),
(SuperEventType.UMBRELLA, _('Umbrella event')),
)
# Properties from schema.org/Thing
info_url = models.URLField(verbose_name=_('Event home page'), blank=True, null=True, max_length=1000)
description = models.TextField(verbose_name=_('Description'), blank=True, null=True)
short_description = models.TextField(verbose_name=_('Short description'), blank=True, null=True)
# Properties from schema.org/CreativeWork
date_published = models.DateTimeField(verbose_name=_('Date published'), null=True, blank=True)
# headline and secondary_headline are for cases where
# the original event data contains a title and a subtitle - in that
# case the name field is combined from these.
#
# secondary_headline is mapped to schema.org alternative_headline
# and is used for subtitles, that is for
# secondary, complementary headlines, not "alternative" headlines
headline = models.CharField(verbose_name=_('Headline'), max_length=255, null=True, db_index=True)
secondary_headline = models.CharField(verbose_name=_('Secondary headline'), max_length=255,
null=True, db_index=True)
provider = models.CharField(verbose_name=_('Provider'), max_length=512, null=True)
provider_contact_info = models.CharField(verbose_name=_("Provider's contact info"),
max_length=255, null=True, blank=True)
publisher = models.ForeignKey('django_orghierarchy.Organization', verbose_name=_('Publisher'), db_index=True,
on_delete=models.PROTECT, related_name='published_events')
# Status of the event itself
event_status = models.SmallIntegerField(verbose_name=_('Event status'), choices=STATUSES,
default=Status.SCHEDULED)
# Whether or not this data about the event is ready to be viewed by the general public.
# DRAFT means the data is considered incomplete or is otherwise undergoing refinement --
# or just waiting to be published for other reasons.
publication_status = models.SmallIntegerField(
verbose_name=_('Event data publication status'), choices=PUBLICATION_STATUSES,
default=PublicationStatus.PUBLIC)
location = models.ForeignKey(Place, related_name='events', null=True, blank=True, on_delete=models.PROTECT)
location_extra_info = models.CharField(verbose_name=_('Location extra info'),
max_length=400, null=True, blank=True)
start_time = models.DateTimeField(verbose_name=_('Start time'), null=True, db_index=True, blank=True)
end_time = models.DateTimeField(verbose_name=_('End time'), null=True, db_index=True, blank=True)
has_start_time = models.BooleanField(default=True)
has_end_time = models.BooleanField(default=True)
audience_min_age = models.SmallIntegerField(verbose_name=_('Minimum recommended age'),
blank=True, null=True, db_index=True)
audience_max_age = models.SmallIntegerField(verbose_name=_('Maximum recommended age'),
blank=True, null=True, db_index=True)
super_event = TreeForeignKey('self', null=True, blank=True,
on_delete=models.SET_NULL, related_name='sub_events')
super_event_type = models.CharField(max_length=255, blank=True, null=True, db_index=True,
default=None, choices=SUPER_EVENT_TYPES)
in_language = models.ManyToManyField(Language, verbose_name=_('In language'), related_name='events', blank=True)
images = models.ManyToManyField(Image, related_name='events', blank=True)
deleted = models.BooleanField(default=False, db_index=True)
replaced_by = models.ForeignKey('Event', on_delete=models.SET_NULL, related_name='aliases', null=True, blank=True)
# Custom fields not from schema.org
keywords = models.ManyToManyField(Keyword, related_name='events')
audience = models.ManyToManyField(Keyword, related_name='audience_events', blank=True)
class Meta:
verbose_name = _('event')
verbose_name_plural = _('events')
class MPTTMeta:
parent_attr = 'super_event'
def save(self, *args, **kwargs):
if self._has_circular_replacement():
            raise Exception("Trying to replace this event with an event that is replaced by this event. "
                            "Please refrain from creating circular replacements and "
                            "remove one of the replacements.")
if self.replaced_by and not self.deleted:
self.deleted = True
logger.warning("Event replaced without soft deleting. Soft deleting automatically", extra={'event': self})
# needed to cache location event numbers
old_location = None
# needed for notifications
old_publication_status = None
old_deleted = None
created = True
if self.id:
try:
event = Event.objects.get(id=self.id)
created = False
old_location = event.location
old_publication_status = event.publication_status
old_deleted = event.deleted
except Event.DoesNotExist:
pass
# drafts may not have times set, so check that first
start = getattr(self, 'start_time', None)
end = getattr(self, 'end_time', None)
if start and end:
if start > end:
raise ValidationError({'end_time': _('The event end time cannot be earlier than the start time.')})
if any([keyword.deprecated for keyword in self.keywords.all() | self.audience.all()]):
raise ValidationError({'keywords': _("Event can't have deprecated keywords")})
super(Event, self).save(*args, **kwargs)
# needed to cache location event numbers
if not old_location and self.location:
Place.objects.filter(id=self.location.id).update(n_events_changed=True)
if old_location and not self.location:
# drafts (or imported events) may not always have location set
Place.objects.filter(id=old_location.id).update(n_events_changed=True)
if old_location and self.location and old_location != self.location:
Place.objects.filter(id__in=(old_location.id, self.location.id)).update(n_events_changed=True)
# send notifications
if old_publication_status == PublicationStatus.DRAFT and self.publication_status == PublicationStatus.PUBLIC:
self.send_published_notification()
if old_deleted is False and self.deleted is True:
self.send_deleted_notification()
if created and self.publication_status == PublicationStatus.DRAFT:
self.send_draft_posted_notification()
def __str__(self):
name = ''
languages = [lang[0] for lang in settings.LANGUAGES]
for lang in languages:
lang = lang.replace('-', '_') # to handle complex codes like e.g. zh-hans
s = getattr(self, 'name_%s' % lang, None)
if s:
name = s
break
val = [name, '(%s)' % self.id]
dcount = self.get_descendant_count()
if dcount > 0:
val.append(u" (%d children)" % dcount)
else:
val.append(str(self.start_time))
return u" ".join(val)
def is_admin(self, user):
if user.is_superuser:
return True
else:
return user.is_admin(self.publisher)
def can_be_edited_by(self, user):
"""Check if current event can be edited by the given user"""
if user.is_superuser:
return True
return user.can_edit_event(self.publisher, self.publication_status)
def soft_delete(self, using=None):
self.deleted = True
self.save(update_fields=("deleted",), using=using, force_update=True)
def undelete(self, using=None):
self.deleted = False
self.save(update_fields=("deleted",), using=using, force_update=True)
def _send_notification(self, notification_type, recipient_list, request=None):
if len(recipient_list) == 0:
logger.warning("No recipients for notification type '%s'" % notification_type, extra={'event': self})
return
context = {'event': self}
try:
rendered_notification = render_notification_template(notification_type, context)
except NotificationTemplateException as e:
logger.error(e, exc_info=True, extra={'request': request})
return
try:
send_mail(
rendered_notification['subject'],
rendered_notification['body'],
'noreply@%s' % Site.objects.get_current().domain,
recipient_list,
html_message=rendered_notification['html_body']
)
except SMTPException as e:
logger.error(e, exc_info=True, extra={'request': request, 'event': self})
def _get_author_emails(self):
author_emails = []
for user in (self.created_by, self.last_modified_by):
if user and user.email:
author_emails.append(user.email)
return author_emails
def send_deleted_notification(self, request=None):
recipient_list = self._get_author_emails()
self._send_notification(NotificationType.UNPUBLISHED_EVENT_DELETED, recipient_list, request)
def send_published_notification(self, request=None):
recipient_list = self._get_author_emails()
self._send_notification(NotificationType.EVENT_PUBLISHED, recipient_list, request)
def send_draft_posted_notification(self, request=None):
recipient_list = []
for admin in self.publisher.admin_users.all():
if admin.email:
recipient_list.append(admin.email)
self._send_notification(NotificationType.DRAFT_POSTED, recipient_list, request)
reversion.register(Event)
@receiver(m2m_changed, sender=Event.keywords.through)
@receiver(m2m_changed, sender=Event.audience.through)
def keyword_added_or_removed(sender, model=None,
instance=None, pk_set=None, action=None, **kwargs):
"""
Listens to event-keyword add signals to keep event number up to date
"""
if action in ('post_add', 'post_remove'):
if model is Keyword:
Keyword.objects.filter(pk__in=pk_set).update(n_events_changed=True)
if model is Event:
instance.n_events_changed = True
instance.save(update_fields=("n_events_changed",))
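# Hedged sketch (assumption, not part of the original code): the receiver
# above only flags keywords as dirty via n_events_changed; presumably a
# separate task recounts them later. Counting both the keyword and audience
# relations here is an assumption made for illustration.
def _example_recount_flagged_keywords():
    for keyword in Keyword.objects.filter(n_events_changed=True):
        keyword.n_events = keyword.events.count() + keyword.audience_events.count()
        keyword.n_events_changed = False
        keyword.save(update_fields=['n_events', 'n_events_changed'])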
class Offer(models.Model, SimpleValueMixin):
event = models.ForeignKey(Event, on_delete=models.CASCADE, db_index=True, related_name='offers')
price = models.CharField(verbose_name=_('Price'), blank=True, max_length=1000)
info_url = models.URLField(verbose_name=_('Web link to offer'), blank=True, null=True, max_length=1000)
description = models.TextField(verbose_name=_('Offer description'), blank=True, null=True)
# Don't expose is_free as an API field. It is used to distinguish
# between missing price info and confirmed free entry.
is_free = models.BooleanField(verbose_name=_('Is free'), default=False)
def value_fields(self):
return ['price', 'info_url', 'description', 'is_free']
reversion.register(Offer)
class EventLink(models.Model, SimpleValueMixin):
name = models.CharField(verbose_name=_('Name'), max_length=100, blank=True)
event = models.ForeignKey(Event, on_delete=models.CASCADE, db_index=True, related_name='external_links')
language = models.ForeignKey(Language, on_delete=models.CASCADE)
link = models.URLField()
class Meta:
unique_together = (('name', 'event', 'language', 'link'),)
def value_fields(self):
return ['name', 'language_id', 'link']
class Video(models.Model, SimpleValueMixin):
name = models.CharField(verbose_name=_('Name'), max_length=255, db_index=True, default='')
event = models.ForeignKey(Event, on_delete=models.CASCADE, db_index=True, related_name='videos')
url = models.URLField()
alt_text = models.CharField(verbose_name=_('Alt text'), max_length=320, null=True, blank=True)
class Meta:
unique_together = (('name', 'event', 'url'),)
def value_fields(self):
return ['name', 'url']
class ExportInfo(models.Model):
target_id = models.CharField(max_length=255, db_index=True, null=True,
blank=True)
target_system = models.CharField(max_length=255, db_index=True, null=True,
blank=True)
last_exported_time = models.DateTimeField(null=True, blank=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.CharField(max_length=50)
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
unique_together = (('target_system', 'content_type', 'object_id'),)
def save(self, *args, **kwargs):
self.last_exported_time = BaseModel.now()
super(ExportInfo, self).save(*args, **kwargs)
class EventAggregate(models.Model):
super_event = models.OneToOneField(Event, on_delete=models.CASCADE, related_name='aggregate', null=True)
class EventAggregateMember(models.Model):
event_aggregate = models.ForeignKey(EventAggregate, on_delete=models.CASCADE, related_name='members')
event = models.OneToOneField(Event, on_delete=models.CASCADE)
|
py | b413b719e5d7dd4c0686d2b499c950aa9c44362e | from CommonServerPython import *
reload(sys)
sys.setdefaultencoding('utf-8') # pylint: disable=E1101
requests.packages.urllib3.disable_warnings()
URL = demisto.getParam('server')
if URL[-1] != '/':
URL += '/'
if not demisto.getParam('proxy'):
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
VALIDATE_CERT = not demisto.params().get('insecure', True)
ID_AND_API_KEY = demisto.getParam('credentials')['identifier'] + ':' + demisto.getParam('credentials')['password']
ENCODED_AUTH_KEY = base64.b64encode(ID_AND_API_KEY.encode("utf-8"))
MSSP_ACCOUNT_ID = demisto.getParam('mssp_sub_account_id')
HEADERS = {'Authorization': 'Basic {}'.format(ENCODED_AUTH_KEY.decode()), 'Content-Type': 'application/json',
'Account-Id': demisto.getParam('credentials')['identifier']}
# Change the Account-Id to the sub account id, so all actions will be on the sub account.
if MSSP_ACCOUNT_ID:
HEADERS['Account-Id'] = MSSP_ACCOUNT_ID
IOC_TYPE_TO_DBOT_TYPE = {
'IpAddresses': 'ip',
'Urls': 'url',
'Domains': 'domain',
'Hashes': 'hash'
}
DEFAULT_TIME_RANGE = '1 day'
SEVERITY_LEVEL = {
'All': 0,
'Low': 1,
'Medium': 2,
'High': 3
}
def http_request(method, path, json_data=None, params=None, json_response=False):
"""
Send the request to IntSights and return the JSON response
"""
try:
response = requests.request(method, URL + path, headers=HEADERS, json=json_data,
params=params, verify=VALIDATE_CERT)
except requests.exceptions.SSLError:
        raise Exception('Connection error in the API call to IntSights.\nCheck the "insecure" (not secure) parameter.')
except requests.ConnectionError:
raise Exception('Connection error in the API call to IntSights.\nCheck your Server URL parameter.')
if response.status_code < 200 or response.status_code > 299:
if not (response.text == 'SeverityNotChanged' or response.text == 'TagExist'
or response.text == 'IocBlocklistStatusNotChanged'):
return_error('Error in API call to IntSights service %s - [%d] %s' %
(path, response.status_code, response.text))
if response.status_code == 204:
return [] # type: ignore
if json_response:
try:
return response.json()
except ValueError:
raise Exception('Error in API call to IntSights service - check your configured URL address')
return response
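# Hedged usage sketch (hypothetical wrapper; the endpoint is the one used by
# the alert commands below): http_request() is the single transport helper in
# this script. With json_response=True the parsed JSON is returned, otherwise
# the raw requests.Response object.
def _example_list_alerts(params=None):
    return http_request('GET', 'public/v1/data/alerts/alerts-list',
                        params=params or {}, json_response=True)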
def convert_iso_string_to_python_date(date_in_iso_format):
iso_format = "%Y-%m-%dT%H:%M:%S"
date_in_python_format = datetime.strptime(date_in_iso_format, iso_format)
return date_in_python_format
def convert_python_date_to_unix_millisecond(python_date_object):
timestamp_in_unix_millisecond = date_to_timestamp(python_date_object, 'datetime.datetime')
return timestamp_in_unix_millisecond
def increase_iso_by_x_days(date_in_iso_format, num_of_days):
date_in_python_format = convert_iso_string_to_python_date(date_in_iso_format)
new_date_in_python_format = date_in_python_format + timedelta(days=int(num_of_days))
new_date_in_iso_format = new_date_in_python_format.isoformat()
return new_date_in_iso_format
def remove_milliseconds_from_iso(date_in_iso_format):
date_parts_arr = date_in_iso_format.split('.')
date_in_iso_without_milliseconds = date_parts_arr[0]
return date_in_iso_without_milliseconds
def increase_timestamp_by_x_days(date_in_unix_ms_timestamp, num_of_days):
date_in_iso = timestamp_to_datestring(date_in_unix_ms_timestamp)
date_in_iso_without_ms = remove_milliseconds_from_iso(date_in_iso)
date_in_iso_plus_x_days = increase_iso_by_x_days(date_in_iso_without_ms, num_of_days)
timestamp_in_unix_ms_plus_x_days = date_to_timestamp(date_in_iso_plus_x_days)
return timestamp_in_unix_ms_plus_x_days
def update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp):
params['foundDateFrom'] = oldest_day_to_search_in_unix_timestamp
params['foundDateTo'] = now_date_in_unix_timestamp
params['sourceDateFrom'] = oldest_day_to_search_in_unix_timestamp
params['sourceDateTo'] = now_date_in_unix_timestamp
def update_params_with_delta_arg(params, time_delta_in_days_int):
now_date_in_iso = datetime.utcnow().isoformat()
now_date_in_iso_without_ms = remove_milliseconds_from_iso(now_date_in_iso)
now_date_in_unix_timestamp = date_to_timestamp(now_date_in_iso_without_ms)
oldest_day_to_search_in_unix_timestamp = increase_timestamp_by_x_days(now_date_in_unix_timestamp,
-1 * time_delta_in_days_int)
update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp)
del params['time-delta']
def update_params_dict_according_to_delta_arg(params, time_delta_in_days_int):
if 'foundDateFrom' in params or 'foundDateTo' in params:
demisto.debug(
"ERROR in get_alerts() - can't use found-date-to or found-date-from arguments with time-delta argument")
        return_error("Error: can't use the time-delta argument together with found-date-to or found-date-from")
else:
update_params_with_delta_arg(params, time_delta_in_days_int)
return params
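# Hedged worked example (illustrative only): with a time-delta of 2 days the
# helpers above drop the 'time-delta' key and add four millisecond-precision
# timestamps (foundDateFrom/foundDateTo and sourceDateFrom/sourceDateTo)
# spanning the last two days.
def _example_delta_params(days=2):
    params = {'time-delta': str(days)}
    return update_params_dict_according_to_delta_arg(params, days)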
def handle_filters(found_date_from=None):
"""
Apply filters to alert list
"""
args_camel_case = {
'alert-type': 'alertType',
'source-type': 'sourceType',
'network-type': 'networkType',
'source-date-from': 'sourceDateFrom',
'source-date-to': 'sourceDateTo',
'found-date-from': 'foundDateFrom',
'found-date-to': 'foundDateTo',
'is-flagged': 'isFlagged',
'is-closed': 'isClosed',
'source-ID': 'sourceId',
'first-seen-from': 'firstSeenFrom',
'first-seen-to': 'firstSeenTo',
'last-seen-from': 'lastSeenFrom',
'last-seen-to': 'lastSeenTo',
'value': 'iocValue',
}
params = {}
for key in demisto.args():
if demisto.getArg(key):
params[args_camel_case.get(key) or key] = demisto.getArg(key)
if demisto.getArg('time-delta'):
time_delta_in_days = demisto.getArg('time-delta')
update_params_dict_according_to_delta_arg(params, int(time_delta_in_days))
elif found_date_from:
params['foundDateFrom'] = found_date_from
return params
def get_alerts_helper(params):
demisto.info("Executing get_alerts with params: {}".format(params))
response = http_request('GET', 'public/v1/data/alerts/alerts-list', params=params, json_response=True)
alerts_human_readable = []
alerts_context = []
for alert_id in response:
alert_human_readable, alert_context = get_alert_by_id_helper(alert_id)
alerts_human_readable.append(alert_human_readable)
alerts_context.append(alert_context)
return alerts_human_readable, alerts_context
def extract_mail(replies):
if not replies:
return ''
mails = []
for reply in replies:
mails.append(reply.get('Email'))
return '\n'.join(mails)
def extract_remediation(remediations):
    if not remediations:
        return ''
    remedies = []
    string_format = "{0} - Status: {1}"
    for remedy in remediations:
remedies.append(string_format.format(remedy.get('Value'), remedy.get('Status')))
return '\n'.join(remedies)
def hash_identifier(hash_val):
if md5Regex.match(hash_val):
return 'MD5'
if sha1Regex.match(hash_val):
return 'SHA1'
if sha256Regex.match(hash_val):
return 'SHA256'
return 'Unknown'
def extract_tags(tags):
pretty_tags = []
string_format = "ID: {0} - Name: {1}"
for tag in tags:
pretty_tags.append(string_format.format(tag.get('_id'), tag.get('Name')))
return pretty_tags
def get_alerts():
"""
Gets all alerts and returns as a list.
"""
alerts_human_readable, alerts_context = get_alerts_helper(handle_filters())
headers = ['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL',
'SourceEmail', 'SourceNetworkType', 'IsClosed', 'Closed', 'IsFlagged', 'Images', 'Tags',
'Description', 'Title', 'TakedownStatus', 'SubType']
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alerts_context},
'Contents': alerts_context,
'HumanReadable': tableToMarkdown('IntSights Alerts', alerts_human_readable, headers=headers, removeNull=False),
'ContentsFormat': formats['json']
})
def alert_to_readable(alert, parse_tags):
"""
Convert alert to readable format
"""
is_closed = demisto.get(alert, 'IsClosed')
if is_closed is None:
is_closed = demisto.get(alert, 'Closed.IsClosed')
readable = {
'ID': demisto.get(alert, '_id'),
'Severity': demisto.get(alert, 'Details.Severity'),
'Type': demisto.get(alert, 'Details.Type'),
'FoundDate': demisto.get(alert, 'FoundDate'),
'SourceType': demisto.get(alert, 'Details.Source.Type'),
'SourceURL': demisto.get(alert, 'Details.Source.URL'),
'SourceEmail': demisto.get(alert, 'Details.Source.Email'),
'SourceNetworkType': demisto.get(alert, 'Details.Source.NetworkType'),
'IsClosed': is_closed,
'IsFlagged': demisto.get(alert, 'IsFlagged'),
'Assets': demisto.get(alert, 'Assets'),
'Images': demisto.get(alert, 'Details.Images'),
'Description': demisto.get(alert, 'Details.Description'),
'Title': demisto.get(alert, 'Details.Title'),
'TakedownStatus': demisto.get(alert, 'TakedownStatus'),
'SubType': demisto.get(alert, 'Details.SubType'),
}
tags = demisto.get(alert, 'Details.Tags')
if parse_tags:
readable['Tags'] = extract_tags(tags)
else:
readable['Tag'] = []
for tag in tags:
readable['Tag'].append({'ID': tag.get('_id'), 'Name': tag.get('Name')})
return readable
def get_alert_by_id_helper(alert_id):
"""
Helper for getting details by ID
"""
response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True)
return alert_to_readable(response, True), alert_to_readable(response, False)
def get_alert_by_id():
"""
Get alert details by id
"""
alert_id = demisto.getArg('alert-id')
activity_hr, activity_ctx = get_alert_by_id_helper(alert_id)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': activity_ctx},
'Contents': activity_hr,
'HumanReadable': tableToMarkdown('IntSights Alert Details', [activity_hr],
['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL',
'SourceEmail', 'SourceNetworkType', 'IsClosed', 'IsFlagged',
'Images', 'Tags', 'Description', 'Title', 'TakedownStatus', 'SubType']),
'ContentsFormat': formats['json']
})
def get_alert_image():
"""
Retrieves the alert image by image_id
"""
image_id = demisto.getArg('image-id')
response = http_request('GET', 'public/v1/data/alerts/alert-image/' + image_id)
demisto.results(fileResult(image_id + '-image.jpeg', response.content))
def ask_analyst():
"""
Send question to an analyst about the requested alert
"""
alert_id = demisto.getArg('alert-id')
question = demisto.getArg('question')
http_request('POST', 'public/v1/data/alerts/ask-the-analyst/' + alert_id, json_data={'Question': question})
question_details = {'ID': alert_id, 'Question': question}
title = 'IntSights Ask the Analyst: ' \
'Your question has been successfully sent to an analyst about the requested alert'
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': question_details},
'Contents': question_details,
'HumanReadable': tableToMarkdown(title, [question_details], ['ID', 'Question']),
'ContentsFormat': formats['json']
}
)
def get_alert_activity():
"""
Retrieves the alert activity by alert-id
"""
alert_id = demisto.getArg('alert-id')
response = http_request('GET', 'public/v1/data/alerts/activity-log/' + alert_id, json_response=True)
alert = {'ID': alert_id, 'Activities': []}
if not response:
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert},
'Contents': response,
'HumanReadable': 'Alert {} does not have activities.'.format(alert_id),
'ContentsFormat': formats['json']
})
else:
human_readable_arr = []
for activity in response:
alert['Activities'].append({
'ID': demisto.get(activity, '_id'),
'Type': demisto.get(activity, 'Type'),
'Initiator': demisto.get(activity, 'Initiator'),
'CreatedDate': demisto.get(activity, 'CreatedDate'),
'UpdateDate': demisto.get(activity, 'UpdateDate'),
'RemediationBlocklistUpdate': demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate'),
'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')},
'Mail': {'Replies': demisto.get(activity, 'AdditionalInformation.Mail.Replies')},
'ReadBy': demisto.get(activity, 'ReadBy')
})
human_readable_arr.append({
'ID': demisto.get(activity, '_id'),
'Type': demisto.get(activity, 'Type'),
'Initiator': demisto.get(activity, 'Initiator'),
'CreatedDate': demisto.get(activity, 'CreatedDate'),
'UpdateDate': demisto.get(activity, 'UpdateDate'),
'RemediationBlocklistUpdate': extract_remediation(
demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate'))
if demisto.get(activity, 'AdditionalInformation') else '',
'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')},
'Mail': extract_mail(
demisto.get(activity, 'AdditionalInformation.Mail.Replies'))
if demisto.get(activity, 'AdditionalInformation.Mail') else '',
'ReadBy': demisto.get(activity, 'ReadBy')
})
headers = ['ID', 'Type', 'Initiator', 'CreatedDate', 'UpdateDate',
'RemediationBlocklistUpdate', 'AskTheAnalyst', 'Mail', 'ReadBy']
        human_readable = tableToMarkdown('IntSights Alert {} Activity Log'.format(alert_id),
                                         t=human_readable_arr, headers=headers)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert},
'Contents': response,
'HumanReadable': human_readable,
'ContentsFormat': formats['json']
})
def change_severity():
"""
Change severity of an alert
"""
alert_id = demisto.getArg('alert-id')
severity = demisto.getArg('severity')
http_request('PATCH', 'public/v1/data/alerts/change-severity/' + alert_id, json_data={'Severity': severity})
severity_details = {'ID': alert_id, 'Severity': severity}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': severity_details},
'Contents': severity_details,
'HumanReadable': tableToMarkdown(
'IntSights Update Alert Severity: The Alert severity has been successfully updated.', [severity_details],
['ID', 'Severity']),
'ContentsFormat': formats['json']
})
def get_assignee_id(assignee_email):
response = http_request('GET', 'public/v1/account/users-details', json_response=True)
for user in response:
if assignee_email == user.get('Email', ''):
return user.get('_id')
raise Exception('user not found')
def assign_alert():
"""
Assign alert to an Assignee ID
"""
alert_id = demisto.getArg('alert-id')
assignee_email = demisto.getArg('assignee-email')
is_mssp = demisto.getArg('is-mssp-optional')
assignee_id = get_assignee_id(assignee_email)
assign_details = {'ID': alert_id, 'Assignees.AssigneeID': assignee_id}
url = 'public/v1/data/alerts/assign-alert/' + alert_id
if is_mssp:
url += '?IsMssp=' + is_mssp
http_request('PATCH', url, json_data={'AssigneeID': assignee_id})
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': assign_details},
'Contents': assign_details,
'HumanReadable': tableToMarkdown(
'IntSights Assign Alert: The Alert has been successfully assigned to assigneeID', [assign_details],
['ID', 'Assignees.AssigneeID']),
'ContentsFormat': formats['json']
})
def unassign_alert():
"""
Unassign an alert
"""
alert_id = demisto.getArg('alert-id')
http_request('PATCH', 'public/v1/data/alerts/unassign-alert/' + alert_id)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id}},
'Contents': {'ID': alert_id},
'HumanReadable': 'Alert id: ' + alert_id + ' successfully unassigned',
'ContentsFormat': formats['json']
})
def close_alert():
"""
Close an alert
"""
alert_id = demisto.getArg('alert-id')
reason = demisto.getArg('reason')
free_text = demisto.getArg('free-text')
is_hidden = demisto.getArg('is-hidden') == 'True'
rate = demisto.getArg('rate')
close_details = {'ID': alert_id, 'Close Reason': reason, 'Closed FreeText': free_text, 'Closed Rate': rate,
'IsHidden': is_hidden}
close_details_context = {'ID': alert_id, 'Closed': {'Reason': reason, 'FreeText': free_text, 'Rate': rate},
'IsHidden': is_hidden}
url = 'public/v1/data/alerts/close-alert/' + alert_id
json_data = {'Reason': reason}
if free_text:
json_data['FreeText'] = free_text
if is_hidden:
json_data['IsHidden'] = is_hidden
if rate:
json_data['Rate'] = rate
http_request('PATCH', url, json_data)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': close_details},
'Contents': close_details_context,
'HumanReadable': tableToMarkdown('IntSights Close Alert: The Alert has successfully been closed.',
[close_details],
['ID', 'Close Reason', 'Closed FreeText', 'Closed Rate', 'IsHidden']),
'ContentsFormat': formats['json']
})
def send_mail():
"""
Send email with the alert details and a question
"""
alert_id = demisto.getArg('alert-id')
emails = argToList(demisto.getArg('emails'))
content = demisto.getArg('content')
http_request('POST', 'public/v1/data/alerts/send-mail/' + alert_id, {'Emails': emails, 'Content': content})
context = {
'ID': alert_id,
'EmailID': emails,
'Question': content
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': 'Email with content (' + content + ') sent to emails',
'ContentsFormat': formats['json']
})
def get_tag_id(alert_id, tag_name):
response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True)
details = response.get('Details', {})
tags = details.get('Tags', [])
for tag in tags:
if tag.get('Name', '') == tag_name:
return tag.get('_id', '')
return 'Not found'
def add_tag():
"""
Adds a tag to the alert
"""
alert_id = demisto.getArg('alert-id')
tag_name = demisto.getArg('tag-name')
http_request('PATCH', 'public/v1/data/alerts/add-tag/' + alert_id, json_data={'TagName': tag_name})
tag_info = {
'TagName': tag_name,
'ID': get_tag_id(alert_id, tag_name)
}
context = {
'ID': alert_id,
'Tags': tag_info
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': 'Tag (' + tag_name + ') added to alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def remove_tag():
"""
Removes a tag from an alert
"""
alert_id = demisto.getArg('alert-id')
tag_id = demisto.getArg('tag-id')
http_request('PATCH', 'public/v1/data/alerts/remove-tag/' + alert_id, json_data={'TagID': tag_id})
context = {
'ID': alert_id,
'Tags': {'ID': tag_id}
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': 'Tag id: ' + tag_id + ' removed from alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def add_comment():
"""
Adds a comment to an alert
"""
alert_id = demisto.getArg('alert-id')
comment = demisto.getArg('comment')
http_request('PATCH', 'public/v1/data/alerts/add-comment/' + alert_id, json_data={'Comment': comment})
context = {
'ID': alert_id,
'Comment': comment
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
        'HumanReadable': 'Successfully added comment "' + comment + '" to alert id: ' + alert_id,
'ContentsFormat': formats['json']
})
def ioc_to_readable(ioc_data):
"""
Convert IOC to readable format
"""
ioc_context = {
'ID': demisto.get(ioc_data, '_id'),
'SourceID': demisto.get(ioc_data, 'SourceID'),
'AccountID': demisto.get(ioc_data, 'AccountID'),
'Type': demisto.get(ioc_data, 'Type'),
'Value': demisto.get(ioc_data, 'Value'),
'FirstSeen': demisto.get(ioc_data, 'FirstSeen'),
'LastSeen': demisto.get(ioc_data, 'LastSeen'),
'Domain': demisto.get(ioc_data, 'Domain'),
'Status': demisto.get(ioc_data, 'Status'),
'Severity': demisto.get(ioc_data, 'Severity'),
'SourceName': demisto.get(ioc_data, 'Source.Name'),
'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'),
'Flags': {'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa')},
'Enrichment': {
'Status': demisto.get(ioc_data, 'Enrichment.Status'),
'Data': demisto.get(ioc_data, 'Enrichment.Data'),
'Date': demisto.get(ioc_data, 'Enrichment.Data') # Backwards compatibility issue
}
}
ioc_readable = {
'ID': demisto.get(ioc_data, '_id'),
'SourceID': demisto.get(ioc_data, 'SourceID'),
'AccountID': demisto.get(ioc_data, 'AccountID'),
'Type': demisto.get(ioc_data, 'Type'),
'Value': demisto.get(ioc_data, 'Value'),
'FirstSeen': demisto.get(ioc_data, 'FirstSeen'),
'LastSeen': demisto.get(ioc_data, 'LastSeen'),
'Domain': demisto.get(ioc_data, 'Domain'),
'Status': demisto.get(ioc_data, 'Status'),
'Severity': demisto.get(ioc_data, 'Severity').get('Value'),
'SourceName': demisto.get(ioc_data, 'Source.Name'),
'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'),
'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa'),
'Enrichment Status': demisto.get(ioc_data, 'Enrichment.Status'),
'Enrichment Data': demisto.get(ioc_data, 'Enrichment.Data')
}
dbot_score = {
'Indicator': ioc_context['Value'],
'Type': IOC_TYPE_TO_DBOT_TYPE[ioc_context['Type']],
'Vendor': 'IntSights',
'Score': translate_severity(ioc_readable['Severity'])
}
malicious_dict = {
'Vendor': 'IntSights',
'Description': 'IntSights severity level is High'
}
domain = {}
if ioc_context['Domain']:
domain['Name'] = ioc_context['Domain']
if translate_severity(ioc_readable['Severity']) == 3:
domain['Malicious'] = malicious_dict
ip_info = {}
if ioc_context['Type'] == 'IpAddresses':
ip_info['Address'] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
ip_info['Malicious'] = malicious_dict
url_info = {}
if ioc_context['Type'] == 'Urls':
url_info['Data'] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
url_info['Malicious'] = malicious_dict
hash_info = {}
if ioc_context['Type'] == 'Hashes':
hash_info['Name'] = ioc_context['Value']
hash_info[hash_identifier(ioc_context['Value'])] = ioc_context['Value']
if translate_severity(ioc_readable['Severity']) == 3:
hash_info['Malicious'] = malicious_dict
return ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info
def search_for_ioc():
"""
Search for IOC by value
"""
response = http_request('GET', 'public/v1/iocs/ioc-by-value', params=handle_filters(), json_response=True)
if response:
ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(response)
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Iocs(val.ID === obj.ID)': ioc_context,
'DBotScore': dbot_score,
'Domain': domain,
'IP': ip_info,
'URL': url_info,
'File': hash_info
},
'Contents': response,
'HumanReadable': tableToMarkdown('IOC Information', [ioc_readable],
['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen',
'LastSeen', 'Domain', 'Status', 'Severity', 'SourceName',
'SourceConfidence', 'IsInAlexa', 'Enrichment Status',
'Enrichment Data']),
'ContentsFormat': formats['json']
}
)
else:
results_for_no_content('IOC Information')
def results_for_no_content(cmd_name):
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {'IntSights': {}},
'Contents': {},
'HumanReadable': '### {} \n\n Could not get any results.'.format(cmd_name),
'ContentsFormat': formats['json']
}
)
def translate_severity(sev):
"""
Translate alert severity to demisto
"""
if sev in ['Medium', 'High']:
return 3
if sev == 'Low':
return 2
return 0
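# Hedged check (expected values follow directly from the mapping above):
# 'High' and 'Medium' map to DBot score 3, 'Low' to 2, and anything else
# (e.g. 'All') to 0.
def _example_translate_severity():
    return [translate_severity(sev) for sev in ('High', 'Medium', 'Low', 'All')]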
def fetch_incidents():
"""
Fetch incidents for Demisto
"""
now = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
last_run = demisto.getLastRun()
demisto.info("IntSight fetch last run time is: {}".format(str(last_run)))
if not last_run or 'time' not in last_run:
fetch_delta, _ = parse_date_range(demisto.params().get('fetch_delta', DEFAULT_TIME_RANGE), to_timestamp=True)
else:
fetch_delta = last_run.get('time')
alert_type = demisto.getParam('type')
min_severity_level = demisto.params().get('severity_level', 'All')
if min_severity_level not in SEVERITY_LEVEL:
        raise Exception("Minimum Alert severity level to fetch incidents from, allowed values are: All,"
                        " Low, Medium, High. (Setting to All will fetch all incidents)")
_, alerts_context = get_alerts_helper(handle_filters(fetch_delta))
incidents = []
for alert in alerts_context:
if SEVERITY_LEVEL[min_severity_level] <= SEVERITY_LEVEL[alert.get('Severity', 'Low')]:
if not alert_type or alert_type.lower() == alert.get('Type', '').lower():
incidents.append({
'name': '{type} - {id}'.format(type=alert.get('Type', 'Type not found'), id=alert.get('ID')),
'occurred': alert.get('FoundDate'),
'severity': translate_severity(alert.get('Severity')),
'rawJSON': json.dumps(alert)
})
demisto.incidents(incidents)
demisto.setLastRun({'time': now})
def get_iocs():
"""
Gets all IOCs with the given filters
"""
response = http_request('GET', 'public/v1/iocs/complete-iocs-list', params=handle_filters(), json_response=True)
domains = []
ip_infos = []
url_infos = []
hash_infos = []
dbot_scores = []
iocs_context = []
iocs_readable = []
for indicator in response:
ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(indicator)
iocs_context.append(ioc_context)
iocs_readable.append(ioc_readable)
dbot_scores.append(dbot_score)
domains.append(domain)
ip_infos.append(ip_info)
url_infos.append(url_info)
hash_infos.append(hash_info)
headers = ['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen', 'LastSeen',
'Domain', 'Status', 'Severity', 'SourceName', 'SourceConfidence',
'IsInAlexa', 'Enrichment Status', 'Enrichment Data']
demisto.results(
{
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Iocs': iocs_context,
'DBotScore': dbot_scores,
'Domain': domains,
'IP': ip_infos,
'URL': url_infos,
'File': hash_infos
},
'Contents': response,
'HumanReadable': tableToMarkdown('IOC Information', t=iocs_readable, headers=headers),
'ContentsFormat': formats['json']
}
)
def takedown_request():
"""
Request alert takedown
"""
alert_id = demisto.getArg('alert-id')
http_request('PATCH', 'public/v1/data/alerts/takedown-request/' + alert_id)
context = {
'ID': alert_id,
}
human_readable = '### IntSights Alert Takedown\n' \
'The Alert Takedown request has been sent successfully for {}'.format(str(alert_id))
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': human_readable,
'ContentsFormat': formats['json']
})
def get_alert_takedown_status():
"""
Get an alert's takedown status
"""
alert_id = demisto.getArg('alert-id')
response = http_request('GET', 'public/v1/data/alerts/takedown-status/' + alert_id)
context = {
'ID': alert_id,
'TakedownStatus': response.text
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context},
'Contents': context,
'HumanReadable': tableToMarkdown('IntSights Alert Takedown Status', [context], ['ID', 'TakedownStatus']),
'ContentsFormat': formats['json']
})
def update_ioc_blocklist_status():
alert_id = demisto.getArg('alert-id')
types = argToList(demisto.getArg('type'))
values = argToList(demisto.getArg('value'))
statuses = argToList(demisto.getArg('blocklist-status'))
if len(types) != len(values) or len(types) != len(statuses):
return_error('The lists must be of equal length. For each IOC, provide an entry in each list.')
data = []
for count, type_ in enumerate(types):
data.append({
'Type': type_,
'Value': values[count],
'BlocklistStatus': statuses[count]
})
http_request('PATCH', 'public/v1/data/alerts/change-iocs-blocklist-status/' + alert_id, json_data={'Iocs': data})
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': statuses}},
'Contents': {'ID': alert_id, 'Status': statuses},
'HumanReadable': tableToMarkdown('IntSights Update IOC BlockList Status for ' + alert_id, data,
['BlocklistStatus']),
'ContentsFormat': formats['json']
})
def get_ioc_blocklist_status():
alert_id = demisto.getArg('alert-id')
response = http_request('GET', 'public/v1/data/alerts/blocklist-status/' + alert_id, json_response=True)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {
'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': [ioc.get('Status') for ioc in response]}},
'Contents': response,
'HumanReadable': tableToMarkdown('IntSights Blocklist Status for ' + alert_id, response, ['Status']),
'ContentsFormat': formats['json']
})
def get_mssp_sub_accounts():
account_id = demisto.getParam('credentials')['identifier']
accounts = http_request('GET', 'public/v1/mssp/customers', json_response=True)
if not accounts:
return_error("intsights-mssp-get-sub-accounts failed to return data.")
# Fix accounts _id keys
for account in accounts:
account["ID"] = account["_id"]
del account["_id"]
if len(accounts) < 1:
return_error('Current MSSP Account has no sub accounts.')
account_ids = [i["ID"] for i in accounts]
if MSSP_ACCOUNT_ID not in account_ids:
demisto.log("[DEBUG] - MSSP sub accounts:" + str(accounts))
return_error('Entered sub account id ({}) is not part of this mssp account'.format(MSSP_ACCOUNT_ID))
for i, account in enumerate(account_ids):
# Call account
HEADERS['Account-Id'] = account
account_ua = http_request('GET', 'public/v1/account/used-assets', json_response=True)
if not account_ua:
continue
accounts[i].update(account_ua)
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'IntSights.MsspAccount(val.ID === obj.ID)': accounts},
'HumanReadable': tableToMarkdown('IntSights MSSP accounts used assets ' + account_id, accounts,
["ID", 'CompanyName', "Status", "AssetsLimit", "AssetsCount"]),
'Contents': accounts,
'ContentsFormat': formats['json']
})
# Restore the header
HEADERS['Account-Id'] = MSSP_ACCOUNT_ID
def test_module():
http_request('GET', 'public/v1/api/version')
if demisto.params().get('isFetch'):
min_severity_level = demisto.params().get('severity_level', 'All')
if min_severity_level not in SEVERITY_LEVEL:
            return_error("Minimum Alert severity level to fetch incidents from, allowed values are: "
                         "All, Low, Medium, High. (Setting to All will fetch all incidents)")
demisto.results('ok')
try:
if demisto.command() == 'test-module':
test_module()
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'intsights-mssp-get-sub-accounts':
get_mssp_sub_accounts()
elif demisto.command() == 'intsights-get-alerts':
get_alerts()
elif demisto.command() == 'intsights-get-alert-image':
get_alert_image()
elif demisto.command() == 'intsights-get-alert-activities':
get_alert_activity()
elif demisto.command() == 'intsights-assign-alert':
assign_alert()
elif demisto.command() == 'intsights-unassign-alert':
unassign_alert()
elif demisto.command() == 'intsights-send-mail':
send_mail()
elif demisto.command() == 'intsights-ask-the-analyst':
ask_analyst()
elif demisto.command() == 'intsights-add-tag-to-alert':
add_tag()
elif demisto.command() == 'intsights-remove-tag-from-alert':
remove_tag()
elif demisto.command() == 'intsights-add-comment-to-alert':
add_comment()
elif demisto.command() == 'intsights-update-alert-severity':
change_severity()
elif demisto.command() == 'intsights-get-alert-by-id':
get_alert_by_id()
elif demisto.command() == 'intsights-get-ioc-by-value':
search_for_ioc()
elif demisto.command() == 'intsights-get-iocs':
get_iocs()
elif demisto.command() == 'intsights-alert-takedown-request':
takedown_request()
elif demisto.command() == 'intsights-get-alert-takedown-status':
get_alert_takedown_status()
elif demisto.command() == 'intsights-get-ioc-blocklist-status':
get_ioc_blocklist_status()
elif demisto.command() == 'intsights-update-ioc-blocklist-status':
update_ioc_blocklist_status()
elif demisto.command() == 'intsights-close-alert':
close_alert()
else:
raise Exception('Unrecognized command: ' + demisto.command())
except Exception as err:
return_error(str(err))
|
py | b413b7d389481bb8b57cd87daabbc260be90f655 | # -*- coding: utf-8 -*-
"""
hooks.sendmail
~~~~~~~~~~~~~~
Encapsulate multiple mail sending methods.
:copyright: (c) 2020 by staugur.
:license: BSD 3-Clause, see LICENSE for more details.
"""
__version__ = '0.1.1'
__author__ = 'staugur'
__description__ = '多方式发送邮件'
from flask import request, g
from utils.tool import Mailbox, try_request, logger, is_true
intpl_emailsetting = '''
<div class="layui-col-xs12 layui-col-sm12 layui-col-md12">
<div class="layui-form-item">
<div class="layui-inline">
<label class="layui-form-label" style="width: auto;">不使用本地邮件服务发送</label>
<div class="layui-input-inline" style="width: auto;">
<input type="checkbox" name="email_nolocal" lay-skin="switch" lay-text="是|否"
{% if is_true(g.site.email_nolocal) %}checked="checked" {% endif %} autocomplete="off"
value="1">
</div>
</div>
</div>
<div class="layui-form-item">
<div class="layui-inline">
<label class="layui-form-label">SaintIC Open</label>
<div class="layui-input-block">
<input type="text" name="email_open_token"
value="{{ g.site.email_open_token }}" placeholder="Api密钥"
autocomplete="off" class="layui-input">
</div>
</div>
</div>
<fieldset class="layui-elem-field layui-field-title">
<legend><i class="saintic-icon saintic-icon-info" id="tip-sc" style="font-size:120%"></i> SendCloud</legend>
<div class="layui-field-box">
<div class="layui-form-item">
<label class="layui-form-label">Api User</label>
<div class="layui-input-block">
<input type="text" name="email_sendcloud_apiuser" value="{{ g.site.email_sendcloud_apiuser }}"
placeholder="SendCloud.sohu.com API USER" autocomplete="off" class="layui-input">
</div>
</div>
<div class="layui-form-item">
<label class="layui-form-label">Api Key</label>
<div class="layui-input-block">
<input type="text" name="email_sendcloud_apikey" value="{{ g.site.email_sendcloud_apikey }}"
placeholder="SendCloud KEY of API USER" autocomplete="off" class="layui-input">
</div>
</div>
<div class="layui-form-item">
<label class="layui-form-label">From</label>
<div class="layui-input-block">
<input type="text" name="email_sendcloud_from" value="{{ g.site.email_sendcloud_from }}"
placeholder="(可选)发件人地址" autocomplete="off" class="layui-input">
</div>
</div>
</div>
</fieldset>
</div>
'''
def _sendcloud(API_USER, API_KEY, subject, html, to, from_addr, from_name=""):
url = "https://api.sendcloud.net/apiv2/mail/send"
data = {
"apiUser": API_USER,
"apiKey": API_KEY,
"from": from_addr,
"fromName": from_name,
"to": to.replace(",", ";"),
"subject": subject,
"html": html,
}
r = try_request(url, data=data)
return r.json()
def _saintic_open(TOKEN, subject, html, to, from_name=""):
url = "https://open.saintic.com/api/sendmail"
data = dict(
token=TOKEN,
from_name=from_name,
to=to,
subject=subject,
html=html,
)
r = try_request(url, data=data)
return r.json()
def sendmail(subject, message, to_addr):
"""Web环境下发送邮件"""
#: from_addr建议设置发件人邮箱,否则基本会被拦截或进入垃圾邮箱
from_addr = "picbed@{}".format(request.host)
from_name = g.cfg.email_from_name or g.site_name
    #: Whether sending through the local mail server is disabled
no_local = g.cfg.email_nolocal
if is_true(no_local):
res = dict(code=1)
else:
        #: Send via the local mail service
mb = Mailbox(from_addr, "", "localhost")
res = mb.send(subject, message, to_addr, from_name)
logger.debug("sendmail with localhost: {}".format(res))
if res["code"] != 0:
        #: Try the configured hook providers in turn until one send succeeds
API_USER = g.cfg.email_sendcloud_apiuser
API_KEY = g.cfg.email_sendcloud_apikey
TOKEN = g.cfg.email_open_token
#: SaintIC Open(open.saintic.com) Email Service(private now)
if TOKEN:
res = _saintic_open(TOKEN, subject, message, to_addr, from_name)
logger.debug("sendmail with saintic open: {}".format(res))
res.update(method="open")
#: Sohu(sendcloud.sohu.com) public service
if res["code"] != 0 and API_USER and API_KEY:
#: See docs: https://www.sendcloud.net/doc/email_v2/
_sr = _sendcloud(
API_USER, API_KEY, subject, message, to_addr,
g.cfg.email_sendcloud_from or from_addr, from_name
)
if is_true(_sr.get("result")):
res = dict(code=0, data=_sr.get("info"))
else:
res = dict(code=_sr.get("statusCode"), msg=_sr.get("message"))
logger.debug("sendmail with sendcloud: {}".format(res))
res.update(method="sendcloud")
else:
res.update(method="local")
return res
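# --- Usage sketch (editor's addition, illustrative only) ---------------------
# sendmail() depends on Flask's request/g objects, so it must run inside a
# request/app context where g.cfg and g.site_name have been populated by the
# picbed app; the `app` object and recipient below are assumptions.
#
#   with app.test_request_context("/"):
#       result = sendmail("picbed notification", "<p>hello</p>", "user@example.com")
#       # result is a dict such as {"code": 0, "method": "local"} on success,
#       # or {"code": ..., "msg": "...", "method": "sendcloud"} on failure.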
|
py | b413b7db4f10bccfb5345f8032d57acf202ff869 | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UX Configuration test."""
import logging
import socket
import unittest
from unittest.mock import MagicMock, patch
from neural_compressor.ux.utils.consts import WORKSPACE_LOCATION
from neural_compressor.ux.web.configuration import Configuration
from neural_compressor.ux.web.exceptions import NotFoundException
@patch("neural_compressor.ux.web.configuration.determine_ip", new=lambda: "127.0.0.1")
class TestConfiguration(unittest.TestCase):
"""UX Configuration tests."""
@patch(
"sys.argv",
[
"inc_bench.py",
],
)
@patch("secrets.token_hex")
def test_defaults(
self,
mock_secrets_token_hex: MagicMock,
) -> None:
"""Test default values."""
mock_secrets_token_hex.return_value = "this is a mocked token value"
configuration = Configuration()
configuration.set_up()
self.assertEqual(5000, configuration.server_port)
self.assertEqual(5000, configuration.gui_port)
self.assertEqual(logging.CRITICAL, configuration.log_level)
self.assertEqual("127.0.0.1", configuration.server_address)
self.assertEqual("https", configuration.scheme)
self.assertEqual("this is a mocked token value", configuration.token)
self.assertEqual(
"https://127.0.0.1:5000/?token=this is a mocked token value",
configuration.get_url(),
)
@patch("sys.argv", ["inc_bench.py", "-P1234"])
@patch("secrets.token_hex")
def test_changing_gui_port(
self,
mock_secrets_token_hex: MagicMock,
) -> None:
"""Test changing GUI port."""
mock_secrets_token_hex.return_value = "this is a mocked token value"
configuration = Configuration()
configuration.set_up()
self.assertEqual(1234, configuration.gui_port)
self.assertNotEqual(configuration.server_port, configuration.gui_port)
self.assertEqual(
"https://127.0.0.1:1234/?token=this is a mocked token value",
configuration.get_url(),
)
@patch("sys.argv", ["inc_bench.py", "-p1234"])
@patch("secrets.token_hex")
def test_changing_server_port(
self,
mock_secrets_token_hex: MagicMock,
) -> None:
"""Test changing API port."""
mock_secrets_token_hex.return_value = "this is a mocked token value"
configuration = Configuration()
configuration.set_up()
self.assertEqual(1234, configuration.server_port)
self.assertEqual(1234, configuration.gui_port)
self.assertEqual(
"https://127.0.0.1:1234/?token=this is a mocked token value",
configuration.get_url(),
)
@patch("sys.argv", ["inc_bench.py", "-p 0"])
def test_changing_server_port_too_low(self) -> None:
"""Test changing API port to invalid value."""
configuration = Configuration()
with self.assertRaisesRegex(
ValueError,
"Lowest allowed port number is 1, attempted to use: 0",
):
configuration.set_up()
@patch("sys.argv", ["inc_bench.py", "-p 65536"])
def test_changing_server_port_too_high(self) -> None:
"""Test changing API port to invalid value."""
configuration = Configuration()
with self.assertRaisesRegex(
ValueError,
"Highest allowed port number is 65535, attempted to use: 65536",
):
configuration.set_up()
@patch("sys.argv", ["inc_bench.py", "-P 0"])
def test_changing_gui_port_too_low(self) -> None:
"""Test changing GUI port to invalid value."""
configuration = Configuration()
with self.assertRaisesRegex(
ValueError,
"Lowest allowed port number is 1, attempted to use: 0",
):
configuration.set_up()
@patch("sys.argv", ["inc_bench.py", "-P 65536"])
def test_changing_gui_port_too_high(self) -> None:
"""Test changing GUI port to invalid value."""
configuration = Configuration()
with self.assertRaisesRegex(
ValueError,
"Highest allowed port number is 65535, attempted to use: 65536",
):
configuration.set_up()
@patch("sys.argv", ["inc_bench.py", "-p1234", "-P5678"])
@patch("secrets.token_hex")
def test_changing_server_and_gui_port(
self,
mock_secrets_token_hex: MagicMock,
) -> None:
"""Test changing API and GUI ports."""
mock_secrets_token_hex.return_value = "this is a mocked token value"
configuration = Configuration()
configuration.set_up()
self.assertEqual(1234, configuration.server_port)
self.assertEqual(5678, configuration.gui_port)
self.assertEqual(
"https://127.0.0.1:5678/?token=this is a mocked token value",
configuration.get_url(),
)
@patch("sys.argv", ["inc_bench.py", "-vv"])
def test_changing_log_level_to_defined_one(self) -> None:
"""Test changing log level."""
configuration = Configuration()
configuration.set_up()
self.assertEqual(logging.INFO, configuration.log_level)
@patch("sys.argv", ["inc_bench.py", "-vvvvvvvvvvvvv"])
def test_changing_log_level_to_not_defined_one(self) -> None:
"""Test changing log level to unknown one."""
configuration = Configuration()
configuration.set_up()
self.assertEqual(logging.DEBUG, configuration.log_level)
@patch("socket.socket.bind")
@patch("sys.argv", ["inc_bench.py", "-p1234"])
def test_changing_server_port_to_already_taken_fails(
self,
mock_socket_bind: MagicMock,
) -> None:
"""Test fail during attempting to use taken port."""
mock_socket_bind.configure_mock(side_effect=socket.error)
with self.assertRaises(NotFoundException):
configuration = Configuration()
configuration.set_up()
@patch("socket.socket.bind")
@patch("sys.argv", ["inc_bench.py"])
def test_when_all_ports_taken_it_fails(
self,
mock_socket_bind: MagicMock,
) -> None:
"""Test fail when all ports taken."""
mock_socket_bind.configure_mock(side_effect=socket.error)
with self.assertRaises(NotFoundException):
configuration = Configuration()
configuration.set_up()
@patch("sys.argv", ["inc_bench.py"])
def test_many_instances_are_the_same(self) -> None:
"""Test that all instances references same object."""
original_configuration = Configuration()
new_configuration = Configuration()
self.assertTrue(original_configuration is new_configuration)
@patch("sys.argv", ["inc_bench.py"])
def test_reloading_config_changes_token(self) -> None:
"""Test that reloading configuration changes token."""
configuration = Configuration()
original_token = configuration.token
configuration.set_up()
self.assertNotEqual(original_token, configuration.token)
@patch("sys.argv", ["inc_bench.py"])
def test_default_workdir(self) -> None:
"""Test that when no existing config given, default will be used."""
configuration = Configuration()
configuration.set_up()
self.assertEqual(WORKSPACE_LOCATION, configuration.workdir)
if __name__ == "__main__":
unittest.main()
|
py | b413b7eb66afb0a06e1deb620fd0953986b5b6c9 | import requests
from mozart import config
class Music(object):
"""
    Music wrapper class: resolves the download URL, cover image, singer and song name.
"""
    # List of short-link domains that require redirect resolution
should_redirect_lists = ['url.cn']
def __init__(self, url="", music_id="", use_id=False):
self.music_id = ""
self.real_url = ""
# output info
self._download_url = ""
self._cover = ""
self._singer = ""
self._song = ""
self._rate = 128
# whether use music id or not
self.use_id = use_id
if self.use_id:
self.music_id = music_id
else:
self.real_url = self.get_real_url(url)
        # Initialize the song info
pass
def __repr__(self):
return "[Music]\n" \
"singer:%s\n" \
"song:%s\n" \
"cover:%s\n" \
"download_url:%s" % (
self._singer, self._song, self._cover, self._download_url)
@property
def download_url(self):
return self._download_url
@property
def cover(self):
return self._cover
@property
def singer(self):
return self._singer
@property
def song(self):
return self._song
@property
def rate(self):
return self._rate
def get_music_from_id(self):
"""
        Resolve the music info and download URL from the music id.
        """
        raise NotImplementedError("Music's method `get_music_from_id` is not implemented!")
@classmethod
def get_music_id_from_url(cls, url):
"""
        Extract the music id from real_url.
        """
        raise NotImplementedError("Music's method `get_music_id_from_url` is not implemented!")
@classmethod
def get_real_url(cls, url):
"""
        Resolve the final URL to visit from a share link, following redirects when needed.
        :return: url
        # Currently only qq (url.cn) links need redirecting
"""
for l in cls.should_redirect_lists:
if l in url:
r = requests.head(url, allow_redirects=True, headers={"User-Agent": config.ios_ua})
return r.url
return url
def _get_download_url(self):
raise NotImplementedError("Music's method `_get_download_url` does not implement!")
def _get_music_info(self):
raise NotImplementedError("Music's method `_get_music_info` does not implement!")
|
py | b413b8b0a57f9d64151b77937d31cce3af1c2f1c | from pathlib import Path
import click
from skimage.restoration import denoise_tv_chambolle
from dtoolbioimage import ImageDataSet, Image3D, zoom_to_match_scales
def denoise_stack_from_dataset(imageds, image_name, series_name, channel, output_filename):
stack = imageds.get_stack(image_name, series_name, channel)
zoomed_stack = zoom_to_match_scales(stack)
denoised_stack = denoise_tv_chambolle(zoomed_stack, weight=0.02)
denoised_stack.view(Image3D).save(output_filename)
def derive_output_filename(output_dirpath, image_name, series_name):
return output_dirpath/(image_name + '_denoised_venus.tif')
def denoise_all_stacks_in_dataset(imageds, channel, output_dirpath):
for image_name in imageds.get_image_names():
for series_name in imageds.get_series_names(image_name):
output_filename = derive_output_filename(output_dirpath, image_name, series_name)
denoise_stack_from_dataset(imageds, image_name, series_name, channel, output_filename)
@click.command()
@click.argument('dataset_uri')
def main(dataset_uri):
imageds = ImageDataSet(dataset_uri)
image_name = 'fca-3_FLC-Venus_root03'
series_name = 'fca-3_FLC-Venus_root03 #1'
channel = 0
output_filename = 'root_03_denoised.tif'
# denoise_stack_from_dataset(imageds, image_name, series_name, channel, output_filename)
output_dirpath = Path('scratch/')
denoise_all_stacks_in_dataset(imageds, channel, output_dirpath)
if __name__ == "__main__":
main()
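# Usage sketch (editor's addition): this is a click CLI taking a single
# dtoolbioimage dataset URI; the script name and URI below are assumed examples.
#
#   python denoise_stacks.py file:///path/to/image-dataset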
|
py | b413b8e09ce5fb310fdcadbb74cd1d386bc77e3d | import phonetic_matcher
# match scores
score = phonetic_matcher.fuzzy_match("mycroft", "microsoft") # 0.7400408163265306
score = phonetic_matcher.fuzzy_match("cat", "dog") # 0.4999999999999999
# best match selection
query = "mycroft"
choices = ["microsoft", "minecraft", "mike roft", "mein kampf", "my raft"]
best, score = phonetic_matcher.best_match(query, choices) # mike roft 0.9179748822605965
# all matches
matches = phonetic_matcher.match(query, choices)
# [('mike roft', 0.9047095761381476),
# ('minecraft', 0.7416326530612245),
# ('microsoft', 0.7387755102040816),
# ('my raft', 0.7083333333333333),
# ('mein kampf', 0.48752834467120176)]
query = "cat"
choices = ["fat", "crab", "bat", "crap", "trap", "sat"]
matches = phonetic_matcher.match(query, choices)
# [('fat', 0.6666666666666666),
# ('bat', 0.6666666666666666),
# ('sat', 0.6666666666666666),
# ('crap', 0.6222222222222222),
# ('crab', 0.5388888888888889),
# ('trap', 0.5388888888888889)]
|
py | b413b9dffdd7d3eab31204aa67702f6de3dd6cbd | import src.helpers
import src.util
# To test logging w/o a db connection, simply set the db in .env to an invalid name
def test_logging_db():
logger_wrapper = src.util.LoggerWrapper(entity_name=1, location="log/test_file.log")
db_connector = src.helpers.DBConnector(logger_wrapper.logger, mode="DEV")
print(db_connector._connection.closed, type(db_connector._connection.closed))
if db_connector._connection is not None:
print("SUCCESS: DB_Connection")
logger_wrapper.db_connector = db_connector
logger_wrapper.add_db_handler()
logger_wrapper._logger.error("This is a test")
db_connector._connection.close()
print(db_connector._connection.closed)
test_logging_db()
|
py | b413bb36837f38eb067f647bda6f89e53ea2d2c1 | import random
import os
import sys
from colorama import Fore
import requests
from random import randint
gentype = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
g = Fore.GREEN
r = Fore.RED
b = Fore.BLUE
y = Fore.YELLOW
re = Fore.RESET
l = Fore.LIGHTBLACK_EX
blue = Fore.BLUE
payload={}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:92.0) Gecko/20100101 Firefox/92.0',
'Accept': '*/*',
'Accept-Language': 'en-GB,en;q=0.5',
'Content-Type': 'application/json',
'X-Platform': 'CHIHIRO',
'Access-Control-Max-Age': '600',
'Origin': 'https://transact.playstation.com',
'Connection': 'keep-alive',
'Referer': 'https://transact.playstation.com/',
'Cookie': '', #Update
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'TE': 'trailers'
}
def clearscreen():
os.system("cls")
def mainfunction():
print(f"Welcome to The {blue}Playstation{re} Generator")
readyornot = input("Ready? Y/N > ")
if readyornot == "y":
clearscreen()
while True:
generate1 = random.choice(gentype)
generate2 = random.choice(gentype)
generate3 = random.choice(gentype)
generate4 = random.choice(gentype)
space1 = "-"
generate5 = random.choice(gentype)
generate6 = random.choice(gentype)
generate7 = random.choice(gentype)
generate8 = random.choice(gentype)
space2 = "-"
generate9 = random.choice(gentype)
generate10 = random.choice(gentype)
generate11 = random.choice(gentype)
generate12 = random.choice(gentype)
url = "https://web.np.playstation.com/api/graphql/v1/transact/wallets/vouchers/" + generate1+generate2+generate3+generate4+space1+generate5+generate6+generate7+generate8+space2+generate9+generate10+generate11+generate12 #Fx
response = requests.request("GET", url, headers=headers, data=payload)
            # Reuse the response already fetched above instead of issuing a second request
            if response.status_code == 403:
print(f"[{r}ERROR{re}] " + generate1+generate2+generate3+generate4+space1+generate5+generate6+generate7+generate8+space2+generate9+generate10+generate11+generate12)
else:
print(f"[{g}SUCCES{re}] " + generate1+generate2+generate3+generate4+space1+generate5+generate6+generate7+generate8+space2+generate9+generate10+generate11+generate12)
if __name__ == "__main__":
mainfunction()
|
py | b413bb6fe0505df8fb09fa0759fefb6509b95bc9 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
import unittest
import google.protobuf.text_format as text_format
import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--profile_path',
type=str,
default='',
help='Input profile file name. If there are multiple file, the format '
'should be trainer1=file1,trainer2=file2,ps=file3')
parser.add_argument(
'--timeline_path', type=str, default='', help='Output timeline file name.')
args = parser.parse_args()
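# Example invocations (editor's addition; file names and paths are illustrative):
#   python timeline.py --profile_path=/tmp/profile --timeline_path=/tmp/timeline
#   python timeline.py --profile_path=trainer1=/tmp/p1,ps=/tmp/p2 --timeline_path=/tmp/timeline
# The generated JSON can then be loaded in Chrome via chrome://tracing.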
class _ChromeTraceFormatter(object):
def __init__(self):
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
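# Illustrative shape of the emitted trace (editor's addition; values are made
# up, field names follow the catapult trace-event format used above):
#   {"traceEvents": [
#       {"name": "process_name", "ph": "M", "pid": 0, "args": {"name": "trainer:cpu:block:0"}},
#       {"ph": "X", "cat": "Op", "name": "mul", "pid": 0, "tid": 0,
#        "ts": 1000, "dur": 500, "args": {"name": "mul"}}]}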
class Timeline(object):
def __init__(self, profile_dict):
self._profile_dict = profile_dict
self._pid = 0
self._devices = dict()
self._chrome_trace = _ChromeTraceFormatter()
def _allocate_pid(self):
cur_pid = self._pid
self._pid += 1
return cur_pid
def _allocate_pids(self):
for k, profile_pb in self._profile_dict.iteritems():
for event in profile_pb.events:
if event.type == profiler_pb2.Event.CPU:
if (k, event.device_id, "CPU") not in self._devices:
pid = self._allocate_pid()
self._devices[(k, event.device_id, "CPU")] = pid
self._chrome_trace.emit_pid("%s:cpu:block:%d" %
(k, event.device_id), pid)
elif event.type == profiler_pb2.Event.GPUKernel:
if (k, event.device_id, "GPUKernel") not in self._devices:
pid = self._allocate_pid()
self._devices[(k, event.device_id, "GPUKernel")] = pid
self._chrome_trace.emit_pid("%s:gpu:%d" %
(k, event.device_id), pid)
def _allocate_events(self):
for k, profile_pb in self._profile_dict.iteritems():
for event in profile_pb.events:
if event.type == profiler_pb2.Event.CPU:
type = "CPU"
elif event.type == profiler_pb2.Event.GPUKernel:
type = "GPUKernel"
pid = self._devices[(k, event.device_id, type)]
args = {'name': event.name}
if event.memcopy.bytes > 0:
args = {'mem_bytes': event.memcopy.bytes}
# TODO(panyx0718): Chrome tracing only handles ms. However, some
                # ops take micro-seconds. Hence, we keep the ns here.
self._chrome_trace.emit_region(
event.start_ns, (event.end_ns - event.start_ns) / 1.0, pid,
event.sub_device_id, 'Op', event.name, args)
def generate_chrome_trace(self):
self._allocate_pids()
self._allocate_events()
return self._chrome_trace.format_to_string()
profile_path = '/tmp/profile'
if args.profile_path:
profile_path = args.profile_path
timeline_path = '/tmp/timeline'
if args.timeline_path:
timeline_path = args.timeline_path
profile_paths = profile_path.split(',')
profile_dict = dict()
if len(profile_paths) == 1:
with open(profile_path, 'r') as f:
profile_s = f.read()
profile_pb = profiler_pb2.Profile()
profile_pb.ParseFromString(profile_s)
profile_dict['trainer'] = profile_pb
else:
for profile_path in profile_paths:
k, v = profile_path.split('=')
with open(v, 'r') as f:
profile_s = f.read()
profile_pb = profiler_pb2.Profile()
profile_pb.ParseFromString(profile_s)
profile_dict[k] = profile_pb
tl = Timeline(profile_dict)
with open(timeline_path, 'w') as f:
f.write(tl.generate_chrome_trace())
|
py | b413bb7d0d5bf6e2d43c782b658fa0760acd4e56 | #Written by Neil McHenry
#September 23rd, 2018
#Load math-related packages
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.integrate import ode
#AERO 622 Hw #3 Page 49 Problem 2
#Solve for the motion of the bead on a spinning wire
#Using a numerical integration technique, then comment on the results
#Physical parameters
m = 0.1 #kg
r = 0.4 #meters
g = 10 #m/s/s
#Initial Condition 1
OMEGA1 = 4 #rad/s
theta = np.pi/2 + 0.1 #radians
thetaDot = 0 #rad/s
time = 12 #seconds
#Initial Condition 2
OMEGA2 = 6 #rad/s
#Function Definition
def fun(t, z, omega):
"""
Right hand side of the differential equations
dx/dt = -omega * y
dy/dt = omega * x
"""
x, y = z
f = [-omega*y, omega*x]
return f
# Create an `ode` instance to solve the system of differential
# equations defined by `fun`, and set the solver method to 'dop853'.
solver = ode(fun)
solver.set_integrator('dop853')
# Give the value of omega to the solver. This is passed to
# `fun` when the solver calls it.
omega = 2 * np.pi
solver.set_f_params(omega)
# Set the initial value z(0) = z0.
t0 = 0.0
z0 = [1, -0.25]
solver.set_initial_value(z0, t0)
# Create the array `t` of time values at which to compute
# the solution, and create an array to hold the solution.
# Put the initial value in the solution array.
t1 = 2.5
N = 75
t = np.linspace(t0, t1, N)
sol = np.empty((N, 2))
sol[0] = z0
# Repeatedly call the `integrate` method to advance the
# solution to time t[k], and save the solution in sol[k].
k = 1
while solver.successful() and solver.t < t1:
solver.integrate(t[k])
sol[k] = solver.y
k += 1
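# --- Editor's sketch (not part of the original assignment code) --------------
# The system integrated above is the generic demo ODE; the bead-on-a-spinning-
# wire dynamics from the problem statement can be fed to the same solver.
# Assuming theta is measured from the downward vertical of a circular wire of
# radius r spinning at rate omega_wire about its vertical diameter,
#   theta'' = (omega_wire**2 * cos(theta) - g/r) * sin(theta)
# The helper below is only defined here as an illustration; it is not called.
def bead_rhs(t, z, omega_wire):
    theta, theta_dot = z
    return [theta_dot, (omega_wire ** 2 * np.cos(theta) - g / r) * np.sin(theta)]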
# Plot the solution...
plt.plot(t, sol[:,0], label='x')
plt.plot(t, sol[:,1], label='y')
plt.xlabel('t')
plt.grid(True)
plt.legend()
plt.show() |
py | b413bb9f433055d6b75d99ca7b75d27ac362dea5 | import os
from enum import Enum
from enum import unique
from importlib.resources import open_text
from logging.config import dictConfig
import envtoml
from mipengine import AttrDict
from mipengine import controller
@unique
class DeploymentType(str, Enum):
LOCAL = "LOCAL"
KUBERNETES = "KUBERNETES"
if config_file := os.getenv("MIPENGINE_CONTROLLER_CONFIG_FILE"):
with open(config_file) as fp:
config = AttrDict(envtoml.load(fp))
else:
with open_text(controller, "config.toml") as fp:
config = AttrDict(envtoml.load(fp))
dictConfig(
{
"version": 1,
"formatters": {
"controller_request_frm": {
"format": "%(asctime)s - %(levelname)s - CONTROLLER - %(module)s - %(funcName)s(%(lineno)d) - %(message)s",
},
"controller_background_service_frm": {
"format": "%(asctime)s - %(levelname)s - CONTROLLER - BACKGROUND - %(module)s - %(funcName)s(%(lineno)d) - %(message)s",
},
},
"handlers": {
"controller_request_hdl": {
"level": config.log_level,
"formatter": "controller_request_frm",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
},
"controller_background_service_hdl": {
"level": config.log_level,
"formatter": "controller_background_service_frm",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
},
},
"loggers": {
"controller_request": {
"level": config.log_level,
"handlers": ["controller_request_hdl"],
},
"controller_background_service": {
"level": config.log_level,
"handlers": ["controller_background_service_hdl"],
},
},
}
)
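# Usage sketch (editor's addition): other modules are expected to obtain the
# loggers configured above through the standard logging API, e.g.
#
#   import logging
#   logger = logging.getLogger("controller_request")
#   logger.info("request received")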
|
py | b413bba9934a78ee87c11c19a52576c203cafec0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-22 06:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SpamNumber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('phone_number', models.CharField(blank=True, max_length=200, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'User Contacts',
'verbose_name': 'User Contact',
},
),
migrations.CreateModel(
name='UserContact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('phone_number', models.CharField(blank=True, max_length=200, null=True)),
('name', models.CharField(max_length=200)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'User Contacts',
'verbose_name': 'User Contact',
},
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('phone_number', models.CharField(blank=True, max_length=200, null=True)),
('name', models.CharField(max_length=200)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'User Profiles',
'verbose_name': 'User Profile',
},
),
]
|
py | b413bda964e34510d6c28c45fd61b434e284773e | #!/usr/bin/env python2
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Copyright (c) 2014-2020 The Martkist Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import MartkistTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(MartkistTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1'], ['-usehd=1'], ['-usehd=1'], ['-usehd=1']])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
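        # Worked example (editor's addition, illustrative numbers): with a relay
        # fee of 0.00001000 per kB, feeTolerance = 2 * 0.00001000 / 1000
        # = 0.00000002, i.e. the allowance for two extra signature bytes.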
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 MARTK to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_martkistds()
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1'], ['-usehd=1'], ['-usehd=1'], ['-usehd=1']])
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(2) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
|
py | b413bdee207a7da51e0700ee6795816c38222a21 | import types
def _clean_acc(acc):
out = {}
for attr in ['genomic', 'protein', 'rna']:
if attr in acc:
v = acc[attr]
if type(v) is types.ListType:
out[attr] = [x.split('.')[0] for x in v]
else:
out[attr] = v.split('.')[0]
return out
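# Example (editor's addition, illustrative accession values): _clean_acc strips
# the version suffix from accession numbers, e.g.
#   _clean_acc({'genomic': 'NC_000001.11', 'rna': ['NM_001.2', 'NM_002.3']})
#   # -> {'genomic': 'NC_000001', 'rna': ['NM_001', 'NM_002']}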
def diff_doc1(doc_1, doc_2):
diff_d = {'update': {},
'delete': [],
'add': {}}
for attr in set(doc_1) | set(doc_2):
if attr in ['_rev', 'pir', 'Vega']:
continue
if attr in doc_1 and attr in doc_2:
_v1 = doc_1[attr]
_v2 = doc_2[attr]
if attr == 'MGI':
_v2 = _v2.split(':')[1]
elif attr in ['refseq', 'accession']:
_v1 = _clean_acc(_v1)
elif attr == 'interpro':
if type(_v1) is types.ListType:
_v1.sort()
if type(_v2) is types.ListType:
_v2.sort()
elif attr == 'reagent':
for k in _v1.keys():
if k.find('.') != -1:
_v1[k.replace('.', '_')] = _v1[k]
del _v1[k]
if _v1 != _v2:
diff_d['update'][attr] = _v2
elif attr in doc_1 and attr not in doc_2:
diff_d['delete'].append(attr)
else:
diff_d['add'][attr] = doc_2[attr]
if diff_d['update'] or diff_d['delete'] or diff_d['add']:
return diff_d |
py | b413c02a8f0d23705dc3cb830c50987b9d7b2ce5 | """A component registry, similar to nlp_saft::RegisteredClass<>.
Like nlp_saft::RegisteredClass<>, one does not need to explicitly import the
module containing each subclass. It is sufficient to add subclasses as build
dependencies.
Unlike nlp_saft::RegisteredClass<>, which allows subclasses to be registered
under arbitrary names, subclasses must be looked up based on their type name.
This restriction allows the registry to dynamically import the module containing
the desired subclass.
Example usage:
# In basepackage/base.py...
@registry.RegisteredClass
class MyBase:
def my_method(self):
pass
# In implpackage/impl.py...
class MyImpl(MyBase):
def my_method(self):
...
# In userpackage/user.py...
  try:
impl = MyBase.Create("implpackage.impl.MyImpl")
except ValueError as error:
...
Note that there is no registration statement in impl.py. For convenience, if
the base class and subclass share a package prefix, the shared portion of the
package path may be omitted in the call to Create(). For example, if the base
class is 'foo.bar.Base' and the subclass is 'foo.bar.baz.Impl', then these are
all equivalent:
Base.Create('foo.bar.baz.Impl')
Base.Create('bar.baz.Impl')
Base.Create('baz.Impl')
Name resolution happens in inside-out fashion, so if there is also a subclass
'foo.baz.Impl', then
Base.Create('baz.Impl') # returns foo.bar.baz.Impl
Base.Create('bar.baz.Impl') # returns foo.bar.baz.Impl
Base.Create('foo.baz.Impl') # returns foo.baz.Impl
NB: Care is required when moving code, because config files may refer to the
classes being moved by their type name, which may include the package path. To
preserve existing names, leave a stub in the original location that imports the
class from its new location. For example,
# Before move, in oldpackage/old.py...
class Foo(Base):
...
# After move, in newpackage/new.py...
class Bar(Base):
...
# After move, in oldpackage/old.py...
from newpackage import new
Foo = new.Bar
"""
import inspect
import sys
from tensorflow.python.platform import tf_logging as logging
def _GetClass(name):
"""Looks up a class by name.
Args:
name: The fully-qualified type name of the class to return.
Returns:
The class associated with the |name|, or None on error.
"""
elements = name.split('.')
# Need at least "module.Class".
if len(elements) < 2:
logging.debug('Malformed type: "%s"', name)
return None
module_path = '.'.join(elements[:-1])
class_name = elements[-1]
# Import the module.
try:
__import__(module_path)
except ImportError as e:
logging.debug('Unable to find module "%s": "%s"', module_path, e)
return None
module = sys.modules[module_path]
# Look up the class.
if not hasattr(module, class_name):
logging.debug('Name "%s" not found in module: "%s"', class_name,
module_path)
return None
class_obj = getattr(module, class_name)
# Check that it is actually a class.
if not inspect.isclass(class_obj):
logging.debug('Name does not refer to a class: "%s"', name)
return None
return class_obj
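# Example (editor's addition): _GetClass resolves a fully-qualified class name,
# e.g. _GetClass('collections.OrderedDict') imports 'collections' and returns
# the OrderedDict class, while a malformed or unknown name returns None.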
def _Create(baseclass, subclass_name, *args, **kwargs):
"""Creates an instance of a named subclass.
Args:
baseclass: The expected base class.
subclass_name: The fully-qualified type name of the subclass to create.
*args: Passed to the subclass constructor.
**kwargs: Passed to the subclass constructor.
Returns:
An instance of the named subclass, or None on error.
"""
subclass = _GetClass(subclass_name)
if subclass is None:
return None # _GetClass() already logged an error
if not issubclass(subclass, baseclass):
logging.debug('Class "%s" is not a subclass of "%s"', subclass_name,
baseclass.__name__)
return None
return subclass(*args, **kwargs)
def _ResolveAndCreate(baseclass, path, subclass_name, *args, **kwargs):
"""Resolves the name of a subclass and creates an instance of it.
The subclass is resolved with respect to a package path in an inside-out
manner. For example, if |path| is 'google3.foo.bar' and |subclass_name| is
'baz.ClassName', then attempts are made to create instances of the following
fully-qualified class names:
'google3.foo.bar.baz.ClassName'
'google3.foo.baz.ClassName'
'google3.baz.ClassName'
'baz.ClassName'
An instance corresponding to the first successful attempt is returned.
Args:
baseclass: The expected base class.
path: The path to use to resolve the subclass.
subclass_name: The name of the subclass to create.
*args: Passed to the subclass constructor.
**kwargs: Passed to the subclass constructor.
Returns:
An instance of the named subclass corresponding to the inner-most successful
name resolution, or None if the name could not be resolved.
Raises:
ValueError: If the subclass cannot be resolved and created.
"""
elements = path.split('.')
while True:
resolved_subclass_name = '.'.join(elements + [subclass_name])
subclass = _Create(baseclass, resolved_subclass_name, *args, **kwargs)
if subclass: return subclass # success
if not elements: break # no more paths to try
elements.pop() # try resolving against the next-outer path
raise ValueError(
'Failed to create subclass "%s" of base class %s using path %s' %
(subclass_name, baseclass.__name__, path))
def RegisteredClass(baseclass):
"""Decorates the |baseclass| with a static Create() method."""
assert not hasattr(baseclass, 'Create')
def Create(subclass_name, *args, **kwargs):
"""A wrapper around _Create() that curries the |baseclass|."""
path = inspect.getmodule(baseclass).__name__
return _ResolveAndCreate(baseclass, path, subclass_name, *args, **kwargs)
baseclass.Create = staticmethod(Create)
return baseclass
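# Illustrative usage sketch (editor addition, not part of the original module).
# The package, module, and class names below are hypothetical; RegisteredClass
# only attaches a Create() factory, and resolution still requires the subclass
# to live in an importable module.
#
#   # In mypackage/base.py (hypothetical):
#   @RegisteredClass
#   class Exporter(object):
#     pass
#
#   # In mypackage/plugins/csv_exporter.py (hypothetical):
#   class CsvExporter(Exporter):
#     pass
#
#   # Anywhere else:
#   exporter = Exporter.Create('plugins.csv_exporter.CsvExporter')
#   # Create() resolves the name inside-out against the module defining Exporter
#   # ('mypackage.base'), trying 'mypackage.base.plugins.csv_exporter.CsvExporter',
#   # then 'mypackage.plugins.csv_exporter.CsvExporter', then the bare name, and
#   # raises ValueError if no candidate resolves to a subclass of Exporter.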
|
py | b413c23f1e52d4ab745b3475f920fd34ffb30ab6 | #TODO collecting the servers' MAC addresses and set a period
#TODO using etac (the topology information) to compute the route
#for each flow and add the flow entries to the switches, then add them to mysql db
import time
import random
import MySQLdb
from ryu.base import app_manager
from ryu.topology import event
from ryu.controller.controller import Datapath
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib import ofctl_v1_3
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.topology.switches import Switch
#from ryu.topology.switches import get_link, get_switch, Switch
from priodict import priorityDictionary
#from ryu.topology import event
from ryu.topology.api import get_link, get_switch
#hardware
DBADDRESS = 'localhost'
DBUSER = 'root'
DBPASSWD = 'mysql'
DBNAME = 'meshsr'
#TODO add the topology 4,12 (the computeflowok4_7_3.py is the ok edition with no topology collecting and ff:ff:ff:ff:ff:ff flow entries)
#TODO add the flow entry about FF:FF:FF:FF:FF:FF to let them learn each other's MAC without flood
#TODO judge that if the ports table is empty in the Packet_in_handler, 2014,4,18
#TODO move the topology recollecting to the Port_Status_Change handler, 2014,4,18
#TODO save the path of two servers, when the topology is changed, recompute the path if the original path is broken, 2014,4,18
#TODO 2014,4,21 add meter, flow to the same action list
conn = MySQLdb.connect(host=DBADDRESS, user=DBUSER, passwd=DBPASSWD, db=DBNAME)
cursor = conn.cursor()
start=time.time()
##def get_switch(app, dpid=None):
## rep = app.send_request(event.EventSwitchRequest(dpid))
## return rep.switches
##
##
##def get_all_switch(app):
## return get_switch(app)
##
##
##def get_link(app, dpid=None):
## rep = app.send_request(event.EventLinkRequest(dpid))
## return rep.links
##
##
##def get_all_link(app):
## return get_link(app)
def Dijkstra(G,start,end=None):#TODO test that if there is no way between start and end???
D = {} # dictionary of final distances
P = {} # dictionary of predecessors
Q = priorityDictionary() # est.dist. of non-final vert.
Q[start] = 0
for v in Q:
D[v] = Q[v]
if v == end: break
for w in G[v]:
vwLength = D[v] + G[v][w]
if w in D:
if vwLength < D[w]:
raise ValueError, \
"Dijkstra: found better path to already-final vertex"
elif w not in Q or vwLength < Q[w]:
Q[w] = vwLength
P[w] = v
return (D,P)
def shortestPath(G,start,end):
D,P = Dijkstra(G,start,end)
Path = []
while 1:
Path.append(end)
if end == start: break
end = P[end]
Path.reverse()
return Path
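# Illustrative sketch (editor addition): shortestPath() expects a plain
# adjacency mapping of the form {node: {neighbour: edge_weight}}, e.g.
#
#   G = {0: {1: 1, 2: 1}, 1: {0: 1, 3: 1}, 2: {0: 1, 3: 1}, 3: {1: 1, 2: 1}}
#   shortestPath(G, 0, 3)   # -> [0, 1, 3] or [0, 2, 3] (equal-cost tie)
#
# self.G built in topology() below has exactly this shape with unit weights,
# so the returned path is a minimum-hop sequence of switch numbers.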
class SDNswitch(app_manager.RyuApp):
OFP_VERSIONS = {ofproto_v1_3.OFP_VERSION}
def __init__(self, *args, **kwargs):
super(SDNswitch, self).__init__(*args, **kwargs)
self.mac_to_port = {}#{dpid:{src:port},}
self.flow={}
band={}
band['type']='DROP'#OFPMeterBandDrop
band['rate']=3000
band['burst_size']=100#TODO
bands=[]
bands.append(band)
self.flow['flags']='KBPS'
self.flow['meter_id']=0
self.flow['bands']=bands#TODO if it is right??
self.flow['burst_size']=100
print self.flow
self.addmeter={}
self.flowEntryID = 0
self.flowID = 0
self.servernum = 0
self.serNICID = 'F000'
self.k=4
self.half=10# the topology is half fattree(4)
self.T=5000#TODO 1 ms,(10s)?
#TODO add the parameters
self.swlabel_IP = {0:'10.0.0.1',
1:'10.0.1.1',
2:'10.1.0.1',
3:'10.1.1.1',
4:'10.0.2.1',
5:'10.0.3.1',
6:'10.1.2.1',
7:'10.1.3.1',
8:'10.2.1.1',
9:'10.2.1.2'}
self.dpid_to_label = {16:0,17:1,18:4,19:5,20:9,21:8,22:6,23:7,24:2,25:3}#read from the etac result, or the dpid,port->label,port? ?
#TODO TODO 2014,4,14 the server's (label,bport):(dpid,port) should be dynamic
self.bports_to_dports = {(0,2):(10,4),(0,3):(10,2),(1,2):(11,3),(1,3):(11,1),(2,2):(18,1),(2,3):(18,3),(3,2):(19,2),(3,3):(19,4),
(4,0):(12,2),(4,1):(12,1),(4,2):(12,4),(5,0):(13,4),(5,1):(13,3),(5,2):(13,1),(6,0):(16,3),(6,1):(16,4),
(6,2):(16,1),(7,0):(17,1),(7,1):(17,2),(7,2):(17,4),(8,0):(15,4),(8,1):(15,1),(9,0):(14,1),(9,1):(14,4)}
#TODO#TODO test if it is ok? {(blueprint_switch_label,port):(dpid,port)}#TODO TODO prepare to be done, add etac to this file to get the parameters
self.mac_to_dpid = {}#TODO server mac address -> the (dpid,port) of its connecting switch {mac:(dpid,port)}
self.label_to_port = {(0,4):(3,0),(0,5):(2,0),(1,4):(3,1),(1,5):(2,1),(2,6):(3,0),(2,7):(2,0),(3,6):(3,1),(3,7):(2,1),(4,8):(2,0),(5,9):(2,0),(6,8):(2,1),(7,9):(2,1),
(4,0):(0,3),(5,0):(0,2),(4,1):(1,3),(5,1):(1,2),(6,2):(0,3),(7,2):(0,2),(6,3):(1,3),(7,3):(1,2),(8,4):(0,2),(9,5):(0,2),(8,6):(1,2),(9,7):(1,2)}
self.label_to_dpid = {0:16,1:17,4:18,5:19,9:20,8:21,6:22,7:23,2:24,3:25}
self.graph={(0,4):1,(0,5):1,(1,4):1,(1,5):1,(2,6):1,(2,7):1,(3,6):1,(3,7):1,(4,8):1,(5,9):1,(6,8):1,(7,9):1,
(4,0):1,(5,0):1,(4,1):1,(5,1):1,(6,2):1,(7,2):1,(6,3):1,(7,3):1,(8,4):1,(9,5):1,(8,6):1,(9,7):1}
self.prepath1=[]
self.prepath2=[]
self.server1=''
self.server2=''
self.n=0
self.m=0
self.startnum=0
self.G={}
self.dpids_to_nums={}
self.nums_to_dpids={}
self.dpid_to_port={}
self.graph=[]
self.undirected=[]
self.linkgraph=[]
self.switch_num=10
self.link_num=24#bothway,edge number * 2
self.switches = []
self.links = {}
self.topo_col_period_max = 120 #TODO will 2min be enough for the topology collecting ?
self.topo_col_period = 0
self.topo_col_num_max = 10 #after topo_col_num times get_links(), no matter whether the topology is complete or not, we will auto-configure adresses
self.topo_col_num = 0
self.edgenum=0
self.maxtopocoltimes=30#TODO test the times when the topology can be stabilized
self.topocoltimes=0
self.inittime = time.time()
self.sleeptime=60
sql="DELETE FROM serverNIC;"
print sql
count=cursor.execute(sql)
sql="DELETE FROM flowEntry;"
print sql
count=cursor.execute(sql)
sql="SELECT * FROM serverNIC;"
print sql
count=cursor.execute(sql)
conn.commit()
self.noresult = cursor.fetchone()
for i in xrange(self.switch_num):
self.graph.append([])
self.undirected.append([])
self.linkgraph.append([])
for j in xrange(self.switch_num):
self.graph[i].append(0)
self.undirected[i].append(0)
self.linkgraph[i].append(0)
print 'init over'
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
print 'switch'
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 15, match, actions) # jamie change 0 to 15
def add_flow(self, datapath, priority, match, actions):
print 'add flow!!!!!!!!!!!!!!!'
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
idle_timeout=600
hard_timeout=10
mod = parser.OFPFlowMod(datapath=datapath,#idle_timeout=idle_timeout,hard_timeout=hard_timeout,
priority=priority,match=match, instructions=inst)
datapath.send_msg(mod)
print str(datapath)+' '+str(priority)+' '+str(match)+' '+str(inst)
def delete_flow(self, datapath, match):
print 'delete flow!!!!!!!!!!!!!!!'
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
#inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
# actions)]
table_id=1
#idle_timeout=6
#hard_timeout=10
#mod = parser.OFPFlowMod(datapath=datapath, table_id=table_id, command=OFPFC_DELETE, match=match, instructions=inst)
mod = parser.OFPFlowMod(datapath=datapath, #table_id=table_id,#TODO
command=ofproto.OFPFC_DELETE, match=match)
datapath.send_msg(mod)
print str(datapath)+' '+str(match)#+' '+str(inst)
def add_meter(self, datapath, priority, match, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = actions
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
def add_flow_meter(self, datapath, priority, match, flowaction, meteraction):
print 'add flow!!!!!!!!!!!!!!!'
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
flowaction),meteraction]
idle_timeout=600
hard_timeout=10
mod = parser.OFPFlowMod(datapath=datapath,#idle_timeout=idle_timeout,hard_timeout=hard_timeout,
priority=priority,match=match, instructions=inst)
datapath.send_msg(mod)
print str(datapath)+' '+str(priority)+' '+str(match)+' '+str(inst)
def MACLearning(self, ev):
print 'MACLearning'
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
strsrc=src.split(':')
print src
print dst
dpid = datapath.id
if None == self.mac_to_port.get(dpid):
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
#learn a mac address to avoid FLOOD next time
self.mac_to_port[dpid][src]=in_port
#self.mac_to_dpid[src]=(dpid,in_port)
print 'mac_to_port['+str(dpid)+']'+'['+str(src)+']='+str(in_port)
#only add server mac to the database
#if '00'!=strsrc[0]:#TODO test, because only servermac address should be added to the data base
sql="SELECT portID FROM ports WHERE MAC='%s';" \
% (src)
print sql
count=cursor.execute(sql)
result = cursor.fetchone()
#conn.commit()
nosrc='00:00:00:00:00:00'
sql="SELECT portID FROM ports WHERE MAC='%s';" \
% (nosrc)
print sql
count=cursor.execute(sql)
noresult = cursor.fetchone()
sql="SELECT serNICID FROM serverNIC WHERE MAC='%s';" \
% (src)
print sql
count=cursor.execute(sql)
resultser = cursor.fetchone()
#conn.commit()
nosrc='00:00:00:00:00:00'
sql="SELECT serNICID FROM serverNIC WHERE MAC='%s';" \
% (nosrc)
print sql
count=cursor.execute(sql)
noresultser = cursor.fetchone()
conn.commit()
if noresult==result and noresultser==resultser:
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!add server!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
self.servernum = self.servernum+1
self.serNICID = 'F00000000000000'+str(self.servernum)#TODO when the server number > 10, this should be change
self.mac_to_dpid[src]=(dpid,in_port)
MAC=src
port=in_port
self.add_server_to_sql(self.serNICID, dpid, port, MAC)
if 1==self.servernum:
self.server1=MAC
elif 2==self.servernum:
self.server2=MAC
else:
print result
elif None == self.mac_to_port.get(dpid).get(src):
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
#learn a mac address to avoid FLOOD next time
self.mac_to_port[dpid][src]=in_port
#self.mac_to_dpid[src]=(dpid,in_port)
print 'mac_to_port['+str(dpid)+']'+'['+str(src)+']='+str(in_port)
#only add server mac to the database
sql="SELECT portID FROM ports WHERE MAC='%s';" \
% (src)
print sql
count=cursor.execute(sql)
result = cursor.fetchone()
#conn.commit()
nosrc='00:00:00:00:00:00'
sql="SELECT portID FROM ports WHERE MAC='%s';" \
% (nosrc)
print sql
count=cursor.execute(sql)
noresult = cursor.fetchone()
sql="SELECT serNICID FROM serverNIC WHERE MAC='%s';" \
% (src)
print sql
count=cursor.execute(sql)
resultser = cursor.fetchone()
#conn.commit()
nosrc='00:00:00:00:00:00'
sql="SELECT serNICID FROM serverNIC WHERE MAC='%s';" \
% (nosrc)
print sql
count=cursor.execute(sql)
noresultser = cursor.fetchone()
conn.commit()
if noresult==result and noresultser==resultser:
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!add server!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
self.servernum = self.servernum+1
self.serNICID = 'F00000000000000'+str(self.servernum)#TODO when the server number > 10, this should be change
self.mac_to_dpid[src]=(dpid,in_port)
MAC=src
port=in_port
self.add_server_to_sql(self.serNICID, dpid, port, MAC)
if 1==self.servernum:
self.server1=MAC
elif 2==self.servernum:
self.server2=MAC
else:
print result
else:
print 'server has been added!!!!!!!!!!!'
#TODO the following flood of arp will make loop in fattree ....(data center topologies which has loop)
## out_port=ofproto.OFPP_FLOOD
## actions=[parser.OFPActionOutput(out_port)]
## data=None
## if msg.buffer_id == ofproto.OFP_NO_BUFFER:
## data=msg.data
## out=parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,#TODO consider that this packet maybe lose (if it will has some problems?)
## in_port=in_port, actions=actions, data=data)#TODO actions should be the action for the datapath
## datapath.send_msg(out)
def add_flow_to_sql(self, flowEntryID, flowID, flowSeqNum, dpid, tableID, entryID, in_port, out_port, meterID, meterValue):
#TODO check the port table to get inPort and outPort
print 'add flow to sql'
dbdpid='00000000000000'+str(int(dpid)/16)+str(int(dpid)%16)
sql="SELECT portID FROM ports WHERE dpid='%s' AND number='%d';" \
% (dbdpid, in_port)
print sql
count=cursor.execute(sql)
result = cursor.fetchone()
inPort=result[0]
sql="SELECT portID FROM ports WHERE dpid='%s' AND number='%d';" \
% (dbdpid, out_port)
print sql
count=cursor.execute(sql)
result = cursor.fetchone()
outPort=result[0]
#TODO check if the ports has existed in the tables
#TODO check if this flow entry exists
sql="SELECT flowEntryID FROM flowEntry WHERE dpid='%s' AND inPort='%d';" \
% (dbdpid, inPort)
print sql
count=cursor.execute(sql)
result = cursor.fetchone()
nodpid='0000000000000020'
noinPort=0
sql="SELECT flowEntryID FROM flowEntry WHERE dpid='%s' AND inPort='%d';" \
% (nodpid,noinPort)
print sql
count=cursor.execute(sql)
noresultflow = cursor.fetchone()
conn.commit()
if result != noresultflow:
print result
print noresultflow
print 'the flow has existed'
#TODO 2014,4,21 delete the flowEntry
sql = "DELETE FROM flowEntry where flowEntryID='%d';" \
% (result[0])
print sql
cursor.execute(sql)
conn.commit()
flag=1
else:
flag=0
sql = "INSERT INTO flowEntry VALUE (%s, %s, %s, '%s', %s, %s, %s, %s, %s, %s);" \
% (flowEntryID, flowID, flowSeqNum, dbdpid, tableID, entryID, inPort, outPort, meterID, meterValue)#TODO
#sql = "INSERT INTO flowEntry VALUE (NULL, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');" \
# % (flowID, flowSeqNum, dbdpid, tableID, entryID, inPort, outPort, meterID, meterValue)#TODO
print sql
cursor.execute(sql)
conn.commit()
return flag
def add_server_to_sql(self, serNICID, dpid, port, MAC):
#TODO check the port table to get peer (the switch port which is connected by the server)
print dpid
print port
print MAC
dbdpid='00000000000000'+str(int(dpid)/16)+str(int(dpid)%16)
sql="SELECT portID FROM ports WHERE dpid='%s' AND number='%d';" \
% (dbdpid, port)
print sql
count=cursor.execute(sql)
result = cursor.fetchone()
peer=result[0]
sql = "INSERT INTO serverNIC VALUE ('%s', '%s', '%s');" \
% (serNICID, peer, MAC)
#sql = "INSERT INTO serverNIC VALUE (NULL, '%d', '%s');" \
# % (peer, MAC)
print sql
count=cursor.execute(sql)
#test
conn.commit()
sql = "SELECT peer FROM serverNIC WHERE MAC='%s';"\
% (MAC)
count=cursor.execute(sql)
result = cursor.fetchone()
print 'result'
print result
def addflowsql(self, dst, dpid, in_port, out_port, flag, priority):
print 'add flow sql !!!!!!!!!!!!!!!!!!!!!!!!!!!'
print dpid,in_port,out_port
data_path=get_switch(self,dpid)#TODO test
print type(data_path)
print '!!!!!!!!!!!!!!!!!!!'
print data_path
datapath=data_path[0].dp#TODO test
print 'datapath = '
print datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
print 'dst = '+str(dst)
print 'dpid = '+str(dpid)
print 'in_port = '+str(in_port)
print 'out port = '+str(out_port)
## actions=[parser.OFPActionOutput(out_port)]
match=parser.OFPMatch(in_port=in_port,eth_dst=dst)
f='ff:ff:ff:ff:ff:ff'
if 1==flag:
self.delete_flow(datapath, match)#TODO test delete flow!!
#TODO delete the ff:ff:ff:ff:ff:ff flow
matchf=parser.OFPMatch(in_port=in_port,eth_dst=f)
self.delete_flow(datapath, matchf)
print '!!!!!!!!!!!!!!!!!!add flow!!!!!!!!!!!!!!!!!!!!in_port='+str(in_port)+' dst='+str(dst)+' out_port='+str(out_port)+' dpid='+str(dpid)
## self.add_flow(datapath, 1, match, actions)
if None==self.addmeter.get(dpid):#TODO test add meter
cmd=datapath.ofproto.OFPMC_ADD
if datapath.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
print self.flow
ofctl_v1_3.mod_meter_entry(datapath,self.flow,cmd)#TODO complete the parameters def mod_meter_entry(dp, flow, cmd) #TODO check ofproto_v1_3_paser.py class OFPMeterMod(MsgBase):
self.addmeter[dpid]=1
#self.addflow[dpid]=0
print 'add meter table'
## action = [parser.OFPInstructionMeter(self.flow.get('meter_id'))]
## self.add_meter(datapath, 1, match, action)#TODO the action should be a meter TODO so we should add meter entry to meter table first
## print 'add meter'
flowaction=[parser.OFPActionOutput(out_port)]
meteraction=parser.OFPInstructionMeter(self.flow.get('meter_id'))
self.add_flow_meter(datapath,priority,match,flowaction,meteraction)
def short(self,ev,src,dst,G,priority):
#TODO add the flow entries of ff:ff:ff:ff:ff:ff
#TODO TODO just make ff:ff:ff:ff:ff:ff as the other server's mac (because there is only two server)
#TODO after controller learning two servers' MAC, then add all the needing flow entries (4,bothway of MAC and ff:ff:ff:ff:ff:ff)
#using self.mac_to_dpid to get the two servers' MAC and (dpid,port), then add the 4 flow entries
print 'FFRouting'
msg = ev.msg
datapath = msg.datapath
print type(datapath)
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
print datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
## dst = eth.dst
## src = eth.src
## macsrc=src.split(':')
## macdst=dst.split(':')
## print src
## print dst
## if None==self.mac_to_dpid.get(src) or None==self.mac_to_dpid.get(dst):#TODO
## print 'src or dst mac is not ok'
## self.MACLearning(ev)
## return
dpid = datapath.id
#TODO add meter
## if None==self.addmeter.get(dpid):#TODO test add meter
## cmd=datapath.ofproto.OFPMC_ADD
## if datapath.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
## print self.flow
## ofctl_v1_3.mod_meter_entry(datapath,self.flow,cmd)#TODO complete the parameters def mod_meter_entry(dp, flow, cmd) #TODO check ofproto_v1_3_paser.py class OFPMeterMod(MsgBase):
## self.addmeter[dpid]=1
## #self.addflow[dpid]=0
## print 'add meter table'
print '!!!!!!!!!!!!!dpid=!!!!!!!!!!!!1'
print str(dpid)
#TODO because if the switch has the flow entry it will not packet_in, when it packet_in we only
#need to compute the route and set a living period for every flow entry
#TODO how to get the topology just add etac (error tolerance address configuration) to this file?
#or write the blueprint id, port <-> dpid, port to the file ? then read that file ?????
#TODO compute the route in the blueprint
#and then get corresponding route in the physical graph (switch dpid as a node)
f='ff:ff:ff:ff:ff:ff'
#if None==self.mac_to
srcsw=self.mac_to_dpid[src]
dstsw=self.mac_to_dpid[dst]
## srcswip=self.swlabel_IP[self.dpid_to_label[srcsw[0]]].split('.')
## dstswip=self.swlabel_IP[self.dpid_to_label[dstsw[0]]].split('.')
self.flowID=self.flowID+1
flowSeqNum=0
tableID=0#TODO TODO prepare to be done how to get the value of tabelID, entryID ?
entryID=0
inPort=in_port
outPort=0
meterID=self.flow.get('meter_id')#TODO TODO prepare to be done write meter
meterValue=self.flow.get('bands')[0].get('rate')#kb/s
## meterID=0#TODO TODO prepare to be done write meter
## meterValue=100#kb/s
# install a flow to avoid packet_in next time
#TODO how to use dpid to get datapath get_switch(dpid)?
#TODO use the shortest path to compute the flow
start=self.dpids_to_nums[srcsw[0]]
end=self.dpids_to_nums[dstsw[0]]
path=shortestPath(G,start,end)#TODO test if there is no path between them
print path
#TODO add flow entry to the self.nums_to_dpids[path[i]], with the first in port (srcsw[1]), out_port (self.dpid_to_port[()])
if len(path)==0 or len(path)==1:
print 'no path'
print start,end,src,dst
return
x=1
dpflag=0
for i in xrange(len(path)):
dpid=self.nums_to_dpids[path[i]]
if x==1:
s=dpid
t=self.nums_to_dpids[path[i+1]]
in_port=srcsw[1]
out_port=self.dpid_to_port[(s,t)][0]
flag=self.add_flow_to_sql(self.flowEntryID, self.flowID, flowSeqNum, dpid, tableID, entryID, in_port, out_port, meterID, meterValue)
self.addflowsql(dst, dpid, in_port, out_port, flag, priority)
print 'add_flow'
self.flowEntryID=self.flowEntryID+1
actions=[parser.OFPActionOutput(out_port)]
self.addflowsql(f, dpid, in_port, out_port, 0, priority+1)#TODO if it need to packet out the packet ?? test
elif x<len(path):
flowSeqNum=flowSeqNum+1
s=dpid
t=self.nums_to_dpids[path[i+1]]
in_port=self.dpid_to_port[(self.nums_to_dpids[path[i-1]],s)][1]
out_port=self.dpid_to_port[(s,t)][0]
flag=self.add_flow_to_sql(self.flowEntryID, self.flowID, flowSeqNum, dpid, tableID, entryID, in_port, out_port, meterID, meterValue)
self.addflowsql(dst, dpid, in_port, out_port, flag, priority)
print 'add_flow'
self.flowEntryID=self.flowEntryID+1
actions=[parser.OFPActionOutput(out_port)]
self.addflowsql(f, dpid, in_port, out_port, 0, priority+1)
else:
flowSeqNum=flowSeqNum+1
s=self.nums_to_dpids[path[i-1]]
t=dpid
in_port=self.dpid_to_port[(s,t)][1]
out_port=dstsw[1]
flag=self.add_flow_to_sql(self.flowEntryID, self.flowID, flowSeqNum, dpid, tableID, entryID, in_port, out_port, meterID, meterValue)
self.addflowsql(dst, dpid, in_port, out_port, flag, priority)
print 'add_flow'
self.flowEntryID=self.flowEntryID+1
actions=[parser.OFPActionOutput(out_port)]
self.addflowsql(f, dpid, in_port, out_port, 0, priority+1)
if datapath.id==s:
dpaction=actions
dpflag=1
x=x+1
if src==self.server1:
self.prepath1=path
else:
self.prepath2=path
if 1==dpflag:
in_port = msg.match['in_port']
data=None
out=parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,#TODO consider that this packet maybe lose (if it will has some problems?)
in_port=in_port, actions=dpaction, data=data)#TODO actions should be the action for the datapath
datapath.send_msg(out)
def shortnopacketout(self,ev,src,dst,G,priority):
print 'FFRouting'
msg = ev.msg
datapath = msg.datapath
print type(datapath)
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
print datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
#in_port = msg.match['in_port']
#pkt = packet.Packet(msg.data)
#eth = pkt.get_protocols(ethernet.ethernet)[0]
## dst = eth.dst
## src = eth.src
## macsrc=src.split(':')
## macdst=dst.split(':')
## print src
## print dst
## if None==self.mac_to_dpid.get(src) or None==self.mac_to_dpid.get(dst):#TODO
## print 'src or dst mac is not ok'
## self.MACLearning(ev)
## return
dpid = datapath.id
print '!!!!!!!!!!!!!dpid=!!!!!!!!!!!!1'
print str(dpid)
#TODO because if the switch has the flow entry it will not packet_in, when it packet_in we only
#need to compute the route and set a living period for every flow entry
#TODO how to get the topology just add etac (error tolerance address configuration) to this file?
#or write the blueprint id, port <-> dpid, port to the file ? then read that file ?????
#TODO compute the route in the blueprint
#and then get corresponding route in the physical graph (switch dpid as a node)
f='ff:ff:ff:ff:ff:ff'
#if None==self.mac_to
srcsw=self.mac_to_dpid[src]
dstsw=self.mac_to_dpid[dst]
## srcswip=self.swlabel_IP[self.dpid_to_label[srcsw[0]]].split('.')
## dstswip=self.swlabel_IP[self.dpid_to_label[dstsw[0]]].split('.')
self.flowID=self.flowID+1
flowSeqNum=0
tableID=0#TODO TODO prepare to be done how to get the value of tabelID, entryID ?
entryID=0
#inPort=in_port
outPort=0
meterID=self.flow.get('meter_id')#TODO TODO prepare to be done write meter
meterValue=self.flow.get('bands')[0].get('rate')#kb/s
## meterID=0#TODO TODO prepare to be done write meter
## meterValue=100#kb/s
# install a flow to avoid packet_in next time
#TODO how to use dpid to get datapath get_switch(dpid)?
#TODO use the shortest path to compute the flow
start=self.dpids_to_nums[srcsw[0]]
end=self.dpids_to_nums[dstsw[0]]
path=shortestPath(G,start,end)#TODO test if there is no path between them
print path
#TODO add flow entry to the self.nums_to_dpids[path[i]], with the first in port (srcsw[1]), out_port (self.dpid_to_port[()])
if len(path)==0:
print 'no path'
return
x=1
dpflag=0
for i in xrange(len(path)):
dpid=self.nums_to_dpids[path[i]]
if x==1:
s=dpid
t=self.nums_to_dpids[path[i+1]]
in_port=srcsw[1]
out_port=self.dpid_to_port[(s,t)][0]
flag=self.add_flow_to_sql(self.flowEntryID, self.flowID, flowSeqNum, dpid, tableID, entryID, in_port, out_port, meterID, meterValue)
self.addflowsql(dst, dpid, in_port, out_port, flag, priority)
print 'add_flow'
self.flowEntryID=self.flowEntryID+1
actions=[parser.OFPActionOutput(out_port)]
self.addflowsql(f, dpid, in_port, out_port, 0, priority+1)#TODO if it need to packet out the packet ?? test
elif x<len(path):
flowSeqNum=flowSeqNum+1
s=dpid
t=self.nums_to_dpids[path[i+1]]
in_port=self.dpid_to_port[(self.nums_to_dpids[path[i-1]],s)][1]
out_port=self.dpid_to_port[(s,t)][0]
flag=self.add_flow_to_sql(self.flowEntryID, self.flowID, flowSeqNum, dpid, tableID, entryID, in_port, out_port, meterID, meterValue)
self.addflowsql(dst, dpid, in_port, out_port, flag, priority)
print 'add_flow'
self.flowEntryID=self.flowEntryID+1
actions=[parser.OFPActionOutput(out_port)]
self.addflowsql(f, dpid, in_port, out_port, 0, priority+1)
else:
flowSeqNum=flowSeqNum+1
s=self.nums_to_dpids[path[i-1]]
t=dpid
in_port=self.dpid_to_port[(s,t)][1]
out_port=dstsw[1]
flag=self.add_flow_to_sql(self.flowEntryID, self.flowID, flowSeqNum, dpid, tableID, entryID, in_port, out_port, meterID, meterValue)
self.addflowsql(dst, dpid, in_port, out_port, flag, priority)
print 'add_flow'
self.flowEntryID=self.flowEntryID+1
actions=[parser.OFPActionOutput(out_port)]
self.addflowsql(f, dpid, in_port, out_port, 0, priority+1)
## if datapath.id==s:
## dpaction=actions
## dpflag=1
x=x+1
if src==self.server1:
self.prepath1=path
else:
self.prepath2=path
#TODO how to packet out the message with the buffer_id, dose the post status change message has the buffer_id?
## if 1==dpflag:
## in_port = msg.match['in_port']
## data=None
## out=parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,#TODO consider that this packet maybe lose (if it will has some problems?)
## in_port=in_port, actions=dpaction, data=data)#TODO actions should be the action for the datapath
## datapath.send_msg(out)
def topology(self):
#TODO add the topology collecting periodically
self.edgenum=0
start = time.time()
print start
self.switches = get_switch(self)
self.links = get_link(self)
self.topo_col_num = self.topo_col_num + 1
end = time.time()
print end
print 'topology collecting time:'
print end-start
self.topo_col_period = self.topo_col_period + end-start
#n=len(self.switches)
#m=len(self.links)
## self.startnum=0
## self.dpids_to_nums={}
## self.nums_to_dpids={}
print 'dpids nums:'
for switch in self.switches:#TODO this may has error
if self.dpids_to_nums.get(switch.dp.id)==None:
self.nums_to_dpids[self.startnum] = switch.dp.id
self.dpids_to_nums[switch.dp.id] = self.startnum
print str(switch.dp.id)+' '+str(self.startnum)
self.startnum = self.startnum + 1
print self.dpids_to_nums
self.n=self.startnum
print 'edges:'
self.linkgraph=[]
for i in xrange(self.switch_num):
self.linkgraph.append([])
for j in xrange(self.switch_num):
self.linkgraph[i].append(0)
for link in self.links:
self.edgenum=self.edgenum+1
srcnum = self.dpids_to_nums[link.src.dpid]
dstnum = self.dpids_to_nums[link.dst.dpid]
self.linkgraph[srcnum][dstnum]=1
if self.graph[srcnum][dstnum]==0 and self.graph[dstnum][srcnum]==0:
print str(srcnum)+' '+str(dstnum)
self.dpid_to_port[(link.src.dpid, link.dst.dpid)] = (link.src.port_no, link.dst.port_no)
self.dpid_to_port[(link.dst.dpid, link.src.dpid)]=(link.dst.port_no, link.src.port_no)
#print>>devicegraph, str(srcnum)+' '+str(dstnum)
self.graph[srcnum][dstnum] = 1
self.graph[dstnum][srcnum] = 1
self.undirected[srcnum][dstnum] = 1
self.m=self.m+1
self.G={}
for i in xrange(self.switch_num):
self.G[i]={}
for j in xrange(self.switch_num):
if self.linkgraph[i][j]==1 and self.linkgraph[j][i]==1:#TODO if only one way is ok then regard it as not ok
self.G[i][j]=1
print self.G
#print self.linkgraph
#print self.graph
#print self.undirected
def whether(self,src,dst,G):
flag=0
if src==self.server1:
path=self.prepath1
else:
path=self.prepath2
for i in xrange(len(path)-1):
if None==G.get(path[i]):
flag=1
break
elif None==G.get(path[i]).get(path[i+1]):
flag=1
break
return flag
@set_ev_cls(ofp_event.EventOFPPortStatus)
def _port_status_handler(self, ev):
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!port change!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
#print ev.port
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
dpid = dp.id
print 'dp=' + str(dp)
print 'port=' + str(msg.desc)
print dpid
if msg.reason == ofp.OFPPR_ADD:
reason = 'ADD'
elif msg.reason == ofp.OFPPR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPPR_MODIFY:
reason = 'MODIFY'#TODO the reason only has EventOFPPortStatus
else:
reason = 'UNKNOWN'
print 'reason=' + reason
if 2==self.servernum:
self.topology()
#TODO judge that if the original path is broken,2014,4,18
src=self.server1
dst=self.server2
#TODO 2014,4,21 when the flow Entry is changed, the original path should be deleted
## flag=0
## flag=self.whether(src,dst,self.G)
## if 1==flag:
## self.shortnopacketout(ev, src, dst, self.G)#add the flow entries of FF:FF:FF:FF:FF:FF
## flag=self.whether(dst,src,self.G)
## if 1==flag:
## self.shortnopacketout(ev, dst, src, self.G)
flag1=0
flag2=0
flag1=self.whether(src,dst,self.G)
flag2=self.whether(dst,src,self.G)
if 1==flag1 or 1==flag2:
#TODO delete all the flowID<=self.flowID
for i in range(1, self.flowID+1):
#TODO 2014,4,21 delete the flowEntry
sql = "DELETE FROM flowEntry where flowID='%d';" \
% (i)
print sql
cursor.execute(sql)
conn.commit()
priority=1
## revsrc=list(src)
## revsrc.reverse()
## revsrc="".join(revsrc)
## revdst=list(dst)
## revdst.reverse()
## revdst="".join(revdst)
self.shortnopacketout(ev, src, dst, self.G,priority)#add the flow entries of FF:FF:FF:FF:FF:FF
priority=3
self.shortnopacketout(ev, dst, src, self.G,priority)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
#TODO using two methods to do two things respectively ?
#TODO using server MAC learning period T to separate the two methods ?
## end=time.time()
## if (end-start)*1000 < self.T:
## #TODO compute the route
## print (end-start)*1000
## self.MACLearning(ev)
## else:
## print (end-start)*1000
## #TODO learning server MAC
## self.Routing(ev)
now=time.time()
if now-self.inittime<self.sleeptime:#wait for the ports table to be ok
self.topology()
return
self.topology()
flag=0
## if self.n<4 or self.m<4 or self.topo_col_num<self.topo_col_num_max or self.topo_col_period<self.topo_col_period_max:#TODO test,but when the edge is error, then it will not be ok
## flag=1#TODO
print 'topology ok'
if self.servernum<2:
sql="SELECT * FROM ports;"
print sql
count=cursor.execute(sql)
conn.commit()
result = cursor.fetchone()
if result==self.noresult:
return
self.MACLearning(ev)
if 2==self.servernum: #and 0==flag:
#TODO judge whether the ports table is ok, 2014,4,18
sql="SELECT * FROM ports;"
print sql
count=cursor.execute(sql)
conn.commit()
result = cursor.fetchone()
if result==self.noresult:
return
#TODO the topology may be not complete in the first collecting
self.topology()
if self.edgenum!=self.link_num:
return
src=self.server1
dst=self.server2
## revsrc=list(src)
## revsrc.reverse()
## revsrc="".join(revsrc)
## revdst=list(dst)
## revdst.reverse()
## revdst="".join(revdst)
priority=1
self.short(ev, src, dst, self.G,priority)#add the flow entries of FF:FF:FF:FF:FF:FF
src=self.server2
dst=self.server1
## revsrc=list(src)
## revsrc.reverse()
## revsrc="".join(revsrc)
## revdst=list(dst)
## revdst.reverse()
## revdst="".join(revdst)
priority=3
self.short(ev, src, dst, self.G,priority)
sql="SELECT * FROM ports;"
print sql
count=cursor.execute(sql)
conn.commit()
result = cursor.fetchone()
if result==self.noresult:
return
elif 0==len(self.prepath1) and 2==self.servernum:
self.topology()
if self.edgenum!=self.link_num:
return
src=self.server1
dst=self.server2
## revsrc=list(src)
## revsrc.reverse()
## revsrc="".join(revsrc)
## revdst=list(dst)
## revdst.reverse()
## revdst="".join(revdst)
priority=1
self.short(ev, src, dst, self.G,priority)#add the flow entries of FF:FF:FF:FF:FF:FF
src=self.server2
dst=self.server1
## revsrc=list(src)
## revsrc.reverse()
## revsrc="".join(revsrc)
## revdst=list(dst)
## revdst.reverse()
## revdst="".join(revdst)
priority=3
self.short(ev, src, dst, self.G,priority)
## elif self.edgenum!=8:
## #it means that two servers' MAC have been added, then add the
## src=self.server1
## dst=self.server2
## self.short(ev, src, dst, self.G)#add the flow entries of FF:FF:FF:FF:FF:FF
## src=self.server2
## dst=self.server1
## self.short(ev, src, dst, self.G)
## #elif 2==self.servernum or self.edgenum!=8: #and 0==flag:
## elif self.edgenum!=8
## #it means that two servers' MAC have been added, then add the
## src=self.server1
## dst=self.server2
## self.short(ev, src, dst, self.G)#add the flow entries of FF:FF:FF:FF:FF:FF
## src=self.server2
## dst=self.server1
## self.short(ev, src, dst, self.G)
## if self.m!=4:#TODO when the edge has error ,recompute the routing between two servers
##
## src=self.server1
## dst=self.server2
## self.short(ev, src, dst, self.G)#add the flow entries of FF:FF:FF:FF:FF:FF
## src=self.server2
## dst=self.server1
## self.short(ev, src, dst, self.G)
|
py | b413c2c4aa089f72c665239b5d8c486dbaa8cfcb | #!/usr/bin/env python
try:
import asyncio
except ImportError:
import trollius as asyncio
import unittest
import socket
import sys
import aiodns
import pycares
class DNSTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
self.addCleanup(self.loop.close)
self.resolver = aiodns.DNSResolver(loop=self.loop)
def tearDown(self):
self.resolver = None
def test_query_a(self):
f = self.resolver.query('google.com', 'A')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_a_bad(self):
f = self.resolver.query('hgf8g2od29hdohid.com', 'A')
try:
self.loop.run_until_complete(f)
except aiodns.error.DNSError as e:
self.assertEqual(e.args[0], aiodns.error.ARES_ENOTFOUND)
def test_query_aaaa(self):
f = self.resolver.query('ipv6.google.com', 'AAAA')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_cname(self):
f = self.resolver.query('livechat.ripe.net', 'CNAME')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_mx(self):
f = self.resolver.query('google.com', 'MX')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_ns(self):
f = self.resolver.query('google.com', 'NS')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_txt(self):
f = self.resolver.query('google.com', 'TXT')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_soa(self):
f = self.resolver.query('google.com', 'SOA')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_srv(self):
f = self.resolver.query('_xmpp-server._tcp.jabber.org', 'SRV')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_naptr(self):
f = self.resolver.query('sip2sip.info', 'NAPTR')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_ptr(self):
ip = '8.8.8.8'
f = self.resolver.query(pycares.reverse_address(ip), 'PTR')
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_query_bad_type(self):
self.assertRaises(ValueError, self.resolver.query, 'google.com', 'XXX')
def test_query_timeout(self):
self.resolver = aiodns.DNSResolver(timeout=0.1, loop=self.loop)
self.resolver.nameservers = ['1.2.3.4']
f = self.resolver.query('google.com', 'A')
try:
self.loop.run_until_complete(f)
except aiodns.error.DNSError as e:
self.assertEqual(e.args[0], aiodns.error.ARES_ETIMEOUT)
def test_query_cancel(self):
f = self.resolver.query('google.com', 'A')
self.resolver.cancel()
try:
self.loop.run_until_complete(f)
except aiodns.error.DNSError as e:
self.assertEqual(e.args[0], aiodns.error.ARES_ECANCELLED)
# def test_future_cancel(self):
# # TODO: write this in such a way it also works with trollius
# f = self.resolver.query('google.com', 'A')
# f.cancel()
# def coro():
# yield from asyncio.sleep(0.1, loop=self.loop)
# yield from f
# try:
# self.loop.run_until_complete(coro())
# except asyncio.CancelledError as e:
# self.assertTrue(e)
def test_query_twice(self):
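        # Editor note: 'yield from' is a syntax error on Python 2, so each
        # variant of the coroutine is compiled via exec() and selected at
        # runtime (native syntax on Python >= 3.3, trollius style otherwise).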
if sys.version[:3] >= '3.3':
exec('''if 1:
@asyncio.coroutine
def coro(self, host, qtype, n=2):
for i in range(n):
result = yield from self.resolver.query(host, qtype)
self.assertTrue(result)
''')
else:
exec('''if 1:
@asyncio.coroutine
def coro(self, host, qtype, n=2):
for i in range(n):
result = yield asyncio.From(self.resolver.query(host, qtype))
self.assertTrue(result)
''')
self.loop.run_until_complete(locals()['coro'](self, 'gmail.com', 'MX'))
def test_gethostbyname(self):
f = self.resolver.gethostbyname("google.com", socket.AF_INET)
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_gethostbyname_ipv6(self):
f = self.resolver.gethostbyname("ipv6.google.com", socket.AF_INET6)
result = self.loop.run_until_complete(f)
self.assertTrue(result)
def test_gethostbyname_bad_family(self):
f = self.resolver.gethostbyname("ipv6.google.com", -1)
with self.assertRaises(aiodns.error.DNSError):
self.loop.run_until_complete(f)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py | b413c3dcd9ec734fe96cbe3201fb87e43ea69eb6 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
import numpy as np
from logging import getLogger
from enum import Enum
from onnx import helper, numpy_helper, TensorProto
from onnx_model import OnnxModel
from fusion_base import Fusion
from fusion_utils import FusionUtils
logger = getLogger(__name__)
class AttentionMaskFormat:
MaskIndexEnd = 0
MaskIndexEndAndStart = 1
AttentionMask = 2
NoMask = 3
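    # Editor note (assumed semantics, matching the com.microsoft Attention op):
    # MaskIndexEnd packs per-sequence lengths into a 1D mask index,
    # MaskIndexEndAndStart carries end and start positions, AttentionMask keeps
    # the raw 2D int32 mask, and NoMask drops the mask input entirely.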
class AttentionMask():
"""
    Tracks attention mask inputs and converts them into the mask index form
    consumed by the fused Attention node.
"""
def __init__(self, model: OnnxModel):
self.model = model
# A lookup table with mask input as key, and mask index output as value
self.mask_indice = {}
# A lookup table with mask input as key, and cast (to int32) output as value
self.mask_casted = {}
self.utils = FusionUtils(model)
self.mask_format = AttentionMaskFormat.MaskIndexEnd
def set_mask_format(self, mask_format: AttentionMaskFormat):
self.mask_format = mask_format
def set_mask_indice(self, mask, mask_index):
if mask in self.mask_indice:
assert mask_index == self.mask_indice[mask]
self.mask_indice[mask] = mask_index
def get_first_mask(self):
assert len(self.mask_indice) > 0
return next(iter(self.mask_indice))
def process_mask(self, input):
if self.mask_format == AttentionMaskFormat.NoMask:
return None
if input in self.mask_indice:
return self.mask_indice[input]
# Add cast to convert int64 to int32
if self.model.find_graph_input(input):
casted, input_name = self.utils.cast_graph_input_to_int32(input)
else:
input_name, cast_node = self.utils.cast_input_to_int32(input)
casted = True
if casted:
self.mask_casted[input] = input_name
# Attention supports int32 attention mask (2D) since 1.4.0
if self.mask_format == AttentionMaskFormat.AttentionMask:
self.mask_indice[input] = input_name
return input_name
# Add a mask processing node to convert attention mask to mask index (1D)
output_name = self.model.create_node_name('mask_index')
mask_index_node = helper.make_node('ReduceSum',
inputs=[input_name],
outputs=[output_name],
name=self.model.create_node_name('ReduceSum', 'MaskReduceSum'))
mask_index_node.attribute.extend([helper.make_attribute("axes", [1]), helper.make_attribute("keepdims", 0)])
self.model.add_node(mask_index_node)
self.mask_indice[input] = output_name
return output_name
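# Illustrative note (editor addition): with the default MaskIndexEnd format the
# ReduceSum node created above collapses a 2D attention mask into per-sequence
# lengths, e.g.
#
#   [[1, 1, 1, 0],     ReduceSum(axes=[1], keepdims=0)
#    [1, 1, 0, 0]]  ------------------------------------>  [3, 2]
#
# which is the 1D "mask index" form consumed by the fused Attention node.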
class FusionAttention(Fusion):
"""
Fuse Attention subgraph into one Attention node.
"""
def __init__(self, model: OnnxModel, hidden_size: int, num_heads: int, attention_mask: AttentionMask):
super().__init__(model, "Attention", ["SkipLayerNormalization", "LayerNormalization"])
self.hidden_size = hidden_size
self.num_heads = num_heads
self.attention_mask = attention_mask
def create_attention_node(self, mask_index, q_matmul, k_matmul, v_matmul, q_add, k_add, v_add, input, output):
q_weight = self.model.get_initializer(q_matmul.input[1])
k_weight = self.model.get_initializer(k_matmul.input[1])
v_weight = self.model.get_initializer(v_matmul.input[1])
q_bias = self.model.get_initializer(q_add.input[1])
k_bias = self.model.get_initializer(k_add.input[1])
v_bias = self.model.get_initializer(v_add.input[1])
if q_weight is None:
print(f"{q_matmul.input[1]} is not initializer. Please set do_constant_folding=True in torch.onnx.export")
return None
        if not (k_weight and v_weight and q_bias and k_bias and v_bias):
return None
qw = numpy_helper.to_array(q_weight)
assert qw.shape == (self.hidden_size, self.hidden_size)
kw = numpy_helper.to_array(k_weight)
assert kw.shape == (self.hidden_size, self.hidden_size)
vw = numpy_helper.to_array(v_weight)
assert vw.shape == (self.hidden_size, self.hidden_size)
qkv_weight = np.stack((qw, kw, vw), axis=-2)
qb = numpy_helper.to_array(q_bias)
assert qb.shape == (self.hidden_size, )
kb = numpy_helper.to_array(k_bias)
assert kb.shape == (self.hidden_size, )
vb = numpy_helper.to_array(v_bias)
assert vb.shape == (self.hidden_size, )
qkv_bias = np.stack((qb, kb, vb), axis=-2)
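        # Editor note: stacking three (hidden, hidden) weights on axis=-2 gives
        # shape (hidden, 3, hidden); flattened below it becomes the packed
        # [hidden, 3 * hidden] qkv weight expected by the Attention op, and the
        # biases are packed the same way into a length 3 * hidden vector.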
attention_node_name = self.model.create_node_name('Attention')
weight = helper.make_tensor(name=attention_node_name + '_qkv_weight',
data_type=TensorProto.FLOAT,
dims=[self.hidden_size, 3 * self.hidden_size],
vals=qkv_weight.flatten().tolist())
# Sometimes weights and bias are stored in fp16
if q_weight.data_type == 10:
weight.CopyFrom(numpy_helper.from_array(numpy_helper.to_array(weight).astype(np.float16), weight.name))
self.model.add_initializer(weight)
bias = helper.make_tensor(name=attention_node_name + '_qkv_bias',
data_type=TensorProto.FLOAT,
dims=[3 * self.hidden_size],
vals=qkv_bias.flatten().tolist())
if q_bias.data_type == 10:
bias.CopyFrom(numpy_helper.from_array(numpy_helper.to_array(bias).astype(np.float16), bias.name))
self.model.add_initializer(bias)
attention_inputs = [input, attention_node_name + '_qkv_weight', attention_node_name + '_qkv_bias']
if mask_index is not None:
attention_inputs.append(mask_index)
attention_node = helper.make_node('Attention',
inputs=attention_inputs,
outputs=[output],
name=attention_node_name)
attention_node.domain = "com.microsoft"
attention_node.attribute.extend([helper.make_attribute("num_heads", self.num_heads)])
return attention_node
def fuse(self, normalize_node, input_name_to_nodes, output_name_to_node):
        # Sometimes we cannot fuse SkipLayerNormalization since the Add before LayerNormalization has an output that is used by nodes outside the skip layer normalization
        # Conceptually we treat the Add before LayerNormalization as a SkipLayerNormalization node since they share the same pattern
start_node = normalize_node
if normalize_node.op_type == 'LayerNormalization':
add_before_layernorm = self.model.match_parent(normalize_node, 'Add', 0)
if add_before_layernorm is not None:
start_node = add_before_layernorm
else:
return
# SkipLayerNormalization has two inputs, and one of them is the root input for attention.
qkv_nodes = self.model.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'],
[None, 0, 0, 0, 0])
einsum_node = None
if qkv_nodes is not None:
            (_, _, reshape_qkv, transpose_qkv, matmul_qkv) = qkv_nodes
else:
# Match Albert
qkv_nodes = self.model.match_parent_path(start_node, ['Add', 'Einsum', 'Transpose', 'MatMul'], [1, 0, 0, 0])
if qkv_nodes is not None:
(_, einsum_node, transpose_qkv, matmul_qkv) = qkv_nodes
else:
return
other_inputs = []
for i, input in enumerate(start_node.input):
if input not in output_name_to_node:
continue
if input == qkv_nodes[0].output[0]:
continue
other_inputs.append(input)
if len(other_inputs) != 1:
return
root_input = other_inputs[0]
"""
Match flaubert Mask
|
Mul --> LayerNormalization --> Attention --> MatMul --> Add
| |
| |
+---------------------------------------------------------
"""
mul_before_layernorm = self.model.match_parent(start_node, 'Mul', 0)
if mul_before_layernorm is not None:
mul_children = input_name_to_nodes[mul_before_layernorm.output[0]]
if mul_children is not None and len(mul_children) == 2:
layernorm_node = mul_children[1]
if layernorm_node.op_type == 'LayerNormalization':
root_input = layernorm_node.output[0]
else:
return
else:
return
children = input_name_to_nodes[root_input]
children_types = [child.op_type for child in children]
if children_types.count('MatMul') != 3:
return
v_nodes = self.model.match_parent_path(matmul_qkv, ['Transpose', 'Reshape', 'Add', 'MatMul'], [1, 0, 0, 0])
if v_nodes is None:
logger.debug("fuse_attention: failed to match v path")
return
(_, _, add_v, matmul_v) = v_nodes
is_distill = False
qk_nodes = self.model.match_parent_path(matmul_qkv, ['Softmax', 'Add', 'Div', 'MatMul'], [0, 0, 0, 0])
if qk_nodes is None:
qk_nodes = self.model.match_parent_path(matmul_qkv, ['Softmax', 'Add', 'Mul', 'MatMul'], [0, 0, 0, 0])
if qk_nodes is None:
qk_nodes = self.model.match_parent_path(matmul_qkv, ['Softmax', 'Where', 'MatMul', 'Div'], [0, 0, 2, 0])
is_distill = True
if qk_nodes is None:
logger.debug("fuse_attention: failed to match qk path")
return
add_qk = None
matmul_qk = None
where_qk = None
if is_distill:
(_, where_qk, matmul_qk, _) = qk_nodes
else:
(_, add_qk, _, matmul_qk) = qk_nodes
q_nodes = self.model.match_parent_path(matmul_qk, ['Transpose', 'Reshape', 'Add', 'MatMul'], [0, 0, 0, 0])
if q_nodes is None:
q_nodes = self.model.match_parent_path(matmul_qk, ['Div', 'Transpose', 'Reshape', 'Add', 'MatMul'],
[0, 0, 0, 0, 0])
if q_nodes is None:
logger.debug("fuse_attention: failed to match q path")
return
add_q = q_nodes[-2]
matmul_q = q_nodes[-1]
k_nodes = self.model.match_parent_path(matmul_qk, ['Transpose', 'Reshape', 'Add', 'MatMul'], [1, 0, 0, 0])
if k_nodes is None:
k_nodes = self.model.match_parent_path(matmul_qk, ['Transpose', 'Transpose', 'Reshape', 'Add', 'MatMul'],
[1, 0, 0, 0, 0])
if k_nodes is None:
logger.debug("fuse_attention: failed to match k path")
return
add_k = k_nodes[-2]
matmul_k = k_nodes[-1]
# Note that Cast might be removed by OnnxRuntime so we match two patterns here.
mask_nodes = None
if is_distill:
_, mask_nodes, _ = self.model.match_parent_paths(where_qk,
[(['Expand', 'Reshape', 'Equal'], [0, 0, 0]),
(['Cast', 'Expand', 'Reshape', 'Equal'], [0, 0, 0, 0])],
output_name_to_node)
else:
_, mask_nodes, _ = self.model.match_parent_paths(
add_qk, [(['Mul', 'Sub', 'Cast', 'Unsqueeze', 'Unsqueeze'], [1, 0, 1, 0, 0]),
(['Mul', 'Sub', 'Unsqueeze', 'Unsqueeze'], [1, 0, 1, 0])], output_name_to_node)
if mask_nodes is None:
logger.debug("fuse_attention: failed to match mask path")
return
        if matmul_v.input[0] == root_input and matmul_q.input[0] == root_input and matmul_k.input[0] == root_input:
mask_index = self.attention_mask.process_mask(mask_nodes[-1].input[0])
attention_last_node = reshape_qkv if einsum_node is None else transpose_qkv
new_node = self.create_attention_node(mask_index, matmul_q, matmul_k, matmul_v, add_q, add_k, add_v,
root_input, attention_last_node.output[0])
if new_node is None:
return
self.nodes_to_add.append(new_node)
if einsum_node is not None:
unique_index = einsum_node.input[0]
new_edge = "edge_modified_" + unique_index
shape_tensor = helper.make_tensor(name="shape_modified_tensor" + unique_index,
data_type=TensorProto.INT64,
dims=[4],
vals=np.int64(
[0, 0, self.num_heads,
int(self.hidden_size / self.num_heads)]).tobytes(),
raw=True)
self.model.add_initializer(shape_tensor)
self.model.add_node(
helper.make_node("Reshape", [attention_last_node.output[0], shape_tensor.name], [new_edge],
"reshape_modified_" + unique_index))
einsum_node.input[0] = new_edge
self.nodes_to_remove.extend([attention_last_node, transpose_qkv, matmul_qkv])
self.nodes_to_remove.extend(qk_nodes)
self.nodes_to_remove.extend(q_nodes)
self.nodes_to_remove.extend(k_nodes)
self.nodes_to_remove.extend(v_nodes)
# Use prune graph to remove mask nodes since they are shared by all attention nodes.
#self.nodes_to_remove.extend(mask_nodes)
self.prune_graph = True
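# Illustrative usage sketch (editor addition; driver names are assumptions and
# may differ between onnxruntime versions):
#
#   model = OnnxModel(onnx.load('bert.onnx'))
#   attention_mask = AttentionMask(model)
#   FusionAttention(model, 768, 12, attention_mask).apply()
#
# apply() (inherited from Fusion) walks the graph and invokes fuse() on every
# SkipLayerNormalization / LayerNormalization candidate node.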
|
py | b413c41dfc295a26e23102d663fe53bc9796b17c | import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from tensorboardX import SummaryWriter
class Conv_bn_relu(nn.Module):
def __init__(self, inp, oup, kernel_size=3, stride=1, pad=1, use_relu = True):
super(Conv_bn_relu, self).__init__()
self.use_relu = use_relu
if self.use_relu:
self.convs = nn.Sequential(
nn.Conv2d(inp, oup, kernel_size, stride, pad, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
else:
self.convs = nn.Sequential(
nn.Conv2d(inp, oup, kernel_size, stride, pad, bias=False),
nn.BatchNorm2d(oup,eps=1e-03),
)
def forward(self, x):
out = self.convs(x)
return out
'''
class ConvDW_bn_relu(nn.Module):
def __init__(self, inp, kernel_size=3, stride=1, pad=1, use_relu = True):
super(ConvDW_bn_relu, self).__init__()
self.use_relu = use_relu
if self.use_relu:
self.convs_dw = nn.Sequential(
nn.Conv2d(inp, inp, kernel_size, stride, pad, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
)
else:
self.convs_dw = nn.Sequential(
nn.Conv2d(inp, inp, kernel_size, stride, pad, groups=inp, bias=False),
nn.BatchNorm2d(inp),
)
def forward(self, x):
out = self.convs_dw(x)
return out
'''
class StemBlock(nn.Module):
    def __init__(self, inp=3, num_init_features=32):  # num_init_features was undefined; 32 is the usual PeleeNet stem width (editor assumption)
super(StemBlock, self).__init__()
self.stem_1 = Conv_bn_relu(inp, num_init_features, 3, 2, 1)
self.stem_2a = Conv_bn_relu(num_init_features,int(num_init_features/2),1,1,0)
self.stem_2b = Conv_bn_relu(int(num_init_features/2), num_init_features, 3, 2, 1)
self.stem_2p = nn.MaxPool2d(kernel_size=2,stride=2)
self.stem_3 = Conv_bn_relu(num_init_features*2,num_init_features,1,1,0)
def forward(self, x):
stem_1_out = self.stem_1(x)
stem_2a_out = self.stem_2a(stem_1_out)
stem_2b_out = self.stem_2b(stem_2a_out)
stem_2p_out = self.stem_2p(stem_1_out)
out = self.stem_3(torch.cat((stem_2b_out,stem_2p_out),1))
return out
class DenseBlock(nn.Module):
def __init__(self, inp, inter_channel):
super(DenseBlock, self).__init__()
self.cb1_a = Conv_bn_relu(inp,inter_channel, 1, 1, 0)
self.cb1_b = Conv_bn_relu(inter_channel, inter_channel, 3, 1, 1)
self.cb2_a = Conv_bn_relu(inp,inter_channel, 1, 1, 0)
self.cb2_b = Conv_bn_relu(inter_channel, inter_channel, 3, 1, 1)
self.cb2_c = Conv_bn_relu(inter_channel, inter_channel, 3, 1, 1)
def forward(self, x):
cb1_a_out = self.cb1_a(x)
cb1_b_out = self.cb1_b(cb1_a_out)
cb2_a_out = self.cb2_a(x)
cb2_b_out = self.cb2_b(cb2_a_out)
cb2_c_out = self.cb2_c(cb2_b_out)
out = torch.cat((x,cb1_b_out,cb2_c_out),1)
return out
class TransitionBlock(nn.Module):
def __init__(self, inp, oup):
super(TransitionBlock, self).__init__()
self.tb = Conv_bn_relu(inp,oup,1,1,0)
def forward(self, x):
out = self.tb(x)
return out
class Eltwise(nn.Module):
def __init__(self, operation='+'):
super(Eltwise, self).__init__()
self.operation = operation
def __repr__(self):
return 'Eltwise %s' % self.operation
def forward(self, *inputs):
if self.operation == '+' or self.operation == 'SUM':
x = inputs[0]
for i in range(1,len(inputs)):
x = x + inputs[i]
elif self.operation == '*' or self.operation == 'MUL':
x = inputs[0]
for i in range(1,len(inputs)):
x = x * inputs[i]
elif self.operation == '/' or self.operation == 'DIV':
x = inputs[0]
for i in range(1,len(inputs)):
x = x / inputs[i]
elif self.operation == 'MAX':
x = inputs[0]
for i in range(1,len(inputs)):
x =torch.max(x, inputs[i])
else:
print('forward Eltwise, unknown operator')
return x
class PeleeNet(nn.Module):
def __init__(self, nDenseBlocks = [4,6,6,6]):
super(PeleeNet, self).__init__()
self.stage = nn.Sequential()
inter_channel = [32,48,64,80]
total_filter = [128,128,256,64]
dense_inp = [128,128,128,256]
with_pooling = [1,1,0,0]
# building stemblock
self.stage.add_module('stage_0', StemBlock(3))
# building middle stageblock
for i in range(4) :
self.stage.add_module('stage_{}'.format(i+1),self._make_dense_transition(dense_inp[i], total_filter[i],
inter_channel[i],nDenseBlocks[i],with_pooling[i]))
def _make_dense_transition(self, dense_inp,total_filter, inter_channel, ndenseblocks,with_pooling = 1):
layers = []
for i in range(ndenseblocks):
layers.append(DenseBlock(dense_inp, inter_channel))
dense_inp += inter_channel * 2
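            # Editor note: each DenseBlock concatenates its input with two
            # inter_channel-wide branches, so the channel count grows by
            # 2 * inter_channel per block.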
#Transition Layer without Compression
layers.append(TransitionBlock(dense_inp,total_filter))
if with_pooling == 1:
layers.append(nn.MaxPool2d(kernel_size=2,stride=2))
return nn.Sequential(*layers)
def forward(self, x):
x = self.stage(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
'''
if __name__ == '__main__':
p = PeleeNet()
#input = torch.ones(1, 3, 320, 320)
#with SummaryWriter(comment='PeleeNet') as w:
#w.add_graph(p, (input,))
#output = p(input)
from visualize import make_dot
from torch.autograd import Variable
x = Variable(torch.randn(8,3,320,320))#change 12 to the channel number of network input
y = p(x)
g = make_dot(y)
g.view()
#print(output.size())
#print(p)
# torch.save(p.state_dict(), 'peleenet.pth.tar')
'''
|
py | b413c44e76ca2aa2b71f8e588665884dfe412313 | import pyjokes as pyjokes
import pywhatkit
import speech_recognition as sr
import pyttsx3
import datetime
import wikipedia
import webbrowser
import os
import time
import subprocess
import ecapture as ec
import wolframalpha
import json
import requests
import pyaudio
import headlines
import getpass
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import librosa
import soundfile
import numpy as np
import os, pickle,glob
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
import pickle
from scipy.io import wavfile
from bs4 import BeautifulSoup
import requests
pyttsx3.speak("Enter your password")
inpass = getpass.getpass("Enter your password :")
apass = "ashwin"
if inpass != apass:
pyttsx3.speak("Incorrect Password Try Again ")
exit()
pyttsx3.speak("Access Granted")
print("Loading your AI personal assistant - Ashtech ")
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(text):
engine.say(text)
engine.runAndWait()
def wishMe():
hour = datetime.datetime.now().hour
if hour >= 0 and hour < 12:
speak("Hello,Good Morning")
print("Hello,Good Morning")
elif hour >= 12 and hour < 18:
speak("Hello,Good Afternoon")
print("Hello,Good Afternoon")
else:
speak("Hello,Good Evening")
print("Hello,Good Evening")
def take_First_Command():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
audio = r.listen(source)
with open("audio_file.wav", "wb") as file:
file.write(audio.get_wav_data())
user_mood()
try:
statement = r.recognize_google(audio, language='en-in')
print(f"user said:{statement}\n")
except Exception as e:
speak("Pardon me, please say that again")
return "None"
return statement
def takeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
audio = r.listen(source)
try:
statement = r.recognize_google(audio, language='en-in')
print(f"user said:{statement}\n")
except Exception as e:
speak("Pardon me, please say that again")
return "None"
return statement
def whatsapp(to, message):
person = [to]
string = message
chrome_driver_binary = "C:\\Program Files\\Google\\Chrome\\Application\\chromedriver.exe"
# Selenium chromedriver path
driver = webdriver.Chrome(chrome_driver_binary)
driver.get("https://web.whatsapp.com/")
#wait = WebDriverWait(driver,10)
    time.sleep(15)
for name in person:
print('IN')
user = driver.find_element_by_xpath("//span[@title='{}']".format(name))
user.click()
print(user)
for _ in range(10):
text_box = driver.find_element_by_xpath(
'//*[@id="main"]/footer/div[1]/div[2]/div/div[2]')
text_box.send_keys(string)
sendbutton = driver.find_elements_by_xpath(
'//*[@id="main"]/footer/div[1]/div[3]/button')[0]
sendbutton.click()
def user_mood():
with soundfile.SoundFile('audio_file.wav') as s_file:
x = s_file.read(dtype="float32")
sample_rate = s_file.samplerate
# x,sample_rate=soundfile.read(s_file)
chroma=True
mfcc=True
mel=True
if chroma:
stft=np.abs(librosa.stft(x))
result=np.array([])
if mfcc:
mfccs = np.mean(librosa.feature.mfcc(y=x, sr=sample_rate, n_mfcc=40).T, axis=0)
result = np.hstack((result, mfccs))
if chroma:
chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
result = np.hstack((result, chroma))
if mel:
mel = np.mean(librosa.feature.melspectrogram(x, sr=sample_rate).T, axis=0)
result = np.hstack((result, mel))
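        # load the pre-trained MLP emotion classifier and predict the speaker's mood from the stacked features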
with open('model.pkl', 'rb') as file:
model = pickle.load(file)
result=np.array(result)
result=result.reshape(180,1)
result=result.transpose()
pred=model.predict(result)
if(pred==1):
speak('You seem happy today')
print('You seem happy today :)')
elif(pred==0):
        speak('Should I tell you some jokes to lift your mood?')
        print('Should I tell you some jokes to lift your mood?')
statement1 = takeCommand().lower()
if 'yes' in statement1:
joke = pyjokes.get_joke('en', 'all')
print(joke)
speak(joke)
else:
return
speak("Loading your AI personal assistant AshTech")
wishMe()
if __name__ == '__main__':
statement = take_First_Command().lower()
while True:
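        # dispatch the recognized statement to the matching skill; a fresh command is taken at the end of each pass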
if statement == 0:
continue
if "good bye" in statement or "ok bye" in statement or "stop" in statement or "quit" in statement or "close" in statement:
print('your personal assistant Ashtech is shutting down, Good bye')
speak('your personal assistant Ashtech is shutting down, Good bye')
break
if 'wikipedia' in statement:
speak('Searching Wikipedia...')
statement = statement.replace("wikipedia", "")
results = wikipedia.summary(statement, sentences=3)
speak("According to Wikipedia")
print(results)
speak(results)
elif 'open youtube' in statement:
webbrowser.open_new_tab("https://www.youtube.com")
speak("youtube is open now")
time.sleep(5)
elif 'open google' in statement:
webbrowser.open_new_tab("https://www.google.com")
speak("Google chrome is open now")
time.sleep(5)
elif 'open gmail' in statement:
webbrowser.open_new_tab("gmail.com")
speak("Google Mail open now")
time.sleep(5)
elif 'covid-19 tracker' in statement:
webbrowser.open_new_tab(
"https://news.google.com/covid19/map?hl=en-IN&gl=IN&ceid=IN%3Aen")
speak("covid-19 tracker is open now")
time.sleep(5)
elif "shoping" in statement or 'shopping' in statement:
websites = ['amazon', 'flipkart', 'myntra', 'limeroad']
print('\n'.join(websites))
speak("nice mood sir!, what do you want to open?")
user_ip = takeCommand().lower().replace(' ', '')
for website in websites:
if website in user_ip:
webbrowser.open(website + '.com')
speak("here you are sir")
elif 'online courses' in statement or 'course' in statement:
platforms = ['coursera', 'udemy', 'edx',
'skillshare', 'datacamp', 'udacity']
speak("Select a platform that you prefer : ")
print("\n".join(platforms))
statement1 = takeCommand().lower()
if statement1 == 0:
continue
if 'coursera' in statement1:
webbrowser.open_new_tab("https://www.coursera.org")
speak("Coursera is open now")
time.sleep(2)
elif 'udemy' in statement1:
webbrowser.open_new_tab("https://www.udemy.com")
speak("udemy is open now")
time.sleep(2)
elif 'edx' in statement1:
webbrowser.open_new_tab("https://www.edx.org/")
speak("edx is open now")
time.sleep(2)
elif 'skillshare' in statement1:
webbrowser.open_new_tab("https://www.skillshare.com")
speak("skill share is open now")
time.sleep(2)
elif 'datacamp' in statement1:
webbrowser.open_new_tab("https://www.datacamp.com")
speak("datacamp is open now")
time.sleep(2)
elif 'udacity' in statement1:
webbrowser.open_new_tab("https://www.udacity.com")
speak("udacity is open now")
time.sleep(2)
else:
speak("Sorry we couldn't find your search!!!")
time.sleep(3)
        elif 'jobs' in statement or 'job' in statement or 'job recommendation' in statement or 'work' in statement:
platforms = ['linkedin', 'indeed', 'glassdoor', 'hackerrank', 'naukri', 'intern shala']
speak("Select a platform that you prefer:")
print('\n'.join(platforms))
statement1 = takeCommand().lower()
if(statement1 == 0):
continue
            if 'linkedin' in statement1:
webbrowser.open_new_tab("https://www.linkedin.com/jobs")
speak("LinkedIn is open now")
time.sleep(2)
elif 'indeed' in statement1:
webbrowser.open_new_tab("https://www.indeed.com/jobs")
speak("Indeed is open now")
time.sleep(2)
elif 'glassdoor' in statement1:
webbrowser.open_new_tab("https://www.glassdoor.com/jobs")
speak("Glassdoor is open now")
time.sleep(2)
elif 'hackerrank' in statement1:
webbrowser.open_new_tab("https://www.hackerrank.com/jobs/search")
speak("HackerRank is open now")
time.sleep(2)
elif 'naukri' in statement1:
webbrowser.open_new_tab("https://www.naukri.com/jobs")
speak("Naukri is open now")
time.sleep(2)
            elif 'intern shala' in statement1:
webbrowser.open_new_tab('internshala.com')
speak('Intern Shala is open now')
time.sleep(2)
else:
speak("Sorry we couldn't find your search!!!")
time.sleep(3)
elif 'news' in statement or 'news headline' in statement or 'top news' in statement or 'some news' in statement:
            speak('Here are some headlines from India Today')
res = requests.get('https://www.indiatoday.in/top-stories')
soup = BeautifulSoup(res.text, 'lxml')
news_box = soup.find('div', {'class': 'top-takes-video-container'})
all_news = news_box.find_all('p')
for news in all_news:
print('\n'+news.text)
speak(news.text)
print()
time.sleep(6)
time.sleep(8)
elif "weather" in statement:
api_key = "8ef61edcf1c576d65d836254e11ea420"
base_url = "https://api.openweathermap.org/data/2.5/weather?"
speak("whats the city name")
city_name = takeCommand()
complete_url = base_url + "appid=" + api_key + "&q=" + city_name
response = requests.get(complete_url)
x = response.json()
if x["cod"] != "404":
y = x["main"]
current_temperature = y["temp"]
current_humidiy = y["humidity"]
z = x["weather"]
weather_description = z[0]["description"]
print(" Temperature in kelvin unit is " +
str(current_temperature) +
"\n humidity in percentage is " +
str(current_humidiy) +
"\n description " +
str(weather_description))
speak(" Temperature in kelvin unit = " +
str(current_temperature) +
"\n humidity (in percentage) = " +
str(current_humidiy) +
"\n description = " +
str(weather_description))
elif 'time' in statement:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
print(f"the time is {strTime}")
speak(f"the time is {strTime}")
elif 'who are you' in statement or 'what can you do' in statement:
            speak('I am Ashwin\'s friend Ashtech version 1 point O, your personal assistant. I am programmed to do minor tasks like '
                  'opening youtube, google chrome, gmail and stackoverflow, predict time, take a photo, search wikipedia, predict weather '
                  'in different cities, get top headline news from times of india and you can ask me computational or geographical questions too!')
elif "who made you" in statement or "who created you" in statement or "who discovered you" in statement:
speak("I was built by Ashwin Kumar Ramaswamy")
print("I was built by Ashwin Kumar Ramaswamy")
elif "open stackoverflow" in statement:
webbrowser.open_new_tab("https://stackoverflow.com/login")
speak("Here is stackoverflow")
elif 'news' in statement:
news = webbrowser.open_new_tab(
"https://timesofindia.indiatimes.com/home/headlines")
speak('Here are some headlines from the Times of India,Happy reading')
speak(
'If you like the headline, say "visit" to open the page and read details')
            # use a separate name so the imported headlines module is not shadowed
            toi_headlines = headlines.get_headlines(
                "https://timesofindia.indiatimes.com/home/headlines")
            for i in range(15):
                speak(toi_headlines['text'][i])
                command = takeCommand()
                if 'visit' in command:
                    webbrowser.open_new_tab(toi_headlines['link'][i])
break
elif 'stop' in command:
break
time.sleep(5)
time.sleep(6)
elif "camera" in statement or "take a photo" in statement:
ec.capture(0, "robo camera", "img.jpg")
elif 'search' in statement:
statement = statement.replace("search", "")
webbrowser.open_new_tab(statement)
time.sleep(5)
elif 'ask' in statement:
            speak('I can answer computational and geographical questions. What question do you want to ask now?')
question = takeCommand()
app_id = "R2K75H-7ELALHR35X"
            client = wolframalpha.Client(app_id)
res = client.query(question)
answer = next(res.results).text
speak(answer)
print(answer)
elif 'jokes' in statement or 'joke' in statement:
joke = pyjokes.get_joke('en', 'all')
print(joke)
speak(joke)
elif 'pycharm' in statement or 'open pycharm' in statement:
os.startfile('pycharm')
speak("pycharm is open now")
elif 'visual studio code' in statement or 'open code' in statement or 'code' in statement or 'visual code' in statement:
os.startfile('code')
speak('visual studio code is open now')
elif 'on youtube' in statement or 'youtube' in statement:
statement = statement.replace("youtube", "")
pywhatkit.playonyt(statement)
speak('here you are sir!')
time.sleep(120)
        elif 'what is my current location' in statement or 'what is my location' in statement or 'where am i' in statement:
ip = "https://api.ipify.org/"
ip_r = requests.get(ip).text
geoip = "http://ip-api.com/json/"+ip_r
geo_r = requests.get(geoip)
geo_json = geo_r.json()
print(f"Your current location is {geo_json['city']}, {geo_json['regionName']}, {geo_json['country']} {geo_json['zip']}")
speak(f"Your current location is {geo_json['city']}, {geo_json['regionName']}, {geo_json['country']} {geo_json['zip']}")
elif "notepad" in statement:
speak("Opening Notepad")
os.system("start Notepad")
elif "outlook" in statement:
speak("Opening Microsoft Outlook")
os.system("start outlook")
elif "word" in statement:
speak("Opening Word")
os.system("start winword")
elif "paint" in statement:
speak("Opening Paint")
os.system("start mspaint")
elif "excel" in statement:
speak("Opening Excel")
os.system("start excel")
elif "chrome" in statement:
speak("Opening Google Chrome")
os.system("start chrome")
elif "power point" in statement or "powerpoint" in statement or "ppt" in statement:
speak("Opening Notepad")
os.system("start powerpnt")
elif "edge" in statement:
speak("Opening Microsoft Edge")
os.system("start msedge")
elif "snipping tool" in statement:
speak("Opening Snipping Tool")
os.system("start snippingtool")
elif "show deleted files" in statement or "Recycle Bin" in statement or "Delete files" in statement or "search deleted files" in statement:
speak("Opening Recycle Bin")
os.system("start shell:RecycleBinFolder")
elif "calculator" in statement:
speak("Opening Calculator")
os.system("start calc")
elif "log off" in statement or "sign out" in statement:
speak(
"Ok , your pc will log off in 10 sec make sure you exit from all applications")
subprocess.call(["shutdown", "/l"])
#Writing notes
elif "write a note" in statement:
speak("What should i write, sir")
print("J: What should i write, sir")
note = takeCommand()
file = open('jarvis.txt', 'w')
speak("Sir, Should i include date and time")
print("J: Sir, Should i include date and time")
snfm = takeCommand()
if 'yes' in snfm or 'sure' in snfm:
strTime = datetime.datetime.now()
                file.write(str(strTime))
file.write(" :- ")
file.write(note)
else:
file.write(note)
#Showing note
elif "show the note" in statement:
speak("Showing Notes")
print("J: Showing Notes")
file = open("jarvis.txt", "r")
print(file.read())
speak(file.read(6))
#whatsapp messaging
elif 'whatsapp' in statement:
try:
print("J: To whom should i send? Can you please type in the name.")
speak("To whom should i send? Can you please type in the name.")
to = input('Name: ')
print("J: What should i send? Can you please type in the message.")
speak("What should i send? Can you please type in the message.")
content = input("Enter the message: ")
                speak('You will have to scan the WhatsApp Web QR code.')
                print('J: You will have to scan the WhatsApp Web QR code.')
whatsapp(to, content)
speak("Message has been sent !")
print("* J: Message has been sent !")
except Exception as e:
print(e)
speak("I am not able to send this message")
speak("Tell me how can I help you now?")
statement = takeCommand().lower()
time.sleep(3)
|
py | b413c46d5bf2d1d42f0aa6e600f5711c0f78b565 | # -*- coding: utf-8 -*-
"""
Created on Sat May 2 11:26:18 2020
@author: max
"""
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import requests
def make_first_page(src,text):
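    # fetch the cover image (falling back to a black placeholder), add a white caption strip and draw the wrapped title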
def cv2ImgAddText(img, text, left, top, textColor=(0, 0, 0), textSize=10):
        if (isinstance(img, np.ndarray)):  # check whether the input is an OpenCV image (numpy array)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype("font/simsun.ttc", textSize, encoding="utf-8")
draw.text((left, top), text, textColor, font=fontText)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
try:
response = requests.get(src,timeout=3)
nparr = np.frombuffer(response.content, np.uint8)
im = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
except:
im=np.zeros((90,120,3), np.uint8)
im=cv2.resize(im,(90,120))
im=cv2.copyMakeBorder(src=im,left=0,right=0,top=0,bottom=25,borderType=cv2.BORDER_CONSTANT,value=[255, 255, 255])
    # wrap the title into at most three 12-character lines so short titles are still drawn
    if len(text) > 24:
        tex = text[:12] + '\n' + text[12:24] + '\n' + text[24:]
    elif len(text) > 12:
        tex = text[:12] + '\n' + text[12:]
    else:
        tex = text
im=cv2ImgAddText(im,tex,2,121)
#cv2.imwrite("CV.jpg", im)
return im |
py | b413c5262bef01af46ff80b09d8fc70e8e7de674 |
import os
import json
import zipfile
import numpy as np
import pickle
import torch
from collections import OrderedDict
from itertools import repeat
class AverageMeter(object):
"""Computes and stores the average and current/max/min value"""
    def __init__(self):
        self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.max = -1e10
self.min = 1e10
def update(self, val, n=1):
self.max = max(val, self.max)
self.min = min(val, self.min)
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def read_json(fname):
with fname.open('rt') as handle:
return json.load(handle, object_hook=OrderedDict)
def write_json(content, fname):
with fname.open('wt') as handle:
json.dump(content, handle, indent=4, sort_keys=False)
def load_json(filename):
with open(filename, "r") as f:
return json.loads(f.readlines()[0].strip("\n"))
def save_jsonl(data, filename):
"""data is a list"""
with open(filename, "w") as f:
f.write("\n".join([json.dumps(e) for e in data]))
def load_jsonl(filename):
with open(filename, "r") as f:
return [json.loads(l.strip("\n")) for l in f.readlines()]
def inf_loop(data_loader):
''' wrapper function for endless data loader. '''
for loader in repeat(data_loader):
yield from loader
def load_from_feature_package(group_handle):
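    # walk the grouped feature file (e.g. an h5py group): image sub-groups become arrays of region features,
    # other sub-groups keep their datasets (tokens, image alignments, etc.)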
feature_dict = dict()
vids = group_handle.keys()
for vid in vids:
feature_dict[vid] = dict()
sub_groups = group_handle[vid].keys()
for sub_group in sub_groups:
if '.jpg' in sub_group:
regions = group_handle[vid][sub_group].keys()
region_feature_list = [[] for r in regions]
for region in regions:
if region == 'image':
region_feature_list[0] = group_handle[vid][sub_group][region][0].squeeze()
elif region == 'bbox' or region == 'box':
region_feature_list[1] = group_handle[vid][sub_group][region][0].squeeze()
else:
bbox_idx = int(region[4:])
region_feature_list[bbox_idx] = group_handle[vid][sub_group][region][0].squeeze()
feature_dict[vid][sub_group] = np.array(region_feature_list)
else:
feature_dict[vid][sub_group] = dict()
datas = group_handle[vid][sub_group].keys()
for data in datas:
if data == 'img_alignment':
img_alignment_rows = group_handle[vid][sub_group][data].keys()
feature_dict[vid][sub_group][data] = [[] for i in img_alignment_rows]
for img_alignment_row in img_alignment_rows:
int(img_alignment_row)
feature_dict[vid][sub_group][data][int(img_alignment_row)] = \
group_handle[vid][sub_group][data][img_alignment_row][:].tolist()
elif data == 'token':
token_list = group_handle[vid][sub_group][data][:].tolist()
feature_dict[vid][sub_group][data] = [str(token)[2:-1] for token in token_list]
else:
if len(group_handle[vid][sub_group][data][:]) == 4:
feature_dict[vid][sub_group][data] = group_handle[vid][sub_group][data][:].squeeze()
else:
feature_dict[vid][sub_group][data] = group_handle[vid][sub_group][data][:]
return feature_dict |
py | b413c5bf2b918c8e74f9a3c73cd926a2c1298c9d | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for meterstick.v2.models."""
from absl.testing import absltest
from absl.testing import parameterized
from meterstick import metrics
from meterstick import models
from meterstick import operations
from meterstick import utils
import numpy as np
import pandas as pd
from sklearn import linear_model
np.random.seed(42)
n = 40
DF = pd.DataFrame({
'X1': np.random.random(n),
'X2': np.random.random(n),
'Y': np.random.randint(0, 100, n),
'grp1': np.random.choice(['A', 'B', 'C'], n),
'grp2': np.random.choice(('foo', 'bar'), n),
})
GRPED1 = DF.groupby('grp1').sum()
GRPED2 = DF.groupby(['grp2', 'grp1']).sum()
@parameterized.named_parameters(
('linear_regression', models.LinearRegression,
linear_model.LinearRegression, 'OLS'),
('ridge', models.Ridge, linear_model.Ridge, 'Ridge'),
('lasso', models.Lasso, linear_model.Lasso, 'Lasso'),
('elastic_net', models.ElasticNet, linear_model.ElasticNet, 'ElasticNet'))
class ModelsTest(parameterized.TestCase):
def test_model(self, model, sklearn_model, name):
m = model(metrics.Sum('Y'), metrics.Sum('X1'), 'grp1')
output = m.compute_on(DF)
model = sklearn_model().fit(GRPED1[['X1']], GRPED1[['Y']])
expected = pd.DataFrame(
[[model.intercept_[0], model.coef_.flatten()[0]]],
columns=[
name + '(sum(Y) ~ sum(X1)) Coefficient: intercept',
name + '(sum(Y) ~ sum(X1)) Coefficient: sum(X1)'
])
pd.testing.assert_frame_equal(output, expected)
def test_melted(self, model, sklearn_model, name):
del sklearn_model, name # unused
m = model(metrics.Sum('Y'), metrics.Sum('X1'), 'grp1')
output = m.compute_on(DF, melted=True)
expected = utils.melt(m.compute_on(DF))
pd.testing.assert_frame_equal(output, expected)
def test_multi_var(self, model, sklearn_model, name):
m = model(metrics.Sum('Y'), [metrics.Sum('X1'), metrics.Sum('X2')], 'grp1')
output = m.compute_on(DF)
model = sklearn_model().fit(GRPED1[['X1', 'X2']], GRPED1[['Y']])
expected = pd.DataFrame(
[[
model.intercept_[0],
model.coef_.flatten()[0],
model.coef_.flatten()[1]
]],
columns=[
name + '(sum(Y) ~ sum(X1) + sum(X2)) Coefficient: intercept',
name + '(sum(Y) ~ sum(X1) + sum(X2)) Coefficient: sum(X1)',
name + '(sum(Y) ~ sum(X1) + sum(X2)) Coefficient: sum(X2)'
])
pd.testing.assert_frame_equal(output, expected)
def test_split_by(self, model, sklearn_model, name):
m = model(metrics.Sum('Y'), metrics.Sum('X1'), 'grp1')
output = m.compute_on(DF, 'grp2')
model = sklearn_model()
model.fit(GRPED2.loc['bar'][['X1']], GRPED2.loc['bar'][['Y']])
expected1 = pd.DataFrame(
[[model.intercept_[0], model.coef_.flatten()[0]]],
columns=[
name + '(sum(Y) ~ sum(X1)) Coefficient: intercept',
name + '(sum(Y) ~ sum(X1)) Coefficient: sum(X1)'
])
model.fit(GRPED2.loc['foo'][['X1']], GRPED2.loc['foo'][['Y']])
expected2 = pd.DataFrame(
[[model.intercept_[0], model.coef_.flatten()[0]]],
columns=[
name + '(sum(Y) ~ sum(X1)) Coefficient: intercept',
name + '(sum(Y) ~ sum(X1)) Coefficient: sum(X1)'
])
expected = pd.concat([expected1, expected2],
keys=['bar', 'foo'],
names=['grp2'])
expected = expected.droplevel(-1)
pd.testing.assert_frame_equal(output, expected)
def test_no_intercept(self, model, sklearn_model, name):
m = model(metrics.Sum('Y'), metrics.Sum('X1'), 'grp1', fit_intercept=False)
output = m.compute_on(DF)
model = sklearn_model(fit_intercept=False)
model.fit(GRPED1[['X1']], GRPED1[['Y']])
expected = pd.DataFrame(
[[model.coef_.flatten()[0]]],
columns=[name + '(sum(Y) ~ sum(X1)) Coefficient: sum(X1)'])
pd.testing.assert_frame_equal(output, expected)
def test_operation_on(self, model, sklearn_model, name):
del sklearn_model # unused
m = model(metrics.Sum('Y'), metrics.Sum('X1'), 'grp1')
output = operations.Distribution('grp2', m).compute_on(DF)
expected = m.compute_on(DF, 'grp2') / m.compute_on(DF, 'grp2').sum()
expected.columns = [
'Distribution of %s(sum(Y) ~ sum(X1)) Coefficient: intercept' % name,
'Distribution of %s(sum(Y) ~ sum(X1)) Coefficient: sum(X1)' % name
]
pd.testing.assert_frame_equal(output, expected)
class LogisticRegressionTest(absltest.TestCase):
def test_model(self):
m = models.LogisticRegression(metrics.Sum('grp2'), metrics.Sum('X1'), 'X1')
output = m.compute_on(DF)
model = linear_model.LogisticRegression().fit(DF[['X1']], DF[['grp2']])
expected = pd.DataFrame(
[[model.intercept_[0], model.coef_.flatten()[0]]],
columns=[
'LogisticRegression(sum(grp2) ~ sum(X1)) Coefficient: intercept',
'LogisticRegression(sum(grp2) ~ sum(X1)) Coefficient: sum(X1)'
])
pd.testing.assert_frame_equal(output, expected)
def test_melted(self):
m = models.LogisticRegression(metrics.Sum('grp2'), metrics.Sum('X1'), 'X1')
output = m.compute_on(DF, melted=True)
expected = utils.melt(m.compute_on(DF))
pd.testing.assert_frame_equal(output, expected)
def test_multi_var(self):
m = models.LogisticRegression(
metrics.Sum('grp2'),
[metrics.Sum('X1'), metrics.Sum('X2')], 'X1')
output = m.compute_on(DF)
model = linear_model.LogisticRegression().fit(DF[['X1', 'X2']],
DF[['grp2']])
expected = pd.DataFrame(
[[
model.intercept_[0],
model.coef_.flatten()[0],
model.coef_.flatten()[1]
]],
columns=[
'LogisticRegression(sum(grp2) ~ sum(X1) + sum(X2)) Coefficient: intercept',
'LogisticRegression(sum(grp2) ~ sum(X1) + sum(X2)) Coefficient: sum(X1)',
'LogisticRegression(sum(grp2) ~ sum(X1) + sum(X2)) Coefficient: sum(X2)'
])
pd.testing.assert_frame_equal(output, expected)
def test_split_by(self):
m = models.LogisticRegression(metrics.Sum('grp2'), metrics.Sum('X1'), 'X1')
output = m.compute_on(DF, 'grp1')
res = []
grps = ['A', 'B', 'C']
for g in grps:
expected = m.compute_on(DF[DF.grp1 == g])
res.append(expected)
expected = pd.concat(res, keys=grps, names=['grp1'])
expected = expected.droplevel(-1)
pd.testing.assert_frame_equal(output, expected)
def test_no_intercept(self):
m = models.LogisticRegression(
metrics.Sum('grp2'), metrics.Sum('X1'), 'X1', fit_intercept=False)
output = m.compute_on(DF)
model = linear_model.LogisticRegression(fit_intercept=False)
model.fit(DF[['X1']], DF[['grp2']])
expected = pd.DataFrame(
[[model.coef_.flatten()[0]]],
columns=[
'LogisticRegression(sum(grp2) ~ sum(X1)) Coefficient: sum(X1)'
])
pd.testing.assert_frame_equal(output, expected)
def test_operation_on(self):
m = models.LogisticRegression(
metrics.Sum('grp2'), metrics.Sum('X1'), 'X1', name='LR')
output = operations.Distribution('grp1', m).compute_on(DF)
expected = m.compute_on(DF, 'grp1') / m.compute_on(DF, 'grp1').sum()
expected.columns = [
'Distribution of LR Coefficient: intercept',
'Distribution of LR Coefficient: sum(X1)'
]
pd.testing.assert_frame_equal(output, expected)
def test_logistic_regression_multi_classes(self):
m = models.LogisticRegression(
metrics.Sum('Y'),
[metrics.Sum('X1'), metrics.Sum('X2')], 'grp1')
output = m.compute_on(DF)
res = []
model = linear_model.LogisticRegression()
model.fit(GRPED1[['X1', 'X2']], GRPED1['Y'])
prefix = 'LogisticRegression(sum(Y) ~ sum(X1) + sum(X2)) Coefficient: '
for c, i, cl in zip(model.coef_, model.intercept_, model.classes_):
expected = pd.DataFrame([[i, c[0], c[1]]],
columns=[
prefix + 'intercept for class %s' % cl,
prefix + 'sum(X1) for class %s' % cl,
prefix + 'sum(X2) for class %s' % cl
])
res.append(expected)
expected = pd.concat(res, 1)
pd.testing.assert_frame_equal(output, expected)
def test_logistic_regression_multi_classes_no_intercept(self):
m = models.LogisticRegression(
metrics.Sum('Y'), metrics.Sum('X1'), 'grp1', fit_intercept=False)
output = m.compute_on(DF)
res = []
model = linear_model.LogisticRegression(fit_intercept=False)
model.fit(GRPED1[['X1']], GRPED1['Y'])
for c, cl in zip(model.coef_, model.classes_):
expected = pd.DataFrame(
[c],
columns=[
'LogisticRegression(sum(Y) ~ sum(X1)) Coefficient: '
'sum(X1) for class %s' % cl
])
res.append(expected)
expected = pd.concat(res, 1)
pd.testing.assert_frame_equal(output, expected)
class MiscellaneousTests(absltest.TestCase):
def test_model_composition(self):
lm = models.LinearRegression(
metrics.Sum('Y'), metrics.Sum('X1'), 'grp1', name='lm')
ridge = models.Ridge(
metrics.Sum('Y'), metrics.Sum('X1'), 'grp1', name='ridge')
output = (lm - ridge).compute_on(DF)
a = lm.compute_on(DF, return_dataframe=False)
b = ridge.compute_on(DF, return_dataframe=False)
cols = [
'lm Coefficient: intercept - ridge Coefficient: intercept',
'lm Coefficient: sum(X1) - ridge Coefficient: sum(X1)'
]
a.columns = cols
b.columns = cols
expected = a - b
pd.testing.assert_frame_equal(output, expected)
def test_count_features(self):
s = metrics.Sum('x')
self.assertEqual(models.count_features(metrics.Sum('x')), 1)
self.assertEqual(models.count_features(metrics.MetricList([s, s])), 2)
self.assertEqual(
models.count_features(
metrics.MetricList([metrics.Sum('x'),
metrics.MetricList([s])])), 2)
self.assertEqual(
models.count_features(operations.AbsoluteChange('a', 'b', s)), 1)
self.assertEqual(
models.count_features(
operations.AbsoluteChange(
'a', 'b', metrics.MetricList([s, metrics.MetricList([s])]))), 2)
self.assertEqual(
models.count_features(
operations.AbsoluteChange(
'a', 'b',
metrics.MetricList([
operations.AbsoluteChange('a', 'b',
metrics.MetricList([s, s]))
]))), 2)
self.assertEqual(models.count_features(metrics.Ratio('x', 'y')), 1)
self.assertEqual(models.count_features(metrics.MetricList([s, s]) / 2), 2)
if __name__ == '__main__':
absltest.main()
|
py | b413c5d086bc21c1407ee46d8b7c71e53aaa42ce | # adapted from https://github.com/facebookresearch/fvcore/blob/master/fvcore/transforms/transform_util.py
import numpy as np
import torch
def to_float_tensor(numpy_array: np.ndarray) -> torch.Tensor:
"""
Convert the numpy array to torch float tensor with dimension of NxCxHxW.
Pytorch is not fully supporting uint8, so convert tensor to float if the
numpy_array is uint8.
Args:
numpy_array (ndarray): of shape NxHxWxC, or HxWxC or HxW to
represent an image. The array can be of type uint8 in range
[0, 255], or floating point in range [0, 1] or [0, 255].
Returns:
float_tensor (tensor): converted float tensor.
"""
assert isinstance(numpy_array, np.ndarray)
assert len(numpy_array.shape) in (2, 3, 4)
# Some of the input numpy array has negative strides. Pytorch currently
# does not support negative strides, perform ascontiguousarray to
# resolve the issue.
float_tensor = torch.from_numpy(np.ascontiguousarray(numpy_array))
if numpy_array.dtype in (np.uint8, np.int32, np.int64):
float_tensor = float_tensor.float()
if len(numpy_array.shape) == 2:
# HxW -> 1x1xHxW.
float_tensor = float_tensor[None, None, :, :]
elif len(numpy_array.shape) == 3:
# HxWxC -> 1xCxHxW.
float_tensor = float_tensor.permute(2, 0, 1)
float_tensor = float_tensor[None, :, :, :]
elif len(numpy_array.shape) == 4:
# NxHxWxC -> NxCxHxW
float_tensor = float_tensor.permute(0, 3, 1, 2)
else:
raise NotImplementedError(
"Unknow numpy_array dimension of {}".format(float_tensor.shape)
)
return float_tensor
def to_numpy(
float_tensor: torch.Tensor, target_shape: list, target_dtype: np.dtype
) -> np.ndarray:
"""
Convert float tensor with dimension of NxCxHxW back to numpy array.
Args:
float_tensor (tensor): a float pytorch tensor with shape of NxCxHxW.
target_shape (list): the target shape of the numpy array to represent
the image as output. options include NxHxWxC, or HxWxC or HxW.
target_dtype (dtype): the target dtype of the numpy array to represent
the image as output. The array can be of type uint8 in range
[0, 255], or floating point in range [0, 1] or [0, 255].
Returns:
(ndarray): converted numpy array.
"""
assert len(target_shape) in (2, 3, 4)
if len(target_shape) == 2:
# 1x1xHxW -> HxW.
assert float_tensor.shape[0] == 1
assert float_tensor.shape[1] == 1
float_tensor = float_tensor[0, 0, :, :]
elif len(target_shape) == 3:
assert float_tensor.shape[0] == 1
# 1xCxHxW -> HxWxC.
float_tensor = float_tensor[0].permute(1, 2, 0)
elif len(target_shape) == 4:
# NxCxHxW -> NxHxWxC
float_tensor = float_tensor.permute(0, 2, 3, 1)
else:
raise NotImplementedError(
"Unknow target shape dimension of {}".format(target_shape)
)
if target_dtype == np.uint8:
        # Need to specifically call round here; note that in pytorch the round
        # is half-to-even.
# https://github.com/pytorch/pytorch/issues/16498
float_tensor = float_tensor.round().byte()
return float_tensor.numpy()
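# Illustrative round trip (not part of the original module), assuming a uint8 HxWxC image:
#   img = np.zeros((4, 4, 3), dtype=np.uint8)
#   t = to_float_tensor(img)                      # tensor of shape (1, 3, 4, 4), dtype float32
#   restored = to_numpy(t, img.shape, img.dtype)  # array of shape (4, 4, 3), dtype uint8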
|
py | b413c66119ecb4f2e5cd837375f7dc57de79c8da | import json
import os
PROJECT_ID = 'project_id'
APPLICATION_ID = 'APPLICATION_ID'
GOOGLE_APPLICATION_CREDENTIALS = 'GOOGLE_APPLICATION_CREDENTIALS'
BIGQUERY_DATASET_ID = 'BIGQUERY_DATASET_ID'
def get_creds(creds_path):
"""
    loads the credentials file as a json object
:param creds_path: path to GCP credentials file
:return:
"""
with open(creds_path, 'rb') as creds_fp:
return json.load(creds_fp)
def activate_creds(creds_path):
"""
activates google cloud service account credentials
:param creds_path: path to the service account key file
:return:
"""
creds = get_creds(creds_path)
project_id = creds.get(PROJECT_ID)
if not project_id:
raise OSError('%s does not refer to a valid GCP key file' % creds_path)
os.environ[APPLICATION_ID] = project_id
os.environ[GOOGLE_APPLICATION_CREDENTIALS] = creds_path
return creds
def set_default_dataset_id(dataset_id):
"""
sets BIGQUERY_DATASET_ID environment variable to a name of dataset
:param dataset_id: name of the dataset_id
:return:
"""
os.environ[BIGQUERY_DATASET_ID] = dataset_id
|
py | b413c7165202927f43689a0569e61261e013d0ee | # Icon made by surang from www.flaticon.com
# Icon made by Freepik from www.flaticon.com
import pygame
import random
import math
import time
from pygame import mixer
pygame.init()
clock = pygame.time.Clock()
# Game variables
SCREENWIDTH = 800
SCREENHEIGHT = 600
black = (0, 0, 0)
player_vel = 5
target_vel = 2
num_targets = 5
enemy_vel = 2
num_enemies = 5
fps = 60
target_fps = 60
# Preparing the screen and the background
screen = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
background = pygame.image.load("Images/background.png")
try:
mixer.music.load('Sounds/bgmusic.mp3')
except pygame.error:
mixer.music.load('Sounds/bgmusic.wav')
mixer.music.play(-1)
pygame.display.set_caption("Catch the Starfish")
icon = pygame.image.load('Images/starfish.png')
pygame.display.set_icon(icon)
# Player (Octopus)
playerImg = pygame.image.load("Images/octopus.png")
playerX = SCREENWIDTH//2 - 32
playerY = SCREENHEIGHT - 74
playerX_change = 0
playerY_change = 0
# Target (Starfish)
targetImgs = []
targetX = []
targetY = []
targetY_change = []
for _ in range(num_targets):
targetImgs.append(pygame.image.load("Images/starfish.png"))
targetX.append(random.randint(0, SCREENWIDTH - 32))
targetY.append(random.randint(0, 20))
targetY_change.append(target_vel)
# Enemy (Bomb)
enemyImgs = []
enemyX = []
enemyY = []
enemyY_change = []
for _ in range(num_enemies):
enemyImgs.append(pygame.image.load("Images/bomb.png"))
enemyX.append(random.randint(0, SCREENWIDTH - 32))
enemyY.append(random.randint(0, 40))
enemyY_change.append(enemy_vel)
# Variables to display score and game over text
score_value = 0
font = pygame.font.Font('ARIAL.TTF', 32)
over_font = pygame.font.Font('ARIAL.TTF', 70)
# Functions
def show_score(x, y):
score = font.render("Score : " + str(score_value), True, black)
screen.blit(score, (x, y))
def player(x, y):
screen.blit(playerImg, (x, y))
def target(x, y, k):
screen.blit(targetImgs[k], (x, y))
def enemy(x, y, k):
screen.blit(enemyImgs[k], (x, y))
def is_collision(x1, y1, x2, y2):
d = math.sqrt(math.pow((x1 - x2), 2) + math.pow((y1 - y2), 2))
if d < 40:
return True
else:
return False
def game_over_text():
over_text = over_font.render("GAME OVER", True, black)
screen.blit(over_text, (200, 250))
restart_text = font.render("Press SPACE to start a new game", True, black)
screen.blit(restart_text, (190, 330))
def reset_game():
for i in range(num_targets):
targetX[i] = random.randint(0, SCREENWIDTH - 32)
targetY[i] = random.randint(0, 20)
for i in range(num_enemies):
enemyX[i] = random.randint(0, SCREENWIDTH - 32)
enemyY[i] = random.randint(0, 40)
prev_time = time.time()
running = True
game_over = False
# Game Loop
while running:
screen.fill(black)
screen.blit(background, (0, 0))
now = time.time()
dt = now - prev_time
prev_time = now
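    # scale per-frame movement by dt * target_fps so speeds stay constant regardless of the actual frame rate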
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
playerX_change = -player_vel
if event.key == pygame.K_RIGHT:
playerX_change = player_vel
if event.key == pygame.K_UP:
playerY_change = -player_vel
if event.key == pygame.K_DOWN:
playerY_change = player_vel
if event.key == pygame.K_SPACE and game_over:
score_value = 0
game_over = False
reset_game()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_UP or event.key == pygame.K_DOWN:
playerX_change = 0
playerY_change = 0
if event.type == pygame.QUIT:
running = False
# Player movement
playerX += playerX_change * dt * target_fps
playerY += playerY_change * dt * target_fps
if playerX <= 0:
playerX = 0
elif playerX >= SCREENWIDTH - 64:
playerX = SCREENWIDTH - 64
elif playerY <= 400:
playerY = 400
elif playerY >= SCREENHEIGHT - 64:
playerY = SCREENHEIGHT - 64
if game_over:
game_over_text()
else:
# Target movement
for i in range(num_targets):
targetY[i] += targetY_change[i] * dt * target_fps
if targetY[i] >= SCREENHEIGHT - 32:
targetX[i] = random.randint(0, SCREENWIDTH - 32)
targetY[i] = random.randint(0, 20)
if is_collision(targetX[i], targetY[i], playerX, playerY):
hit = mixer.Sound('Sounds/bubble.wav')
hit.play()
score_value += 1
targetX[i] = random.randint(0, SCREENWIDTH - 32)
targetY[i] = random.randint(0, 20)
target(targetX[i], targetY[i], i)
# Enemy movement
for i in range(num_enemies):
enemyY[i] += enemyY_change[i] * dt * target_fps
if enemyY[i] >= SCREENHEIGHT - 32:
enemyX[i] = random.randint(0, SCREENWIDTH - 32)
enemyY[i] = random.randint(0, 40)
if is_collision(enemyX[i], enemyY[i], playerX, playerY):
bomb_hit = mixer.Sound('Sounds/explosion.wav')
bomb_hit.play()
game_over = True
enemy(enemyX[i], enemyY[i], i)
player(playerX, playerY)
show_score(10, 10)
clock.tick(fps)
pygame.display.update()
|
py | b413c7342dfee87a46dce4f194f4b3478135f091 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import scrape_common as sc
xls_url = 'https://www.ne.ch/autorites/DFS/SCSP/medecin-cantonal/maladies-vaccinations/Documents/Covid-19-Statistiques/COVID19_PublicationInternet.xlsx'
xls = sc.xlsdownload(xls_url, silent=True)
rows = sc.parse_xls(xls)
for i, row in enumerate(rows):
print('NE')
sc.timestamp()
print('Downloading:', xls_url)
print('Date and time:', row['A'].date().isoformat())
print('Confirmed cases:', row['Cumul'])
print('Hospitalized:', row['Total des cas hospitalisés'])
if row['Soins intensifs (intubés)'] is not None and row['Soins intensifs (non intubés)'] is not None:
        ICU = row['Soins intensifs (intubés)']
        ICU2 = row['Soins intensifs (non intubés)']
print('ICU:', int(ICU)+int(ICU2))
print('Vent:', row['Soins intensifs (intubés)'])
print('Deaths:', row['Cumul des décès'])
# do not print record delimiter for last record
# this is an indicator for the next script to check
# for expected values.
if len(rows) - 1 > i:
print('-' * 10)
|
py | b413c86041a0b2a5cd3c8b16af72285a6a7c71f9 | from django.apps import AppConfig
from templatestore import app_settings as ts_settings
class TemplateStoreAppConfig(AppConfig):
name = "templatestore"
|
py | b413c8f57fffec09fcecc85728e290124f5f5250 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-23 12:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0014_auto_20180223_1254'),
]
operations = [
migrations.AddField(
model_name='contactpage',
name='map_address',
field=models.CharField(default='technic-alu', max_length=250),
preserve_default=False,
),
]
|
py | b413c94eb0efef6e37950da85de88b514fca99b4 | from .params_init import dict_portfolio, time_params, Mu, Rfree, Std, det_income, norm_factor, age_plot_params
import numpy as np
from HARK.utilities import approxNormal
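# override the calibration ages below; the horizon-dependent entries of dict_portfolio are recomputed after each change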
time_params['Age_born'] = 27
dict_portfolio['T_age'] = time_params['Age_death'] - time_params['Age_born'] + 1
dict_portfolio['T_cycle'] = time_params['Age_death'] - time_params['Age_born']
dict_portfolio['T_retire'] = time_params['Age_retire'] - time_params['Age_born']
dict_portfolio['T_sim'] = (time_params['Age_death'] - time_params['Age_born'] + 1)*50
time_params['Age_retire'] = 70
dict_portfolio['T_age'] = time_params['Age_death'] - time_params['Age_born'] + 1
dict_portfolio['T_cycle'] = time_params['Age_death'] - time_params['Age_born']
dict_portfolio['T_retire'] = time_params['Age_retire'] - time_params['Age_born']
dict_portfolio['T_sim'] = (time_params['Age_death'] - time_params['Age_born'] + 1)*50
time_params['Age_death'] = 100
dict_portfolio['T_age'] = time_params['Age_death'] - time_params['Age_born'] + 1
dict_portfolio['T_cycle'] = time_params['Age_death'] - time_params['Age_born']
dict_portfolio['T_retire'] = time_params['Age_retire'] - time_params['Age_born']
dict_portfolio['T_sim'] = (time_params['Age_death'] - time_params['Age_born'] + 1)*50
dict_portfolio['TranShkStd'] = np.array([0.063 , 0.066 , 0.0675, 0.0685, 0.069 , 0.069 , 0.067 , 0.0665, 0.066 , 0.064 , 0.063 , 0.062 , 0.061 , 0.06 , 0.0585, 0.057 , 0.055 , 0.053 , 0.0515, 0.05 , 0.047 , 0.045 , 0.043 , 0.04 ,0.038 , 0.036 , 0.033 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 ,0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 ,0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 , 0.03 ])
dict_portfolio['PermShkStd'] = np.array([0.15 , 0.122, 0.115, 0.105, 0.093, 0.09 , 0.087, 0.08 , 0.075, 0.075, 0.067, 0.068, 0.061, 0.062, 0.058, 0.06 , 0.058, 0.058, 0.057, 0.056, 0.054, 0.057, 0.059, 0.059, 0.063, 0.066, 0.07 , 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073,0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073,0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073])
age_plot_params = [30, 40, 50, 60, 70, 80]
dict_portfolio['LivPrb'] = dict_portfolio['LivPrb'][(time_params['Age_born'] - 20):(time_params['Age_death'] - 20)]
|
py | b413ca920ff3c9727ebe0c5c0d81489db1e8c458 | import logging
from pajbot import utils
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.utils import time_since
log = logging.getLogger("pajbot")
class FollowAgeModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Follow age"
DESCRIPTION = "Enables the usage of the !followage and !followsince commands"
CATEGORY = "Feature"
SETTINGS = [
ModuleSetting(
key="action_followage",
label="MessageAction for !followage",
type="options",
required=True,
default="say",
options=["say", "whisper", "reply"],
),
ModuleSetting(
key="action_followsince",
label="MessageAction for !followsince",
type="options",
required=True,
default="say",
options=["say", "whisper", "reply"],
),
ModuleSetting(
key="global_cd",
label="Global cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=4,
constraints={"min_value": 0, "max_value": 120},
),
ModuleSetting(
key="user_cd",
label="Per-user cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=8,
constraints={"min_value": 0, "max_value": 240},
),
]
def load_commands(self, **options):
# TODO: Have delay modifiable in settings
self.commands["followage"] = Command.raw_command(
self.follow_age,
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
description="Check your or someone elses follow age for a channel",
can_execute_with_whisper=True,
examples=[
CommandExample(
None,
"Check your own follow age",
chat="user:!followage\n" "bot:troydota, you have been following Karl_Kons for 4 months and 24 days",
description="Check how long you have been following the current streamer (Karl_Kons in this case)",
).parse(),
CommandExample(
None,
"Check someone elses follow age",
chat="user:!followage NightNacht\n"
"bot:troydota, NightNacht has been following Karl_Kons for 5 months and 4 days",
description="Check how long any user has been following the current streamer (Karl_Kons in this case)",
).parse(),
CommandExample(
None,
"Check someones follow age for a certain streamer",
chat="user:!followage NightNacht forsenlol\n"
"bot:troydota, NightNacht has been following forsenlol for 1 year and 4 months",
description="Check how long NightNacht has been following forsenlol",
).parse(),
CommandExample(
None,
"Check your own follow age for a certain streamer",
chat="user:!followage troydota forsenlol\n"
"bot:troydota, you have been following forsenlol for 1 year and 3 months",
description="Check how long you have been following forsenlol",
).parse(),
],
)
self.commands["followsince"] = Command.raw_command(
self.follow_since,
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
description="Check from when you or someone else first followed a channel",
can_execute_with_whisper=True,
examples=[
CommandExample(
None,
"Check your own follow since",
chat="user:!followsince\n"
"bot:troydota, you have been following Karl_Kons since 04 March 2015, 07:02:01 UTC",
description="Check when you first followed the current streamer (Karl_Kons in this case)",
).parse(),
CommandExample(
None,
"Check someone elses follow since",
chat="user:!followsince NightNacht\n"
"bot:troydota, NightNacht has been following Karl_Kons since 03 July 2014, 04:12:42 UTC",
description="Check when NightNacht first followed the current streamer (Karl_Kons in this case)",
).parse(),
CommandExample(
None,
"Check someone elses follow since for another streamer",
chat="user:!followsince NightNacht forsenlol\n"
"bot:troydota, NightNacht has been following forsenlol since 13 June 2013, 13:10:51 UTC",
description="Check when NightNacht first followed the given streamer (forsenlol)",
).parse(),
CommandExample(
None,
"Check your follow since for another streamer",
chat="user:!followsince troydota forsenlol\n"
"bot:troydota, you have been following forsenlol since 16 December 1990, 03:06:51 UTC",
description="Check when you first followed the given streamer (forsenlol)",
).parse(),
],
)
@staticmethod
def _format_for_follow_age(follow_since):
human_age = time_since(utils.now().timestamp() - follow_since.timestamp(), 0)
return f"for {human_age}"
@staticmethod
def _format_for_follow_since(follow_since):
human_age = follow_since.strftime("%d %B %Y, %X %Z")
return f"since {human_age}"
@staticmethod
def _parse_message(message):
from_input = None
to_input = None
if message is not None and len(message) > 0:
message_split = message.split(" ")
if len(message_split) >= 1:
from_input = message_split[0]
if len(message_split) >= 2:
to_input = message_split[1]
return from_input, to_input
def _handle_command(self, bot, source, message, event, format_cb, message_method):
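        # resolve the "from" user (defaults to the caller) and the "to" channel (defaults to the streamer),
        # then look up the follow relationship via the Twitch Helix API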
from_input, to_input = self._parse_message(message)
with DBManager.create_session_scope(expire_on_commit=False) as db_session:
if from_input is not None:
from_user = User.find_or_create_from_user_input(db_session, bot.twitch_helix_api, from_input)
if from_user is None:
bot.execute_now(
bot.send_message_to_user,
source,
f'User "{from_input}" could not be found',
event,
method=self.settings["action_followage"],
)
return
else:
from_user = source
if to_input is None:
to_input = bot.streamer # TODO make bot.streamer a User() instance?
to_user = User.find_or_create_from_user_input(db_session, bot.twitch_helix_api, to_input)
if to_user is None:
bot.execute_now(
bot.send_message_to_user,
source,
f'User "{to_input}" could not be found',
event,
method=message_method,
)
return
follow_since = bot.twitch_helix_api.get_follow_since(from_user.id, to_user.id)
is_self = source == from_user
if follow_since is not None:
# Following
suffix = f"been following {to_user} {format_cb(follow_since)}"
if is_self:
message = "You have " + suffix
else:
message = from_user.name + " has " + suffix
else:
# Not following
suffix = f"not following {to_user}"
if is_self:
message = "You are " + suffix
else:
message = from_user.name + " is " + suffix
bot.execute_now(bot.send_message_to_user, source, message, event, method=message_method)
def follow_age(self, bot, source, message, event, **rest):
self.bot.action_queue.submit(
self._handle_command,
bot,
source,
message,
event,
self._format_for_follow_age,
self.settings["action_followage"],
)
def follow_since(self, bot, source, message, event, **rest):
self.bot.action_queue.submit(
self._handle_command,
bot,
source,
message,
event,
self._format_for_follow_since,
self.settings["action_followsince"],
)
|
py | b413cc5f71bf6322f736b99bbe01a67a72ebb015 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import exc as heat_exc
import mock
from sahara.service import engine
from sahara.service.heat import heat_engine
from sahara.tests.unit import base
from sahara.utils import general as g
class EngineTest(engine.Engine):
def __init__(self):
super(EngineTest, self).__init__()
self.order = []
def create_cluster(self, cluster):
pass
def get_type_and_version(self):
pass
def rollback_cluster(self, cluster, reason):
pass
def scale_cluster(self, cluster, node_group_id_map):
pass
def shutdown_cluster(self, cluster):
pass
class TestEngine(base.SaharaWithDbTestCase):
def setUp(self):
super(TestEngine, self).setUp()
self.eng = EngineTest()
@mock.patch('sahara.utils.openstack.images.SaharaImageManager')
def test_get_node_group_image_username(self, mock_manager):
ng = mock.Mock()
manager = mock.Mock()
manager.get.return_value = mock.Mock(username='username')
mock_manager.return_value = manager
self.assertEqual(
'username', self.eng.get_node_group_image_username(ng))
@mock.patch('sahara.utils.cluster_progress_ops.add_successful_event')
@mock.patch('sahara.service.networks.init_instances_ips',
return_value=True)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.cluster.check_cluster_exists', return_value=True)
def test_ips_assign(self, g, ctx, init, ops):
cluster = mock.Mock()
instances = [mock.Mock(id='1'), mock.Mock(id='2')]
instances_with_ip = set()
self.eng._ips_assign(instances_with_ip, cluster, instances)
self.assertEqual({'1', '2'}, instances_with_ip)
@mock.patch('datetime.datetime')
@mock.patch('sahara.context.ctx')
@mock.patch('sahara.service.engine.conductor')
def test_clean_job_executions(self, conductor, ctx, date):
cluster = mock.Mock()
je = mock.Mock(info=None, end_time=None)
conductor.job_execution_get_all.return_value = [je]
date.now.return_value = '28.04.2015'
self.eng._clean_job_executions(cluster)
args, kwargs = conductor.job_execution_update.call_args
update = {
'info': {'status': 'KILLED'},
'cluster_id': None,
'end_time': '28.04.2015'}
self.assertEqual(update, args[2])
class TestDeletion(base.SaharaTestCase):
def setUp(self):
super(TestDeletion, self).setUp()
self.engine = EngineTest()
@mock.patch('sahara.utils.openstack.nova.client')
def test_delete_auto_security_group(self, nova_client):
ng = mock.Mock(id="16fd2706-8baf-433b-82eb-8c7fada847da",
auto_security_group=True)
ng.name = "ngname"
ng.cluster.name = "cluster"
auto_name = g.generate_auto_security_group_name(ng)
ng.security_groups = [auto_name]
client = mock.Mock()
nova_client.return_value = client
client.security_groups.get.side_effect = lambda x: SecurityGroup(x)
self.engine._delete_auto_security_group(ng)
client.security_groups.delete.assert_called_once_with(auto_name)
@mock.patch('sahara.utils.openstack.nova.client')
def test_delete_auto_security_group_other_groups(self, nova_client):
ng = mock.Mock(id="16fd2706-8baf-433b-82eb-8c7fada847da",
auto_security_group=True)
ng.name = "ngname"
ng.cluster.name = "cluster"
auto_name = g.generate_auto_security_group_name(ng)
ng.security_groups = ['1', '2', auto_name]
client = mock.Mock()
nova_client.return_value = client
client.security_groups.get.side_effect = lambda x: SecurityGroup(x)
self.engine._delete_auto_security_group(ng)
client.security_groups.delete.assert_called_once_with(auto_name)
@mock.patch('sahara.utils.openstack.nova.client')
def test_delete_auto_security_group_no_groups(self, nova_client):
ng = mock.Mock(id="16fd2706-8baf-433b-82eb-8c7fada847da",
auto_security_group=True)
ng.name = "ngname"
ng.cluster.name = "cluster"
ng.security_groups = []
client = mock.Mock()
nova_client.return_value = client
client.security_groups.get.side_effect = lambda x: SecurityGroup(x)
self.engine._delete_auto_security_group(ng)
self.assertEqual(0, client.security_groups.delete.call_count)
@mock.patch('sahara.utils.openstack.nova.client')
def test_delete_auto_security_group_wrong_group(self, nova_client):
ng = mock.Mock(id="16fd2706-8baf-433b-82eb-8c7fada847da",
auto_security_group=True)
ng.name = "ngname"
ng.cluster.name = "cluster"
ng.security_groups = ['1', '2']
client = mock.Mock()
nova_client.return_value = client
client.security_groups.get.side_effect = lambda x: SecurityGroup(x)
self.engine._delete_auto_security_group(ng)
self.assertEqual(0, client.security_groups.delete.call_count)
@mock.patch('sahara.service.engine.Engine._delete_aa_server_groups')
@mock.patch('sahara.service.engine.Engine._shutdown_instances')
@mock.patch('sahara.service.engine.Engine._remove_db_objects')
@mock.patch('sahara.service.engine.Engine._clean_job_executions')
@mock.patch('sahara.utils.openstack.heat.client')
@mock.patch('sahara.service.heat.heat_engine.LOG.warning')
def test_calls_order(self, logger, heat_client, _job_ex, _db_ob,
_shutdown, _delete_aa):
class FakeHeatEngine(heat_engine.HeatEngine):
def __init__(self):
super(FakeHeatEngine, self).__init__()
self.order = []
def _clean_job_executions(self, cluster):
self.order.append('clean_job_executions')
super(FakeHeatEngine, self)._clean_job_executions(cluster)
def _remove_db_objects(self, cluster):
self.order.append('remove_db_objects')
super(FakeHeatEngine, self)._remove_db_objects(cluster)
def _shutdown_instances(self, cluster):
self.order.append('shutdown_instances')
super(FakeHeatEngine, self)._shutdown_instances(cluster)
def _delete_aa_server_groups(self, cluster):
self.order.append('delete_aa_server_groups')
super(FakeHeatEngine, self)._delete_aa_server_groups(cluster)
fake_cluster = mock.Mock()
heat_client.side_effect = heat_exc.HTTPNotFound()
engine = FakeHeatEngine()
engine.shutdown_cluster(fake_cluster)
self.assertEqual(['shutdown_instances', 'delete_aa_server_groups',
'clean_job_executions', 'remove_db_objects'],
engine.order)
self.assertEqual(
[mock.call('Did not find stack for cluster. Trying to '
'delete cluster manually.')], logger.call_args_list)
class SecurityGroup(object):
def __init__(self, name):
self.name = name
|
py | b413ccf2525496fede2c6e034ab13279342af9a7 | #!/usr/bin/env python
"""
Learns context2vec's parametric model
"""
import argparse
import time
import sys
import os
import numpy as np
from chainer import cuda
import chainer.links as L
import chainer.optimizers as O
import chainer.serializers as S
import chainer.computational_graph as C
from chainer.optimizer_hooks import GradientClipping
from sentence_reader import SentenceReaderDir
from context2vec.common.context_models import BiLstmContext
from context2vec.common.defs import IN_TO_OUT_UNITS_RATIO, NEGATIVE_SAMPLING_NUM
#TODO: LOWER AS ARG
def dump_embeddings(filename, w, units, index2word):
with open(filename, 'w') as f:
f.write('%d %d\n' % (len(index2word), units))
for i in range(w.shape[0]):
v = ' '.join(['%f' % v for v in w[i]])
f.write('%s %s\n' % (index2word[i], v))
def dump_comp_graph(filename, vs):
g = C.build_computational_graph(vs)
with open(filename, 'w') as o:
o.write(g.dump())
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--indir', '-i',
default=None,
help='input corpus directory')
parser.add_argument('--trimfreq', '-t', default=0, type=int,
help='minimum frequency for word in training')
parser.add_argument('--ns_power', '-p', default=0.75, type=float,
help='negative sampling power')
parser.add_argument('--dropout', '-o', default=0.0, type=float,
help='NN dropout')
parser.add_argument('--wordsfile', '-w',
default=None,
help='word embeddings output filename')
parser.add_argument('--modelfile', '-m',
default=None,
help='model output filename')
parser.add_argument('--cgfile', '-cg',
default=None,
help='computational graph output filename (for debug)')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--unit', '-u', default=300, type=int,
help='number of units (dimensions) of one context word')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='learning minibatch size')
parser.add_argument('--epoch', '-e', default=10, type=int,
help='number of epochs to learn')
parser.add_argument('--context', '-c', choices=['lstm'],
default='lstm',
help='context type ("lstm")')
parser.add_argument('--deep', '-d', choices=['yes', 'no'],
default=None,
help='use deep NN architecture')
parser.add_argument('--alpha', '-a', default=0.001, type=float,
help='alpha param for Adam, controls the learning rate')
parser.add_argument('--grad-clip', '-gc', default=None, type=float,
help='if specified, clip l2 of the gradient to this value')
args = parser.parse_args()
if args.deep == 'yes':
args.deep = True
elif args.deep == 'no':
args.deep = False
else:
raise Exception("Invalid deep choice: " + args.deep)
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('Context type: {}'.format(args.context))
print('Deep: {}'.format(args.deep))
print('Dropout: {}'.format(args.dropout))
print('Trimfreq: {}'.format(args.trimfreq))
print('NS Power: {}'.format(args.ns_power))
print('Alpha: {}'.format(args.alpha))
print('Grad clip: {}'.format(args.grad_clip))
print('')
return args
def savePartialModel(epoch):
print("saving partial epoch :- ", epoch)
if not os.path.exists(epoch):
os.makedirs(epoch)
    if args.wordsfile is not None:
        dump_embeddings(os.path.join(str(epoch), args.wordsfile + '.targets'), model.loss_func.W.data, target_word_units, reader.index2word)
    if args.modelfile is not None:
        S.save_npz(os.path.join(str(epoch), args.modelfile), model)
        with open(os.path.join(str(epoch), args.modelfile + '.params'), 'w', encoding='utf-8') as f:
f.write('model_file\t' + args.modelfile[args.modelfile.rfind('/') + 1:] + '\n')
f.write('words_file\t' + args.wordsfile[args.wordsfile.rfind('/') + 1:] + '.targets\n')
f.write('unit\t' + str(args.unit) + '\n')
if args.deep:
f.write('deep\tyes\n')
else:
f.write('deep\tno\n')
f.write('drop_ratio\t' + str(args.dropout) + '\n')
f.write('#\t{}\n'.format(' '.join(sys.argv)))
args = parse_arguments()
context_word_units = args.unit
lstm_hidden_units = IN_TO_OUT_UNITS_RATIO*args.unit
target_word_units = IN_TO_OUT_UNITS_RATIO*args.unit
if args.gpu >= 0:
print("GPU")
print(cuda.available)
print(cuda.get_device_from_id(args.gpu))
xp = cuda.cupy if args.gpu >= 0 else np
reader = SentenceReaderDir(args.indir, args.trimfreq, args.batchsize)
print('n_vocab: %d' % (len(reader.word2index)-3)) # excluding the three special tokens
print('corpus size: %d' % (reader.total_words))
cs = [reader.trimmed_word2count[w] for w in range(len(reader.trimmed_word2count))]
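# negative-sampling loss over target-word embeddings; the unigram counts are raised to ns_power for the noise distribution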
loss_func = L.NegativeSampling(target_word_units, cs, NEGATIVE_SAMPLING_NUM, args.ns_power)
if args.context == 'lstm':
model = BiLstmContext(args.deep, args.gpu, reader.word2index, context_word_units, lstm_hidden_units, target_word_units, loss_func, True, args.dropout)
else:
raise Exception('Unknown context type: {}'.format(args.context))
optimizer = O.Adam(alpha=args.alpha)
optimizer.setup(model)
if args.grad_clip:
optimizer.add_hook(GradientClipping(args.grad_clip))
STATUS_INTERVAL = 1000000
for epoch in range(args.epoch):
begin_time = time.time()
cur_at = begin_time
word_count = 0
next_count = STATUS_INTERVAL
accum_loss = 0.0
last_accum_loss = 0.0
last_word_count = 0
print('epoch: {0}'.format(epoch))
reader.open()
for sent in reader.next_batch():
model.zerograds()
loss = model(sent)
accum_loss += loss.data
loss.backward()
del loss
optimizer.update()
word_count += len(sent)*len(sent[0]) # all sents in a batch are the same length
accum_mean_loss = float(accum_loss)/word_count if accum_loss > 0.0 else 0.0
if word_count >= next_count:
now = time.time()
duration = now - cur_at
            throughput = float(word_count - last_word_count) / duration
            cur_mean_loss = (float(accum_loss) - last_accum_loss) / (word_count - last_word_count)
            print('{} words, {:.2f} sec, {:.2f} words/sec, {:.4f} accum_loss/word, {:.4f} cur_loss/word'.format(
                word_count, duration, throughput, accum_mean_loss, cur_mean_loss))
next_count += STATUS_INTERVAL
cur_at = now
last_accum_loss = float(accum_loss)
last_word_count = word_count
print('accum words per epoch', word_count, 'accum_loss', accum_loss, 'accum_loss/word', accum_mean_loss)
reader.close()
savePartialModel(str(epoch))
"""
if args.wordsfile != None:
dump_embeddings(args.wordsfile+'.targets', model.loss_func.W.data, target_word_units, reader.index2word)
if args.modelfile != None:
S.save_npz(args.modelfile, model)
with open(args.modelfile + '.params', 'w') as f:
f.write('model_file\t' + args.modelfile[args.modelfile.rfind('/')+1:]+'\n')
f.write('words_file\t' + args.wordsfile[args.wordsfile.rfind('/')+1:]+'.targets\n')
f.write('unit\t' + str(args.unit)+'\n')
if args.deep:
f.write('deep\tyes\n')
else:
f.write('deep\tno\n')
f.write('drop_ratio\t' + str(args.dropout)+'\n')
f.write('#\t{}\n'.format(' '.join(sys.argv)))
"""
|
py | b413ced60341a3d49beb683d7168cb977340bed1 | from datetime import datetime
from typing import Any, Dict, Iterator
from urllib.parse import urlparse
import pycountry # type: ignore
import structlog # type: ignore
import xml.etree.ElementTree as ET
from hoard.client import OAIClient
from hoard.models import (
Author,
Contact,
Dataset,
Description,
Distributor,
GrantNumber,
Keyword,
OtherId,
Publication,
Series,
TimePeriodCovered,
)
logger = structlog.get_logger()
namespace = {
"oai": "http://www.openarchives.org/OAI/2.0/",
"dim": "http://www.dspace.org/xmlns/dspace/dim",
}
class WHOAS:
def __init__(self, client: OAIClient) -> None:
self.client = client
def __iter__(self) -> Iterator[Dataset]:
return self
def __next__(self) -> Dataset:
while True:
record = next(self.client)
parsed_record = ET.fromstring(record)
if parsed_record.find(".//oai:error", namespace) is not None:
continue
else:
try:
dataset = create_from_whoas_dim_xml(record, self.client)
return dataset
except TypeError as ex:
id_elem = parsed_record.find(".//oai:identifier", namespace)
if id_elem is not None:
rec_id = id_elem.text
logger.info(f"Error with {rec_id}: {str(ex)}")
def create_from_whoas_dim_xml(data: str, client: OAIClient) -> Dataset:
kwargs: Dict[str, Any] = {}
record = ET.fromstring(data)
fields = record.findall(".//dim:field", namespace)
kwargs["contacts"] = [
Contact(
datasetContactName="Woods Hole Open Access Server",
datasetContactEmail="[email protected]",
)
]
notesText = ""
for field in fields:
if field.attrib["element"] == "title" and "qualifier" not in field.attrib:
kwargs["title"] = field.text
if (
field.attrib["element"] == "contributor"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "author"
):
if field.text is not None:
kwargs.setdefault("authors", []).append(
Author(authorName=field.text, authorAffiliation="Woods Hole")
)
if (
field.attrib["element"] == "description"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "abstract"
):
if field.text is not None:
kwargs.setdefault("description", []).append(
Description(dsDescriptionValue=field.text)
)
if field.attrib["element"] == "subject":
kwargs.setdefault("keywords", []).append(Keyword(keywordValue=field.text))
if (
field.attrib["element"] == "identifier"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "uri"
):
kwargs["alternativeURL"] = field.text
if (
field.attrib["element"] == "date"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "issued"
and field.text is not None
):
try:
datetime.strptime(field.text, "%Y-%m-%d")
kwargs["distributionDate"] = field.text
except ValueError:
pass
if field.attrib["element"] == "publisher":
kwargs.setdefault("distributors", []).append(
Distributor(distributorName=field.text)
)
if (
field.attrib["element"] == "description"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "sponsorship"
):
kwargs.setdefault("grantNumbers", []).append(
GrantNumber(grantNumberInformation=field.text)
)
if field.attrib["element"] == "description" and "qualifier" not in field.attrib:
if field.text is not None and notesText == "":
notesText = field.text
else:
notesText += f" {field.text}"
if (
field.attrib["element"] == "language"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "iso"
):
if field.text is not None:
lang_value = pycountry.languages.get(alpha_2=field.text[:2])
if lang_value != "":
kwargs.setdefault("language", []).append(lang_value.name)
if field.attrib["element"] == "identifier":
kwargs.setdefault("otherIds", []).append(OtherId(otherIdValue=field.text))
if (
field.attrib["element"] == "coverage"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "spatial"
):
kwargs["productionPlace"] = field.text
if field.attrib["element"] == "relation" and "qualifier" not in field.attrib:
kwargs.setdefault("publications", []).append(
Publication(publicationCitation=field.text)
)
if (
field.attrib["element"] == "relation"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "ispartof"
):
if field.text is not None and field.text.startswith(
"https://hdl.handle.net/"
):
series_args = {"seriesInformation": field.text}
parsed_url = urlparse(field.text)
id = f"oai:darchive.mblwhoilibrary.org:{parsed_url.path[1:]}"
series_name = client.get_record_title(id)
series_args["seriesName"] = series_name
kwargs["series"] = Series(**series_args)
if (
field.attrib["element"] == "coverage"
and "qualifier" in field.attrib
and field.attrib["qualifier"] == "temporal"
):
if field.text is not None and " - " in field.text:
dates = field.text.split(" - ")
start = dates[0]
                end = dates[1].replace(" (UTC)", "")
time_kwargs = {}
try:
datetime.strptime(start, "%Y-%m-%d")
time_kwargs["timePeriodCoveredStart"] = start
except ValueError:
pass
try:
datetime.strptime(end, "%Y-%m-%d")
time_kwargs["timePeriodCoveredEnd"] = end
except ValueError:
pass
kwargs.setdefault("timePeriodsCovered", []).append(
TimePeriodCovered(**time_kwargs)
)
if field.attrib["element"] == "rights" and "qualifier" not in field.attrib:
kwargs["license"] = field.text
kwargs["termsOfUse"] = field.text
kwargs["subjects"] = ["Earth and Environmental Sciences"]
if "description" not in kwargs:
kwargs["description"] = [Description(dsDescriptionValue=kwargs["title"])]
if notesText != "":
kwargs["notesText"] = notesText
return Dataset(**kwargs)
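# Usage sketch (illustrative only; how OAIClient is configured is an assumption
# here, not the actual hoard.client constructor signature):
#
#     client = OAIClient(...)        # client pointed at the WHOAS OAI-PMH endpoint
#     for dataset in WHOAS(client):  # records that fail to parse are skipped and logged
#         ...                        # e.g. push each Dataset into a downstream repository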
|
py | b413d05a50a145942a1cf0a1ca8c72dbae7e27e8 | # python script for 0-core testcases
import os
from argparse import ArgumentParser
from subprocess import Popen, PIPE
import random
import uuid
import time
import shlex
from jumpscale import j
from termcolor import colored
from multiprocessing import Process, Manager
SETUP_ENV_SCRIPT = "tests/integration_tests/travis/setup_env.sh"
SETUP_ENV_SCRIPT_NAME = "setup_env.sh"
class Utils(object):
def __init__(self, options):
self.options = options
def run_cmd(self, cmd, timeout=20):
now = time.time()
while time.time() < now + timeout:
sub = Popen([cmd], stdout=PIPE, stderr=PIPE, shell=True)
out, err = sub.communicate()
if sub.returncode == 0:
return out.decode('utf-8')
elif any(x in err.decode('utf-8') for x in ['Connection refused', 'No route to host']):
time.sleep(1)
continue
else:
break
raise RuntimeError("Failed to execute command.\n\ncommand:\n{}\n\n{}".format(cmd, err.decode('utf-8')))
def stream_run_cmd(self, cmd):
sub = Popen(shlex.split(cmd), stdout=PIPE)
while True:
out = sub.stdout.readline()
if out == b'' and sub.poll() is not None:
break
if out:
print(out.strip())
rc = sub.poll()
return rc
def send_script_to_remote_machine(self, script, ip, password):
cmd = 'wget "https://raw.githubusercontent.com/threefoldtech/jumpscale_lib/sal_testcases/tests/integration_tests/travis/setup_env.sh"'
cmd = 'sshpass -p {} ssh -o StrictHostKeyChecking=no root@{} {}'.format(password, ip, cmd)
self.run_cmd(cmd)
def run_cmd_on_remote_machine(self, cmd, ip, password):
templ = 'sshpass -p {} ssh -o StrictHostKeyChecking=no root@{} {}'
cmd = templ.format(password, ip, cmd)
return self.stream_run_cmd(cmd)
def run_cmd_on_remote_machine_without_stream(self, cmd, ip, port, password):
templ = 'sshpass -p {} ssh -o StrictHostKeyChecking=no root@{} {}'
cmd = templ.format(password, ip, cmd)
return self.run_cmd(cmd)
def create_disk(self, zos_client):
zdb_name = str(uuid.uuid4())[0:8]
zdb = zos_client.primitives.create_zerodb(name=zdb_name, path='/mnt/zdbs/sda',
mode='user', sync=False, admin='mypassword')
zdb.namespaces.add(name='mynamespace', size=50, password='namespacepassword', public=True)
zdb.deploy()
disk = zos_client.primitives.create_disk('mydisk', zdb, size=50)
disk.deploy()
return disk
def random_mac(self):
return "52:54:00:%02x:%02x:%02x" % (random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
def get_farm_available_node_to_execute_testcases(self):
capacity = j.clients.threefold_directory.get(interactive=False)
resp = capacity.api.ListCapacity(query_params={'farmer': 'kristof-farm'})[1]
nodes = resp.json() # nodes
return random.choice(nodes)
def random_string(self, size=10):
return str(uuid.uuid4()).replace('-', '')[:size]
def create_ubuntu_vm(self, zos_client, ubuntu_port):
print('* Creating ubuntu vm to fire the testsuite from')
keypath = '/root/.ssh/id_rsa.pub'
if not os.path.isfile(keypath):
os.system("echo | ssh-keygen -P ''")
with open(keypath, "r") as key:
pub_key = key.read()
        pub_key = pub_key.replace('\n', '')
vm_ubuntu_name = "ubuntu{}".format(self.random_string())
vm_ubuntu = zos_client.primitives.create_virtual_machine(name=vm_ubuntu_name, type_='ubuntu:lts')
vm_ubuntu.nics.add(name='default_nic', type_='default')
vm_ubuntu.configs.add('sshkey', '/root/.ssh/authorized_keys', pub_key)
vm_ubuntu.ports.add('ssh_port', ubuntu_port, 22)
vm_ubuntu.vcpus = 4
vm_ubuntu.memory = 8192
vm_ubuntu.deploy()
return vm_ubuntu
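# Usage sketch (illustrative; assumes sshpass is installed locally and that the
# remote host address and password below are placeholders):
#
#     utils = Utils(options)
#     out = utils.run_cmd('echo hello')                                 # local command with retry
#     utils.run_cmd_on_remote_machine('uptime', '10.0.0.5', 'secret')   # streamed over ssh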
def main(options):
utils = Utils(options)
    # Send the script to set up the environment and run testcases
utils.send_script_to_remote_machine(SETUP_ENV_SCRIPT, options.vm_ip, options.vm_password)
    # get an available node to run the test cases against
print('* get available node to run test cases on it ')
zos_available_node = utils.get_farm_available_node_to_execute_testcases()
node_ip = zos_available_node["robot_address"][7:-5]
print('* The available node ip {} '.format(node_ip))
# Access the ubuntu vm and install requirements
cmd = 'bash {script} {branch} {nodeip} {zt_token}'.format(
script=SETUP_ENV_SCRIPT_NAME, branch="sal_testcases", nodeip=options.zos_ip, zt_token=options.zt_token)
utils.run_cmd_on_remote_machine(cmd, options.vm_ip, options.vm_password)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-z", "--zos_ip", type=str, dest="zos_ip", required=True,
help="IP of the zeroos machine that will be used")
parser.add_argument("-v", "--vm_ip", type=str, dest="vm_ip", required=True,
help="IP of the zeroos machine that will be used")
parser.add_argument("-b", "--branch", type=str, dest="branch", required=True,
help="0-core branch that the tests will run from")
parser.add_argument("-jp", "--ubuntu_port", type=str, dest="ubuntu_port", required=False,
help="if you have jumpscale machine on the node provide its port")
parser.add_argument("-jf", "--js_flag", type=str, dest="js_flag", required=False,
help="flag if you have jumpscale machine")
parser.add_argument("-t", "--zt_token", type=str, dest="zt_token", default='sgtQtwEMbRcDgKgtHEMzYfd2T7dxtbed', required=True,
help="zerotier token that will be used for the core0 tests")
parser.add_argument("-p", "--password", type=str, dest="vm_password", default='root', required=True,
help="js vm password")
options = parser.parse_args()
main(options)
|
py | b413d1405ce02fd04c69a852f73b8e8e7cec7768 | from google.cloud import ndb
from backend.common.consts.comp_level import CompLevel
from backend.common.models.event import Event
from backend.common.models.match import Match
from backend.common.queries.match_query import EventMatchesQuery
def preseed_matches(n: int) -> None:
matches = [
Match(
id=f"2010ct_qm{i}",
event=ndb.Key(Event, "2010ct"),
year=2010,
comp_level=CompLevel.QM,
set_number=1,
match_number=i,
alliances_json="",
)
for i in range(1, n + 1)
]
ndb.put_multi(matches)
def test_no_matches() -> None:
matches = EventMatchesQuery(event_key="2010ct").fetch()
assert matches == []
def test_matches_exist() -> None:
preseed_matches(5)
matches = EventMatchesQuery(event_key="2010ct").fetch()
assert len(matches) == 5
|
py | b413d1ea96343d0b59f978d6751217905041c9b4 | """
Module for managing an DPT Up/Down remote value.
DPT 1.008.
"""
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING
from xknx.dpt import DPTArray, DPTBinary
from xknx.exceptions import ConversionError, CouldNotParseTelegram
from .remote_value import AsyncCallbackType, GroupAddressesType, RemoteValue
if TYPE_CHECKING:
from xknx.xknx import XKNX
class RemoteValueUpDown(RemoteValue[DPTBinary, "RemoteValueUpDown.Direction"]):
"""Abstraction for remote value of KNX DPT 1.008 / DPT_UpDown."""
class Direction(Enum):
"""Enum for indicating the direction."""
UP = 0
DOWN = 1
def __init__(
self,
xknx: XKNX,
group_address: GroupAddressesType | None = None,
group_address_state: GroupAddressesType | None = None,
device_name: str | None = None,
feature_name: str = "Up/Down",
after_update_cb: AsyncCallbackType | None = None,
invert: bool = False,
):
"""Initialize remote value of KNX DPT 1.008."""
super().__init__(
xknx,
group_address,
group_address_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
)
self.invert = invert
def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTBinary | None:
"""Test if telegram payload may be parsed."""
# pylint: disable=no-self-use
return payload if isinstance(payload, DPTBinary) else None
def to_knx(self, value: RemoteValueUpDown.Direction) -> DPTBinary:
"""Convert value to payload."""
if value == self.Direction.UP:
return DPTBinary(1) if self.invert else DPTBinary(0)
if value == self.Direction.DOWN:
return DPTBinary(0) if self.invert else DPTBinary(1)
raise ConversionError(
"value invalid",
value=value,
device_name=self.device_name,
feature_name=self.feature_name,
)
def from_knx(self, payload: DPTBinary) -> RemoteValueUpDown.Direction:
"""Convert current payload to value."""
if payload == DPTBinary(0):
return self.Direction.DOWN if self.invert else self.Direction.UP
if payload == DPTBinary(1):
return self.Direction.UP if self.invert else self.Direction.DOWN
raise CouldNotParseTelegram(
"payload invalid",
payload=payload,
device_name=self.device_name,
feature_name=self.feature_name,
)
async def down(self) -> None:
"""Set value to down."""
await self.set(self.Direction.DOWN)
async def up(self) -> None:
"""Set value to UP."""
# pylint: disable=invalid-name
await self.set(self.Direction.UP)
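# Usage sketch (illustrative; assumes a configured XKNX instance named `xknx`,
# and group address "1/2/3" is only a placeholder):
#
#     updown = RemoteValueUpDown(xknx, group_address="1/2/3")
#     updown.to_knx(RemoteValueUpDown.Direction.DOWN)   # -> DPTBinary(1)
#     updown.from_knx(DPTBinary(0))                     # -> Direction.UP
#     await updown.down()                               # send a "down" telegram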
|
py | b413d37716f8baf3fa90d1b3bc8066b8465be497 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/ithorian/shared_ith_gloves_s02.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","ith_gloves_s02")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | b413d42511ebf53f1da88f920be2d3311a5d7146 | import requests
from common import logging
import config
def tokenize(issuee, input_file_name):
form_data = {"issuee": issuee}
files = {
'input': open(input_file_name, 'rb')
}
try:
res = requests.post(config.API_BASE_URL + "/issue", data=form_data, files=files, verify=True)
return res.json()
except requests.exceptions.ConnectionError:
return None
except Exception as e:
logging.error(e)
return None
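# Minimal usage sketch (assumes config.API_BASE_URL points at a reachable
# issuance API and that "sample.pdf" exists locally; both values are
# illustrative placeholders).
if __name__ == "__main__":
    result = tokenize("example-issuee", "sample.pdf")
    if result is None:
        logging.error("tokenization failed: API unreachable or returned an error")
    else:
        print(result)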
|
py | b413d47c8b35e5d7a818456cfea9a385f3004873 | """
This file contains DB related functions.
.. module:: GMDA_main
:platform: linux
.. moduleauthor:: Ivan Syzonenko <[email protected]>
"""
__license__ = "MIT"
__docformat__ = 'reStructuredText'
import os
import sqlite3 as lite
import numpy as np
lite.register_adapter(np.int64, lambda val: int(val))
lite.register_adapter(np.int32, lambda val: int(val))
lite.register_adapter(np.float64, lambda val: float(val))  # np.float is a deprecated alias for the builtin float
lite.register_adapter(np.float32, lambda val: float(val))
# import numpy as np
from typing import NoReturn, Mapping, Sequence, List, Set
def get_db_con(tot_seeds: int = 4) -> tuple:
"""Creates the database with structure that fits exact number of seeds.
Filename for DB is generated as next number after the highest consequent found.
If there is results_0.sqlite3, then next will be results_1.sqlite3 if it did not exist.
Args:
:param int tot_seeds: number of seeds used in the current run
:type tot_seeds: int
Returns:
:return: database connection and name
        Connection to the new database and its name.
"""
counter = 0
# db_path = '/dev/shm/GMDApy'
db_path = os.getcwd()
    db_name = 'results_{}.sqlite3'.format(counter)
    full_path = os.path.join(db_path, db_name)
    while os.path.exists(full_path):
        counter += 1
        db_name = 'results_{}.sqlite3'.format(counter)
        full_path = os.path.join(db_path, db_name)
con = lite.connect(full_path, check_same_thread=False, isolation_level=None)
cur = con.cursor()
cur.execute("""CREATE TABLE main_storage (
id INTEGER PRIMARY KEY AUTOINCREMENT,
bbrmsd_goal_dist FLOAT NOT NULL,
bbrmsd_prev_dist FLOAT NOT NULL,
bbrmsd_tot_dist FLOAT NOT NULL,
aarmsd_goal_dist FLOAT NOT NULL,
aarmsd_prev_dist FLOAT NOT NULL,
aarmsd_tot_dist FLOAT NOT NULL,
angl_goal_dist FLOAT NOT NULL,
angl_prev_dist FLOAT NOT NULL,
angl_tot_dist FLOAT NOT NULL,
andh_goal_dist INTEGER NOT NULL,
andh_prev_dist INTEGER NOT NULL,
andh_tot_dist INTEGER NOT NULL,
and_goal_dist INTEGER NOT NULL,
and_prev_dist INTEGER NOT NULL,
and_tot_dist INTEGER NOT NULL,
xor_goal_dist INTEGER NOT NULL,
xor_prev_dist INTEGER NOT NULL,
xor_tot_dist INTEGER NOT NULL,
curr_gc INTEGER NOT NULL,
Timestamp DATETIME DEFAULT (CURRENT_TIMESTAMP),
hashed_name CHAR (32) NOT NULL UNIQUE,
name TEXT
);""")
con.commit()
cur.execute("""CREATE TABLE visited (
vid INTEGER PRIMARY KEY AUTOINCREMENT, \
id REFERENCES main_storage (id),
cur_gc INTEGER,
Timestamp DATETIME DEFAULT (CURRENT_TIMESTAMP)
);""")
con.commit()
add_ind_q = 'CREATE INDEX viz_id_idx ON visited (id);'
cur.execute(add_ind_q)
con.commit()
# id REFERENCES main_storage (id), \
init_query = 'CREATE TABLE log ( \
lid INTEGER PRIMARY KEY AUTOINCREMENT, \
operation INTEGER, \
id INTEGER, \
src CHAR (8), \
dst CHAR(8), \
cur_metr CHAR(5), \
gc INTEGER , \
mul FLOAT, \
bsfrb FLOAT, \
bsfr FLOAT, \
bsfn FLOAT, \
bsfh FLOAT, \
bsfa FLOAT, \
bsfx FLOAT, \
Timestamp DATETIME DEFAULT (CURRENT_TIMESTAMP)' # no this is not an error
for i in range(tot_seeds):
init_query += ", \
dist_from_prev_{0} FLOAT, \
dist_to_goal_{0} FLOAT ".format(i+1)
init_query += ');'
cur.execute(init_query)
con.commit()
add_ind_q = 'CREATE INDEX log_id_idx ON log (id);'
cur.execute(add_ind_q)
con.commit()
cur.execute('PRAGMA mmap_size=-64000') # 32M
cur.execute('PRAGMA journal_mode = OFF')
cur.execute('PRAGMA synchronous = OFF')
cur.execute('PRAGMA temp_store = MEMORY')
cur.execute('PRAGMA threads = 32')
return con, db_name
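# Usage sketch (illustrative): create a fresh results database for a 4-seed run.
#
#     con, db_name = get_db_con(tot_seeds=4)
#     print('writing results to', db_name)
#     con.close()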
def log_error(con: lite.Connection, type: str, id: int) -> NoReturn:
"""Writes an error message into the log table
Args:
:param con: current DB connection
:param type: error type
:param id: id associated with the error
Returns:
Adds one row in the log table.
"""
qry = 'INSERT INTO log (id, operation, dst) VALUES ({}, "ERROR", "{}")'.format(id, type)
try:
con.cursor().execute(qry)
con.commit()
except Exception as e:
print(e)
print('Error in "log_error": {}'.format(qry))
# def get_id_for_name(con, name):
# con.commit()
# qry = "SELECT id FROM main_storage WHERE name='{}'".format(name)
# cur = con.cursor()
# result = cur.execute(qry)
# num = int(result.fetchone()[0])
# if not isinstance(num, int):
# raise Exception("ID was not found in main stor")
# return num
def get_id_for_hash(con: lite.Connection, h_name: str) -> int:
"""Searches main storage for id with given hash
Args:
:param lite.Connection con: DB connection
:param str h_name: hashname to use during the search
Returns:
:return: id or None if not found
"""
con.commit()
qry = "SELECT id FROM main_storage WHERE hashed_name='{}'".format(h_name)
cur = con.cursor()
result = cur.execute(qry)
row = result.fetchone()
if row is not None:
num = int(row[0])
else:
num = None
# if not isinstance(num, int):
# print("ID was not found in main stor")
return num
def get_corr_vid_for_id(con: lite.Connection, max_id: int, prev_ids: list, last_gc: float) -> tuple:
"""Used for recovery procedure. Tries to find matching sequence of nodes in the visited table
Args:
:param lite.Connection con: DB connection
:param int max_id: maximum value of the id (defined by previous search as the common latest id)
:param list prev_ids: several ids that should match
:param float last_gc: extra check, whether greed counters also match
Returns:
:return: last common visited id, timestamp, and id
:rtype: tuple
"""
qry = "SELECT vid, id, CAST(strftime('%s', Timestamp) AS INT), cur_gc FROM visited WHERE id<'{}' AND id in ({}, {}, {}) order by vid desc".format(max_id, prev_ids[0], prev_ids[1], prev_ids[2])
cur = con.cursor()
result = cur.execute(qry)
rows = result.fetchall()
i = 0
while i+2 < len(rows): # 3 for next version
if rows[i][0] - rows[i+1][0] == 1 and rows[i+1][0] - rows[i+2][0] == 1:
break
i += 1
if i+2 >= len(rows):
raise Exception("Sequence of events from pickle dump not found in DB")
last_good_vid = rows[i][0]
last_good_ts = rows[i][2]
last_good_id = rows[i][1]
if last_gc != int(rows[i][3]):
raise Exception('Everything looked good, but greed counters did not match.\n Check manually and comment this exception if you are sure that this is normal.\n')
return last_good_vid, last_good_ts, last_good_id
def get_corr_lid_for_id(con: lite.Connection, next_id: int, vid_ts: int, last_vis_id: int) -> int:
"""
Used for recovery procedure. Tries to find matching sequence of nodes in the log table
Args:
:param lite.Connection con: DB connection
:param int next_id: next id we expect to see in the log, used for double check
        :param int vid_ts: visited timestamp
:param int last_vis_id: last visited id
Returns:
:return: the latest valid log_id
"""
qry = "SELECT lid, CAST(strftime('%s', Timestamp) AS INT) FROM log WHERE id='{}' AND src='WQ' AND dst='VIZ' order by lid".format(last_vis_id)
cur = con.cursor()
result = cur.execute(qry)
rows = result.fetchall()
if len(rows) > 1:
# find the smallest dist between vid_ts and all ts
dist = abs(rows[0][1] - vid_ts)
good_lid = int(rows[0][0])
i = 1
while i < len(rows):
if abs(rows[i][1] - vid_ts) <= dist:
dist = abs(rows[i][1] - vid_ts)
good_lid = int(rows[i][0])
i += 1
else:
good_lid = int(rows[0][0])
# so now we have good_lid which is very close, but may be not exact
qry = "SELECT lid, operation, id, src, dst FROM log WHERE lid > {} order by lid limit 4".format(good_lid)
result = cur.execute(qry)
rows = result.fetchall()
i = 0
if (rows[i][1] == 'current' and rows[i][4] == 'WQ') or rows[i][1] == 'skip':
good_lid += 1
i += 1
if rows[i][1] == 'prom_O':
good_lid += 1
i += 1
if rows[i][1] == 'result' and rows[i][4] == 'VIZ' and int(rows[i][2]) == next_id:
print("Log table ID computed perfectly.")
return good_lid
# I am not using it
# def get_max_id_from_main(con):
# qry = "SELECT max(id) FROM main_storage"
# cur = con.cursor()
# result = cur.execute(qry)
# row = result.fetchone()
# if row is not None:
# num = int(row[0])
# else:
# num = None
# return num
def get_all_hashed_names(con: lite.Connection) -> list:
"""Fetches all hashes from the main_storage
Args:
:param lite.Connection con: DB connection
Returns:
:return: list of all hashes in the main_storage
:rtype: list
"""
qry = "SELECT hashed_name FROM main_storage order by id desc"
cur = con.cursor()
result = cur.execute(qry)
rows = result.fetchall()
return rows
def insert_into_main_stor(con: lite.Connection, node_info: dict, curr_gc: int, digest_name: str, name: str) -> NoReturn:
"""Inserts main information into the DB.
Args:
:param lite.Connection con: DB connection
:param dict node_info: all metric values associated with the node
:param int curr_gc: current greedy counter
:param str digest_name: hash name for the path, same as filenames for MD simulations
:param str name: path from the origin separated by _
Returns:
Stores data in the DB in a main_storage table.
"""
# con = lite.connect('results_8.sqlite3', timeout=300, check_same_thread=False, isolation_level=None)
# qry = "INSERT OR IGNORE INTO main_storage(rmsd_goal_dist, rmsd_prev_dist, rmsd_tot_dist, angl_goal_dist,
# angl_prev_dist, angl_tot_dist," \
qry = "INSERT INTO main_storage(bbrmsd_goal_dist, bbrmsd_prev_dist, bbrmsd_tot_dist, aarmsd_goal_dist, aarmsd_prev_dist, aarmsd_tot_dist, angl_goal_dist, angl_prev_dist, angl_tot_dist," \
" andh_goal_dist, andh_prev_dist, andh_tot_dist, and_goal_dist, and_prev_dist, and_tot_dist," \
" xor_goal_dist, xor_prev_dist, xor_tot_dist, curr_gc, hashed_name, name) " \
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
cur = con.cursor()
try:
cur.execute(qry, [str(elem) for elem in (node_info['BBRMSD_to_goal'], node_info['BBRMSD_from_prev'], node_info['BBRMSD_dist_total'],
node_info['AARMSD_to_goal'], node_info['AARMSD_from_prev'], node_info['AARMSD_dist_total'],
node_info['ANGL_to_goal'], node_info['ANGL_from_prev'], node_info['ANGL_dist_total'],
node_info['AND_H_to_goal'], node_info['AND_H_from_prev'], node_info['AND_H_dist_total'],
node_info['AND_to_goal'], node_info['AND_from_prev'], node_info['AND_dist_total'],
node_info['XOR_to_goal'], node_info['XOR_from_prev'], node_info['XOR_dist_total'],
curr_gc, digest_name, name)])
con.commit()
except Exception as e:
nid = get_id_for_hash(con, digest_name)
log_error(con, 'MAIN', nid)
qry = "SELECT * FROM main_storage WHERE id=?"
cur = con.cursor()
result = cur.execute(qry, nid)
row = result.fetchone()
print('Original elment in MAIN:', row)
qry = "SELECT * FROM log WHERE id=?"
cur = con.cursor()
result = cur.execute(qry, nid)
rows = result.fetchall()
print('Printing all I found in the log about this ID:')
for row in rows:
print(row)
print('Error element message: ', e, '\nqry: ', node_info, curr_gc, digest_name, name)
def insert_into_visited(con: lite.Connection, hname: str, gc: int) -> NoReturn:
"""
Inserts node processing event.
Args:
:param lite.Connection con: DB connection
:param str hname: hashname, same as MD filenames
:param int gc: greedy counter
Returns:
Stores data in the DB in a visited table.
"""
nid = get_id_for_hash(con, hname)
qry = 'INSERT INTO visited( id, cur_gc ) VALUES (?, ?)'
cur = con.cursor()
try:
cur.execute(qry, (nid, gc))
con.commit()
except Exception as e:
print(e, '\nqry: ', hname, gc)
log_error(con, 'VIZ', nid)
def insert_into_log(con: lite.Connection, operation: str, hname: str, src: str, dst: str, bsf: list, gc: int, mul: float, prev_arr: list,
goal_arr: list, cur_metr_name: str) -> NoReturn:
"""Inserts various information, like new best_so_far events, insertions into the open queue, etc.
Args:
:param lite.Connection con: DB connection
:param str operation: result, current, prom_O, skip
:param str hname: hash name, same as MD filenames
:param str src: from WQ (open queue)
:param str dst: to VIZ (visited)
:param list bsf: all best_so_far values for each metric
:param int gc: greedy counter - affects events like seed change
:param float mul: greedy multiplier - controls greediness
:param list prev_arr: distance from the previous node
:param list goal_arr: distance to the goal
:param str cur_metr_name: name of the current metric
Returns:
Stores data in the DB in a log table.
"""
src = 'None' if src == '' else src
dst = 'None' if dst == '' else dst
nid = get_id_for_hash(con, hname)
nid = 'None' if nid is None else nid
columns = 'operation, id, src, dst, cur_metr, bsfr, bsfrb, bsfn, bsfh, bsfa, bsfx, gc, mul, '
if not isinstance(goal_arr, (list,)): # short version for skip operation
columns += 'dist_from_prev_1, dist_to_goal_1'
final_str = ', '.join('"{}"'.format(elem) if isinstance(elem, str) else str(elem)
for elem in (operation, nid, src, dst, cur_metr_name, bsf["BBRMSD"], bsf["AARMSD"], bsf["ANGL"],
bsf["AND_H"], bsf["AND"], bsf["XOR"], gc, mul, prev_arr, goal_arr))
else:
nseeds = len(prev_arr) # long version for append operation
columns += ', '.join(('dist_from_prev_{0}'.format(i+1) for i in range(nseeds))) + ', '
columns += ', '.join(('dist_to_goal_{0}'.format(i+1) for i in range(nseeds)))
prev_arr_str = ', '.join((str(elem) for elem in prev_arr))
goal_arr_str = ', '.join((str(elem) for elem in goal_arr))
final_str = ', '.join('"{}"'.format(elem) if isinstance(elem, str) else str(elem)
for elem in (operation, nid, src, dst, cur_metr_name, bsf["BBRMSD"], bsf["AARMSD"], bsf["ANGL"],
bsf["AND_H"], bsf["AND"], bsf["XOR"], gc, mul))
final_str += ", ".join(('', prev_arr_str, goal_arr_str))
qry = 'INSERT INTO log({}) VALUES ({})'.format(columns, final_str)
cur = con.cursor()
try:
cur.execute(qry)
con.commit()
except Exception as e:
print(e, '\nqry: ', operation, hname, src, dst, bsf, gc, mul, prev_arr, goal_arr)
print('Extra info: ', qry)
print('Type of function : {}'.format('Short' if not isinstance(goal_arr, (list,)) else 'Long'))
log_error(con, 'LOG', nid)
# def prep_insert_into_log(con, operation, name, src, dst, bsf, gc, mul, prev_arr, goal_arr):
# src = 'None' if src == '' else src
# nid = get_id_for_name(con, name)
# columns = 'operation, id, src, dst, bsf, gc, mul, '
#
# if isinstance(goal_arr, (float, int)): # short version
# columns += 'dist_from_prev_1, dist_to_goal_1'
# final_str = ', '.join('"{}"'.format(elem) if isinstance(elem, str) else str(elem)
# for elem in (operation, nid, src, dst, bsf, gc, mul, prev_arr, goal_arr))
# else:
# nseeds = len(prev_arr)
# columns += ', '.join(('dist_from_prev_{0}, dist_to_goal_{0}'.format(i+1) for i in range(nseeds)))
# prev_arr_str = ', '.join((str(elem) for elem in prev_arr))
# goal_arr_str = ', '.join((str(elem) for elem in goal_arr))
# final_str = ', '.join('"{}"'.format(elem) if isinstance(elem, str) else str(elem)
# for elem in (operation, nid, src, dst, bsf, gc, mul))
# final_str += ", ".join(('', prev_arr_str, goal_arr_str))
#
# return final_str
def copy_old_db(main_dict_keys: list, last_visited: list, next_in_oq: str, last_gc: float) -> NoReturn:
"""Used during the recovery procedure.
Args:
:param list main_dict_keys: all hash values from the main_dict - storage of all metric information
:param list last_visited: several (3) recent values from the visited queue
:param str next_in_oq: next hash (id) in the open queue, used for double check
:param float last_gc: last greedy counter observed in the information from the pickle
Returns:
Conditionally copies data from the previous DB into a new one as a part of the restore process.
"""
counter = 0
db_path = os.getcwd()
# db_name = 'results_{}.sqlite3'.format(counter)
full_path = os.path.join(db_path, 'results_{}.sqlite3'.format(counter))
while os.path.exists(full_path):
prev_db = full_path
counter += 1
full_path = os.path.join(db_path, 'results_{}.sqlite3'.format(counter))
# yes, prev_db - the last one which exists
cur_con = lite.connect(prev_db, check_same_thread=False, isolation_level=None)
current_db_cur = cur_con.cursor()
current_db_cur.execute("DELETE FROM log")
current_db_cur.execute("DELETE FROM visited")
current_db_cur.execute("DELETE FROM main_storage")
cur_con.commit()
prev_db_con = lite.connect(os.path.join(db_path, 'results_{}.sqlite3'.format(counter - 2)), check_same_thread=False, isolation_level=None)
hashes = get_all_hashed_names(prev_db_con)
for hash_hame in hashes:
if hash_hame[0] in main_dict_keys:
break
max_id = get_id_for_hash(prev_db_con, hash_hame[0])
prev_ids = [get_id_for_hash(prev_db_con, last_visited[0][2]), get_id_for_hash(prev_db_con, last_visited[1][2]), get_id_for_hash(prev_db_con, last_visited[2][2])]
next_id = get_id_for_hash(prev_db_con, next_in_oq)
# del last_visited, next_in_oq
max_vid, vid_ts, last_vis_id = get_corr_vid_for_id(prev_db_con, max_id, prev_ids, last_gc)
max_lid = get_corr_lid_for_id(prev_db_con, next_id, vid_ts, last_vis_id)
prev_db_con.close()
del prev_db_con, hash_hame, hashes, main_dict_keys
current_db_cur.execute("ATTACH DATABASE ? AS prev_db", ('results_{}.sqlite3'.format(counter-2),)) # -1 - cur, -2 - prev
current_db_cur.execute("INSERT INTO main.main_storage SELECT * FROM prev_db.main_storage WHERE prev_db.main_storage.id <= ?", (max_id,))
cur_con.commit()
current_db_cur.execute("INSERT INTO main.visited SELECT * FROM prev_db.visited WHERE prev_db.visited.vid <= ?", (max_vid,))
cur_con.commit()
current_db_cur.execute("INSERT INTO main.log SELECT * FROM prev_db.log WHERE prev_db.log.lid <= ?", (max_lid,))
cur_con.commit()
#
# def sync_state_with_db(state):
# counter = 0
# db_path = os.getcwd()
# db_name = 'results_{}.sqlite3'.format(counter)
# full_path = os.path.join(db_path, 'results_{}.sqlite3'.format(counter))
#
# while os.path.exists(full_path):
# prev_db = full_path
# counter += 1
# full_path = os.path.join(db_path, 'results_{}.sqlite3'.format(counter))
#
# # yes, prev_db - last one which exists
# cur_con = lite.connect(prev_db, check_same_thread=False, isolation_level=None)
#
# current_db_cur = cur_con.cursor()
#
# current_db_cur.execute("DELETE FROM log")
# # get_conn
# # get indexes
# # drop all log with
# # drop all vis with
# # drop all main with
# # vacuum
# return True |
py | b413d4c64466c1ac2ab0c7a677e68e83319efea3 | from __future__ import absolute_import
from builtins import object
from elixir import *
from elixir.ext.perform_ddl import perform_ddl, preload_data
def setup():
metadata.bind = "sqlite://"
class TestPerformDDL(object):
def teardown(self):
cleanup_all(True)
def test_one(self):
class Movie(Entity):
title = Field(Unicode(30), primary_key=True)
year = Field(Integer)
perform_ddl('after-create',
"insert into %(fullname)s values ('Alien', 1979)")
setup_all(True)
assert Movie.query.count() == 1
def test_several(self):
class Movie(Entity):
title = Field(Unicode(30), primary_key=True)
year = Field(Integer)
perform_ddl('after-create',
["insert into %(fullname)s values ('Alien', 1979)",
"insert into %(fullname)s " +
"values ('Star Wars', 1977)"])
perform_ddl('after-create',
"insert into %(fullname)s (year, title) " +
"values (1982, 'Blade Runner')")
setup_all(True)
assert Movie.query.count() == 3
class TestPreloadData(object):
def teardown(self):
cleanup_all(True)
def test_several(self):
class Movie(Entity):
title = Field(Unicode(30), primary_key=True)
year = Field(Integer)
preload_data(('title', 'year'),
[(u'Alien', 1979), (u'Star Wars', 1977)])
preload_data(('year', 'title'),
[(1982, u'Blade Runner')])
preload_data(data=[(u'Batman', 1966)])
setup_all(True)
assert Movie.query.count() == 4
|
py | b413d4dd997ebc4daba904b6d27cbb83b5329265 | import setuptools
with open("README.md", "r") as README:
long_description = README.read()
setuptools.setup(
name="je_web_runner",
version="0.0.09",
author="JE-Chen",
author_email="[email protected]",
description="selenium get_webdriver_wrapper",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/JE-Chen/WebRunner",
packages=setuptools.find_packages(),
install_requires=[
"selenium",
"webdriver-manager"
],
classifiers=[
"Programming Language :: Python :: 3.5",
"Development Status :: 2 - Pre-Alpha",
"Environment :: Win32 (MS Windows)",
"Environment :: MacOS X",
"Environment :: X11 Applications",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
]
)
# python setup.py sdist bdist_wheel
# python -m twine upload dist/*
|
py | b413d4fdd968861cb7de93a5fe2ef7d6e45af2f2 | """
55.二叉树的深度.py (Depth of a Binary Tree)
Time complexity: O(n)
Space complexity: O(h), where h is the height of the tree (recursion stack)
"""
# -*- coding:utf-8 -*-
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def CreateTreeNode(listNode):
if not listNode:
return None
root = TreeNode(listNode[0])
level = [root]
j = 1
for node in level:
if node:
            node.left = (TreeNode(listNode[j]) if listNode[j] is not None else None)
level.append(node.left)
j += 1
if j == len(listNode):
return root
            node.right = (TreeNode(listNode[j]) if listNode[j] is not None else None)
level.append(node.right)
j += 1
if j == len(listNode):
return root
class Solution:
def TreeDepth(self, pRoot):
# write code here
if not pRoot:
return 0
return 1 + max(self.TreeDepth(pRoot.left), self.TreeDepth(pRoot.right))
def LevelOrder(self, pRoot):
if not pRoot:
return list()
ret = list()
level = [pRoot]
while level:
ret.append([node.val for node in level])
tmp = list()
for node in level:
tmp.extend([node.left, node.right])
level = [leaf for leaf in tmp if leaf]
return ret
if __name__ == "__main__":
listNode = [1, 2, 3, 4, 5, None, 6, None, None, 7, None, None, None]
pRoot = CreateTreeNode(listNode)
s = Solution()
level_ret = s.LevelOrder(pRoot)
print(level_ret)
ret = s.TreeDepth(pRoot)
print(ret)
|
py | b413d5174d40f38cd8a93161391f293e41a6e677 | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a sample product object for the product samples."""
from shopping.content import _constants
def create_product_sample(config, offer_id, **overwrites):
"""Creates a sample product object for the product samples.
Args:
config: dictionary, Python version of config JSON
offer_id: string, offer id for new product
**overwrites: dictionary, a set of product attributes to overwrite
Returns:
A new product in dictionary form.
"""
website_url = config.get('websiteUrl', 'http://my-book-shop.com')
product = {
'offerId':
offer_id,
'title':
'A Tale of Two Cities',
'description':
'A classic novel about the French Revolution',
'link':
website_url + '/tale-of-two-cities.html',
'imageLink':
website_url + '/tale-of-two-cities.jpg',
'contentLanguage':
_constants.CONTENT_LANGUAGE,
'targetCountry':
_constants.TARGET_COUNTRY,
'channel':
_constants.CHANNEL,
'availability':
'in stock',
'condition':
'new',
'googleProductCategory':
'Media > Books',
'gtin':
'9780007350896',
'price': {
'value': '2.50',
'currency': 'USD'
},
'shipping': [{
'country': 'US',
'service': 'Standard shipping',
'price': {
'value': '0.99',
'currency': 'USD'
}
}],
'shippingWeight': {
'value': '200',
'unit': 'grams'
}
}
product.update(overwrites)
return product
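# Usage sketch (illustrative; `config` is normally loaded from the samples'
# merchant-info JSON, and the offer id below is a placeholder):
#
#     config = {'websiteUrl': 'http://my-book-shop.com'}
#     product = create_product_sample(
#         config, 'book#0001',
#         price={'value': '11.99', 'currency': 'USD'})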
|
py | b413d6eafd26d1bd7c3082db05917a7da0d5672b | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper routines for quantization."""
from typing import Any
import chex
import jax.numpy as jnp
from flax import struct
# pylint:disable=no-value-for-parameter
@struct.dataclass
class QuantizedValue:
"""State associated with quantized value."""
quantized: chex.Array
diagonal: chex.Array # Diagonal (if extract_diagonal is set)
bucket_size: chex.Array
quantized_dtype: jnp.dtype = struct.field(
pytree_node=False
) # Dtype for the quantized value.
  extract_diagonal: bool = struct.field(pytree_node=False)  # In case it's centered.
shape: Any = struct.field(pytree_node=False) # Shape of the tensor.
@classmethod
def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False):
if isinstance(fvalue, list) and not fvalue:
return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, [])
quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize(
fvalue, quantized_dtype, extract_diagonal
)
return QuantizedValue(
quantized,
diagonal_fvalue,
bucket_size,
quantized_dtype,
extract_diagonal,
list(quantized.shape),
)
# Quantization is from Lingvo JAX optimizers.
# We extend it for int16 quantization of PSD matrices.
@classmethod
def quantize(cls, fvalue, quantized_dtype, extract_diagonal=False):
"""Returns quantized value and the bucket."""
if quantized_dtype == jnp.float32:
return fvalue, [], []
elif quantized_dtype == jnp.bfloat16:
return fvalue.astype(jnp.bfloat16), [], []
float_dtype = fvalue.dtype
if quantized_dtype == jnp.int8:
# value -128 is not used.
num_buckets = jnp.array(127.0, dtype=float_dtype)
elif quantized_dtype == jnp.int16:
# value -32768 is not used.
num_buckets = jnp.array(32767.0, dtype=float_dtype)
else:
raise ValueError(f"Quantized dtype {quantized_dtype} not supported.")
# max value is mapped to num_buckets
if extract_diagonal and fvalue.ndim != 2:
raise ValueError(
f"Input array {fvalue} must be 2D to work with extract_diagonal."
)
diagonal_fvalue = []
if extract_diagonal:
diagonal_fvalue = jnp.diag(fvalue)
# Remove the diagonal entries.
fvalue = fvalue - jnp.diag(diagonal_fvalue)
# TODO(rohananil): Extend this by making use of information about the blocks
# SM3 style which will be useful for diagonal statistics
# We first decide the scale.
if fvalue.ndim < 1:
raise ValueError(
f"Input array {fvalue} must have a strictly positive number of "
"dimensions."
)
max_abs = jnp.max(jnp.abs(fvalue), axis=0)
bucket_size = max_abs / num_buckets
bs_expanded = bucket_size[jnp.newaxis, Ellipsis]
# To avoid divide by 0.0
bs_nonzero = jnp.where(
bs_expanded > 0.0, bs_expanded, jnp.ones_like(bs_expanded)
)
ratio = fvalue / bs_nonzero
# We use rounding to remove bias.
quantized = jnp.round(ratio)
return quantized.astype(quantized_dtype), diagonal_fvalue, bucket_size
def to_float(self):
"""Returns the float value."""
if isinstance(self.quantized, list) and not self.quantized:
return self.quantized
if self.quantized_dtype == jnp.float32:
return self.quantized
if self.quantized_dtype == jnp.bfloat16:
return self.quantized.astype(jnp.float32)
float_dtype = self.bucket_size.dtype
bucket_size = self.bucket_size[jnp.newaxis, Ellipsis]
val = self.quantized.astype(float_dtype) * bucket_size
if self.extract_diagonal:
val += jnp.diag(self.diagonal)
return val
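# Round-trip sketch (illustrative): quantize a float32 matrix to int8 and
# recover an approximation via to_float(); each entry is reconstructed to
# within half a bucket of its column's scale.
#
#     x = jnp.linspace(-1.0, 1.0, 16, dtype=jnp.float32).reshape(4, 4)
#     qv = QuantizedValue.from_float_value(x, jnp.int8)
#     x_approx = qv.to_float()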
|
py | b413d73408934d274cd93239ed6405220d9973ae | # Copyright 2020 Oscar Higgott
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Union, List, Set, Tuple, Dict
import matplotlib.cbook
import numpy as np
import networkx as nx
import retworkx as rx
import scipy
from scipy.sparse import csc_matrix
from pymatching._cpp_mwpm import (exact_matching, local_matching,
MatchingGraph)
def _find_boundary_nodes(graph: nx.Graph):
"""Find all boundary nodes in G
Find the boundary nodes in G, each of which have the attribute
`is_boundary' set to `True'. Return the indices of the
boundary nodes.
Parameters
----------
graph : NetworkX graph
The matching graph.
Returns
-------
set of int
The indices of the boundary nodes in G.
"""
return {i for i, attr in graph.nodes(data=True)
if attr.get("is_boundary", False)}
class Matching:
"""A class for constructing matching graphs and decoding using the minimum-weight perfect matching decoder
The Matching class provides most of the core functionality of PyMatching.
A PyMatching object can be constructed from a check matrix with one or two non-zero
elements in each column (e.g. the :math:`Z` or
:math:`X` check matrix of some classes of CSS quantum code), given as a `scipy.sparse`
matrix or `numpy.ndarray`, along with additional argument specifying the
edge weights, error probabilities and number of repetitions.
Alternatively, a Matching object can be constructed from a NetworkX
graph, with node and edge attributes used to specify edge weights,
fault ids, boundaries and error probabilities.
"""
def __init__(self,
H: Union[scipy.sparse.spmatrix, np.ndarray, rx.PyGraph, nx.Graph, List[List[int]]] = None,
spacelike_weights: Union[float, np.ndarray, List[float]] = None,
error_probabilities: Union[float, np.ndarray, List[float]] = None,
repetitions: int = None,
timelike_weights: Union[float, np.ndarray, List[float]] = None,
measurement_error_probabilities: Union[float, np.ndarray, List[float]] = None,
precompute_shortest_paths: bool = False,
**kwargs
):
r"""Constructor for the Matching class
Parameters
----------
H : `scipy.spmatrix` or `numpy.ndarray` or `networkx.Graph` object, optional
The quantum code to be decoded with minimum-weight perfect
matching, given either as a binary check matrix (scipy sparse
matrix or numpy.ndarray), or as a matching graph (NetworkX graph).
Each edge in the NetworkX graph can have optional
attributes ``fault_ids``, ``weight`` and ``error_probability``.
``fault_ids`` should be an int or a set of ints.
Each fault id corresponds to a self-inverse fault that is flipped when the
corresponding edge is flipped. These self-inverse faults could correspond to
physical Pauli errors (physical frame changes)
or to the logical observables that are flipped by the fault
            (a logical frame change, equivalent to an observable ID in an error instruction in a Stim
detector error model). The `fault_ids` attribute was previously named `qubit_id` in an
earlier version of PyMatching, and `qubit_id` is still accepted instead of `fault_ids` in order
to maintain backward compatibility.
Each ``weight`` attribute should be a non-negative float. If
every edge is assigned an error_probability between zero and one,
then the ``add_noise`` method can be used to simulate noise and
flip edges independently in the graph. By default, None
spacelike_weights : float or numpy.ndarray, optional
If `H` is given as a scipy or numpy array, `spacelike_weights` gives the weights
of edges in the matching graph corresponding to columns of `H`.
If spacelike_weights is a numpy.ndarray, it should be a 1D array with length
equal to `H.shape[1]`. If spacelike_weights is a float, it is used as the weight for all
edges corresponding to columns of `H`. By default None, in which case
all weights are set to 1.0
error_probabilities : float or numpy.ndarray, optional
The probabilities with which an error occurs on each edge corresponding
to a column of the check matrix. If a
single float is given, the same error probability is used for each
edge. If a numpy.ndarray of floats is given, it must have a
length equal to the number of columns in the check matrix H. This parameter is only
needed for the Matching.add_noise method, and not for decoding.
By default None
repetitions : int, optional
The number of times the stabiliser measurements are repeated, if
the measurements are noisy. This option is only used if `H` is
provided as a check matrix, not a NetworkX graph. By default None
timelike_weights : float, optional
If `H` is given as a scipy or numpy array and `repetitions>1`,
`timelike_weights` gives the weight of timelike edges.
If a float is given, all timelike edges weights are set to
the same value. If a numpy array of size `(H.shape[0],)` is given, the
edge weight for each vertical timelike edge associated with the `i`th check (row)
of `H` is set to `timelike_weights[i]`. By default None, in which case all
timelike weights are set to 1.0
measurement_error_probabilities : float, optional
If `H` is given as a scipy or numpy array and `repetitions>1`,
gives the probability of a measurement error to be used for
the add_noise method. If a float is given, all measurement
errors are set to the same value. If a numpy array of size `(H.shape[0],)` is given,
the error probability for each vertical timelike edge associated with the `i`th check
(row) of `H` is set to `measurement_error_probabilities[i]`. By default None
precompute_shortest_paths : bool, optional
It is almost always recommended to leave this as False. If
the exact matching is used for decoding (setting
`num_neighbours=None` in `decode`), then setting this option
to True will precompute the all-pairs shortest paths.
By default False
Examples
--------
>>> import pymatching
>>> import math
>>> m = pymatching.Matching()
>>> m.add_edge(0, 1, fault_ids={0}, weight=0.1)
>>> m.add_edge(1, 2, fault_ids={1}, weight=0.15)
>>> m.add_edge(2, 3, fault_ids={2, 3}, weight=0.2)
>>> m.add_edge(0, 3, fault_ids={4}, weight=0.1)
>>> m.set_boundary_nodes({3})
>>> m
<pymatching.Matching object with 3 detectors, 1 boundary node, and 4 edges>
Matching objects can also be created from a check matrix (provided as a scipy.sparse matrix,
dense numpy array, or list of lists):
>>> import pymatching
>>> m = pymatching.Matching([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1]])
>>> m
<pymatching.Matching object with 3 detectors, 1 boundary node, and 4 edges>
"""
self.matching_graph = MatchingGraph()
if H is None:
return
if not isinstance(H, (nx.Graph, rx.PyGraph)):
try:
H = csc_matrix(H)
except TypeError:
raise TypeError("H must be a NetworkX graph or convertible "
"to a scipy.csc_matrix")
self.load_from_check_matrix(H, spacelike_weights, error_probabilities,
repetitions, timelike_weights, measurement_error_probabilities,
**kwargs)
elif isinstance(H, nx.Graph):
self.load_from_networkx(H)
else:
self.load_from_retworkx(H)
if precompute_shortest_paths:
self.matching_graph.compute_all_pairs_shortest_paths()
def add_edge(
self,
node1: int,
node2: int,
fault_ids: Union[int, Set[int]] = None,
weight: float = 1.0,
error_probability: float = None,
**kwargs
) -> None:
"""
Add an edge to the matching graph
Parameters
----------
node1: int
The ID of node1 in the new edge (node1, node2)
node2: int
The ID of node2 in the new edge (node1, node2)
fault_ids: set[int] or int, optional
The IDs of any self-inverse faults which are flipped when the edge is flipped, and which should be tracked.
This could correspond to the IDs of physical Pauli errors that occur when this
edge flips (physical frame changes). Alternatively,
this attribute can be used to store the IDs of any logical observables that are
flipped when an error occurs on an edge (logical frame changes). In earlier versions of PyMatching, this
attribute was instead named `qubit_id` (since for CSS codes and physical frame changes, there can be
a one-to-one correspondence between each fault ID and physical qubit ID). For backward
compatibility, `qubit_id` can still be used instead of `fault_ids` as a keyword argument.
By default None
weight: float, optional
The weight of the edge, which must be non-negative, by default 1.0
error_probability: float, optional
The probability that the edge is flipped. This is used by the `add_noise()` method
to sample from the distribution defined by the matching graph (in which each edge
is flipped independently with the corresponding `error_probability`). By default None
Examples
--------
>>> import pymatching
>>> m = pymatching.Matching()
>>> m.add_edge(0, 1)
>>> m.add_edge(1, 2)
>>> print(m.num_edges)
2
>>> print(m.num_nodes)
3
>>> import pymatching
>>> import math
>>> m = pymatching.Matching()
>>> m.add_edge(0, 1, fault_ids=2, weight=math.log((1-0.05)/0.05), error_probability=0.05)
>>> m.add_edge(1, 2, fault_ids=0, weight=math.log((1-0.1)/0.1), error_probability=0.1)
>>> m.add_edge(2, 0, fault_ids={1, 2}, weight=math.log((1-0.2)/0.2), error_probability=0.2)
>>> m
<pymatching.Matching object with 3 detectors, 0 boundary nodes, and 3 edges>
"""
if fault_ids is not None and "qubit_id" in kwargs:
raise ValueError("Both `fault_ids` and `qubit_id` were provided as arguments. Please "
"provide `fault_ids` instead of `qubit_id` as an argument, as use of `qubit_id` has "
"been deprecated.")
if fault_ids is None and "qubit_id" in kwargs:
fault_ids = kwargs["qubit_id"]
if isinstance(fault_ids, (int, np.integer)):
fault_ids = set() if fault_ids == -1 else {int(fault_ids)}
fault_ids = set() if fault_ids is None else fault_ids
has_error_probability = error_probability is not None
error_probability = error_probability if has_error_probability else -1
self.matching_graph.add_edge(node1, node2, fault_ids, weight,
error_probability, has_error_probability)
def load_from_networkx(self, graph: nx.Graph) -> None:
r"""
Load a matching graph from a NetworkX graph
Parameters
----------
graph : networkx.Graph
Each edge in the NetworkX graph can have optional
attributes ``fault_ids``, ``weight`` and ``error_probability``.
``fault_ids`` should be an int or a set of ints.
Each fault id corresponds to a self-inverse fault that is flipped when the
corresponding edge is flipped. These self-inverse faults could correspond to
physical Pauli errors (physical frame changes)
or to the logical observables that are flipped by the fault
            (a logical frame change, equivalent to an observable ID in an error instruction in a Stim
detector error model). The `fault_ids` attribute was previously named `qubit_id` in an
earlier version of PyMatching, and `qubit_id` is still accepted instead of `fault_ids` in order
to maintain backward compatibility.
Each ``weight`` attribute should be a non-negative float. If
every edge is assigned an error_probability between zero and one,
then the ``add_noise`` method can be used to simulate noise and
flip edges independently in the graph.
Examples
--------
>>> import pymatching
>>> import networkx as nx
>>> import math
>>> g = nx.Graph()
>>> g.add_edge(0, 1, fault_ids=0, weight=math.log((1-0.1)/0.1), error_probability=0.1)
>>> g.add_edge(1, 2, fault_ids=1, weight=math.log((1-0.15)/0.15), error_probability=0.15)
>>> g.nodes[0]['is_boundary'] = True
>>> g.nodes[2]['is_boundary'] = True
>>> m = pymatching.Matching(g)
>>> m
<pymatching.Matching object with 1 detector, 2 boundary nodes, and 2 edges>
"""
if not isinstance(graph, nx.Graph):
raise TypeError("G must be a NetworkX graph")
boundary = _find_boundary_nodes(graph)
num_nodes = graph.number_of_nodes()
all_fault_ids = set()
g = MatchingGraph(self.num_detectors, boundary)
for (u, v, attr) in graph.edges(data=True):
u, v = int(u), int(v)
if "fault_ids" in attr and "qubit_id" in attr:
raise ValueError("Both `fault_ids` and `qubit_id` were provided as edge attributes, however use "
"of `qubit_id` has been deprecated in favour of `fault_ids`. Please only supply "
"`fault_ids` as an edge attribute.")
if "fault_ids" not in attr and "qubit_id" in attr:
fault_ids = attr["qubit_id"] # Still accept qubit_id as well for now
else:
fault_ids = attr.get("fault_ids", set())
if isinstance(fault_ids, (int, np.integer)):
fault_ids = {int(fault_ids)} if fault_ids != -1 else set()
else:
try:
fault_ids = set(fault_ids)
if not all(isinstance(q, (int, np.integer)) for q in fault_ids):
raise ValueError("fault_ids must be a set of ints, not {}".format(fault_ids))
                except Exception:
raise ValueError(
"fault_ids property must be an int or a set of int"\
" (or convertible to a set), not {}".format(fault_ids))
all_fault_ids = all_fault_ids | fault_ids
weight = attr.get("weight", 1) # Default weight is 1 if not provided
e_prob = attr.get("error_probability", -1)
g.add_edge(u, v, fault_ids, weight, e_prob, 0 <= e_prob <= 1)
self.matching_graph = g
def load_from_retworkx(self, graph: rx.PyGraph) -> None:
r"""
Load a matching graph from a retworkX graph
Parameters
----------
graph : retworkx.PyGraph
Each edge in the retworkx graph can have dictionary payload with keys
``fault_ids``, ``weight`` and ``error_probability``. ``fault_ids`` should be
an int or a set of ints. Each fault id corresponds to a self-inverse fault
that is flipped when the corresponding edge is flipped. These self-inverse
faults could correspond to physical Pauli errors (physical frame changes)
or to the logical observables that are flipped by the fault
            (a logical frame change, equivalent to an observable ID in an error instruction in a Stim
detector error model). The `fault_ids` attribute was previously named `qubit_id` in an
earlier version of PyMatching, and `qubit_id` is still accepted instead of `fault_ids` in order
to maintain backward compatibility.
Each ``weight`` attribute should be a non-negative float. If
every edge is assigned an error_probability between zero and one,
then the ``add_noise`` method can be used to simulate noise and
flip edges independently in the graph.
Examples
--------
>>> import pymatching
>>> import retworkx as rx
>>> import math
>>> g = rx.PyGraph()
>>> matching = g.add_nodes_from([{} for _ in range(3)])
        >>> edge_a = g.add_edge(0, 1, dict(fault_ids=0, weight=math.log((1-0.1)/0.1), error_probability=0.1))
>>> edge_b = g.add_edge(1, 2, dict(fault_ids=1, weight=math.log((1-0.15)/0.15), error_probability=0.15))
>>> g[0]['is_boundary'] = True
>>> g[2]['is_boundary'] = True
>>> m = pymatching.Matching(g)
>>> m
<pymatching.Matching object with 1 detector, 2 boundary nodes, and 2 edges>
"""
if not isinstance(graph, rx.PyGraph):
raise TypeError("G must be a retworkx graph")
boundary = {i for i in graph.node_indices() if graph[i].get("is_boundary", False)}
num_nodes = len(graph)
g = MatchingGraph(self.num_detectors, boundary)
for (u, v, attr) in graph.weighted_edge_list():
u, v = int(u), int(v)
if "fault_ids" in attr and "qubit_id" in attr:
raise ValueError("Both `fault_ids` and `qubit_id` were provided as edge attributes, however use "
"of `qubit_id` has been deprecated in favour of `fault_ids`. Please only supply "
"`fault_ids` as an edge attribute.")
if "fault_ids" not in attr and "qubit_id" in attr:
fault_ids = attr["qubit_id"] # Still accept qubit_id as well for now
else:
fault_ids = attr.get("fault_ids", set())
if isinstance(fault_ids, (int, np.integer)):
fault_ids = {int(fault_ids)} if fault_ids != -1 else set()
else:
try:
fault_ids = set(fault_ids)
if not all(isinstance(q, (int, np.integer)) for q in fault_ids):
raise ValueError("fault_ids must be a set of ints, not {}".format(fault_ids))
except:
raise ValueError(
"fault_ids property must be an int or a set of int"\
" (or convertible to a set), not {}".format(fault_ids))
weight = attr.get("weight", 1) # Default weight is 1 if not provided
e_prob = attr.get("error_probability", -1)
g.add_edge(u, v, fault_ids, weight, e_prob, 0 <= e_prob <= 1)
self.matching_graph = g
def load_from_check_matrix(self,
H: Union[scipy.sparse.spmatrix, np.ndarray, List[List[int]]],
spacelike_weights: Union[float, np.ndarray, List[float]] = None,
error_probabilities: Union[float, np.ndarray, List[float]] = None,
repetitions: int = None,
timelike_weights: Union[float, np.ndarray, List[float]] = None,
measurement_error_probabilities: Union[float, np.ndarray, List[float]] = None,
**kwargs
) -> None:
"""
Load a matching graph from a check matrix
Parameters
----------
H : `scipy.spmatrix` or `numpy.ndarray` or List[List[int]]
The quantum code to be decoded with minimum-weight perfect
matching, given as a binary check matrix (scipy sparse
matrix or numpy.ndarray)
spacelike_weights : float or numpy.ndarray, optional
If `H` is given as a scipy or numpy array, `spacelike_weights` gives the weights
of edges in the matching graph corresponding to columns of `H`.
If spacelike_weights is a numpy.ndarray, it should be a 1D array with length
equal to `H.shape[1]`. If spacelike_weights is a float, it is used as the weight for all
edges corresponding to columns of `H`. By default None, in which case
all weights are set to 1.0
error_probabilities : float or numpy.ndarray, optional
The probabilities with which an error occurs on each edge associated with a
column of H. If a
single float is given, the same error probability is used for each
column. If a numpy.ndarray of floats is given, it must have a
length equal to the number of columns in H. This parameter is only
needed for the Matching.add_noise method, and not for decoding.
By default None
repetitions : int, optional
The number of times the stabiliser measurements are repeated, if
the measurements are noisy. By default None
timelike_weights : float or numpy.ndarray, optional
If `repetitions>1`, `timelike_weights` gives the weight of
timelike edges. If a float is given, all timelike edges weights are set to
the same value. If a numpy array of size `(H.shape[0],)` is given, the
edge weight for each vertical timelike edge associated with the `i`th check (row)
of `H` is set to `timelike_weights[i]`. By default None, in which case all
timelike weights are set to 1.0
measurement_error_probabilities : float or numpy.ndarray, optional
If `repetitions>1`, gives the probability of a measurement
error to be used for the add_noise method. If a float is given, all measurement
errors are set to the same value. If a numpy array of size `(H.shape[0],)` is given,
the error probability for each vertical timelike edge associated with the `i`th check
(row) of `H` is set to `measurement_error_probabilities[i]`. This argument can also be
given using the keyword argument `measurement_error_probability` to maintain backward
            compatibility with previous versions of PyMatching. By default None
Examples
--------
>>> import pymatching
>>> m = pymatching.Matching([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1]])
>>> m
<pymatching.Matching object with 3 detectors, 1 boundary node, and 4 edges>
Matching objects can also be initialised from a sparse scipy matrix:
>>> import pymatching
>>> from scipy.sparse import csc_matrix
>>> H = csc_matrix([[1, 1, 0], [0, 1, 1]])
>>> m = pymatching.Matching(H)
>>> m
<pymatching.Matching object with 2 detectors, 1 boundary node, and 3 edges>
"""
try:
H = csc_matrix(H)
except TypeError:
raise TypeError("H must be convertible to a scipy.csc_matrix")
unique_elements = np.unique(H.data)
if len(unique_elements) > 1 or unique_elements[0] != 1:
raise ValueError("Nonzero elements in the parity check matrix" \
" must be 1, not {}.".format(unique_elements))
H = H.astype(np.uint8)
num_edges = H.shape[1]
weights = 1.0 if spacelike_weights is None else spacelike_weights
if isinstance(weights, (int, float, np.integer, np.floating)):
weights = np.ones(num_edges, dtype=float)*weights
weights = np.asarray(weights)
if error_probabilities is None:
error_probabilities = np.ones(num_edges) * -1
elif isinstance(error_probabilities, (int, float)):
error_probabilities = np.ones(num_edges) * error_probabilities
column_weights = np.asarray(H.sum(axis=0))[0]
unique_column_weights = np.unique(column_weights)
if np.setdiff1d(unique_column_weights, np.array([1, 2])).size > 0:
raise ValueError("Each column of H must have weight "
"1 or 2, not {}".format(unique_column_weights))
H.eliminate_zeros()
H.sort_indices()
num_fault_ids = H.shape[1]
if weights.shape[0] != num_fault_ids:
raise ValueError("Weights array must have num_fault_ids elements")
timelike_weights = 1.0 if timelike_weights is None else timelike_weights
if isinstance(timelike_weights, (int, float, np.integer, np.floating)):
timelike_weights = np.ones(H.shape[0], dtype=float) * timelike_weights
elif isinstance(timelike_weights, (np.ndarray, list)):
timelike_weights = np.array(timelike_weights, dtype=float)
if timelike_weights.shape != (H.shape[0],):
raise ValueError("timelike_weights should have the same number of elements as there are rows in H")
else:
raise ValueError("timelike_weights should be a float or a 1d numpy array")
repetitions = 1 if repetitions is None else repetitions
mep = kwargs.get("measurement_error_probability")
if measurement_error_probabilities is not None and mep is not None:
raise ValueError("Both `measurement_error_probabilities` and `measurement_error_probability` "
"were provided as arguments. Please "
"provide `measurement_error_probabilities` instead of `measurement_error_probability` "
"as an argument, as use of `measurement_error_probability` has been deprecated.")
if measurement_error_probabilities is None and mep is not None:
measurement_error_probabilities = mep
p_meas = measurement_error_probabilities if measurement_error_probabilities is not None else -1
if isinstance(p_meas, (int, float, np.integer, np.floating)):
            p_meas = np.ones(H.shape[0], dtype=float) * p_meas
elif isinstance(p_meas, (np.ndarray, list)):
p_meas = np.array(p_meas, dtype=float)
if p_meas.shape != (H.shape[0],):
raise ValueError("measurement_error_probabilities should have dimensions {}"
" not {}".format((H.shape[0],), p_meas.shape))
else:
raise ValueError("measurement_error_probabilities should be a float or 1d numpy array")
boundary = {H.shape[0] * repetitions} if 1 in unique_column_weights else set()
self.matching_graph = MatchingGraph(H.shape[0] * repetitions, boundary=boundary)
for t in range(repetitions):
for i in range(len(H.indptr) - 1):
s, e = H.indptr[i:i + 2]
v1 = H.indices[s] + H.shape[0] * t
v2 = H.indices[e - 1] + H.shape[0] * t if e - s == 2 else next(iter(boundary))
self.matching_graph.add_edge(v1, v2, {i}, weights[i],
error_probabilities[i], error_probabilities[i] >= 0)
for t in range(repetitions - 1):
for i in range(H.shape[0]):
self.matching_graph.add_edge(i + t * H.shape[0], i + (t + 1) * H.shape[0],
set(), timelike_weights[i], p_meas[i], p_meas[i] >= 0)
def set_boundary_nodes(self, nodes: Set[int]) -> None:
"""
Set boundary nodes in the matching graph. This defines the
nodes in `nodes` to be boundary nodes.
Parameters
----------
nodes: set[int]
The IDs of the nodes to be set as boundary nodes
Examples
--------
>>> import pymatching
>>> m = pymatching.Matching()
>>> m.add_edge(0, 1)
>>> m.add_edge(1, 2)
>>> m.set_boundary_nodes({0, 2})
>>> m.boundary
{0, 2}
>>> m
<pymatching.Matching object with 1 detector, 2 boundary nodes, and 2 edges>
"""
self.matching_graph.set_boundary(nodes)
@property
def num_fault_ids(self) -> int:
"""
The number of fault IDs defined in the matching graph
Returns
-------
int
Number of fault IDs
"""
return self.matching_graph.get_num_fault_ids()
@property
def boundary(self) -> Set[int]:
"""Return the indices of the boundary nodes.
Note that this property is a copy of the set of boundary nodes.
In-place modification of the set Matching.boundary will not
change the boundary nodes of the matching graph - boundary nodes should
instead be set or updated using the `Matching.set_boundary_nodes` method.
Returns
-------
set of int
The indices of the boundary nodes
"""
return self.matching_graph.get_boundary()
@property
def num_nodes(self) -> int:
"""
The number of nodes in the matching graph
Returns
-------
int
The number of nodes
"""
return self.matching_graph.get_num_nodes()
@property
def num_edges(self) -> int:
"""
The number of edges in the matching graph
Returns
-------
int
The number of edges
"""
return self.matching_graph.get_num_edges()
@property
def num_detectors(self) -> int:
"""
The number of detectors in the matching graph. A
detector is a node that can have a non-trivial syndrome
(i.e. it is a node that is not a boundary node).
Returns
-------
int
The number of detectors
"""
return self.num_nodes - len(self.boundary)
def decode(self,
z: Union[np.ndarray, List[int]],
num_neighbours: Union[int, None] = 30,
return_weight: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, int]]:
"""Decode the syndrome `z` using minimum-weight perfect matching
If the parity of the weight of `z` is odd and the matching graph has one connected component,
then an arbitrarily chosen boundary node in
``self.boundary`` is flipped, and all other stabiliser and
boundary nodes are left unchanged. If the matching graph has multiple connected
components, then the parity of the syndrome weight within each connected component is
checked separately, and if a connected component has odd parity then an arbitrarily
chosen boundary node in the same connected component is highlighted. If the parity of the
syndrome weight in a connected component is odd, and the same connected component does not
have a boundary node, then a `ValueError` is raised.
Parameters
----------
z : numpy.ndarray
A binary syndrome vector to decode. The number of elements in
`z` should equal the number of nodes in the matching graph. If
`z` is a 1D array, then `z[i]` is the syndrome at node `i` of
the matching graph. If `z` is 2D then `z[i,j]` is the difference
(modulo 2) between the (noisy) measurement of stabiliser `i` in time
step `j+1` and time step `j` (for the case where the matching graph is
constructed from a check matrix with `repetitions>1`).
num_neighbours : int, optional
Number of closest neighbours (with non-trivial syndrome) of each matching
graph node to consider when decoding. If `num_neighbours` is set
(as it is by default), then the local matching decoder in
https://arxiv.org/abs/2105.13082 is used, and `num_neighbours`
corresponds to the parameter `m` in the paper. It is recommended
to leave `num_neighbours` set to at least 20.
If `num_neighbours is None`, then instead full matching is
performed, with the all-pairs shortest paths precomputed and
cached the first time it is used. Since full matching is more
memory intensive, it is not recommended to be used for matching graphs
with more than around 10,000 nodes, and is only faster than
local matching for matching graphs with less than around 1,000
nodes. By default 30
return_weight : bool, optional
If `return_weight==True`, the sum of the weights of the edges in the
minimum weight perfect matching is also returned. By default False
Returns
-------
correction : numpy.ndarray or list[int]
A 1D numpy array of ints giving the minimum-weight correction operator as a
binary vector. The number of elements in `correction` is one greater than
the largest fault ID. The ith element of `correction` is 1 if the
minimum-weight perfect matching (MWPM) found by PyMatching contains an odd
number of edges that have `i` as one of the `fault_ids`, and is 0 otherwise.
If each edge in the matching graph is assigned a unique integer in its
`fault_ids` attribute, then the locations of nonzero entries in `correction`
correspond to the edges in the MWPM. However, `fault_ids` can instead be used,
for example, to store IDs of the physical or logical frame changes that occur
when an edge flips (see the documentation for ``Matching.add_edge`` for more information).
weight : float
Present only if `return_weight==True`.
The sum of the weights of the edges in the minimum-weight perfect
matching.
Examples
--------
>>> import pymatching
>>> import numpy as np
>>> H = np.array([[1, 1, 0, 0],
... [0, 1, 1, 0],
... [0, 0, 1, 1]])
>>> m = pymatching.Matching(H)
>>> z = np.array([0, 1, 0])
>>> m.decode(z)
array([1, 1, 0, 0], dtype=uint8)
Each bit in the correction provided by Matching.decode corresponds to a
fault_ids. The index of a bit in a correction corresponds to its fault_ids.
For example, here an error on edge (0, 1) flips fault_ids 2 and 3, as
inferred by the minimum-weight correction:
>>> import pymatching
>>> m = pymatching.Matching()
>>> m.add_edge(0, 1, fault_ids={2, 3})
>>> m.add_edge(1, 2, fault_ids=1)
>>> m.add_edge(2, 0, fault_ids=0)
>>> m.decode([1, 1, 0])
array([0, 0, 1, 1], dtype=uint8)
To decode with a phenomenological noise model (qubits and measurements both suffering
bit-flip errors), you can provide a check matrix and number of syndrome repetitions to
construct a matching graph with a time dimension (where nodes in consecutive time steps
are connected by an edge), and then decode with a 2D syndrome
(dimension 0 is space, dimension 1 is time):
>>> import pymatching
>>> import numpy as np
>>> np.random.seed(0)
>>> H = np.array([[1, 1, 0, 0],
... [0, 1, 1, 0],
... [0, 0, 1, 1]])
>>> m = pymatching.Matching(H, repetitions=5)
>>> data_qubit_noise = (np.random.rand(4, 5) < 0.1).astype(np.uint8)
>>> print(data_qubit_noise)
[[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 1]
[1 1 0 0 0]]
>>> cumulative_noise = (np.cumsum(data_qubit_noise, 1) % 2).astype(np.uint8)
>>> syndrome = H@cumulative_noise % 2
>>> print(syndrome)
[[0 0 0 0 0]
[0 0 0 0 1]
[1 0 0 0 1]]
>>> syndrome[:,:-1] ^= (np.random.rand(3, 4) < 0.1).astype(np.uint8)
>>> # Take the parity of consecutive timesteps to construct a difference syndrome:
>>> syndrome[:,1:] = syndrome[:,:-1] ^ syndrome[:,1:]
>>> m.decode(syndrome)
array([0, 0, 1, 0], dtype=uint8)
"""
try:
z = np.array(z, dtype=np.uint8)
except:
raise TypeError("Syndrome must be of type numpy.ndarray or "\
"convertible to numpy.ndarray, not {}".format(z))
if len(z.shape) == 1 and (self.num_detectors <= z.shape[0]
<= self.num_detectors + len(self.boundary)):
defects = z.nonzero()[0]
elif len(z.shape) == 2 and z.shape[0]*z.shape[1] == self.num_detectors:
times, checks = z.T.nonzero()
defects = times*z.shape[0] + checks
else:
raise ValueError("The shape ({}) of the syndrome vector z is not valid.".format(z.shape))
if num_neighbours is None:
res = exact_matching(self.matching_graph, defects, return_weight)
else:
res = local_matching(self.matching_graph, defects, num_neighbours, return_weight)
if return_weight:
return res.correction, res.weight
else:
return res.correction
def add_noise(self) -> Union[Tuple[np.ndarray, np.ndarray], None]:
"""Add noise by flipping edges in the matching graph with
        a probability given by the error_probability edge attribute.
The ``error_probability`` must be set for all edges for this
method to run, otherwise it returns `None`.
All boundary nodes are always given a 0 syndrome.
Returns
-------
numpy.ndarray of dtype int
Noise vector (binary numpy int array of length self.num_fault_ids)
numpy.ndarray of dtype int
Syndrome vector (binary numpy int array of length
self.num_detectors if there is no boundary, or self.num_detectors+len(self.boundary)
if there are boundary nodes)
"""
if not self.matching_graph.all_edges_have_error_probabilities():
return None
return self.matching_graph.add_noise()
def edges(self) -> List[Tuple[int, int, Dict]]:
"""Edges of the matching graph
Returns a list of edges of the matching graph. Each edge is a
tuple `(source, target, attr)` where `source` and `target` are ints corresponding to the
indices of the source and target nodes, and `attr` is a dictionary containing the
attributes of the edge.
The dictionary `attr` has keys `fault_ids` (a set of ints), `weight` (the weight of the edge,
set to 1.0 if not specified), and `error_probability`
(the error probability of the edge, set to -1 if not specified).
Returns
-------
List of (int, int, dict) tuples
A list of edges of the matching graph
"""
edata = self.matching_graph.get_edges()
return [(e[0], e[1], {
'fault_ids': e[2].fault_ids,
'weight': e[2].weight,
'error_probability': e[2].error_probability
}) for e in edata]
def to_networkx(self) -> nx.Graph:
"""Convert to NetworkX graph
Returns a NetworkX graph corresponding to the matching graph. Each edge
has attributes `fault_ids`, `weight` and `error_probability` and each node has
the attribute `is_boundary`.
Returns
-------
NetworkX.Graph
NetworkX Graph corresponding to the matching graph
"""
G = nx.Graph()
G.add_edges_from(self.edges())
boundary = self.boundary
for i in G.nodes:
is_boundary = i in boundary
G.nodes[i]['is_boundary'] = is_boundary
return G
def to_retworkx(self) -> rx.PyGraph:
"""Convert to retworkx graph
Returns a retworkx graph object corresponding to the matching graph. Each edge
payload is a ``dict`` with keys `fault_ids`, `weight` and `error_probability` and
each node has a ``dict`` payload with the key ``is_boundary`` and the value is
a boolean.
Returns
-------
retworkx.PyGraph
retworkx graph corresponding to the matching graph
"""
G = rx.PyGraph(multigraph=False)
G.add_nodes_from([{} for _ in range(self.num_nodes)])
G.extend_from_weighted_edge_list(self.edges())
boundary = self.boundary
for i in G.node_indices():
is_boundary = i in boundary
G[i]['is_boundary'] = is_boundary
return G
def draw(self) -> None:
"""Draw the matching graph using matplotlib
Draws the matching graph as a matplotlib graph. Stabiliser nodes are
filled grey and boundary nodes are filled white. The line thickness of each
edge is determined from its weight (with min and max thicknesses of 0.2 pts
and 2 pts respectively).
Note that you may need to call `plt.figure()` before and `plt.show()` after calling
this function.
"""
# Ignore matplotlib deprecation warnings from networkx.draw_networkx
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
warnings.filterwarnings("ignore", category=DeprecationWarning)
G = self.to_networkx()
pos=nx.spectral_layout(G, weight=None)
c = "#bfbfbf"
ncolors = ['w' if n[1]['is_boundary'] else c for n in G.nodes(data=True)]
nx.draw_networkx_nodes(G, pos=pos, node_color=ncolors, edgecolors=c)
nx.draw_networkx_labels(G, pos=pos)
weights=np.array([e[2]['weight'] for e in G.edges(data=True)])
normalised_weights = 0.2+2*weights/np.max(weights)
nx.draw_networkx_edges(G, pos=pos, width=normalised_weights)
def qid_to_str(qid):
if len(qid) == 0:
return ""
elif len(qid) == 1:
return str(qid.pop())
else:
return str(qid)
edge_labels = {(s, t): qid_to_str(d['fault_ids']) for (s,t,d) in G.edges(data=True)}
nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)
def __repr__(self) -> str:
M = self.num_detectors
B = len(self.boundary)
E = self.matching_graph.get_num_edges()
return "<pymatching.Matching object with "\
"{} detector{}, "\
"{} boundary node{}, "\
"and {} edge{}>".format(
M, 's' if M != 1 else '', B, 's' if B != 1 else '',
E, 's' if E != 1 else '')
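# A minimal end-to-end sketch (not part of the original module), assuming the
# constructor forwards `error_probabilities` to load_from_check_matrix in the
# same way the `repetitions` example in the decode docstring does: build a
# graph from a check matrix, sample noise with add_noise, and decode the
# resulting syndrome.
if __name__ == "__main__":
    _H = np.array([[1, 1, 0, 0],
                   [0, 1, 1, 0],
                   [0, 0, 1, 1]])
    _m = Matching(_H, error_probabilities=0.1)
    _noise, _syndrome = _m.add_noise()   # defined because every edge has an error_probability
    _correction = _m.decode(_syndrome)
    print(_noise, _correction)           # both are length num_fault_ids (= 4) binary vectors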
|
py | b413d75cf4089febd2cf65381c56b0bd4d6dc2cd | import math
import numpy
from spynnaker.pyNN.models.neuron.synapse_dynamics\
.abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics
# How large are the time-stamps stored with each event
TIME_STAMP_BYTES = 4
# When not using the MAD scheme, how many pre-synaptic events are buffered
NUM_PRE_SYNAPTIC_EVENTS = 4
class SynapseDynamicsSTDP(AbstractPlasticSynapseDynamics):
def __init__(
self, timing_dependence=None, weight_dependence=None,
voltage_dependence=None,
dendritic_delay_fraction=1.0, mad=True):
AbstractPlasticSynapseDynamics.__init__(self)
self._timing_dependence = timing_dependence
self._weight_dependence = weight_dependence
self._dendritic_delay_fraction = float(dendritic_delay_fraction)
self._mad = mad
if (self._dendritic_delay_fraction < 0.5 or
self._dendritic_delay_fraction > 1.0):
raise NotImplementedError(
"dendritic_delay_fraction must be in the interval [0.5, 1.0]")
if self._timing_dependence is None or self._weight_dependence is None:
raise NotImplementedError(
"Both timing_dependence and weight_dependence must be"
"specified")
if voltage_dependence is not None:
raise NotImplementedError(
"Voltage dependence has not been implemented")
@property
def weight_dependence(self):
return self._weight_dependence
@property
def timing_dependence(self):
return self._timing_dependence
@property
def dendritic_delay_fraction(self):
return self._dendritic_delay_fraction
def is_same_as(self, synapse_dynamics):
if not isinstance(synapse_dynamics, SynapseDynamicsSTDP):
return False
return (
self._timing_dependence.is_same_as(
synapse_dynamics._timing_dependence) and
self._weight_dependence.is_same_as(
synapse_dynamics._weight_dependence) and
(self._dendritic_delay_fraction ==
synapse_dynamics._dendritic_delay_fraction) and
(self._mad == synapse_dynamics._mad))
def are_weights_signed(self):
return False
def get_vertex_executable_suffix(self):
name = "_stdp_mad" if self._mad else "_stdp"
name += "_" + self._timing_dependence.vertex_executable_suffix
name += "_" + self._weight_dependence.vertex_executable_suffix
return name
def get_parameters_sdram_usage_in_bytes(self, n_neurons, n_synapse_types):
size = 0
size += self._timing_dependence.get_parameters_sdram_usage_in_bytes()
size += self._weight_dependence.get_parameters_sdram_usage_in_bytes(
n_synapse_types, self._timing_dependence.n_weight_terms)
return size
def write_parameters(self, spec, region, machine_time_step, weight_scales):
spec.comment("Writing Plastic Parameters")
# Switch focus to the region:
spec.switch_write_focus(region)
# Write timing dependence parameters to region
self._timing_dependence.write_parameters(
spec, machine_time_step, weight_scales)
# Write weight dependence information to region
self._weight_dependence.write_parameters(
spec, machine_time_step, weight_scales,
self._timing_dependence.n_weight_terms)
@property
def _n_header_bytes(self):
if self._mad:
# If we're using MAD, the header contains a single timestamp and
# pre-trace
return (
TIME_STAMP_BYTES + self.timing_dependence.pre_trace_n_bytes)
else:
# Otherwise, headers consist of a counter followed by
# NUM_PRE_SYNAPTIC_EVENTS timestamps and pre-traces
return (
4 + (NUM_PRE_SYNAPTIC_EVENTS *
(TIME_STAMP_BYTES +
self.timing_dependence.pre_trace_n_bytes)))
def get_n_words_for_plastic_connections(self, n_connections):
synapse_structure = self._timing_dependence.synaptic_structure
fp_size_words = \
n_connections if n_connections % 2 == 0 else n_connections + 1
pp_size_bytes = (
self._n_header_bytes +
(synapse_structure.get_n_bytes_per_connection() * n_connections))
pp_size_words = int(math.ceil(float(pp_size_bytes) / 4.0))
return fp_size_words + pp_size_words
def get_plastic_synaptic_data(
self, connections, connection_row_indices, n_rows,
post_vertex_slice, n_synapse_types):
n_synapse_type_bits = int(math.ceil(math.log(n_synapse_types, 2)))
dendritic_delays = (
connections["delay"] * self._dendritic_delay_fraction)
axonal_delays = (
connections["delay"] * (1.0 - self._dendritic_delay_fraction))
# Get the fixed data
fixed_plastic = (
((dendritic_delays.astype("uint16") & 0xF) <<
(8 + n_synapse_type_bits)) |
((axonal_delays.astype("uint16") & 0xF) <<
(12 + n_synapse_type_bits)) |
(connections["synapse_type"].astype("uint16") << 8) |
((connections["target"].astype("uint16") -
post_vertex_slice.lo_atom) & 0xFF))
fixed_plastic_rows = self.convert_per_connection_data_to_rows(
connection_row_indices, n_rows,
fixed_plastic.view(dtype="uint8").reshape((-1, 2)))
fp_size = self.get_n_items(fixed_plastic_rows, 2)
fp_data = self.get_words(fixed_plastic_rows)
# Get the plastic data
synapse_structure = self._timing_dependence.synaptic_structure
plastic_plastic = synapse_structure.get_synaptic_data(connections)
plastic_headers = numpy.zeros(
(n_rows, self._n_header_bytes), dtype="uint8")
plastic_plastic_row_data = self.convert_per_connection_data_to_rows(
connection_row_indices, n_rows, plastic_plastic)
plastic_plastic_rows = [
numpy.concatenate((
plastic_headers[i], plastic_plastic_row_data[i]))
for i in range(n_rows)]
pp_size = self.get_n_items(plastic_plastic_rows, 4)
pp_data = self.get_words(plastic_plastic_rows)
return (fp_data, pp_data, fp_size, pp_size)
def get_n_plastic_plastic_words_per_row(self, pp_size):
# pp_size is in words, so return
return pp_size
def get_n_fixed_plastic_words_per_row(self, fp_size):
# fp_size is in half-words
return numpy.ceil(fp_size / 2.0).astype(dtype="uint32")
def get_n_synapses_in_rows(self, pp_size, fp_size):
# Each fixed-plastic synapse is a half-word and fp_size is in half
# words so just return it
return fp_size
def read_plastic_synaptic_data(
self, post_vertex_slice, n_synapse_types, pp_size, pp_data,
fp_size, fp_data):
n_rows = len(fp_size)
n_synapse_type_bits = int(math.ceil(math.log(n_synapse_types, 2)))
data_fixed = numpy.concatenate([
fp_data[i].view(dtype="uint16")[0:fp_size[i]]
for i in range(n_rows)])
pp_without_headers = [
row.view(dtype="uint8")[self._n_header_bytes:] for row in pp_data]
synapse_structure = self._timing_dependence.synaptic_structure
connections = numpy.zeros(
data_fixed.size, dtype=self.NUMPY_CONNECTORS_DTYPE)
connections["source"] = numpy.concatenate(
[numpy.repeat(i, fp_size[i]) for i in range(len(fp_size))])
connections["target"] = (data_fixed & 0xFF) + post_vertex_slice.lo_atom
connections["weight"] = synapse_structure.read_synaptic_data(
fp_size, pp_without_headers)
connections["delay"] = (data_fixed >> (8 + n_synapse_type_bits)) & 0xF
connections["delay"][connections["delay"] == 0] = 16
return connections
def get_weight_mean(
self, connector, n_pre_slices, pre_slice_index, n_post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
# Because the weights could all be changed to the maximum, the mean
# has to be given as the maximum for scaling
return self._weight_dependence.weight_maximum
def get_weight_variance(
self, connector, n_pre_slices, pre_slice_index, n_post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
# Because the weights could all be changed to the maximum, the variance
# has to be given as no variance
return 0.0
def get_weight_maximum(
self, connector, n_pre_slices, pre_slice_index, n_post_slices,
post_slice_index, pre_vertex_slice, post_vertex_slice):
# The maximum weight is the largest that it could be set to from
# the weight dependence
return self._weight_dependence.weight_maximum
def get_provenance_data(self, pre_population_label, post_population_label):
prov_data = list()
if self._timing_dependence is not None:
prov_data.extend(self._timing_dependence.get_provenance_data(
pre_population_label, post_population_label))
if self._weight_dependence is not None:
prov_data.extend(self._weight_dependence.get_provenance_data(
pre_population_label, post_population_label))
return prov_data
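# A hedged sketch (not part of the original file): it shows how the fixed-plastic
# half-word built in get_plastic_synaptic_data above packs the target neuron,
# synapse type and the two delay components, using one synapse type bit and
# made-up delay/target values.
if __name__ == "__main__":
    _n_synapse_type_bits = 1          # ceil(log2(2 synapse types))
    _dendritic, _axonal = 3, 0        # delay split by dendritic_delay_fraction
    _syn_type, _target = 1, 17        # synapse type index and post-neuron offset
    _half_word = (((_dendritic & 0xF) << (8 + _n_synapse_type_bits)) |
                  ((_axonal & 0xF) << (12 + _n_synapse_type_bits)) |
                  (_syn_type << 8) |
                  (_target & 0xFF))
    # target in bits 0-7, synapse type in bit 8, dendritic then axonal delay above
    print(numpy.binary_repr(_half_word, width=16))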
|
py | b413d75e9cb2cbb74c8b2098f1a96a9b83192625 | import RPi.GPIO as GPIO
import time
import logging
class LEDs:
def __init__(self, logger=logging.getLogger(__name__)):
self.logger = logger
GPIO.setmode(GPIO.BOARD)
#GPIO.setwarnings(False)
#success LED
GPIO.setup(13,GPIO.OUT)
GPIO.output(13,0)
#Busy LED
GPIO.setup(7,GPIO.OUT)
GPIO.output(7,0)
#fail LED
GPIO.setup(12,GPIO.OUT)
GPIO.output(12,0)
logging.debug("LEDS initialised")
def __enter__(self):
return self
def busyOff(self):
GPIO.output(7,0)
def orange(self):
GPIO.output(7,1)
def green(self):
        self.busyOff()
for _ in range(6):
GPIO.output(13,1)
time.sleep(0.3)
GPIO.output(13,0)
time.sleep(0.3)
self.logger.debug("Flashed success LED")
def red(self):
        self.busyOff()
for _ in range(6):
GPIO.output(12,1)
time.sleep(0.3)
GPIO.output(12,0)
time.sleep(0.3)
logging.info("Flashed failed LED")
def __exit__(self, type, value, traceback):
self.logger.debug("lights exited")
GPIO.cleanup() |
py | b413d77206f2edad95142e9964041939c92342e7 | #!/usr/bin/env python3
#
# Author: Soft9000.com
# 2018/12/19: File created
# 2018/01/15: File renamed
''' Mission: Manage a factory-order for the GUI, with
legacy API (OrderClass) conversion. '''
# Status: Multi-table serialization testing okay
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
from collections import OrderedDict
from SqltDAO.SchemaDef.Table import TableDef
from SqltDAO.CodeGen01.OrderClass import OrderClass
from SqltDAO.CodeGen01.DaoExceptions import GenOrderError
from SqltDAO.CodeGen01.Normalizers import Norm
class OrderDef1:
''' The official project-definition. Unlike OrderClass, an OrderDef
is designed to be used in conjunction with 'Preferences.' As such,
the use of fully-qualified path-names is discouraged. Rather,
    absolute OUTPUT file locations require user preferences to be
specified.
Designed for more comprehensive database support / user selectable
support, things like schema names and multiple table definitions
(etc.) are what this order-type is all about.
    Since the INPUT DATA FILE is a user-specified endpoint, note that it should never
    be saved by an order, because it must NEVER be changed.
'''
NONAME = "_-$junker!__.~" # Invalid SQL name - for "must init" testing
ProjType = ".daop1" # "Official Format, Version 1" - Always used.
DbType = ".sqlt3"
CodeType = ".py"
TEXT_DATA_TYPE = ".txt"
DEFAULT_SCHEMA = "MySchema"
IOKEY = ".~OrdrDf Ky$." # Space elimination marks unique key.
SEPS = [os.path.sep, "/", "\\"] # Zero tolerance for path names here.
DELIMITERS = (
(0, 'PIPE', '|'), # Code-Base Default (positional.) We prefer PIPE or TAB!
(1, 'TAB', '\t'),
(2, 'CSV', '","'),# Classic support enabled.
        (3, 'COMMA', ','), # Catch-All: Try CSV before COMMA. (Might even be deprecated, now?)
)
def __init__(self, name=None):
if not name:
name = OrderDef1.NONAME
OrderDef1.PATH_EXCEPT(name)
self._zdict = OrderedDict()
self._zdict['schema_name'] = OrderDef1.DEFAULT_SCHEMA
self._zdict['class_name'] = name
self._zdict['code_fname'] = name
self._zdict['db_fname'] = name
self._zdict['project_fname'] = name
self._zdict['data_encoding'] = None
self._zdict['data_sep'] = OrderDef1.DELIMITERS[0]
self._zdict_tables = OrderedDict()
def assign(self, detect, table_name):
''' Populate ourselves based upon a detected set of fields. Effects are cumulative.
Returns True upon success, else False. '''
from SqltDAO.CodeGen01.TextDataDetector import TextData
assert(isinstance(detect, TextData))
self.sep = detect.sep
self.encoding = detect.encoding
ztable = TableDef(name=table_name)
for field in detect.fields:
if ztable.add_field(field[0], field[1]) is False:
return False
return self.add_table(ztable)
@staticmethod
def PATH_EXCEPT(name):
''' Raise an exception if a platform pathname is found. '''
for sep in OrderDef1.SEPS:
if name.find(sep) != -1:
raise TypeError("Error: Path name inclusion is not supported.")
def fixup(self):
''' Enforce our "no file type" and "no file path" policies.
'''
self._zdict['project_fname'] = self.remove(self._zdict['project_fname'], OrderDef1.ProjType)
self._zdict['db_fname'] = self.remove(self._zdict['db_fname'], OrderDef1.DbType)
self._zdict['code_fname'] = self.remove(self._zdict['code_fname'], OrderDef1.CodeType)
def coin_input_file(self):
''' Suggest a text-data file-name based upon the database file name & location.
Handy when user has specified none, for example, when working in "ProjectMode."
'''
result = self._zdict['db_fname']
if result is OrderDef1.NONAME:
result = OrderDef1.DEFAULT_SCHEMA
if result.endswith(OrderDef1.DbType):
return result + OrderDef1.TEXT_DATA_TYPE
return result
@staticmethod
def BaseName(source):
''' Remove any path characters. '''
for sep in OrderDef1.SEPS:
ipos = source.find(sep)
if ipos != -1:
source = source.split(sep)[-1]
return source
def remove(self, source, suffix):
''' Detect & remove (1) file suffix, (2) junker name, and (3) Path Names.
On (2) defers to (2.1) class-name when defined, else (2.2) NONAME is returned.
        Since the INPUT DATA FILE is a user-specified endpoint, it must NEVER be changed.
'''
source = OrderDef1.BaseName(source)
if source.find(OrderDef1.NONAME) != -1:
if self.name.find(OrderDef1.NONAME) != -1:
                return OrderDef1.NONAME  # per the docstring above: fall back to NONAME when no class name is defined
else:
return self.name
if source.endswith(suffix):
return source[0:-len(suffix)]
return source
@property
def encoding(self):
return self._zdict['data_encoding']
@encoding.setter
def encoding(self, value):
self._zdict['data_encoding'] = value
@property
def sep(self):
return self._zdict['data_sep']
@sep.setter
def sep(self, value):
''' Delimiter can be specified by number, name, pattern, or unique.
Unique patterns must follow those of DataDef.DELIMITERS.'''
for line in OrderDef1.DELIMITERS:
for row in line:
if value == row:
self._zdict['data_sep'] = line
return
if len(value) == 3:
self._zdict['data_sep'] = value
@property
def name(self):
return self._zdict['class_name']
@property
def project_name(self):
''' Concoct a file-name from the schema name.'''
return self._zdict['project_fname'] + OrderDef1.ProjType
@property
def code_name(self):
''' Concoct a file-name from the schema name.'''
return self._zdict['code_fname'] + OrderDef1.CodeType
@property
def database_name(self):
''' Concoct the database file name.'''
result = self._zdict['db_fname']
if result.endswith(OrderDef1.DbType):
return result
return result + OrderDef1.DbType
@property
def schema_name(self):
''' Query the schema name. '''
return self._zdict['schema_name']
@schema_name.setter
def schema_name(self, name):
''' Change the schema name. True if all is well, else False. '''
if not name:
return False
self._zdict['schema_name'] = Norm.NormCol(name)
return True
@property
def class_name(self):
''' Query the class name. '''
return self._zdict['class_name']
def add_table(self, table_def):
''' Add a table. False if not added, or already added. '''
if not isinstance(table_def, TableDef):
return False
if table_def._name in self._zdict_tables:
return False
self._zdict_tables[table_def._name] = table_def
return True
def table_names(self):
''' Return all of the table-names. Can be empty.'''
return tuple(self._zdict_tables.keys())
def find_table(self, name):
''' Lookup a table definition, by name. None if not found. '''
if name in self._zdict_tables:
return self._zdict_tables[name]
return None
def remove_table(self, name):
''' Remove a table. Always returns True. '''
if name in self._zdict_tables:
self._zdict_tables.pop(name)
return True
def change_table(self, name, table_def):
''' Change the definition for an existing TableDef. '''
if name not in self._zdict_tables:
return False
self._zdict_tables[name] = table_def
return True
def __str__(self):
''' Program usable string. '''
return str(self.__dict__())
def __repr__(self):
''' Factory usable string. '''
result = str(type(self)) + ' : '
result = result + str(self)
return result
def __iter__(self):
''' Basic object iteration assured. '''
values = self.__dict__()
for key in values:
yield key, values[key]
def __dict__(self):
results = OrderedDict(self._zdict)
for key in self._zdict_tables:
results[key] = self._zdict_tables[key]
return results
# Main Test Case: ./Order.py
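# A hedged usage sketch (not part of the original file): build an order, attach one
# table definition, and inspect the derived names. The column name and the 'TEXT'
# type string are illustrative assumptions; add_field is called the same way the
# assign() method above calls it.
if __name__ == "__main__":
    _order = OrderDef1(name="Customers")
    _order.schema_name = "CrmSchema"
    _table = TableDef(name="Contacts")
    _table.add_field("FullName", "TEXT")
    _order.add_table(_table)
    print(_order.project_name, _order.database_name, _order.table_names())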
|
py | b413d8121a8e723317378a420730ad963424ea8f | """Default value for eisen collection values
Revision ID: ad795c7f1fd8
Revises: 23912567470d
Create Date: 2021-03-30 17:01:17.757610
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad795c7f1fd8'
down_revision = '23912567470d'
branch_labels = None
depends_on = None
def upgrade() -> None:
op.execute("update metric set collection_eisen=json_array()")
def downgrade() -> None:
pass
|
py | b413d8df29eae4f518a9075862d32f824dcec255 | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, MutableMapping, Optional
import yaml
from google.protobuf import json_format
from google.protobuf.json_format import MessageToDict, MessageToJson
from google.protobuf.timestamp_pb2 import Timestamp
from feast.loaders import yaml as feast_yaml
from feast.protos.feast.core.Entity_pb2 import Entity as EntityV2Proto
from feast.protos.feast.core.Entity_pb2 import EntityMeta as EntityMetaProto
from feast.protos.feast.core.Entity_pb2 import EntitySpecV2 as EntitySpecProto
from feast.value_type import ValueType
class Entity:
"""
Represents a collection of entities and associated metadata.
"""
def __init__(
self,
name: str,
value_type: ValueType,
description: str = "",
join_key: Optional[str] = None,
labels: Optional[MutableMapping[str, str]] = None,
):
self._name = name
self._description = description
self._value_type = value_type
if join_key:
self._join_key = join_key
else:
self._join_key = name
if labels is None:
self._labels = dict() # type: MutableMapping[str, str]
else:
self._labels = labels
self._created_timestamp: Optional[Timestamp] = None
self._last_updated_timestamp: Optional[Timestamp] = None
def __eq__(self, other):
if not isinstance(other, Entity):
raise TypeError("Comparisons should only involve Entity class objects.")
if (
self.labels != other.labels
or self.name != other.name
or self.description != other.description
or self.value_type != other.value_type
or self.join_key != other.join_key
):
return False
return True
def __str__(self):
return str(MessageToJson(self.to_proto()))
@property
def name(self):
"""
Returns the name of this entity
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this entity
"""
self._name = name
@property
def description(self):
"""
Returns the description of this entity
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this entity
"""
self._description = description
@property
def join_key(self):
"""
Returns the join key of this entity
"""
return self._join_key
@join_key.setter
def join_key(self, join_key):
"""
Sets the join key of this entity
"""
self._join_key = join_key
@property
def value_type(self) -> ValueType:
"""
Returns the type of this entity
"""
return self._value_type
@value_type.setter
def value_type(self, value_type: ValueType):
"""
Set the type for this entity
"""
self._value_type = value_type
@property
def labels(self):
"""
Returns the labels of this entity. This is the user defined metadata
defined as a dictionary.
"""
return self._labels
@labels.setter
def labels(self, labels: MutableMapping[str, str]):
"""
Set the labels for this entity
"""
self._labels = labels
@property
def created_timestamp(self):
"""
Returns the created_timestamp of this entity
"""
return self._created_timestamp
@property
def last_updated_timestamp(self):
"""
Returns the last_updated_timestamp of this entity
"""
return self._last_updated_timestamp
def is_valid(self):
"""
Validates the state of a entity locally. Raises an exception
if entity is invalid.
"""
if not self.name:
raise ValueError("No name found in entity.")
if not self.value_type:
raise ValueError("No type found in entity {self.value_type}")
@classmethod
def from_yaml(cls, yml: str):
"""
Creates an entity from a YAML string body or a file path
Args:
yml: Either a file path containing a yaml file or a YAML string
Returns:
Returns a EntityV2 object based on the YAML file
"""
return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))
@classmethod
def from_dict(cls, entity_dict):
"""
Creates an entity from a dict
Args:
entity_dict: A dict representation of an entity
Returns:
Returns a EntityV2 object based on the entity dict
"""
entity_proto = json_format.ParseDict(
entity_dict, EntityV2Proto(), ignore_unknown_fields=True
)
return cls.from_proto(entity_proto)
@classmethod
def from_proto(cls, entity_proto: EntityV2Proto):
"""
Creates an entity from a protobuf representation of an entity
Args:
entity_proto: A protobuf representation of an entity
Returns:
Returns a EntityV2 object based on the entity protobuf
"""
entity = cls(
name=entity_proto.spec.name,
description=entity_proto.spec.description,
value_type=ValueType(entity_proto.spec.value_type),
labels=entity_proto.spec.labels,
join_key=entity_proto.spec.join_key,
)
entity._created_timestamp = entity_proto.meta.created_timestamp
entity._last_updated_timestamp = entity_proto.meta.last_updated_timestamp
return entity
def to_proto(self) -> EntityV2Proto:
"""
Converts an entity object to its protobuf representation
Returns:
EntityV2Proto protobuf
"""
meta = EntityMetaProto(
created_timestamp=self.created_timestamp,
last_updated_timestamp=self.last_updated_timestamp,
)
spec = EntitySpecProto(
name=self.name,
description=self.description,
value_type=self.value_type.value,
labels=self.labels,
join_key=self.join_key,
)
return EntityV2Proto(spec=spec, meta=meta)
def to_dict(self) -> Dict:
"""
Converts entity to dict
Returns:
Dictionary object representation of entity
"""
entity_dict = MessageToDict(self.to_proto())
# Remove meta when empty for more readable exports
if entity_dict["meta"] == {}:
del entity_dict["meta"]
return entity_dict
def to_yaml(self):
"""
Converts a entity to a YAML string.
Returns:
Entity string returned in YAML format
"""
entity_dict = self.to_dict()
return yaml.dump(entity_dict, allow_unicode=True, sort_keys=False)
def to_spec_proto(self) -> EntitySpecProto:
"""
Converts an EntityV2 object to its protobuf representation.
Used when passing EntitySpecV2 object to Feast request.
Returns:
EntitySpecV2 protobuf
"""
spec = EntitySpecProto(
name=self.name,
description=self.description,
value_type=self.value_type.value,
labels=self.labels,
join_key=self.join_key,
)
return spec
def _update_from_entity(self, entity):
"""
Deep replaces one entity with another
Args:
entity: Entity to use as a source of configuration
"""
self.name = entity.name
self.description = entity.description
self.value_type = entity.value_type
self.labels = entity.labels
self.join_key = entity.join_key
self._created_timestamp = entity.created_timestamp
self._last_updated_timestamp = entity.last_updated_timestamp
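# A minimal round-trip sketch (not part of the original module): create an entity,
# serialise it with to_dict, and rebuild it with from_dict. The entity name,
# description and labels are illustrative only.
if __name__ == "__main__":
    _driver = Entity(
        name="driver_id",
        value_type=ValueType.INT64,
        description="Driver identifier",
        labels={"team": "driver_performance"},
    )
    _copy = Entity.from_dict(_driver.to_dict())
    assert _copy.name == _driver.name and _copy.join_key == _driver.join_key
    print(_copy)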
|
py | b413d92b086af2587980070a7c3a904612ac959c | import os
import hashlib
import dateutil.parser
import glob
import six
import sys
import shutil
import stat
import logging
import traceback
import contextlib
from setuptools.archive_util import unpack_archive
from setuptools.archive_util import unpack_tarfile
from setuptools.archive_util import unpack_zipfile
from contextlib import contextmanager
from tornado.log import LogFormatter
from dateutil.tz import gettz
from datetime import datetime
# pwd is for unix passwords only, so we shouldn't import it on
# windows machines
if sys.platform != 'win32':
import pwd
else:
pwd = None
def is_task(cell):
"""Returns True if the cell is a task cell."""
if 'nbgrader' not in cell.metadata:
return False
return cell.metadata['nbgrader'].get('task', False)
def is_grade(cell):
"""Returns True if the cell is a grade cell."""
if 'nbgrader' not in cell.metadata:
return False
return cell.metadata['nbgrader'].get('grade', False)
def is_solution(cell):
"""Returns True if the cell is a solution cell."""
if 'nbgrader' not in cell.metadata:
return False
return cell.metadata['nbgrader'].get('solution', False)
def is_locked(cell):
"""Returns True if the cell source is locked (will be overwritten)."""
if 'nbgrader' not in cell.metadata:
return False
elif is_solution(cell):
return False
elif is_grade(cell):
return True
else:
return cell.metadata['nbgrader'].get('locked', False)
def determine_grade(cell):
if not is_grade(cell):
raise ValueError("cell is not a grade cell")
max_points = float(cell.metadata['nbgrader']['points'])
if is_solution(cell):
# if it's a solution cell and the checksum hasn't changed, that means
# they didn't provide a response, so we can automatically give this a
# zero grade
if "checksum" in cell.metadata.nbgrader and cell.metadata.nbgrader["checksum"] == compute_checksum(cell):
return 0, max_points
else:
return None, max_points
elif cell.cell_type == 'code':
for output in cell.outputs:
if output.output_type == 'error':
return 0, max_points
return max_points, max_points
else:
return None, max_points
def to_bytes(string):
"""A python 2/3 compatible function for converting a string to bytes.
In Python 2, this just returns the 8-bit string. In Python 3, this first
encodes the string to utf-8.
"""
if sys.version_info[0] == 3 or (sys.version_info[0] == 2 and isinstance(string, unicode)):
return bytes(string.encode('utf-8'))
else:
return bytes(string)
def compute_checksum(cell):
m = hashlib.md5()
# add the cell source and type
m.update(to_bytes(cell.source))
m.update(to_bytes(cell.cell_type))
# add whether it's a grade cell and/or solution cell
m.update(to_bytes(str(is_grade(cell))))
m.update(to_bytes(str(is_solution(cell))))
m.update(to_bytes(str(is_locked(cell))))
# include the cell id
m.update(to_bytes(cell.metadata.nbgrader['grade_id']))
# include the number of points that the cell is worth, if it is a grade cell
if is_grade(cell):
m.update(to_bytes(str(float(cell.metadata.nbgrader['points']))))
return m.hexdigest()
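# A hedged sketch (not part of the original module) of how the cell helpers above
# fit together; it assumes nbformat is importable, and the source, grade_id and
# points values are made up for illustration.
def _example_grade_cell_checksum():
    from nbformat.v4 import new_code_cell
    cell = new_code_cell("assert add(1, 2) == 3")
    cell.metadata["nbgrader"] = {"grade": True, "solution": False, "locked": True,
                                 "grade_id": "test_add", "points": 1.0}
    # A pure grade cell with no error outputs earns full credit, and its checksum
    # fingerprints the source, cell type, nbgrader flags, id and points.
    return determine_grade(cell), compute_checksum(cell)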
def parse_utc(ts):
"""Parses a timestamp into datetime format, converting it to UTC if necessary."""
if ts is None:
return None
if isinstance(ts, six.string_types):
parts = ts.split(" ")
if len(parts) == 3:
ts = " ".join(parts[:2] + ["TZ"])
tz = parts[2]
try:
tz = int(tz)
except ValueError:
tz = dateutil.tz.gettz(tz)
ts = dateutil.parser.parse(ts, tzinfos=dict(TZ=tz))
else:
ts = dateutil.parser.parse(ts)
if ts.tzinfo is not None:
ts = (ts - ts.utcoffset()).replace(tzinfo=None)
return ts
def to_numeric_tz(timezone):
"""Converts a timezone to a format which can be read by parse_utc."""
return as_timezone(datetime.utcnow(), timezone).strftime('%z')
def as_timezone(ts, timezone):
"""Converts UTC timestamp ts to have timezone tz."""
if not timezone:
return ts
tz = gettz(timezone)
if tz:
return (ts + tz.utcoffset(ts)).replace(tzinfo=tz)
else:
return ts
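# A small hedged sketch (not part of the original module) of the timestamp helpers
# above: parse a naive timestamp string and attach a timezone for display.
def _example_timestamp_roundtrip():
    ts = parse_utc("2015-02-02 14:58:23")   # naive string: parsed and returned as-is
    return as_timezone(ts, "UTC")           # same instant with the UTC tzinfo attached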
def check_mode(path, read=False, write=False, execute=False):
"""Can the current user can rwx the path."""
mode = 0
if read:
mode |= os.R_OK
if write:
mode |= os.W_OK
if execute:
mode |= os.X_OK
return os.access(path, mode)
def check_directory(path, read=False, write=False, execute=False):
"""Does that path exist and can the current user rwx."""
if os.path.isdir(path) and check_mode(path, read=read, write=write, execute=execute):
return True
else:
return False
def get_osusername():
"""Get the username of the current process."""
if pwd is None:
raise OSError("get_username cannot be called on Windows")
return pwd.getpwuid(os.getuid())[0]
def get_username():
""" Get the username, use os user name but override if username is jovyan ."""
osname = get_osusername()
if osname == 'jovyan':
return os.environ.get('JUPYTERHUB_USER', 'jovyan')
else:
return osname
def find_owner(path):
"""Get the username of the owner of path."""
if pwd is None:
raise OSError("find_owner cannot be called on Windows")
return pwd.getpwuid(os.stat(os.path.abspath(path)).st_uid).pw_name
def self_owned(path):
"""Is the path owned by the current user of this process?"""
return get_osusername() == find_owner(os.path.abspath(path))
def is_ignored(filename, ignore_globs=None):
"""Determines whether a filename should be ignored, based on whether it
matches any file glob in the given list. Note that this only matches on the
base filename itself, not the full path."""
if ignore_globs is None:
return False
dirname = os.path.dirname(filename)
for expr in ignore_globs:
globs = glob.glob(os.path.join(dirname, expr))
if filename in globs:
return True
return False
def find_all_files(path, exclude=None):
"""Recursively finds all filenames rooted at `path`, optionally excluding
some based on filename globs."""
files = []
to_skip = []
for dirname, dirnames, filenames in os.walk(path):
if is_ignored(dirname, exclude) or dirname in to_skip:
to_skip.extend([os.path.join(dirname, x) for x in dirnames])
continue
for filename in filenames:
fullpath = os.path.join(dirname, filename)
if is_ignored(fullpath, exclude):
continue
else:
files.append(fullpath)
return files
def find_all_notebooks(path):
"""Return a sorted list of notebooks recursively found rooted at `path`."""
notebooks = list()
rootpath = os.path.abspath(path)
for _file in find_all_files(rootpath):
if os.path.splitext(_file)[-1] == '.ipynb':
notebooks.append(os.path.relpath(_file, rootpath))
notebooks.sort()
return notebooks
def full_split(path):
rest, last = os.path.split(path)
if last == path:
return (path,)
elif rest == path:
return (rest,)
else:
return full_split(rest) + (last,)
@contextlib.contextmanager
def chdir(dirname):
currdir = os.getcwd()
if dirname:
os.chdir(dirname)
try:
yield
finally:
os.chdir(currdir)
def rmtree(path):
# for windows, we need to go through and make sure everything
# is writeable, otherwise rmtree will fail
if sys.platform == 'win32':
for dirname, _, filenames in os.walk(path):
os.chmod(dirname, stat.S_IWRITE)
for filename in filenames:
os.chmod(os.path.join(dirname, filename), stat.S_IWRITE)
# now we can remove the path
shutil.rmtree(path)
def remove(path):
# for windows, we need to make sure that the file is writeable,
# otherwise remove will fail
if sys.platform == 'win32':
os.chmod(path, stat.S_IWRITE)
# now we can remove the path
os.remove(path)
def unzip(src, dest, zip_ext=None, create_own_folder=False, tree=False):
"""Extract all content from an archive file to a destination folder.
Arguments
---------
src: str
Absolute path to the archive file ('/path/to/archive_filename.zip')
dest: str
        Absolute path to extract all content to ('/path/to/extract/')
Keyword Arguments
-----------------
zip_ext: list
Valid zip file extensions. Default: ['.zip', '.gz']
create_own_folder: bool
Create a sub-folder in 'dest' with the archive file name if True
('/path/to/extract/archive_filename/'). Default: False
tree: bool
Extract archive files within archive files (into their own
sub-directory) if True. Default: False
"""
zip_ext = list(zip_ext or ['.zip', '.gz'])
filename, ext = os.path.splitext(os.path.basename(src))
if ext not in zip_ext:
raise ValueError("Invalid archive file extension {}: {}".format(ext, src))
if not check_directory(dest, write=True, execute=True):
raise OSError("Directory not found or unwritable: {}".format(dest))
if create_own_folder:
# double splitext for .tar.gz
fname, ext = os.path.splitext(os.path.basename(filename))
if ext == '.tar':
filename = fname
dest = os.path.join(dest, filename)
if not os.path.isdir(dest):
os.makedirs(dest)
unpack_archive(src, dest, drivers=(unpack_zipfile, unpack_tarfile))
# extract flat, don't extract archive files within archive files
if not tree:
return
def find_archive_files(skip):
found = []
# find archive files in dest that are not in skip
for root, _, filenames in os.walk(dest):
for basename in filenames:
src_file = os.path.join(root, basename)
_, ext = os.path.splitext(basename)
if ext in zip_ext and src_file not in skip:
found.append(src_file)
return found
skip = []
new_files = find_archive_files(skip)
# keep walking dest until no new archive files are found
while new_files:
# unzip (flat) new archive files found in dest
for src_file in new_files:
dest_path = os.path.split(src_file)[0]
unzip(
src_file,
dest_path,
zip_ext=zip_ext,
create_own_folder=True,
tree=False
)
skip.append(src_file)
new_files = find_archive_files(skip)
@contextmanager
def temp_attrs(app, **newvals):
oldvals = {}
for k, v in newvals.items():
oldvals[k] = getattr(app, k)
setattr(app, k, v)
yield app
for k, v in oldvals.items():
setattr(app, k, v)
def capture_log(app, fmt="[%(levelname)s] %(message)s"):
"""Adds an extra handler to the given application the logs to a string
buffer, calls ``app.start()``, and returns the log output. The extra
handler is removed from the application before returning.
Arguments
---------
app: LoggingConfigurable
        An application, with the `.start()` method implemented
fmt: string
A format string for formatting log messages
Returns
-------
A dictionary with the following keys (error and log may or may not be present):
- success (bool): whether or not the operation completed successfully
- error (string): formatted traceback
- log (string): captured log output
"""
log_buff = six.StringIO()
handler = logging.StreamHandler(log_buff)
    formatter = LogFormatter(fmt=fmt)
handler.setFormatter(formatter)
app.log.addHandler(handler)
try:
app.start()
except:
log_buff.flush()
val = log_buff.getvalue()
result = {"success": False}
result["error"] = traceback.format_exc()
if val:
result["log"] = val
else:
log_buff.flush()
val = log_buff.getvalue()
result = {"success": True}
if val:
result["log"] = val
finally:
log_buff.close()
app.log.removeHandler(handler)
return result
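# A minimal sketch (not part of the original module) showing temp_attrs in use;
# the _DummyApp class is hypothetical and exists only for this illustration.
if __name__ == "__main__":
    class _DummyApp(object):
        force = False
    _app = _DummyApp()
    with temp_attrs(_app, force=True) as _a:
        assert _a.force is True    # attribute overridden inside the context
    assert _app.force is False     # original value restored on exit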
|
py | b413d9dcd0896a169e5bd154c1e21f6419f7fa36 | from .base_model import BaseModel, db
class Order(BaseModel):
__tablename__ = 'order'
order_id = db.Column(db.Integer(), primary_key=True)
total_amount = db.Column(db.DECIMAL(), nullable=False)
created_on = db.Column(db.DateTime(), nullable=False)
shipped_on = db.Column(db.DateTime())
status = db.Column(db.Integer(), nullable=False, default=0)
comments = db.Column(db.String(255))
auth_code = db.Column(db.String(50))
reference = db.Column(db.String(50))
customer_id = db.Column(db.Integer(), db.ForeignKey('customer.customer_id'), default=1)
customer = db.relationship('Customer', lazy=False)
shipping_id = db.Column(db.Integer(), db.ForeignKey('shipping.shipping_id'), default=1)
shipping = db.relationship('Shipping', lazy=False)
tax_id = db.Column(db.Integer(), db.ForeignKey('tax.tax_id'), default=1)
tax = db.relationship('Tax', lazy=False)
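# A hedged usage sketch (not part of the original model file): it assumes the
# Flask-SQLAlchemy `Order.query` attribute provided via BaseModel/db is available
# once an application context is active, and that status 0 means "placed".
def pending_orders_for_customer(customer_id):
    """Return the not-yet-shipped orders (status == 0) for one customer."""
    return Order.query.filter_by(customer_id=customer_id, status=0).all()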
|
py | b413da97f4cfb491df42f1c27d23ff2bb924bfe8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import abspath, dirname, join
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.8'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_color',
'bokeh.sphinxext.bokeh_enum',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_options',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_palette_group',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.bokeh_releases',
'bokeh.sphinxext.bokeh_settings',
'bokeh.sphinxext.bokeh_sitemap',
'bokeh.sphinxext.bokehjs_content',
'bokeh.sphinxext.collapsible_code_block',
]
napoleon_include_init_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015-2018, Anaconda and Bokeh Contributors.'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# get all the versions that will appear in the version dropdown
f = open(join(dirname(abspath(__file__)), "all_versions.txt"))
all_versions = [x.strip() for x in reversed(f.readlines())]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#
# NOTE: in these docs all .py scripts are assumed to be bokeh plot scripts!
# Use bokeh_plot_pyfile_include_dirs below to set the folders searched for .py files.
bokeh_plot_pyfile_include_dirs = ['docs']
# Whether to allow builds to succeed if a Google API key is not defined and plots
# containing "GOOGLE_API_KEY" are processed
bokeh_missing_google_api_key_ok = False
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# patterns to exclude
exclude_patterns = ['docs/releases/*']
# This would more properly be done with rst_epilog but something about
# the combination of this with the bokeh-gallery directive breaks the build
rst_prolog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']
html_context = {
'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed
'DESCRIPTION': 'Bokeh visualization library, documentation site.',
'AUTHOR': 'Bokeh contributors',
'VERSION': version,
'NAV': (
('Github', '//github.com/bokeh/bokeh'),
),
'ABOUT': (
('Roadmap', '//bokeh.org/roadmap'),
('Team', '//bokeh.org/team'),
('Citation', '//bokeh.org/citation'),
('Contact', '//bokeh.org'),
),
'SOCIAL': (
('Contribute', 'contribute'),
('Discourse', '//discourse.bokeh.org'),
('Github', '//github.com/bokeh/bokeh'),
('Twitter', '//twitter.com/BokehPlots'),
),
'NAV_DOCS': (
('Installation', 'installation'),
('User Guide', 'user_guide'),
('Gallery', 'gallery'),
('Tutorial', 'https://mybinder.org/v2/gh/bokeh/bokeh-notebooks/master?filepath=tutorial%2F00%20-%20Introduction%20and%20Setup.ipynb'),
('Reference', 'reference'),
('Releases', 'releases'),
('Developer Guide', 'dev_guide'),
),
'ALL_VERSIONS': all_versions,
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bokeh', u'Bokeh Documentation',
[u'Anaconda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None)
}
|
py | b413dd4cd517682925a6c70964cb4d13fe3ef86d | __author__ = 'brett israelsen'
__email__ = '[email protected]'
import os
def get_participant_data_props():
"""
    Utility function to associate participant data with start/stop times; stop times are important right now because
parsing past a certain point on different files will break.
    :return: a list of dicts, each with 'fname', 'start', and 'stop' keys
"""
fldr = 'data'
    # the 'end_t' below was obtained by trying to parse a file and then observing at what step an error occurred. -1 indicates that the entire file will be parsed.
auto_named_data = {'file_prefix': 'processed_ASIST_data_study_id_XXX_condition_id_YYY_trial_id_ZZZ_messages.csv',
'studies': [1], 'trials': {1: [1, 5, 8], 2: [2, 3, 10], 3: [6, 13]},
'start_t': {1: [0, 0, 0], 2: [0, 0, 0], 3: [0, 0]},
# 'end_t':{1:[36,12,-1], 2:[-1,-1,13], 3:[30,62]},
'end_t': {1: [-1, -1, -1], 2: [-1, -1, -1], 3: [-1, -1]},
}
data_list = []
for cond in auto_named_data['trials']:
for i in range(len(auto_named_data['trials'][cond])):
trial = auto_named_data['trials'][cond][i]
fname = auto_named_data['file_prefix']
study_str = str.zfill('1', 6)
cond_str = str.zfill(f'{cond}', 6)
id_str = str.zfill(f'{trial}', 6)
fname = fname.replace('XXX', study_str)
fname = fname.replace('YYY', cond_str)
fname = fname.replace('ZZZ', id_str)
entry = {'fname': os.path.join(fldr, fname),
'start': auto_named_data['start_t'][cond][i],
'stop': auto_named_data['end_t'][cond][i]}
data_list.append(entry)
return data_list
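# For illustration: with the hard-coded study id '1' above, the first entry
# produced for condition 1 / trial 1 is (on a POSIX-style path)
#   {'fname': 'data/processed_ASIST_data_study_id_000001_condition_id_000001_trial_id_000001_messages.csv',
#    'start': 0, 'stop': -1}
# since str.zfill pads each id to six characters.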
if __name__ == '__main__':
lst = get_participant_data_props()
for itm in lst:
print(itm)
|
py | b413ddb2e61fe128564dc7d03229dd53f6d05548 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='torchvision://resnet101',
backbone=dict(type='ResNet', depth=101),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11),
loss_pose=dict(type='JointsMSELoss', use_target_weight=True))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=True,
det_bbox_thr=0.0,
bbox_file='data/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/ochuman'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file='data/coco/annotations/person_keypoints_train2017.json',
        img_prefix='data/coco/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownOCHumanDataset',
ann_file=f'{data_root}/annotations/'
'ochuman_coco_format_val_range_0.00_1.00.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownOCHumanDataset',
ann_file=f'{data_root}/annotations/'
'ochuman_coco_format_test_range_0.00_1.00.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
|
py | b413dddad60bf7d01a9c5d250980ede5284b6cb5 | import os,unittest
from igf_data.utils.fileutils import get_temp_dir,remove_dir
from igf_data.utils.tools.deeptools_utils import run_plotCoverage,run_bamCoverage,run_plotFingerprint
class Deeptools_util_test1(unittest.TestCase):
def setUp(self):
self.temp_dir = get_temp_dir()
self.input_bam = os.path.join(self.temp_dir,'input.bam')
self.blacklist_file = os.path.join(self.temp_dir,'blacklist.bed')
with open(self.input_bam,'w') as fp:
fp.write('1')
with open(self.blacklist_file,'w') as fp:
fp.write('1')
def tearDown(self):
remove_dir(self.temp_dir)
def test_run_plotCoverage(self):
deeptools_cmd = \
run_plotCoverage(\
bam_files=[self.input_bam],
output_raw_counts='out.raw.txt',
plotcov_stdout='out.stdout.txt',
output_plot='out.plot.pdf',
blacklist_file=self.blacklist_file,
thread=1,
params_list=None,
dry_run=True)
self.assertTrue(self.input_bam in deeptools_cmd)
self.assertTrue(self.blacklist_file in deeptools_cmd)
def test_run_bamCoverage(self):
deeptools_cmd = \
run_bamCoverage(\
bam_files=[self.input_bam],
output_file='out.bw',
blacklist_file=self.blacklist_file,
thread=1,
dry_run=True)
self.assertTrue(self.input_bam in deeptools_cmd)
self.assertTrue(self.blacklist_file in deeptools_cmd)
self.assertTrue('--blackListFileName' in deeptools_cmd)
self.assertTrue('bigwig' in deeptools_cmd)
def test_run_plotFingerprint(self):
deeptools_cmd = \
run_plotFingerprint(\
bam_files=[self.input_bam],
output_raw_counts='out.raw.txt',
output_matrics='out.metrics.txt',
output_plot='out.plot.pdf',
dry_run=True,
blacklist_file=self.blacklist_file,
thread=1)
self.assertTrue(self.input_bam in deeptools_cmd)
self.assertTrue(self.blacklist_file in deeptools_cmd)
self.assertTrue('--blackListFileName' in deeptools_cmd)
deeptools_cmd = \
run_plotFingerprint(\
bam_files=[self.input_bam],
output_raw_counts='out.raw.txt',
output_matrics='out.metrics.txt',
output_plot='out.plot.pdf',
dry_run=True,
thread=1)
self.assertTrue(self.input_bam in deeptools_cmd)
self.assertFalse(self.blacklist_file in deeptools_cmd)
self.assertFalse('--blackListFileName' in deeptools_cmd)
if __name__=='__main__':
unittest.main() |
py | b413de49fa832d23032f2f477412946cd229e473 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/cloud/language/v1/language_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.cloud.language.v1 LanguageService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.language.v1 import enums
from google.cloud.proto.language.v1 import language_service_pb2
class LanguageServiceClient(object):
"""
Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
SERVICE_ADDRESS = 'language.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A LanguageServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'google-cloud-language', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'language_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.cloud.language.v1.LanguageService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers, )
self.language_service_stub = config.create_stub(
language_service_pb2.LanguageServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._analyze_sentiment = api_callable.create_api_call(
self.language_service_stub.AnalyzeSentiment,
settings=defaults['analyze_sentiment'])
self._analyze_entities = api_callable.create_api_call(
self.language_service_stub.AnalyzeEntities,
settings=defaults['analyze_entities'])
self._analyze_syntax = api_callable.create_api_call(
self.language_service_stub.AnalyzeSyntax,
settings=defaults['analyze_syntax'])
self._annotate_text = api_callable.create_api_call(
self.language_service_stub.AnnotateText,
settings=defaults['annotate_text'])
# Service calls
def analyze_sentiment(self, document, encoding_type=None, options=None):
"""
Analyzes the sentiment of the provided text.
Example:
>>> from google.cloud.gapic.language.v1 import language_service_client
>>> from google.cloud.proto.language.v1 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> response = client.analyze_sentiment(document)
Args:
document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate sentence offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1.language_service_pb2.AnalyzeSentimentResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnalyzeSentimentRequest(
document=document, encoding_type=encoding_type)
return self._analyze_sentiment(request, options)
def analyze_entities(self, document, encoding_type=None, options=None):
"""
Finds named entities (currently proper names and common nouns) in the text
along with entity types, salience, mentions for each entity, and
other properties.
Example:
>>> from google.cloud.gapic.language.v1 import language_service_client
>>> from google.cloud.proto.language.v1 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> response = client.analyze_entities(document)
Args:
document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1.language_service_pb2.AnalyzeEntitiesResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnalyzeEntitiesRequest(
document=document, encoding_type=encoding_type)
return self._analyze_entities(request, options)
def analyze_syntax(self, document, encoding_type=None, options=None):
"""
Analyzes the syntax of the text and provides sentence boundaries and
tokenization along with part of speech tags, dependency trees, and other
properties.
Example:
>>> from google.cloud.gapic.language.v1 import language_service_client
>>> from google.cloud.proto.language.v1 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> response = client.analyze_syntax(document)
Args:
document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1.language_service_pb2.AnalyzeSyntaxResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnalyzeSyntaxRequest(
document=document, encoding_type=encoding_type)
return self._analyze_syntax(request, options)
def annotate_text(self,
document,
features,
encoding_type=None,
options=None):
"""
A convenience method that provides all the features that analyzeSentiment,
analyzeEntities, and analyzeSyntax provide in one call.
Example:
>>> from google.cloud.gapic.language.v1 import language_service_client
>>> from google.cloud.proto.language.v1 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> features = language_service_pb2.AnnotateTextRequest.Features()
>>> response = client.annotate_text(document, features)
Args:
document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
features (:class:`google.cloud.proto.language.v1.language_service_pb2.AnnotateTextRequest.Features`): The enabled features.
encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1.language_service_pb2.AnnotateTextResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnnotateTextRequest(
document=document, features=features, encoding_type=encoding_type)
return self._annotate_text(request, options)
|
py | b413df4e053ccb53a33dd1bce11387a3aa989286 | # Auto-generated at 2021-09-27T17:12:29.723871+08:00
# from: Justice DsmController Service (2.4.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelsDeploymentConfig(Model):
"""Models deployment config
Properties:
buffer_count: (buffer_count) REQUIRED int
configuration: (configuration) REQUIRED str
game_version: (game_version) REQUIRED str
max_count: (max_count) REQUIRED int
min_count: (min_count) REQUIRED int
regions: (regions) REQUIRED List[str]
"""
# region fields
buffer_count: int # REQUIRED
configuration: str # REQUIRED
game_version: str # REQUIRED
max_count: int # REQUIRED
min_count: int # REQUIRED
regions: List[str] # REQUIRED
# endregion fields
# region with_x methods
def with_buffer_count(self, value: int) -> ModelsDeploymentConfig:
self.buffer_count = value
return self
def with_configuration(self, value: str) -> ModelsDeploymentConfig:
self.configuration = value
return self
def with_game_version(self, value: str) -> ModelsDeploymentConfig:
self.game_version = value
return self
def with_max_count(self, value: int) -> ModelsDeploymentConfig:
self.max_count = value
return self
def with_min_count(self, value: int) -> ModelsDeploymentConfig:
self.min_count = value
return self
def with_regions(self, value: List[str]) -> ModelsDeploymentConfig:
self.regions = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "buffer_count") and self.buffer_count:
result["buffer_count"] = int(self.buffer_count)
elif include_empty:
result["buffer_count"] = int()
if hasattr(self, "configuration") and self.configuration:
result["configuration"] = str(self.configuration)
elif include_empty:
result["configuration"] = str()
if hasattr(self, "game_version") and self.game_version:
result["game_version"] = str(self.game_version)
elif include_empty:
result["game_version"] = str()
if hasattr(self, "max_count") and self.max_count:
result["max_count"] = int(self.max_count)
elif include_empty:
result["max_count"] = int()
if hasattr(self, "min_count") and self.min_count:
result["min_count"] = int(self.min_count)
elif include_empty:
result["min_count"] = int()
if hasattr(self, "regions") and self.regions:
result["regions"] = [str(i0) for i0 in self.regions]
elif include_empty:
result["regions"] = []
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
buffer_count: int,
configuration: str,
game_version: str,
max_count: int,
min_count: int,
regions: List[str],
) -> ModelsDeploymentConfig:
instance = cls()
instance.buffer_count = buffer_count
instance.configuration = configuration
instance.game_version = game_version
instance.max_count = max_count
instance.min_count = min_count
instance.regions = regions
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelsDeploymentConfig:
instance = cls()
if not dict_:
return instance
if "buffer_count" in dict_ and dict_["buffer_count"] is not None:
instance.buffer_count = int(dict_["buffer_count"])
elif include_empty:
instance.buffer_count = int()
if "configuration" in dict_ and dict_["configuration"] is not None:
instance.configuration = str(dict_["configuration"])
elif include_empty:
instance.configuration = str()
if "game_version" in dict_ and dict_["game_version"] is not None:
instance.game_version = str(dict_["game_version"])
elif include_empty:
instance.game_version = str()
if "max_count" in dict_ and dict_["max_count"] is not None:
instance.max_count = int(dict_["max_count"])
elif include_empty:
instance.max_count = int()
if "min_count" in dict_ and dict_["min_count"] is not None:
instance.min_count = int(dict_["min_count"])
elif include_empty:
instance.min_count = int()
if "regions" in dict_ and dict_["regions"] is not None:
instance.regions = [str(i0) for i0 in dict_["regions"]]
elif include_empty:
instance.regions = []
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"buffer_count": "buffer_count",
"configuration": "configuration",
"game_version": "game_version",
"max_count": "max_count",
"min_count": "min_count",
"regions": "regions",
}
# endregion static methods
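# A minimal usage sketch (all field values below are made-up placeholders):
#
#     config = ModelsDeploymentConfig.create(
#         buffer_count=1,
#         configuration="default",
#         game_version="1.0.0",
#         max_count=10,
#         min_count=1,
#         regions=["us-west-2"],
#     )
#     payload = config.to_dict()
#     restored = ModelsDeploymentConfig.create_from_dict(payload)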
|
py | b413df796c2142c8c90508e46f967c6fa01fb02e | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' argparser.py: HERON RC parser support for specifying config level arguments in an RC file.'''
import argparse
import collections
import json
import os
import re
import sys
from heron.common.src.python.utils.log import Log
import heron.tools.common.src.python.utils.config as config
##########################################################################
# Run the command
##########################################################################
# pylint: disable=invalid-name
HERON_RC = os.path.join(os.path.expanduser('~'), '.heronrc')
# pylint: disable=anomalous-backslash-in-string
# pylint: disable=invalid-name
heron_command_pattern = re.compile('(^[^:]*):([^:]*):([^\s]*) (.*)')
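# Illustrative example (not taken from a real .heronrc): a line such as
#   heron:submit:devcluster/ads/PROD --verbose
# is parsed by the pattern above as app='heron', command='submit',
# env='devcluster/ads/PROD' and args='--verbose'.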
filters = ['^@']
expressions = [re.compile(x) for x in filters]
help_epilog = '''Getting more help:
heron help <command> Prints help and options for <command>
For detailed documentation, go to http://heronstreaming.io'''
class HeronArgumentParser(argparse.ArgumentParser):
"""
HERON RC parser support for specifying config level arguments in an RC file.
check README.md.
"""
cmdmap = collections.defaultdict(dict)
"""
HERON RC parser support for specifying config level arguments in an RC file.
check README.md.
"""
def __init__(self, *args, **kwargs):
rcfile = HeronArgumentParser.getAndRemoveKey(kwargs, "rcfile")
self.rccommand = HeronArgumentParser.getAndRemoveKey(
kwargs, "rccommand")
self.rcclusterrole = HeronArgumentParser.getAndRemoveKey(
kwargs, "rcclusterrole")
HeronArgumentParser.initializeFromRC(rcfile)
super(HeronArgumentParser, self).__init__(*args, **kwargs)
@classmethod
def remove_comments(cls, string):
pattern = r"(\#.*$)"
    # the single capture group grabs a '#' comment through the end of the line
regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
def _replacer(match):
      # the captured group holds the comment text, so strip it by
      # returning an empty string
      if match.group(1) is not None:
        return "" # remove the comment
      else: # unreachable with this single-group pattern; kept as a fallback
        return match.group(1)
return regex.sub(_replacer, string)
@classmethod
def getAndRemoveKey(cls, dictionary, key):
val = None
if key in dictionary:
val = dictionary[key]
del dictionary[key]
return val
# tear down
@classmethod
def clear(cls):
cls.cmdmap.clear()
# initialize the command map from heron rc file in the parser,
# that can be later used for command substitution during parse_args phase
# patterns
@classmethod
def initializeFromRC(cls, rcfile):
if len(cls.cmdmap) > 0:
return
effective_rc = (rcfile, HERON_RC)[rcfile is None]
Log.debug('Effective RC file is %s', effective_rc)
if os.path.exists(effective_rc):
with open(effective_rc) as f:
cls.cmdmap['*']['*'] = collections.defaultdict(dict)
cls.cmdmap['*']['*']['*'] = ''
for line in f:
m = heron_command_pattern.match(line)
app, value, command, env = '', '', '', ''
if m is not None:
value = cls.remove_comments(m.group(4).rstrip(os.linesep))
app = (m.group(1), '')[m.group(1) is None or m.group(1) == '']
command = (m.group(2), '')[m.group(2) is None or m.group(1) == '']
env = (m.group(3), '')[m.group(3) is None or m.group(2) == '']
else:
continue
# make sure that all the single args have a boolean value
# associated so that we can load the args to a key value
# structure
args_list = config.insert_bool_values(value.split())
args_list_string = ' '.join(args_list)
if not command or not app or not env:
Log.warn("heronrc config entry %s does not have key parameters (command:app:env) ",
line)
continue
if app not in cls.cmdmap:
cls.cmdmap[app] = collections.defaultdict(dict)
if command in cls.cmdmap[app] and env in cls.cmdmap[app][command]:
cls.cmdmap[app][command][env] = cls.cmdmap[app][command][env] + ' ' + args_list_string
else:
cls.cmdmap[app][command][env] = args_list_string
Log.debug("RC cmdmap %s", json.dumps(cls.cmdmap))
else:
Log.debug("%s is not an existing file", effective_rc)
# for each command / cluster-role-env combination, get the commands from heronrc
# remove any duplicates that have already been supplied already and
# present in the command-dictionary
@classmethod
def get_args_for_command_role(cls, app, command, role):
args_for_command_role = ''
if app in cls.cmdmap and command in cls.cmdmap[app] and role in cls.cmdmap[app][command]:
args_for_command_role = (cls.cmdmap[app][command][role],
args_for_command_role)[cls.cmdmap[app][command][role] is None]
return args_for_command_role.split()
# this is invoked when the parser.parse_args is called
# apply the commands in the following precedence order
# use the defaults in the command line
def _read_args_from_files(self, arg_strings):
new_arg_strings = []
command = self.rccommand
if len(sys.argv) > 1:
command = (sys.argv[1], self.rccommand)[self.rccommand is not None]
role = self.rcclusterrole
if len(sys.argv) > 2:
role = (sys.argv[2], self.rcclusterrole)[self.rccommand is not None]
app = self.prog
new_arg_strings.extend(
self.get_args_for_command_role(app, command, role))
new_arg_strings.extend(
self.get_args_for_command_role(app, command, '*'))
new_arg_strings.extend(self.get_args_for_command_role(app, '*', '*'))
new_arg_strings.extend(self.get_args_for_command_role('*', '*', '*'))
arg_strings.extend(new_arg_strings)
Log.debug("heronparser: _read_args_from_files : %s %s %d %s %s %s %s", arg_strings,
new_arg_strings, len(sys.argv), sys.argv, app, command, role)
return arg_strings
# get the positional arguments for the given sub parser, remove the known obvious
def get_positional_args(self):
positional_args_map = collections.defaultdict(dict)
for key in self._actions:
# pylint: disable=protected-access
if isinstance(key, argparse._StoreAction) and len(key.option_strings) == 0:
if key.dest == 'cluster/[role]/[env]':
continue
positional_args_map['--'+key.dest] = key.dest
Log.debug("get_positional_args : key: %s, dest : %s", key, key.dest)
return positional_args_map
def parse_known_args(self, args=None, namespace=None):
namespace, args = super(HeronArgumentParser,
self).parse_known_args(args, namespace)
positional_args_map = self.get_positional_args()
if self.prog != 'heron':
## sub parser specific validation
Log.debug('sub parser expansion %s %s', self.prog, args)
## if the expanded args contains a optional equivalent of a positional argument
## i.e --topology-name xyz for positional argument topology-name
## need to prevent that for that sub parser. bail out
for key in args:
if key in positional_args_map:
raise ValueError(
'positional argument for command {} : {} specified in heronrc'.format(
self.prog, positional_args_map[key]))
return namespace, args
def main():
parser = HeronArgumentParser(
prog='heron',
epilog=help_epilog,
formatter_class=config.SubcommandHelpFormatter,
fromfile_prefix_chars='@',
add_help=False,
rcfile="./.heronrc")
parser.add_subparsers(
title="Available commands",
metavar='<command> <options>')
args, unknown_args = parser.parse_known_args()
Log.info("parse results args: %s unknown: %s ", args, unknown_args)
if __name__ == "__main__":
sys.exit(main())
|
py | b413dffc3c4405ba0e18621dd7bed956b4d9462e | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM templates/instrumentation_setup.py.txt.
# RUN `python scripts/generate_setup.py` TO REGENERATE.
import distutils.cmd
import json
import os
from configparser import ConfigParser
import setuptools
config = ConfigParser()
config.read("setup.cfg")
# We provide extras_require parameter to setuptools.setup later which
# overwrites the extras_require section from setup.cfg. To support extras_require
# section in setup.cfg, we load it here and merge it with the extras_require param.
extras_require = {}
if "options.extras_require" in config:
for key, value in config["options.extras_require"].items():
extras_require[key] = [v for v in value.split("\n") if v.strip()]
BASE_DIR = os.path.dirname(__file__)
PACKAGE_INFO = {}
VERSION_FILENAME = os.path.join(
BASE_DIR, "src", "opentelemetry", "instrumentation", "httpx", "version.py"
)
with open(VERSION_FILENAME, encoding="utf-8") as f:
exec(f.read(), PACKAGE_INFO)
PACKAGE_FILENAME = os.path.join(
BASE_DIR, "src", "opentelemetry", "instrumentation", "httpx", "package.py"
)
with open(PACKAGE_FILENAME, encoding="utf-8") as f:
exec(f.read(), PACKAGE_INFO)
# Mark any instruments/runtime dependencies as test dependencies as well.
extras_require["instruments"] = PACKAGE_INFO["_instruments"]
test_deps = extras_require.get("test", [])
for dep in extras_require["instruments"]:
test_deps.append(dep)
extras_require["test"] = test_deps
class JSONMetadataCommand(distutils.cmd.Command):
description = (
"print out package metadata as JSON. This is used by OpenTelemetry dev scripts to ",
"auto-generate code in other places",
)
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
metadata = {
"name": config["metadata"]["name"],
"version": PACKAGE_INFO["__version__"],
"instruments": PACKAGE_INFO["_instruments"],
}
print(json.dumps(metadata))
setuptools.setup(
cmdclass={"meta": JSONMetadataCommand},
version=PACKAGE_INFO["__version__"],
extras_require=extras_require,
)
|
py | b413e0d286a5c1127b1fddc2cb4c440f890710d9 | # Generated by Django 3.2.5 on 2021-07-05 18:38
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
py | b413e20048b7e470e0de37146bacd4904d67319b | checkpoint_config = dict(interval=100)
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = None
load_from = None
resume_from = None
workflow = [('train', 1)]
lr = 1e-3
optimizer = dict(type='Adam', lr=lr, weight_decay=0)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
lr_config = dict(policy='step', warmup=None, step=[24, 32])
# runtime settings
total_epochs = 200
model = dict(type='CategoryPPF',
category=2)
train_dataset = 'ShapeNetDatasetForPPF'
data = dict(
samples_per_gpu=1,
workers_per_gpu=0,
train=dict(type=train_dataset,
category=2,
data_root='/hdd0/data/shapenet_v2/ShapeNetCore.v2',
ann_file='/hdd0/data/ppf_dataset/shapenet_train.txt'),
)
gpu_ids = [0]
seed = 0
|
py | b413e243c189c3b7bdb8f8bd2d072844de92a73b | from django.conf.urls import url
from django.urls import include
from rest_framework import routers
from rest_framework.authtoken import views as authtoken_views
from oldp.api.views import CourtViewSet, CityViewSet, StateViewSet, CountryViewSet
from oldp.apps.accounts.api_views import UserViewSet
from oldp.apps.annotations.api_views import CaseAnnotationViewSet, AnnotationLabelViewSet, CaseMarkerViewSet
from oldp.apps.cases.api_views import CaseViewSet, CaseSearchViewSet
from oldp.apps.laws.api_views import LawSearchViewSet, LawBookViewSet, LawViewSet
from . import schema_view
router = routers.DefaultRouter()
# Search views (must be declared before model views)
router.register(r'laws/search', LawSearchViewSet, base_name='law-search')
router.register(r'cases/search', CaseSearchViewSet, base_name='case-search')
# Model views
router.register(r'users', UserViewSet)
router.register(r'laws', LawViewSet)
router.register(r'law_books', LawBookViewSet)
router.register(r'cases', CaseViewSet)
router.register(r'courts', CourtViewSet)
router.register(r'cities', CityViewSet)
router.register(r'states', StateViewSet)
router.register(r'countries', CountryViewSet)
router.register(r'annotation_labels', AnnotationLabelViewSet)
router.register(r'case_annotations', CaseAnnotationViewSet)
router.register(r'case_markers', CaseMarkerViewSet)
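# For reference, each DefaultRouter registration above exposes the standard DRF
# list and detail routes; e.g. router.register(r'cases', CaseViewSet) yields
# /cases/ and /cases/{pk}/ under whatever prefix this urlconf is included at.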
urlpatterns = [
url(r'^schema(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=None), name='schema-json'),
url(r'^schema/$', schema_view.with_ui('swagger', cache_timeout=None), name='schema-swagger-ui'),
url(r'^docs/$', schema_view.with_ui('redoc', cache_timeout=None), name='schema-redoc'),
url(r'^token-auth/', authtoken_views.obtain_auth_token),
url(r'^', include(router.urls)),
]
|
py | b413e2b876f1fe3e24ed823b7569540a87a9eed4 | import mysql.connector
import logging
import sys, traceback
from mysql.connector import errorcode
from crawler_config import *
log = logging.getLogger('rss_crawler')
#####################################
# template method to run a function in a transaction on provided cnx
#####################################
def run_in_transaction(func, cnx, *args):
try:
# print '>> run_in_transaction: ', func, '; cnx=', cnx
if args:
value = func(args, cnx)
else:
value =func(cnx)
except KeyboardInterrupt:
log.warning('WARN: CTRL-C detected. Program will exit!')
cnx.rollback() # rollback transaction if interrupted by user
cnx.close()
exit(1)
except:
# traceback.print_exc(file=sys.stdout) # print exception to console
log.error('really bad. program will exit now!', exc_info=True)
cnx.rollback()
exit(2)
# restart process again ??? Recursive call is ok ???
# if args:
# run_in_transaction(func, cnx, args)
# else:
# run_in_transaction(func, cnx)
else:
cnx.commit() # commit transaction if all good
# return same values as the function
# print '>> run_in_transaction: ', func, '; cnx=', cnx, ' COMMIT ------> RETURNS: ', value
return value
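# A minimal usage sketch, mirroring the calls made further down in this module:
#
#   cnx = get_crawlerdb_connection()
#   run_in_transaction(create_database_tables, cnx)
#   cnx.close()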
##########################
# get maildb connection
##########################
def get_crawlerdb_connection():
cnx = mysql.connector.connect(**CRAWLER_DB_CONFIG)
return cnx
######################
# check if a url already exists in the given table
######################
def urlExists(url, table, cursor):
# log.debug('urlExists: ' + url)
sql = "SELECT url FROM {} WHERE hash = MD5(%s)".format(table)
cursor.execute(sql, (url,))
result = cursor.fetchone()
return result
##########################
# get one url from todo
##########################
def getTodo(cursor):
cursor.execute("SELECT url FROM todo LIMIT 1")
result = cursor.fetchone()
if result:
return result[0]
################
# count records
###############
def count(table, cursor):
cursor.execute("SELECT count(1) FROM {}".format(table))
return cursor.fetchone()[0]
##################3
# remove a url from todo list
###################
def removeTodo(url, cursor, reason):
cursor.execute("DELETE FROM todo WHERE hash=MD5(%s)",(url,))
addURL(url, 'crawled', cursor, reason)
####################################################
# add url to a table if does not exists already (feeds, bad_feeds, crawled, todo)
#####################################################
def addURL(url, table, cursor, reason='', lang=''):
# print 'addURL: ', url, ' table=', table, ' reason=', reason
if not urlExists(url, table, cursor):
if table =='bad_feeds':
sql = "INSERT INTO {}(url, hash, reason) VALUES(%s, MD5(%s), %s)".format(table)
reason = reason[:1000] # truncate message if > 1000 characters
cursor.execute(sql, (url, url, reason))
elif table == 'crawled':
sql = "INSERT INTO {}(url, hash, status) VALUES(%s, MD5(%s), %s)".format(table)
reason = reason[:1000] # truncate message if > 1000 characters
cursor.execute(sql, (url, url, reason))
elif table == 'feeds':
sql = "INSERT INTO {}(url, lang, hash) VALUES(%s, %s, MD5(%s))".format(table)
cursor.execute(sql, (url, lang, url))
else:
sql = "INSERT INTO {}(url, hash) VALUES(%s, MD5(%s))".format(table)
cursor.execute(sql, (url, url))
############################
#
############################
def acceptPageFromDomain(name, cursor):
cursor.execute("SELECT page_count FROM domains where hash = MD5(%s)", (name,))
page_count = cursor.fetchone()
if page_count:
if page_count[0] < MAX_PAGES_PER_DOMAIN:
# update database
cursor.execute("UPDATE domains set page_count=page_count+1 where hash = MD5(%s)",(name,))
return True
else:
cursor.execute("INSERT INTO domains(name, hash) VALUES(%s, MD5(%s))", (name, name))
return True
# by default return false
return False
#################################
# creates the database
#################################
def create_database(cnx):
cursor = cnx.cursor(buffered=True)
##################################################
def _create_db():
try:
cursor.execute(
"CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET 'utf8'".format(CRAWLER_DB_CONFIG['database']))
log.info("OK: create database '{}'".format(CRAWLER_DB_CONFIG['database']))
except mysql.connector.Error as err:
if (err.errno != 1007): # 1007: Can't create database ; database exists
log.error("ERROR: Failed creating database: {}".format(err), exc_info=True)
# traceback.print_exc(file=sys.stdout)
exit(1)
##################################################
def _create_db_user():
try:
cursor.execute(
"GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,DROP ON {}.* TO '{}'@'localhost' IDENTIFIED by '{}'".format(CRAWLER_DB_CONFIG['database'], CRAWLER_DB_CONFIG['user'], CRAWLER_DB_CONFIG['password']))
cursor.execute(
"GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,DROP ON {}.* TO '{}'@'%' IDENTIFIED by '{}'".format(CRAWLER_DB_CONFIG['database'], CRAWLER_DB_CONFIG['user'], CRAWLER_DB_CONFIG['password']))
log.info("OK: create database user '{}'".format(CRAWLER_DB_CONFIG['user']) )
except mysql.connector.Error as err:
log.error("ERROR: Failed creating user: {}".format(err), exc_info=True)
#traceback.print_exc(file=sys.stdout)
exit(1)
##################################################
def _setup_all():
# create database
_create_db()
# create db user and privileges
_create_db_user()
# create database tables if yes
crawlerdb_cnx = get_crawlerdb_connection()
run_in_transaction(create_database_tables, crawlerdb_cnx)
crawlerdb_cnx.close()
##################################################
# check if database exists
cursor.execute("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '{}'".format(CRAWLER_DB_CONFIG['database']))
if cursor.fetchone():
if GLOBAL_CONFIG['drop_existing_database']:
cursor.execute("DROP DATABASE {}".format(CRAWLER_DB_CONFIG['database']))
log.info("OK: drop database '{}'".format(CRAWLER_DB_CONFIG['database']))
_setup_all()
else:
log.warning("MySQL database '{}' aready exists! Existing database tables will be used for crawling!".format(CRAWLER_DB_CONFIG['database']))
else: #database does not exist
_setup_all()
# make sure to close the cursor
cursor.close()
###########################
# create db tables
###########################
def create_database_tables(cnx):
cursor = cnx.cursor(buffered=True)
# print("OK: create_database_tables: {}".format(cnx.database))
TABLES = {}
TABLES['crawled'] = (
"CREATE TABLE `crawled` ("
" `pkid` bigint unsigned NOT NULL AUTO_INCREMENT,"
" `url` varchar(3000) NOT NULL,"
" `status` varchar(1000) NOT NULL,"
" `hash` varchar(32) NOT NULL,"
" PRIMARY KEY (`pkid`),"
" UNIQUE KEY `hash` (`hash`)"
") ENGINE=InnoDB")
TABLES['todo'] = (
"CREATE TABLE `todo` ("
" `pkid` bigint unsigned NOT NULL AUTO_INCREMENT,"
" `url` varchar(3000) NOT NULL,"
" `hash` varchar(32) NOT NULL,"
" PRIMARY KEY (`pkid`),"
" UNIQUE KEY `hash` (`hash`)"
") ENGINE=InnoDB")
TABLES['feeds'] = (
"CREATE TABLE `feeds` ("
" `pkid` bigint unsigned NOT NULL AUTO_INCREMENT,"
" `url` varchar(3000) NOT NULL,"
" `lang` varchar(50),"
" `hash` varchar(32) NOT NULL,"
" PRIMARY KEY (`pkid`),"
" UNIQUE KEY `hash` (`hash`)"
") ENGINE=InnoDB")
TABLES['bad_feeds'] = (
"CREATE TABLE `bad_feeds` ("
" `pkid` bigint unsigned NOT NULL AUTO_INCREMENT,"
" `url` varchar(3000) NOT NULL,"
" `reason` varchar(1000) NOT NULL DEFAULT '',"
" `hash` varchar(32) NOT NULL,"
" PRIMARY KEY (`pkid`),"
" UNIQUE KEY `hash` (`hash`)"
") ENGINE=InnoDB")
TABLES['domains'] = (
"CREATE TABLE `domains` ("
" `pkid` bigint unsigned NOT NULL AUTO_INCREMENT,"
" `name` varchar(1000) NOT NULL,"
" `page_count` smallint(3) NOT NULL DEFAULT 1,"
" `hash` varchar(32) NOT NULL,"
" PRIMARY KEY (`pkid`),"
" UNIQUE KEY `hash` (`hash`)"
") ENGINE=InnoDB")
### creating tables =========
for name, ddl in TABLES.iteritems():
try:
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
log.warning("WARN: database table '{}' already exists.".format(name))
else:
log.error('ERROR create_database_tables -> for table:' + name, exc_info=True)
exit(1)
else:
log.info("OK: create table '{}'".format(name))
#close cursor in the end
cursor.close()
######################
######################
def prepare_database():
log.debug('prepare_database')
cnx = mysql.connector.connect(**ROOT_DB_CONFIG)
run_in_transaction(create_database, cnx)
cnx.close()
###########################
# if executed as a script
###########################
if __name__ == "__main__":
prepare_database();
|
py | b413e319763f8a8b4cfa3d0889e573c01914faac | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import Counter
import os
from argparse import Namespace
flags = Namespace(
train_file='harry.txt',
seq_size=32,
batch_size=16,
embedding_size=64,
lstm_size=64,
gradients_norm=5,
initial_words=['I', 'am'],
predict_top_k=5,
checkpoint_path='checkpoint',
)
def get_data_from_file(train_file, batch_size, seq_size):
with open(train_file, 'r', encoding='utf-8') as f:
text = f.read()
text = text.split()
word_counts = Counter(text)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {k: w for k, w in enumerate(sorted_vocab)}
vocab_to_int = {w: k for k, w in int_to_vocab.items()}
n_vocab = len(int_to_vocab)
print('Vocabulary size', n_vocab)
int_text = [vocab_to_int[w] for w in text]
num_batches = int(len(int_text) / (seq_size * batch_size))
in_text = int_text[:num_batches * batch_size * seq_size]
out_text = np.zeros_like(in_text)
out_text[:-1] = in_text[1:]
out_text[-1] = in_text[0]
in_text = np.reshape(in_text, (batch_size, -1))
out_text = np.reshape(out_text, (batch_size, -1))
return int_to_vocab, vocab_to_int, n_vocab, in_text, out_text
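# Note: in_text and out_text are returned as integer arrays of shape
# (batch_size, num_batches * seq_size); out_text is in_text shifted by one
# token, so each input word is trained to predict the next word.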
def get_batches(in_text, out_text, batch_size, seq_size):
num_batches = np.prod(in_text.shape) // (seq_size * batch_size)
for i in range(0, num_batches * seq_size, seq_size):
yield in_text[:, i:i+seq_size], out_text[:, i:i+seq_size]
class RNNModule(nn.Module):
def __init__(self, n_vocab, seq_size, embedding_size, lstm_size):
super(RNNModule, self).__init__()
self.seq_size = seq_size
self.lstm_size = lstm_size
self.embedding = nn.Embedding(n_vocab, embedding_size)
self.lstm = nn.LSTM(embedding_size,
lstm_size,
batch_first=True)
self.dense = nn.Linear(lstm_size, n_vocab)
def forward(self, x, prev_state):
embed = self.embedding(x)
output, state = self.lstm(embed, prev_state)
logits = self.dense(output)
return logits, state
def zero_state(self, batch_size):
return (torch.zeros(1, batch_size, self.lstm_size),
torch.zeros(1, batch_size, self.lstm_size))
def get_loss_and_train_op(net, lr=0.001):
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
return criterion, optimizer
def predict(device, net, words, n_vocab, vocab_to_int, int_to_vocab, top_k=5):
net.eval()
    words = list(words)  # work on a copy of the seed words passed by the caller
state_h, state_c = net.zero_state(1)
state_h = state_h.to(device)
state_c = state_c.to(device)
for w in words:
ix = torch.tensor([[vocab_to_int[w]]]).to(device)
output, (state_h, state_c) = net(ix, (state_h, state_c))
_, top_ix = torch.topk(output[0], k=top_k)
choices = top_ix.tolist()
choice = np.random.choice(choices[0])
words.append(int_to_vocab[choice])
for _ in range(100):
ix = torch.tensor([[choice]]).to(device)
output, (state_h, state_c) = net(ix, (state_h, state_c))
_, top_ix = torch.topk(output[0], k=top_k)
choices = top_ix.tolist()
choice = np.random.choice(choices[0])
words.append(int_to_vocab[choice])
print(' '.join(words).encode('utf-8'))
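# Hedged sketch (not part of the original script): how a state_dict written by the
# training loop below could be restored for inference. `path` is assumed to be one
# of the 'model-<iteration>.pth' files saved in main().
def load_checkpoint(path, n_vocab, device):
    net = RNNModule(n_vocab, flags.seq_size,
                    flags.embedding_size, flags.lstm_size)
    net.load_state_dict(torch.load(path, map_location=device))
    net.to(device)
    net.eval()
    return net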
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    os.makedirs(flags.checkpoint_path, exist_ok=True)  # make sure the checkpoint directory exists
int_to_vocab, vocab_to_int, n_vocab, in_text, out_text = get_data_from_file(
flags.train_file, flags.batch_size, flags.seq_size)
net = RNNModule(n_vocab, flags.seq_size,
flags.embedding_size, flags.lstm_size)
net = net.to(device)
criterion, optimizer = get_loss_and_train_op(net, 0.01)
iteration = 0
for e in range(200):
batches = get_batches(in_text, out_text, flags.batch_size, flags.seq_size)
state_h, state_c = net.zero_state(flags.batch_size)
state_h = state_h.to(device)
state_c = state_c.to(device)
for x, y in batches:
iteration += 1
net.train()
optimizer.zero_grad()
x = torch.tensor(x).to(device)
y = torch.tensor(y).to(device)
logits, (state_h, state_c) = net(x, (state_h, state_c))
loss = criterion(logits.transpose(1, 2), y)
loss_value = loss.item()
loss.backward()
state_h = state_h.detach()
state_c = state_c.detach()
_ = torch.nn.utils.clip_grad_norm_(
net.parameters(), flags.gradients_norm)
optimizer.step()
if iteration % 100 == 0:
print('Epoch: {}/{}'.format(e, 200),
'Iteration: {}'.format(iteration),
'Loss: {}'.format(loss_value))
if iteration % 1000 == 0:
predict(device, net, flags.initial_words, n_vocab,
vocab_to_int, int_to_vocab, top_k=5)
                torch.save(net.state_dict(),
                           os.path.join(flags.checkpoint_path,
                                        'model-{}.pth'.format(iteration)))
if __name__ == '__main__':
main()
|
py | b413e52d69f794dc9b25173f6e99e148138f7903 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logforce_settings as settings
from logforce_request import LogforceRequest
def main():
#Base url is given here
request = LogforceRequest(base_url=settings.URLS['development'],
client_version=settings.CLIENT_VERSION )
    #Authentication is required to make requests to the Logforce server.
    #Authentication cookies are stored in request._authentication_request;
    #those cookies are passed along with every service request.
response = request.authenticate(username='[email protected]',
password='Logf2014')
    #The service URL is concatenated with the base URL to form the full address.
service_url = '/server/services/1006/maps/vehicle/2916746?_=1460107399102'
    #The response JSON can be printed out like this; the last response object
    #is stored in the request._reqeuest variable.
json = request.get(service_url).pretty_response()
print(json)
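    # Hedged follow-up (not in the original example): because the authentication
    # cookies captured above are reused on every call, additional service URLs can
    # be fetched with the same `request` object. The URL below is a made-up
    # placeholder, not a documented Logforce endpoint.
    # another_url = '/server/services/1006/maps/vehicle/0000000'
    # print(request.get(another_url).pretty_response())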
if __name__ == '__main__':
main() |
py | b413e65f3cf23cc6c34df801548bae7ad8deb5b5 | # *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import unittest
import itertools
import pandas as pd
import numpy as np
import random
import string
import pyarrow.parquet as pq
import numba
import sdc
import os
from sdc import hiframes
from sdc.str_arr_ext import StringArray
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs, dist_IR_contains,
get_start_end)
class TestHiFrames(unittest.TestCase):
def test_column_list_select2(self):
# make sure SDC copies the columns like Pandas does
def test_impl(df):
df2 = df[['A']]
df2['A'] += 10
return df2.A, df.A
hpat_func = sdc.jit(test_impl)
n = 11
df = pd.DataFrame(
{'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
np.testing.assert_array_equal(hpat_func(df.copy())[1], test_impl(df)[1])
def test_pd_DataFrame_from_series_par(self):
def test_impl(n):
S1 = pd.Series(np.ones(n))
S2 = pd.Series(np.random.ranf(n))
df = pd.DataFrame({'A': S1, 'B': S2})
return df.A.sum()
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
def test_getitem_bool_series(self):
def test_impl(df):
return df['A'][df['B']].values
hpat_func = sdc.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
np.testing.assert_array_equal(test_impl(df), hpat_func(df))
def test_fillna(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
B = df.A.fillna(5.0)
return B.sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_fillna_inplace(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
df.A.fillna(5.0, inplace=True)
return df.A.sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_column_mean(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
return df.A.mean()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_column_var(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.var()
hpat_func = sdc.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
def test_column_std(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.std()
hpat_func = sdc.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
def test_column_map(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
df['B'] = df.A.map(lambda a: 2 * a)
return df.B.sum()
n = 121
hpat_func = sdc.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_column_map_arg(self):
def test_impl(df):
df['B'] = df.A.map(lambda a: 2 * a)
return
n = 121
df1 = pd.DataFrame({'A': np.arange(n)})
df2 = pd.DataFrame({'A': np.arange(n)})
hpat_func = sdc.jit(test_impl)
hpat_func(df1)
self.assertTrue(hasattr(df1, 'B'))
test_impl(df2)
np.testing.assert_equal(df1.B.values, df2.B.values)
def test_cumsum(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df.A.cumsum()
return Ac.sum()
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_array_OneDs(), 2)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 2)
self.assertTrue(dist_IR_contains('dist_cumsum'))
def test_column_distribution(self):
# make sure all column calls are distributed
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df.A.fillna(5.0, inplace=True)
DF = df.A.fillna(5.0)
s = DF.sum()
m = df.A.mean()
v = df.A.var()
t = df.A.std()
Ac = df.A.cumsum()
return Ac.sum() + s + m + v + t
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(dist_IR_contains('dist_cumsum'))
def test_quantile_parallel(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
return df.A.quantile(.25)
hpat_func = sdc.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_parallel_float_nan(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float32)})
df.A[0:100] = np.nan
df.A[200:331] = np.nan
return df.A.quantile(.25)
hpat_func = sdc.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_parallel_int(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.int32)})
return df.A.quantile(.25)
hpat_func = sdc.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_sequential(self):
def test_impl(A):
df = pd.DataFrame({'A': A})
return df.A.quantile(.25)
hpat_func = sdc.jit(test_impl)
n = 1001
A = np.arange(0, n, 1, np.float64)
np.testing.assert_almost_equal(hpat_func(A), test_impl(A))
def test_nunique(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
df.A[2] = 0
return df.A.nunique()
hpat_func = sdc.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
# test compile again for overload related issues
hpat_func = sdc.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_nunique_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.four.nunique()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
# test compile again for overload related issues
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
def test_nunique_str(self):
def test_impl(n):
df = pd.DataFrame({'A': ['aa', 'bb', 'aa', 'cc', 'cc']})
return df.A.nunique()
hpat_func = sdc.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
# test compile again for overload related issues
hpat_func = sdc.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@unittest.skip('AssertionError - fix needed\n'
'5 != 3\n')
def test_nunique_str_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.two.nunique()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
# test compile again for overload related issues
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
def test_unique_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return (df.four.unique() == 3.0).sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@unittest.skip('AssertionError - fix needed\n'
'2 != 1\n')
def test_unique_str_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return (df.two.unique() == 'foo').sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
def test_describe(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
return df.A.describe()
hpat_func = sdc.jit(test_impl)
n = 1001
hpat_func(n)
# XXX: test actual output
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_str_contains_regex(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'ADEF'])
df = pd.DataFrame({'A': A})
B = df.A.str.contains('AB*', regex=True)
return B.sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), 2)
def test_str_contains_noregex(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'ADEF'])
df = pd.DataFrame({'A': A})
B = df.A.str.contains('BB', regex=False)
return B.sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), 1)
def test_str_replace_regex(self):
def test_impl(df):
return df.A.str.replace('AB*', 'EE', regex=True)
df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_str_replace_noregex(self):
def test_impl(df):
return df.A.str.replace('AB', 'EE', regex=False)
df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_str_replace_regex_parallel(self):
def test_impl(df):
B = df.A.str.replace('AB*', 'EE', regex=True)
return B
n = 5
A = ['ABCC', 'CABBD', 'CCD', 'CCDAABB', 'ED']
start, end = get_start_end(n)
df = pd.DataFrame({'A': A[start:end]})
hpat_func = sdc.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
def test_str_split(self):
def test_impl(df):
return df.A.str.split(',')
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_str_split_default(self):
def test_impl(df):
return df.A.str.split()
df = pd.DataFrame({'A': ['AB CC', 'C ABB D', 'G ', ' ', 'g\t f']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_str_split_filter(self):
def test_impl(df):
B = df.A.str.split(',')
df2 = pd.DataFrame({'B': B})
return df2[df2.B.str.len() > 1]
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_frame_equal(
hpat_func(df), test_impl(df).reset_index(drop=True))
def test_str_split_box_df(self):
def test_impl(df):
return pd.DataFrame({'B': df.A.str.split(',')})
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df).B, test_impl(df).B, check_names=False)
def test_str_split_unbox_df(self):
def test_impl(df):
return df.A.iloc[0]
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
df2 = pd.DataFrame({'A': df.A.str.split(',')})
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(df2), test_impl(df2))
def test_str_split_bool_index(self):
def test_impl(df):
C = df.A.str.split(',')
return C[df.B == 'aa']
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D'], 'B': ['aa', 'bb']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_str_split_parallel(self):
def test_impl(df):
B = df.A.str.split(',')
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = sdc.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
def test_str_get(self):
def test_impl(df):
B = df.A.str.split(',')
return B.str.get(1)
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
    def test_str_split2(self):
def test_impl(df):
return df.A.str.split(',')
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(df), test_impl(df), check_names=False)
def test_str_get_parallel(self):
def test_impl(df):
A = df.A.str.split(',')
B = A.str.get(1)
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD,F', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = sdc.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
def test_str_get_to_numeric(self):
def test_impl(df):
B = df.A.str.split(',')
C = pd.to_numeric(B.str.get(1), errors='coerce')
return C
df = pd.DataFrame({'A': ['AB,12', 'C,321,D']})
hpat_func = sdc.jit(locals={'C': sdc.int64[:]})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_str_flatten(self):
def test_impl(df):
A = df.A.str.split(',')
return pd.Series(list(itertools.chain(*A)))
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = sdc.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_str_flatten_parallel(self):
def test_impl(df):
A = df.A.str.split(',')
B = pd.Series(list(itertools.chain(*A)))
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = sdc.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
def test_to_numeric(self):
def test_impl(df):
B = pd.to_numeric(df.A, errors='coerce')
return B
df = pd.DataFrame({'A': ['123.1', '331.2']})
hpat_func = sdc.jit(locals={'B': sdc.float64[:]})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
def test_1D_Var_len(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n) + 1.0})
df1 = df[df.A > 5]
return len(df1.B)
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_rolling1(self):
# size 3 without unroll
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.random.ranf(n)})
Ac = df.A.rolling(3).sum()
return Ac.sum()
hpat_func = sdc.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
# size 7 with unroll
def test_impl_2(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.rolling(7).sum()
return Ac.sum()
        hpat_func = sdc.jit(test_impl_2)
        n = 121
        self.assertEqual(hpat_func(n), test_impl_2(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_rolling2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df['moving average'] = df.A.rolling(window=5, center=True).mean()
return df['moving average'].sum()
hpat_func = sdc.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_rolling3(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df.A.rolling(3, center=True).apply(lambda a: a[0] + 2 * a[1] + a[2])
return Ac.sum()
hpat_func = sdc.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_shift1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.shift(1)
return Ac.sum()
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_shift2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.pct_change(1)
return Ac.sum()
hpat_func = sdc.jit(test_impl)
n = 11
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_df_input(self):
def test_impl(df):
return df.B.sum()
n = 121
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
hpat_func = sdc.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df), test_impl(df))
def test_df_input2(self):
def test_impl(df):
C = df.B == 'two'
return C.sum()
n = 11
df = pd.DataFrame({'A': np.random.ranf(3 * n), 'B': ['one', 'two', 'three'] * n})
hpat_func = sdc.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df), test_impl(df))
def test_df_input_dist1(self):
def test_impl(df):
return df.B.sum()
n = 121
A = [3, 4, 5, 6, 1]
B = [5, 6, 2, 1, 3]
n = 5
start, end = get_start_end(n)
df = pd.DataFrame({'A': A, 'B': B})
df_h = pd.DataFrame({'A': A[start:end], 'B': B[start:end]})
hpat_func = sdc.jit(distributed={'df'})(test_impl)
np.testing.assert_almost_equal(hpat_func(df_h), test_impl(df))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_concat(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
df3 = pd.concat([df1, df2])
return df3.A.sum() + df3.key2.sum()
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
def test_concat_str(self):
def test_impl():
df1 = pq.read_table('example.parquet').to_pandas()
df2 = pq.read_table('example.parquet').to_pandas()
A3 = pd.concat([df1, df2])
return (A3.two == 'foo').sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_concat_series(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
A3 = pd.concat([df1.A, df2.A])
return A3.sum()
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
def test_concat_series_str(self):
def test_impl():
df1 = pq.read_table('example.parquet').to_pandas()
df2 = pq.read_table('example.parquet').to_pandas()
A3 = pd.concat([df1.two, df2.two])
return (A3 == 'foo').sum()
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skipIf(int(os.getenv('SDC_NP_MPI', '0')) > 1, 'Test hangs on NP=2 and NP=3 on all platforms')
def test_intraday(self):
def test_impl(nsyms):
max_num_days = 100
all_res = 0.0
for i in sdc.prange(nsyms):
s_open = 20 * np.ones(max_num_days)
s_low = 28 * np.ones(max_num_days)
s_close = 19 * np.ones(max_num_days)
df = pd.DataFrame({'Open': s_open, 'Low': s_low, 'Close': s_close})
df['Stdev'] = df['Close'].rolling(window=90).std()
df['Moving Average'] = df['Close'].rolling(window=20).mean()
df['Criteria1'] = (df['Open'] - df['Low'].shift(1)) < -df['Stdev']
df['Criteria2'] = df['Open'] > df['Moving Average']
df['BUY'] = df['Criteria1'] & df['Criteria2']
df['Pct Change'] = (df['Close'] - df['Open']) / df['Open']
df['Rets'] = df['Pct Change'][df['BUY']]
all_res += df['Rets'].mean()
return all_res
hpat_func = sdc.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_OneDs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
def test_var_dist1(self):
def test_impl(A, B):
df = pd.DataFrame({'A': A, 'B': B})
df2 = df.groupby('A', as_index=False)['B'].sum()
# TODO: fix handling of df setitem to force match of array dists
# probably with a new node that is appended to the end of basic block
# df2['C'] = np.full(len(df2.B), 3, np.int8)
# TODO: full_like for Series
df2['C'] = np.full_like(df2.B.values, 3, np.int8)
return df2
A = np.array([1, 1, 2, 3])
B = np.array([3, 4, 5, 6])
hpat_func = sdc.jit(locals={'A:input': 'distributed',
'B:input': 'distributed', 'df2:return': 'distributed'})(test_impl)
start, end = get_start_end(len(A))
df2 = hpat_func(A[start:end], B[start:end])
# TODO:
# pd.testing.assert_frame_equal(
# hpat_func(A[start:end], B[start:end]), test_impl(A, B))
if __name__ == "__main__":
unittest.main()
|