filename | text
---|---
the-stack_0_5212 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datasets
from huggingface_hub.inference_api import InferenceApi
from .testing_utils import with_production_testing
class InferenceApiTest(unittest.TestCase):
def read(self, filename: str) -> bytes:
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
@with_production_testing
def test_simple_inference(self):
api = InferenceApi("bert-base-uncased")
inputs = "Hi, I think [MASK] is cool"
results = api(inputs)
self.assertIsInstance(results, list)
result = results[0]
self.assertIsInstance(result, dict)
self.assertTrue("sequence" in result)
self.assertTrue("score" in result)
@with_production_testing
def test_inference_with_params(self):
api = InferenceApi("typeform/distilbert-base-uncased-mnli")
inputs = "I bought a device but it is not working and I would like to get reimbursed!"
params = {"candidate_labels": ["refund", "legal", "faq"]}
result = api(inputs, params)
self.assertIsInstance(result, dict)
self.assertTrue("sequence" in result)
self.assertTrue("scores" in result)
@with_production_testing
def test_inference_with_dict_inputs(self):
api = InferenceApi("deepset/roberta-base-squad2")
inputs = {
"question": "What's my name?",
"context": "My name is Clara and I live in Berkeley.",
}
result = api(inputs)
self.assertIsInstance(result, dict)
self.assertTrue("score" in result)
self.assertTrue("answer" in result)
@with_production_testing
def test_inference_with_audio(self):
api = InferenceApi("facebook/wav2vec2-large-960h-lv60-self")
dataset = datasets.load_dataset(
"patrickvonplaten/librispeech_asr_dummy", "clean", split="validation"
)
data = self.read(dataset["file"][0])
result = api(data=data)
self.assertIsInstance(result, dict)
self.assertTrue("text" in result)
@with_production_testing
def test_inference_with_image(self):
api = InferenceApi("google/vit-base-patch16-224")
dataset = datasets.load_dataset("Narsil/image_dummy", "image", split="test")
data = self.read(dataset["file"][0])
result = api(data=data)
self.assertIsInstance(result, list)
for classification in result:
self.assertIsInstance(classification, dict)
self.assertTrue("score" in classification)
self.assertTrue("label" in classification)
@with_production_testing
def test_inference_overriding_task(self):
api = InferenceApi(
"sentence-transformers/paraphrase-albert-small-v2",
task="feature-extraction",
)
inputs = "This is an example again"
result = api(inputs)
self.assertIsInstance(result, list)
@with_production_testing
def test_inference_overriding_invalid_task(self):
with self.assertRaises(
ValueError, msg="Invalid task invalid-task. Make sure it's valid."
):
InferenceApi("bert-base-uncased", task="invalid-task")
@with_production_testing
def test_inference_missing_input(self):
api = InferenceApi("deepset/roberta-base-squad2")
result = api({"question": "What's my name?"})
self.assertIsInstance(result, dict)
self.assertTrue("error" in result)
self.assertTrue("warnings" in result)
self.assertTrue(len(result["warnings"]) > 0)
|
the-stack_0_5214 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import os
import xml.etree.ElementTree as ET
from typing import List, Tuple, Union
from fvcore.common.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
__all__ = ["load_voc_instances", "register_pascal_voc"]
# fmt: off
CLASS_NAMES = (
"aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
"chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
"pottedplant", "sheep", "sofa", "train", "tvmonitor"
)
# fmt: on
def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
"""
Load Pascal VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "Annotations", "ImageSets", "JPEGImages"
split (str): one of "train", "test", "val", "trainval"
class_names: list or tuple of class names
"""
with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=str)
# Needs to read many small annotation files. Makes sense at local
annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
dicts = []
for fileid in fileids:
anno_file = os.path.join(annotation_dirname, fileid + ".xml")
jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
with PathManager.open(anno_file) as f:
tree = ET.parse(f)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
# We include "difficult" samples in training.
# Based on limited experiments, they don't hurt accuracy.
# difficult = int(obj.find("difficult").text)
# if difficult == 1:
# continue
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
# Original annotations are integers in the range [1, W or H]
# Assuming they mean 1-based pixel indices (inclusive),
# a box with annotation (xmin=1, xmax=W) covers the whole image.
# In coordinate space this is represented by (xmin=0, xmax=W)
bbox[0] -= 1.0
bbox[1] -= 1.0
instances.append(
{"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
return dicts
def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
MetadataCatalog.get(name).set(
thing_classes=list(class_names), dirname=dirname, year=year, split=split
)
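# Illustrative usage sketch (not part of the original file); the dataset name and
# directory below are hypothetical:
#
#   register_pascal_voc("my_voc_2007_trainval", "datasets/VOC2007", "trainval", 2007)
#   dicts = DatasetCatalog.get("my_voc_2007_trainval")
#   print(len(dicts), dicts[0]["file_name"])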
|
the-stack_0_5215 | from .file import read_file_upper
class Solver:
"""
Solver for a wordsearch puzzle.
Variables:
directions {list} -- Two-digit permutations of [-1, 0, 1], excluding [0, 0].
"""
directions = [
[ 0, -1],
[-1, 0],
[ 0, 1],
[ 1, 0],
[-1, -1],
[-1, 1],
[ 1, -1],
[ 1, 1]
]
def __init__(self, puzzle):
self.puzzle = puzzle
def find_candidates(self, char):
candidates = []
for row_key, row in enumerate(self.puzzle.grid):
for col_key, cell in enumerate(row):
if char == cell:
candidates.append([row_key, col_key])
return candidates
def find_word(self, word):
word_chars = list(word.replace(' ', '').strip().upper())
for candidate in self.find_candidates(word_chars[0]):
for direction in self.directions:
coords = []
for char_key, char in enumerate(word_chars):
row_key = candidate[0] + (direction[0] * char_key)
col_key = candidate[1] + (direction[1] * char_key)
if self.puzzle.coord_matches(row_key, col_key, char):
coords.append([row_key, col_key])
else:
break
if len(coords) == len(word_chars):
return coords
return []
def find_words(self, words):
coords = []
for word in words:
coords = coords + self.find_word(word)
return coords
def find_from_wordlist(self, wordlist):
words = read_file_upper(wordlist).splitlines()
return self.find_words(words)
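# Illustrative usage sketch (not from the original module). Solver only relies on
# two members of the puzzle object: a `grid` of uppercase characters and a
# `coord_matches(row, col, char)` predicate, so a hypothetical 3x3 puzzle with
#
#   puzzle.grid = [list("CAT"), list("OXO"), list("DOG")]
#
# would give:
#
#   Solver(puzzle).find_word("dog")  ->  [[2, 0], [2, 1], [2, 2]]
#   Solver(puzzle).find_word("cod")  ->  [[0, 0], [1, 0], [2, 0]]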
|
the-stack_0_5216 | import logging
import typing
import numpy as np
from scipy import optimize
from smac.configspace import ConfigurationSpace
from smac.epm.base_gp import BaseModel
from smac.epm.gp_base_prior import Prior
from smac.utils.constants import VERY_SMALL_NUMBER
from skopt.learning.gaussian_process.kernels import Kernel
from skopt.learning.gaussian_process import GaussianProcessRegressor
logger = logging.getLogger(__name__)
class GaussianProcess(BaseModel):
"""
Gaussian process model.
    The GP hyperparameters are obtained by optimizing the marginal log likelihood.
This code is based on the implementation of RoBO:
Klein, A. and Falkner, S. and Mansur, N. and Hutter, F.
RoBO: A Flexible and Robust Bayesian Optimization Framework in Python
In: NIPS 2017 Bayesian Optimization Workshop
Parameters
----------
types : List[int]
Specifies the number of categorical values of an input dimension where
the i-th entry corresponds to the i-th input dimension. Let's say we
have 2 dimension where the first dimension consists of 3 different
categorical choices and the second dimension is continuous than we
have to pass [3, 0]. Note that we count starting from 0.
bounds : List[Tuple[float, float]]
        bounds of input dimensions: (lower, upper) for continuous dims; (n_cat, np.nan) for categorical dims
seed : int
Model seed.
kernel : george kernel object
Specifies the kernel that is used for all Gaussian Process
prior : prior object
Defines a prior for the hyperparameters of the GP. Make sure that
it implements the Prior interface.
normalize_y : bool
Zero mean unit variance normalization of the output values
    n_opt_restarts : int
Number of restarts for GP hyperparameter optimization
instance_features : np.ndarray (I, K)
Contains the K dimensional instance features of the I different instances
pca_components : float
Number of components to keep when using PCA to reduce dimensionality of instance features. Requires to
set n_feats (> pca_dims).
"""
def __init__(
self,
configspace: ConfigurationSpace,
types: typing.List[int],
bounds: typing.List[typing.Tuple[float, float]],
seed: int,
kernel: Kernel,
normalize_y: bool = True,
n_opt_restarts: int = 10,
instance_features: typing.Optional[np.ndarray] = None,
pca_components: typing.Optional[int] = None,
):
super().__init__(
configspace=configspace,
types=types,
bounds=bounds,
seed=seed,
kernel=kernel,
instance_features=instance_features,
pca_components=pca_components,
)
self.normalize_y = normalize_y
self.n_opt_restarts = n_opt_restarts
self.hypers = np.empty((0, ))
self.is_trained = False
self._n_ll_evals = 0
self._set_has_conditions()
def _train(self, X: np.ndarray, y: np.ndarray, do_optimize: bool = True) -> 'GaussianProcess':
"""
Computes the Cholesky decomposition of the covariance of X and
estimates the GP hyperparameters by optimizing the marginal
loglikelihood. The prior mean of the GP is set to the empirical
mean of X.
Parameters
----------
X: np.ndarray (N, D)
Input data points. The dimensionality of X is (N, D),
with N as the number of points and D is the number of features.
y: np.ndarray (N,)
The corresponding target values.
do_optimize: boolean
If set to true the hyperparameters are optimized otherwise
the default hyperparameters of the kernel are used.
"""
X = self._impute_inactive(X)
if self.normalize_y:
y = self._normalize_y(y)
if len(y.shape) == 1:
self.n_objectives_ = 1
else:
self.n_objectives_ = y.shape[1]
if self.n_objectives_ == 1:
y = y.flatten()
n_tries = 10
for i in range(n_tries):
try:
self.gp = self._get_gp()
self.gp.fit(X, y)
break
except np.linalg.LinAlgError as e:
                if i == n_tries - 1:  # re-raise if the final attempt also fails
raise e
# Assume that the last entry of theta is the noise
theta = np.exp(self.kernel.theta)
theta[-1] += 1
self.kernel.theta = np.log(theta)
if do_optimize:
self._all_priors = self._get_all_priors(add_bound_priors=False)
self.hypers = self._optimize()
self.gp.kernel.theta = self.hypers
self.gp.fit(X, y)
else:
self.hypers = self.gp.kernel.theta
self.is_trained = True
return self
def _get_gp(self) -> GaussianProcessRegressor:
return GaussianProcessRegressor(
kernel=self.kernel,
normalize_y=False,
optimizer=None,
n_restarts_optimizer=-1, # Do not use scikit-learn's optimization routine
alpha=0, # Governed by the kernel
noise=None,
random_state=self.rng,
)
def _nll(self, theta: np.ndarray) -> typing.Tuple[float, np.ndarray]:
"""
Returns the negative marginal log likelihood (+ the prior) for
a hyperparameter configuration theta.
(negative because we use scipy minimize for optimization)
Parameters
----------
theta : np.ndarray(H)
            Hyperparameter vector. Note that all hyperparameters are
on a log scale.
Returns
----------
float
lnlikelihood + prior
"""
self._n_ll_evals += 1
try:
lml, grad = self.gp.log_marginal_likelihood(theta, eval_gradient=True)
except np.linalg.LinAlgError:
return 1e25, np.zeros(theta.shape)
for dim, priors in enumerate(self._all_priors):
for prior in priors:
lml += prior.lnprob(theta[dim])
grad[dim] += prior.gradient(theta[dim])
# We add a minus here because scipy is minimizing
if not np.isfinite(lml).all() or not np.all(np.isfinite(grad)):
return 1e25, np.zeros(theta.shape)
else:
return -lml, -grad
def _optimize(self) -> np.ndarray:
"""
Optimizes the marginal log likelihood and returns the best found
hyperparameter configuration theta.
Returns
-------
theta : np.ndarray(H)
Hyperparameter vector that maximizes the marginal log likelihood
"""
log_bounds = [(b[0], b[1]) for b in self.gp.kernel.bounds]
# Start optimization from the previous hyperparameter configuration
p0 = [self.gp.kernel.theta]
if self.n_opt_restarts > 0:
dim_samples = []
prior = None # type: typing.Optional[typing.Union[typing.List[Prior], Prior]]
for dim, hp_bound in enumerate(log_bounds):
prior = self._all_priors[dim]
# Always sample from the first prior
if isinstance(prior, list):
if len(prior) == 0:
prior = None
else:
prior = prior[0]
prior = typing.cast(typing.Optional[Prior], prior)
if prior is None:
try:
sample = self.rng.uniform(
low=hp_bound[0],
high=hp_bound[1],
size=(self.n_opt_restarts,),
)
except OverflowError:
raise ValueError('OverflowError while sampling from (%f, %f)' % (hp_bound[0], hp_bound[1]))
dim_samples.append(sample.flatten())
else:
dim_samples.append(prior.sample_from_prior(self.n_opt_restarts).flatten())
p0 += list(np.vstack(dim_samples).transpose())
theta_star = None
f_opt_star = np.inf
for i, start_point in enumerate(p0):
theta, f_opt, _ = optimize.fmin_l_bfgs_b(self._nll, start_point, bounds=log_bounds)
if f_opt < f_opt_star:
f_opt_star = f_opt
theta_star = theta
return theta_star
def _predict(self, X_test: np.ndarray,
cov_return_type: typing.Optional[str] = 'diagonal_cov') \
-> typing.Tuple[np.ndarray, typing.Optional[np.ndarray]]:
r"""
Returns the predictive mean and variance of the objective function at
the given test points.
Parameters
----------
X_test: np.ndarray (N, D)
Input test points
cov_return_type: typing.Optional[str]
Specifies what to return along with the mean. Refer ``predict()`` for more information.
Returns
----------
np.array(N,)
predictive mean
np.array(N,) or np.array(N, N) or None
predictive variance or standard deviation
"""
if not self.is_trained:
raise Exception('Model has to be trained first!')
X_test = self._impute_inactive(X_test)
if cov_return_type is None:
mu = self.gp.predict(X_test)
var = None
if self.normalize_y:
mu = self._untransform_y(mu)
else:
predict_kwargs = {'return_cov': False, 'return_std': True}
if cov_return_type == 'full_cov':
predict_kwargs = {'return_cov': True, 'return_std': False}
mu, var = self.gp.predict(X_test, **predict_kwargs)
if cov_return_type != 'full_cov':
var = var ** 2 # since we get standard deviation for faster computation
# Clip negative variances and set them to the smallest
# positive float value
var = np.clip(var, VERY_SMALL_NUMBER, np.inf)
if self.normalize_y:
mu, var = self._untransform_y(mu, var)
if cov_return_type == 'diagonal_std':
var = np.sqrt(var) # converting variance to std deviation if specified
return mu, var
def sample_functions(self, X_test: np.ndarray, n_funcs: int = 1) -> np.ndarray:
"""
Samples F function values from the current posterior at the N
specified test points.
Parameters
----------
X_test: np.ndarray (N, D)
Input test points
n_funcs: int
Number of function values that are drawn at each test point.
Returns
----------
function_samples: np.array(F, N)
The F function values drawn at the N test points.
"""
if not self.is_trained:
raise Exception('Model has to be trained first!')
X_test = self._impute_inactive(X_test)
funcs = self.gp.sample_y(X_test, n_samples=n_funcs, random_state=self.rng)
if self.normalize_y:
funcs = self._untransform_y(funcs)
if len(funcs.shape) == 1:
return funcs[None, :]
else:
return funcs
|
the-stack_0_5217 | #!/usr/bin/env python
"""
@package ion.agents.platform.resource_monitor
@file ion/agents/platform/resource_monitor.py
@author Carlos Rueda
@brief Platform resource monitoring for a set of attributes having same rate
"""
__author__ = 'Carlos Rueda'
import logging
import pprint
from gevent import Greenlet, sleep
from pyon.public import log
from pyon.util.containers import current_time_millis
from ion.agents.platform.platform_driver_event import AttributeValueDriverEvent
from ion.agents.platform.util import ntp_2_ion_ts
# Platform attribute values are reported for the stream name "parsed".
# TODO confirm this.
_STREAM_NAME = "parsed"
# A small "ION System time" compliant increment to the latest received timestamp
# for purposes of the next request so we don't get that last sample repeated.
# Since "ION system time" is in milliseconds, this delta is in milliseconds.
_DELTA_TIME = 10
# _MULT_INTERVAL: for any request, the from_time parameter will be at most
# _MULT_INTERVAL * _rate_secs in the past wrt current time (OOIION-1372):
_MULT_INTERVAL = 3
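# Worked example of the two constants above (illustrative numbers): with
# rate_secs = 5, the from_time of any request is clamped to at most
# _MULT_INTERVAL * 5 = 15 seconds in the past, and a follow-up request starts
# _DELTA_TIME = 10 ms after the latest timestamp already received, so the last
# sample is not repeated.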
class ResourceMonitor(object):
"""
Monitor for specific attributes having the same nominal monitoring rate.
"""
def __init__(self, platform_id, rate_secs, attr_defns,
get_attribute_values, notify_driver_event):
"""
Creates a monitor for a specific attribute in a given platform.
Call start to start the monitoring greenlet.
@param platform_id Platform ID
@param rate_secs Monitoring rate in secs
@param attr_defns List of attribute definitions
@param get_attribute_values
Function to retrieve attribute values for the specific
platform, to be called like this:
get_attribute_values(attr_ids, from_time)
@param notify_driver_event
Callback to notify whenever a value is retrieved.
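        The values returned by get_attribute_values are expected to map each
        attribute ID to a list of (value, ntp_timestamp) pairs, e.g.
        {attr_id: [(value, ntp_timestamp), ...], ...}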
"""
log.debug("%r: ResourceMonitor entered. rate_secs=%s, attr_defns=%s",
platform_id, rate_secs, attr_defns)
self._get_attribute_values = get_attribute_values
self._platform_id = platform_id
self._rate_secs = rate_secs
self._attr_defns = attr_defns
self._notify_driver_event = notify_driver_event
# corresponding attribute IDs to be retrieved
self._attr_ids = []
# and "ION System time" compliant timestamp of last retrieved value for
# each attribute:
self._last_ts_millis = {}
for attr_defn in self._attr_defns:
if 'attr_id' in attr_defn:
attr_id = attr_defn['attr_id']
self._attr_ids.append(attr_id)
self._last_ts_millis[attr_id] = None
else:
log.warn("%r: 'attr_id' key expected in attribute definition: %s",
self._platform_id, attr_defn)
self._active = False
# for debugging purposes
self._pp = pprint.PrettyPrinter()
log.debug("%r: ResourceMonitor created. rate_secs=%s, attr_ids=%s",
platform_id, rate_secs, self._attr_ids)
def __str__(self):
return "%s{platform_id=%r; rate_secs=%s; attr_ids=%s}" % (
self.__class__.__name__,
self._platform_id, self._rate_secs, str(self._attr_ids))
def start(self):
"""
Starts greenlet for resource monitoring.
"""
log.debug("%r: starting resource monitoring %s", self._platform_id, self)
self._active = True
runnable = Greenlet(self._run)
runnable.start()
def _run(self):
"""
The target function for the greenlet.
"""
while self._active:
slept = 0
# loop to incrementally sleep up to rate_secs while promptly
# reacting to request for termination
while self._active and slept < self._rate_secs:
# sleep in increments of 0.5 secs
incr = min(0.5, self._rate_secs - slept)
sleep(incr)
slept += incr
if self._active:
try:
self._retrieve_attribute_values()
except Exception:
log.exception("exception in _retrieve_attribute_values")
log.debug("%r: monitoring greenlet stopped. rate_secs=%s; attr_ids=%s",
self._platform_id, self._rate_secs, self._attr_ids)
def _retrieve_attribute_values(self):
"""
Retrieves the attribute values using the given function and calls
_values_retrieved.
"""
# TODO: note that the "from_time" parameters for the request below
# as well as the expected response are influenced by the RSN case
# (see CI-OMS interface). Need to see whether it also applies to
# CGSN so eventually adjustments may be needed.
#
# note that the "from_time" parameter in each pair (attr_id, from_time)
# for the _get_attribute_values call below, is in millis in UNIX epoch.
curr_time_millis = current_time_millis()
# minimum value for the from_time parameter (OOIION-1372):
min_from_time = curr_time_millis - 1000 * _MULT_INTERVAL * self._rate_secs
# this corresponds to (_MULT_INTERVAL * self._rate_secs) ago.
# determine each from_time for the request:
attrs = []
for attr_id in self._attr_ids:
if self._last_ts_millis[attr_id] is None:
# Very first request for this attribute. Use min_from_time:
from_time = min_from_time
else:
# We've already got values for this attribute. Use the latest
# timestamp + _DELTA_TIME as a basis for the new request:
from_time = int(self._last_ts_millis[attr_id]) + _DELTA_TIME
# but adjust it if it goes too far in the past:
if from_time < min_from_time:
from_time = min_from_time
log.trace("test_resource_monitoring_recent: attr_id=%s, from_time=%s, %s millis ago",
attr_id, from_time, curr_time_millis - from_time)
attrs.append((attr_id, from_time))
log.debug("%r: _retrieve_attribute_values: attrs=%s",
self._platform_id, attrs)
retrieved_vals = self._get_attribute_values(attrs)
if retrieved_vals is None:
# lost connection; nothing else to do here:
return
good_retrieved_vals = {}
# do validation: we expect an array of tuples (val, timestamp) for
# each attribute. If not, log a warning for the attribute and
# continue processing with the other valid attributes:
for attr_id, vals in retrieved_vals.iteritems():
if not isinstance(vals, (list, tuple)):
log.warn("%r: expecting an array for attribute %r, but got: %r",
self._platform_id, attr_id, vals)
continue
if len(vals):
if not isinstance(vals[0], (tuple, list)):
log.warn("%r: expecting elements in array to be tuples "
"(val, ts) for attribute %r, but got: %r",
self._platform_id, attr_id, vals[0])
continue
good_retrieved_vals[attr_id] = vals # even if empty array.
if not good_retrieved_vals:
# nothing else to do. TODO perhaps an additional warning?
return
if log.isEnabledFor(logging.TRACE): # pragma: no cover
# show good_retrieved_vals as retrieved (might be large)
log.trace("%r: _retrieve_attribute_values: _get_attribute_values "
"for attrs=%s returned\n%s",
self._platform_id, attrs, self._pp.pformat(good_retrieved_vals))
elif log.isEnabledFor(logging.DEBUG): # pragma: no cover
summary = {attr_id: "(%d vals)" % len(vals)
for attr_id, vals in good_retrieved_vals.iteritems()}
log.debug("%r: _retrieve_attribute_values: _get_attribute_values "
"for attrs=%s returned %s",
self._platform_id, attrs, summary)
# vals_dict: attributes with non-empty reported values:
vals_dict = {}
for attr_id, from_time in attrs:
if not attr_id in good_retrieved_vals:
log.warn("%r: _retrieve_attribute_values: unexpected: "
"response does not include requested attribute %r. "
"Response is: %s",
self._platform_id, attr_id, good_retrieved_vals)
continue
attr_vals = good_retrieved_vals[attr_id]
if not attr_vals:
log.debug("%r: No values reported for attribute=%r from_time=%f",
self._platform_id, attr_id, from_time)
continue
if log.isEnabledFor(logging.DEBUG):
self._debug_values_retrieved(attr_id, attr_vals)
# ok, include this attribute for the notification:
vals_dict[attr_id] = attr_vals
if vals_dict:
self._values_retrieved(vals_dict)
def _values_retrieved(self, vals_dict):
"""
A values response has been received. Create and notify
corresponding event to platform agent.
"""
# update _last_ts_millis for each retrieved attribute:
for attr_id, attr_vals in vals_dict.iteritems():
_, ntp_ts = attr_vals[-1]
# update _last_ts_millis based on ntp_ts: note that timestamps are reported
# in NTP so we need to convert it to ION system time for a subsequent request:
self._last_ts_millis[attr_id] = ntp_2_ion_ts(ntp_ts)
# finally, notify the values event:
driver_event = AttributeValueDriverEvent(self._platform_id,
_STREAM_NAME,
vals_dict)
self._notify_driver_event(driver_event)
def _debug_values_retrieved(self, attr_id, values): # pragma: no cover
ln = len(values)
# just show a couple of elements
arrstr = "["
if ln <= 3:
vals = [str(e) for e in values[:ln]]
arrstr += ", ".join(vals)
else:
vals = [str(e) for e in values[:2]]
last_e = values[-1]
arrstr += ", ".join(vals)
arrstr += ", ..., " + str(last_e)
arrstr += "]"
log.debug("%r: attr=%r: values retrieved(%s) = %s",
self._platform_id, attr_id, ln, arrstr)
def stop(self):
log.debug("%r: stopping resource monitoring %s", self._platform_id, self)
self._active = False
|
the-stack_0_5220 | # -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
from skimage.draw import disk
import cv2
defocusKernelDims = [3,5,7,9]
def DefocusBlur_random(img):
kernelidx = np.random.randint(0, len(defocusKernelDims))
kerneldim = defocusKernelDims[kernelidx]
return DefocusBlur(img, kerneldim)
def DefocusBlur(img, dim):
imgarray = np.array(img, dtype="float32")
kernel = DiskKernel(dim)
# convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
convolved = cv2.filter2D(imgarray, -1, kernel).astype("uint8")
img = Image.fromarray(convolved)
return img
def DiskKernel(dim):
kernelwidth = dim
kernel = np.zeros((kernelwidth, kernelwidth), dtype=np.float32)
circleCenterCoord = dim / 2
circleRadius = circleCenterCoord +1
rr, cc = disk((circleCenterCoord, circleCenterCoord), circleRadius-1)
kernel[rr, cc] = 1
if(dim == 3 or dim == 5):
kernel = Adjust(kernel, dim)
normalizationFactor = np.count_nonzero(kernel)
kernel = kernel / normalizationFactor
return kernel
def Adjust(kernel, kernelwidth):
kernel[0,0] = 0
kernel[0,kernelwidth-1]=0
kernel[kernelwidth-1,0]=0
kernel[kernelwidth-1, kernelwidth-1] =0
return kernel
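# Illustrative usage sketch (not part of the original file); the file names are
# hypothetical:
#
#   img = Image.open("example.png").convert("RGB")
#   DefocusBlur_random(img).save("example_defocused.png")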
|
the-stack_0_5223 | import numpy as np
import random
import logging
import logging.config
#logging.disable()
logging.config.fileConfig('logging.conf')
# create logger
logger = logging.getLogger('simpleExample')
# Setting the size of the field
cells_number = 12
"""
We are going to show the following path finding algorithms:
Dijkstra’s Algorithm algorithm = Di
A* Search Algorithm algorithm = A*
D* Algorithm algorithm = D*
"""
class Full_field():
def __init__(self):
'''
Status:
1 = solved
0 = not visited
-1 = to be checked next
-2 = Obstacle
3 = shortest path
'''
self.algorithm = "A*"
self.__start_field()
logger.info('Starting status of the board \n:' + str(self.status))
logger.info('Starting distance values\n' + str(self.values))
def __start_field(self):
self.values = np.zeros((cells_number, cells_number))
self.status = np.zeros((cells_number, cells_number))
self.previous_cell = np.full(
(cells_number, cells_number), tuple)
def set_obstacles(
self, obstacle_positions=[], number_of_obstacles=cells_number):
"""
Obstacles will be flagged as cells of value -2
"""
if obstacle_positions == []:
_ = 0
while _ < number_of_obstacles:
i = random.randint(0, cells_number - 1)
j = random.randint(0, cells_number - 1)
if self.status[i, j] not in [1, 2]:
self.status[i, j] = -2
_ += 1
else:
for pos in obstacle_positions:
if self.status[pos] != 1:
self.status[pos] = -2
def set_origin(
self, x0=0, y0=0):
self.x0 = x0
self.y0 = y0
self.status[x0, y0] = 1
def set_destination(
self, xf=cells_number - 1, yf=cells_number - 1
):
self.xf = xf
self.yf = yf
self.status[xf, yf] = 2
def select_algorithm(self, algorithm):
"""
We are going to show the following path finding algorithms:
Dijkstra’s Algorithm algorithm = Di
A* Search Algorithm algorithm = A*
D* Algorithm algorithm = D*
"""
self.algorithm = algorithm
logger.debug("Algorithm has been set to " + str(algorithm))
def find_shortest_path(self):
while self.status[self.xf, self.yf] != 1:
self.check_nodes()
self.min_closest_node()
logger.info('These are the values\n' + str(self.values))
logger.info('This is to see which one is solved\n' +
str(self.status))
def check_nodes(self):
for i in range(cells_number):
for j in range(cells_number):
if self.status[i, j] == 1:
self.find_next_node(i, j)
logger.info("These are the candidates'status\n" + str(self.status))
def min_closest_node(self):
# Search the closest node with the minimum value
node_values = []
node_pos = []
for i in range(cells_number):
for j in range(cells_number):
if self.status[i, j] == -1:
self.status[i, j] = 0
node_values.append(self.values[i, j])
node_pos.append([i, j])
logger.debug('This is the minimum value: ' + str(node_values))
try:
pos = np.argmin(node_values)
        except ValueError:
            logger.error('It is not possible to go from ' + str(self.x0) + str(self.y0) +
                         " to " + str(self.xf) + str(self.yf))
            raise
closest_node = tuple(node_pos[pos])
logger.debug('Node to be set as solved' + str(closest_node))
self.status[closest_node] = 1
def find_next_node(self, x0, y0):
for i in [x0 - 1, x0, x0 + 1]:
for j in [y0 - 1, y0, y0 + 1]:
if (i >= 0) & (i < cells_number) \
& (j >= 0) & (j < cells_number):
x1 = int(i)
y1 = int(j)
self.__set_value(x0, y0, x1, y1)
def __set_value(self, x0, y0, x1, y1):
distance_to_previous = self.__distance_between(x0, y0, x1, y1)
if self.algorithm == "A*":
distance_to_destination_x0 = self.__distance_between(
x0, y0, self.xf, self.yf)
distance_to_destination = self.__distance_between(
x1, y1, self.xf, self.yf)
elif self.algorithm == "Di":
distance_to_destination = 0
distance_to_destination_x0 = 0
cumulative_distance = self.values[x0, y0] - distance_to_destination_x0 + \
distance_to_previous + distance_to_destination
if (self.status[x1, y1] in [0, 2]) or \
((self.status[x1, y1] == -1) and
(cumulative_distance < self.values[x1, y1])):
self.values[x1, y1] = cumulative_distance
self.status[x1, y1] = -1
self.previous_cell[x1, y1] = (x0, y0)
def __distance_between(self, x0, y0, x1, y1):
distance = ((x1 - x0) ** 2 + (y1 - y0) ** 2) ** (1 / 2)
return distance
def show_path(self):
logger.debug('This is the previous_cell\n' + str(self.previous_cell))
x = self.xf
y = self.yf
while (x != self.x0) | (y != self.y0):
self.status[x, y] = 3
if x == self.xf and y == self.yf:
self.status[x, y] = 2
x, y = self.previous_cell[x, y]
logger.info("This is the shortest path from " + str(self.x0) + str(self.y0) +
" to " + str(self.xf) + str(self.yf) + "\n" + str(self.status))
def solve_fast(self):
self.find_shortest_path()
self.show_path()
def solve_one_step(self):
if self.status[self.xf, self.yf] != 1:
self.check_nodes()
class Find_Path(Full_field):
def __init__(self):
Full_field.__init__(self)
self.set_origin()
self.set_destination()
self.set_obstacles()
self.solve_fast()
if __name__ == "__main__":
Find_Path()
|
the-stack_0_5224 | from ethereum import utils
def mk_multisend_code(payments): # expects a dictionary, {address: wei}
kode = b''
for address, wei in payments.items():
kode += b'\x60\x00\x60\x00\x60\x00\x60\x00' # 0 0 0 0
encoded_wei = utils.encode_int(wei) or b'\x00'
        kode += utils.ascii_chr(0x5f + len(encoded_wei)) + encoded_wei  # PUSH<n> value (0x5f + n is the PUSHn opcode)
        kode += b'\x73' + utils.normalize_address(address)  # PUSH20 to-address
kode += b'\x60\x00\xf1\x50' # 0 CALL POP
kode += b'\x33\xff' # CALLER SELFDESTRUCT
return kode
def get_multisend_gas(payments):
o = 26002 # 21000 + 2 (CALLER) + 5000 (SELFDESTRUCT)
for address, wei in payments.items():
encoded_wei = utils.encode_int(wei) or b'\x00'
# 20 bytes in txdata for address = 1360
# bytes in txdata for wei = 68 * n
# gas for pushes and pops = 3 * 7 + 2 = 23
# CALL = 9700 + 25000 (possible if new account)
o += 1360 + 68 * len(encoded_wei) + 23 + 34700
return o
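# Illustrative usage sketch (not part of the original module); the recipient
# addresses and amounts below are made up:
if __name__ == '__main__':
    example_payments = {
        '0x' + '11' * 20: 10 ** 18,      # 1 ether to a hypothetical address
        '0x' + '22' * 20: 5 * 10 ** 17,  # 0.5 ether to another hypothetical address
    }
    print('multisend code length (bytes):', len(mk_multisend_code(example_payments)))
    print('estimated gas:', get_multisend_gas(example_payments))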
|
the-stack_0_5228 | """Tests for the ProtocolAnalyzer."""
import pytest
from decoy import Decoy
from datetime import datetime
from opentrons.types import MountType, DeckSlotName
from opentrons.protocol_engine import commands as pe_commands, types as pe_types
from opentrons.protocol_runner import ProtocolRunner, ProtocolRunData, JsonPreAnalysis
from robot_server.protocols.analysis_store import AnalysisStore
from robot_server.protocols.protocol_store import ProtocolResource
from robot_server.protocols.protocol_analyzer import ProtocolAnalyzer
@pytest.fixture
def protocol_runner(decoy: Decoy) -> ProtocolRunner:
"""Get a mocked out ProtocolRunner."""
return decoy.mock(cls=ProtocolRunner)
@pytest.fixture
def analysis_store(decoy: Decoy) -> AnalysisStore:
"""Get a mocked out AnalysisStore."""
return decoy.mock(cls=AnalysisStore)
@pytest.fixture
def subject(
protocol_runner: ProtocolRunner,
analysis_store: AnalysisStore,
) -> ProtocolAnalyzer:
"""Get a ProtocolAnalyzer test subject."""
return ProtocolAnalyzer(
protocol_runner=protocol_runner,
analysis_store=analysis_store,
)
async def test_analyze(
decoy: Decoy,
protocol_runner: ProtocolRunner,
analysis_store: AnalysisStore,
subject: ProtocolAnalyzer,
) -> None:
"""It should be able to analyize a protocol."""
protocol_resource = ProtocolResource(
protocol_id="protocol-id",
pre_analysis=JsonPreAnalysis(schema_version=123, metadata={}),
created_at=datetime(year=2021, month=1, day=1),
files=[],
)
analysis_command = pe_commands.Pause(
id="command-id",
status=pe_commands.CommandStatus.SUCCEEDED,
createdAt=datetime(year=2022, month=2, day=2),
data=pe_commands.PauseData(message="hello world"),
)
analysis_labware = pe_types.LoadedLabware(
id="labware-id",
loadName="load-name",
definitionUri="namespace/load-name/42",
location=pe_types.DeckSlotLocation(slot=DeckSlotName.SLOT_1),
)
analysis_pipette = pe_types.LoadedPipette(
id="pipette-id",
pipetteName=pe_types.PipetteName.P300_SINGLE,
mount=MountType.LEFT,
)
decoy.when(await protocol_runner.run(protocol_resource)).then_return(
ProtocolRunData(
commands=[analysis_command],
labware=[analysis_labware],
pipettes=[analysis_pipette],
)
)
await subject.analyze(
protocol_resource=protocol_resource,
analysis_id="analysis-id",
)
decoy.verify(
analysis_store.update(
analysis_id="analysis-id",
commands=[analysis_command],
labware=[analysis_labware],
pipettes=[analysis_pipette],
errors=[],
),
)
async def test_analyze_error(
decoy: Decoy,
protocol_runner: ProtocolRunner,
analysis_store: AnalysisStore,
subject: ProtocolAnalyzer,
) -> None:
"""It should handle errors raised by the runner."""
protocol_resource = ProtocolResource(
protocol_id="protocol-id",
pre_analysis=JsonPreAnalysis(schema_version=123, metadata={}),
created_at=datetime(year=2021, month=1, day=1),
files=[],
)
error = RuntimeError("oh no")
decoy.when(await protocol_runner.run(protocol_resource)).then_raise(error)
await subject.analyze(
protocol_resource=protocol_resource,
analysis_id="analysis-id",
)
decoy.verify(
analysis_store.update(
analysis_id="analysis-id",
commands=[],
labware=[],
pipettes=[],
errors=[error],
),
)
|
the-stack_0_5229 | import os
import json
if not os.path.exists('normal'):
os.mkdir('normal')
for data_type in ['train', 'dev', 'test']:
with open('{}.json'.format(data_type), 'r') as f:
data = json.load(f)
dataset = []
for sample in data:
token = sample['token']
h_pos = [sample['subj_start'], sample['subj_end'] + 1]
t_pos = [sample['obj_start'], sample['obj_end'] + 1]
relation = sample['relation']
dataset.append({'token': token,
'h': {'pos': h_pos},
't': {'pos': t_pos},
'relation': relation})
with open('normal/{}.txt'.format(data_type), 'w') as f:
for sample in dataset:
f.write(json.dumps(sample))
f.write('\n')
|
the-stack_0_5230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: load-vgg19.py
from __future__ import print_function
import argparse
import numpy as np
import os
import cv2
import six
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow.dataset import ILSVRCMeta
enable_argscope_for_module(tf.layers)
def tower_func(image):
is_training = get_current_tower_context().is_training
with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
x = image
x = tf.layers.conv2d(x, 64, name='conv1_1')
x = tf.layers.conv2d(x, 64, name='conv1_2')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool1')
x = tf.layers.conv2d(x, 128, name='conv2_1')
x = tf.layers.conv2d(x, 128, name='conv2_2')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool2')
x = tf.layers.conv2d(x, 256, name='conv3_1')
x = tf.layers.conv2d(x, 256, name='conv3_2')
x = tf.layers.conv2d(x, 256, name='conv3_3')
x = tf.layers.conv2d(x, 256, name='conv3_4')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool3')
x = tf.layers.conv2d(x, 512, name='conv4_1')
x = tf.layers.conv2d(x, 512, name='conv4_2')
x = tf.layers.conv2d(x, 512, name='conv4_3')
x = tf.layers.conv2d(x, 512, name='conv4_4')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool4')
x = tf.layers.conv2d(x, 512, name='conv5_1')
x = tf.layers.conv2d(x, 512, name='conv5_2')
x = tf.layers.conv2d(x, 512, name='conv5_3')
x = tf.layers.conv2d(x, 512, name='conv5_4')
x = tf.layers.max_pooling2d(x, 2, 2, name='pool5')
x = tf.layers.flatten(x, name='flatten')
x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc6')
x = tf.layers.dropout(x, rate=0.5, name='drop0', training=is_training)
x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc7')
x = tf.layers.dropout(x, rate=0.5, name='drop1', training=is_training)
logits = tf.layers.dense(x, 1000, activation=tf.identity, name='fc8')
tf.nn.softmax(logits, name='prob')
def run_test(path, input):
param_dict = dict(np.load(path))
param_dict = {k.replace('/W', '/kernel').replace('/b', '/bias'): v for k, v in six.iteritems(param_dict)}
predict_func = OfflinePredictor(PredictConfig(
inputs_desc=[InputDesc(tf.float32, (None, 224, 224, 3), 'input')],
tower_func=tower_func,
session_init=DictRestore(param_dict),
input_names=['input'],
output_names=['prob'] # prob:0 is the probability distribution
))
im = cv2.imread(input)
assert im is not None, input
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3)).astype('float32')
    # VGG19 requires channelwise mean subtraction
VGG_MEAN = [103.939, 116.779, 123.68]
im -= VGG_MEAN[::-1]
outputs = predict_func(im)[0]
prob = outputs[0]
ret = prob.argsort()[-10:][::-1]
print("Top10 predictions:", ret)
meta = ILSVRCMeta().get_synset_words_1000()
print("Top10 class names:", [meta[k] for k in ret])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', required=True,
help='.npz model file generated by tensorpack.utils.loadcaffe')
parser.add_argument('--input', help='an input image', required=True)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
run_test(args.load, args.input)
|
the-stack_0_5232 | import os
class Config:
SSL_REDIRECT = False
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
class TestingConfig(Config):
TESTING = True
class ProductionConfig(Config):
@staticmethod
def init_app(app):
Config.init_app(app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class HerokuConfig(ProductionConfig):
SSL_REDIRECT = True if os.environ.get('DYNO') else False
@staticmethod
def init_app(app):
ProductionConfig.init_app(app)
# handle reverse proxy server headers
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'default': DevelopmentConfig
} |
the-stack_0_5233 | from django.contrib.auth import login, logout
from django.contrib.auth.forms import AuthenticationForm
from django.middleware import csrf
from django.utils.translation import gettext_lazy as _
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from c3nav.api.models import Token
from c3nav.api.utils import get_api_post_data
class SessionViewSet(ViewSet):
"""
Session for Login, Logout, etc…
Don't forget to set X-Csrftoken for POST requests!
/login – POST with fields token or username and password to log in
/get_token – POST with fields username and password to get a login token
/logout - POST to log out
"""
def list(self, request, *args, **kwargs):
return Response({
'is_authenticated': request.user.is_authenticated,
'csrf_token': csrf.get_token(request),
})
@action(detail=False, methods=['post'])
def login(self, request, *args, **kwargs):
# django-rest-framework doesn't do this for logged out requests
SessionAuthentication().enforce_csrf(request)
if request.user.is_authenticated:
raise ParseError(_('Log out first.'))
data = get_api_post_data(request)
if 'token' in data:
try:
token = Token.get_by_token(data['token'])
except Token.DoesNotExist:
raise PermissionDenied(_('This token does not exist or is no longer valid.'))
user = token.user
elif 'username' in data:
form = AuthenticationForm(request, data=data)
if not form.is_valid():
raise ParseError(form.errors)
user = form.user_cache
else:
raise ParseError(_('You need to send a token or username and password.'))
login(request, user)
return Response({
'detail': _('Login successful.'),
'csrf_token': csrf.get_token(request),
})
@action(detail=False, methods=['post'])
def get_token(self, request, *args, **kwargs):
# django-rest-framework doesn't do this for logged out requests
SessionAuthentication().enforce_csrf(request)
data = get_api_post_data(request)
form = AuthenticationForm(request, data=data)
if not form.is_valid():
raise ParseError(form.errors)
token = form.user_cache.login_tokens.create()
return Response({
'token': token.get_token(),
})
@action(detail=False, methods=['post'])
def logout(self, request, *args, **kwargs):
# django-rest-framework doesn't do this for logged out requests
SessionAuthentication().enforce_csrf(request)
if not request.user.is_authenticated:
            raise ParseError(_('Not logged in.'))
logout(request)
return Response({
'detail': _('Logout successful.'),
'csrf_token': csrf.get_token(request),
})
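# Illustrative client-side sketch (not part of this module); the host and the
# path under which this viewset is routed are assumptions:
#
#   import requests
#   s = requests.Session()
#   csrf = s.get("https://example.com/api/session/").json()["csrf_token"]
#   s.post("https://example.com/api/session/login/",
#          data={"username": "demo", "password": "secret"},
#          headers={"X-Csrftoken": csrf})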
|
the-stack_0_5234 |
def extractVasaandypresWordpressCom(item):
'''
Parser for 'vasaandypres.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
the-stack_0_5235 |
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.flatten import Flatten
# Code for CIFAR ResNet is modified from https://github.com/itchencheng/pytorch-residual-networks
class FashionMNIST(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 12, kernel_size=5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(12, 24, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(24*4*4, 50),
nn.ReLU(),
nn.Linear(50, 10),
nn.ReLU(),
)
def forward(self, x):
x = self.net(x)
return x
class SpeechCommand_Simplified(nn.Module):
def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):
super().__init__()
self.net = nn.Sequential(
# 1*8000
nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride),
# 32*496
nn.BatchNorm1d(n_channel),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 32*493
nn.Conv1d(n_channel, n_channel//2, kernel_size=3),
# 16*491
nn.BatchNorm1d(n_channel//2),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 16*488
nn.Conv1d(n_channel//2, n_channel//2, kernel_size=3),
# 16*486
nn.BatchNorm1d(n_channel//2),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 16*483
nn.Flatten(),
nn.Linear(16*483, 512),
nn.Linear(512, n_output),
nn.LogSoftmax(dim=1)
)
def forward(self, x):
x = self.net(x)
return x
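# Shape sanity check for the layer sizes annotated above (illustrative): an
# 8000-sample mono clip flows 1x8000 -> 32x496 -> 32x493 -> 16x491 -> 16x488
# -> 16x486 -> 16x483 -> flatten (16*483 = 7728) -> 512 -> n_output, e.g.
#   SpeechCommand_Simplified()(torch.randn(1, 1, 8000)).shape == (1, 35)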
class SpeechCommand(nn.Module):
def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):
super().__init__()
self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)
self.bn1 = nn.BatchNorm1d(n_channel)
self.pool1 = nn.MaxPool1d(4)
self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=3)
self.bn2 = nn.BatchNorm1d(n_channel)
self.pool2 = nn.MaxPool1d(4)
self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)
self.bn3 = nn.BatchNorm1d(2 * n_channel)
self.pool3 = nn.MaxPool1d(4)
self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)
self.bn4 = nn.BatchNorm1d(2 * n_channel)
self.pool4 = nn.MaxPool1d(4)
self.fc1 = nn.Linear(2 * n_channel, n_output)
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.pool1(x)
x = self.conv2(x)
x = F.relu(self.bn2(x))
x = self.pool2(x)
x = self.conv3(x)
x = F.relu(self.bn3(x))
x = self.pool3(x)
x = self.conv4(x)
x = F.relu(self.bn4(x))
x = self.pool4(x)
x = F.avg_pool1d(x, x.shape[-1])
x = x.permute(0, 2, 1)
x = self.fc1(x)
return F.log_softmax(x, dim=2)
class AGNEWS(nn.Module):
def __init__(self, vocab_size = 95811, embed_dim = 64, num_class = 4):
super().__init__()
self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
self.fc = nn.Linear(embed_dim, num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, text, offsets):
embedded = self.embedding(text, offsets)
return self.fc(embedded)
class CIFAR_CNN(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(3, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Dropout(0.1),
nn.Flatten(),
nn.Linear(256*4*4, 256),
nn.ReLU(),
nn.Linear(256, 10),
nn.ReLU(),
)
# self.conv1 = nn.Conv2d(3, 6, 5)
# self.pool = nn.MaxPool2d(2, 2)
# self.conv2 = nn.Conv2d(6, 16, 5)
# self.fc1 = nn.Linear(16 * 5 * 5, 120)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# x = self.pool(F.relu(self.conv1(x)))
# x = self.pool(F.relu(self.conv2(x)))
# x = torch.flatten(x, 1) # flatten all dimensions except batch
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
x = self.net(x)
return x
class ResBlock(nn.Module):
def __init__(self, in_chann, chann, stride):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_chann, chann, kernel_size=3, padding=1, stride=stride)
self.bn1 = nn.BatchNorm2d(chann)
self.conv2 = nn.Conv2d(chann, chann, kernel_size=3, padding=1, stride=1)
self.bn2 = nn.BatchNorm2d(chann)
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = F.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if (x.shape == y.shape):
z = x
else:
z = F.avg_pool2d(x, kernel_size=2, stride=2)
x_channel = x.size(1)
y_channel = y.size(1)
ch_res = (y_channel - x_channel)//2
pad = (0, 0, 0, 0, ch_res, ch_res)
z = F.pad(z, pad=pad, mode="constant", value=0)
z = z + y
z = F.relu(z)
return z
class BaseNet(nn.Module):
def __init__(self, Block, n):
super(BaseNet, self).__init__()
self.Block = Block
self.conv0 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.bn0 = nn.BatchNorm2d(16)
self.convs = self._make_layers(n)
self.avgpool = nn.AvgPool2d(kernel_size=8, stride=1)
self.fc = nn.Linear(64, 10)
def forward(self, x):
x = self.conv0(x)
x = self.bn0(x)
x = F.relu(x)
x = self.convs(x)
x = self.avgpool(x)
x = x.view(x.size(0),-1)
x = self.fc(x)
return x
def _make_layers(self, n):
layers = []
in_chann = 16
chann = 16
stride = 1
for i in range(3):
for j in range(n):
if ((i > 0) and (j == 0)):
in_chann = chann
chann = chann * 2
stride = 2
layers += [self.Block(in_chann, chann, stride)]
stride = 1
in_chann = chann
return nn.Sequential(*layers)
class CIFARResNet(BaseNet):
def __init__(self, n=3):
super().__init__(ResBlock, n) |
the-stack_0_5236 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # World News from data (good & bad)
# > Significant changes vs. 10 days ago in transmission rates, ICU demand, and cases & deaths data.
#
# - categories: [world, overview, interactive, news]
# - permalink: /covid-news/
# - author: <a href=https://github.com/artdgn/>artdgn</a>
# - toc: true
# - image: images/news.png
# - hide: false
# > Warning: This dashboard was not built by an epidemiologist.
# > Note: Click a country name to open a search results page for that country's COVID-19 news.
# +
#hide
import pandas as pd
import covid_helpers as covid_helpers
stylers = covid_helpers.PandasStyling
# +
#hide
day_diff = 10
cur_data = covid_helpers.CovidData()
df_cur_all, debug_dfs = cur_data.table_with_projections(projection_days=[30], debug_dfs=True)
df_cur = cur_data.filter_df(df_cur_all)
past_data = covid_helpers.CovidData(-day_diff)
df_past = past_data.filter_df(past_data.table_with_projections(projection_days=[day_diff-1]))
# -
#hide_input
from IPython.display import Markdown
past_date = pd.to_datetime(past_data.dt_cols[-1]).date().isoformat()
Markdown(f"***Based on data up to: {cur_data.cur_date}. \
Compared to ({day_diff} days before): {past_date}***")
# +
#hide
df_data = df_cur.copy()
df_data['transmission_rate_past'] = df_past['transmission_rate']
df_data['transmission_rate_std_past'] = df_past['transmission_rate_std']
df_data['needICU.per100k_past'] = df_past['needICU.per100k']
# deaths toll changes
df_data['Deaths.total.diff'] = df_data['Deaths.total'] - df_past['Deaths.total']
df_data['Deaths.new.per100k.past'] = df_past['Deaths.new.per100k']
df_data['Deaths.new.past'] = df_past['Deaths.new']
df_data['Deaths.diff.per100k'] = df_data['Deaths.total.diff'] / (df_data['population'] / 1e5)
# misses and explanations
df_data['transmission_rate.change'] = (df_data['transmission_rate'] / df_data['transmission_rate_past']) - 1
df_data['affected_ratio.miss'] = (df_cur['affected_ratio.est'] / df_past['affected_ratio.est.+9d']) - 1
df_data['needICU.per100k.miss'] = (df_cur['needICU.per100k'] / df_past['needICU.per100k.+9d']) - 1
df_data['testing_bias.change'] = (df_data['current_testing_bias'] / df_past['current_testing_bias']) - 1
# -
#hide
def emoji_flags(inds):
return ' '.join(df_cur.loc[inds]['emoji_flag'])
# # Transmission rate:
# > Note: "transmission rate" here is a measure of speed of spread of infection, and means how much of the susceptible population each infected person is infecting per day (if everyone is susceptible). E.g. 10% means that 100 infected patients will infect 10 new people per day. Related to [R0](https://en.wikipedia.org/wiki/Basic_reproduction_number). See [Methodology](#Methodology) for details of calculation.
# hide
def style_news_infections(df):
cols = {
'transmission_rate': '<i>Current:</i><br>Estimated<br>daily<br>transmission<br>rate',
'transmission_rate_past': f'<i>{day_diff} days ago:</i><br>Estimated<br>daily<br>transmission<br>rate',
'Cases.new.est': 'Estimated <br> <i>recent</i> cases <br> in last 5 days',
'needICU.per100k': 'Estimated<br>current<br>ICU need<br>per 100k<br>population',
'affected_ratio.est': 'Estimated <br><i>total</i><br>affected<br>population<br>percentage',
}
rate_norm = max(df['transmission_rate'].max(), df['transmission_rate_past'].max())
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
return (df_show.style
.bar(subset=[cols['needICU.per100k']], color='#b21e3e', vmin=0, vmax=10)
.bar(subset=cols['Cases.new.est'], color='#b57b17', vmin=0)
.bar(subset=cols['affected_ratio.est'], color='#5dad64', vmin=0, vmax=1.0)
.apply(stylers.add_bar, color='#f49d5a',
s_v=df['transmission_rate'] / rate_norm, subset=cols['transmission_rate'])
.apply(stylers.add_bar, color='#d8b193',
s_v=df['transmission_rate_past'] / rate_norm,
subset=cols['transmission_rate_past'])
.format('<b>{:.2f}</b>', subset=[cols['needICU.per100k']])
.format('<b>{:,.0f}</b>', subset=cols['Cases.new.est'])
.format('<b>{:.1%}</b>', subset=[cols['affected_ratio.est'],
cols['transmission_rate'],
cols['transmission_rate_past']], na_rep="-"))
# hide
rate_diff = df_data['transmission_rate'] - df_data['transmission_rate_past']
higher_trans = (
(df_data['Cases.new.est'] > 100) &
(rate_diff > 0.02) &
(rate_diff > df_data['transmission_rate_std_past']) &
(df_data['transmission_rate_past'] != 0) # countries reporting infrequently
)
new_waves = rate_diff[higher_trans].sort_values(ascending=False).index
# hide_input
Markdown(f"## ⭕ Bad news: new waves {emoji_flags(new_waves)}")
# > Large increase in transmission rate vs. 10 days ago, that might mean a relapse, new wave, worsening outbreak.
#
# - Countries are sorted by size of change in transmission rate.
# - Includes only countries that were previously active (more than 100 estimated new cases).
# - "Large increase" = at least +2% change.
# hide_input
style_news_infections(df_data.loc[new_waves])
# +
# hide
df_alt_all = pd.concat([d.reset_index() for d in debug_dfs], axis=0)
def infected_plots(countries, title):
return covid_helpers.altair_multiple_countries_infected(
df_alt_all, countries=countries, title=title, marker_day=day_diff)
# -
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
#hide_input
infected_plots(new_waves, "Countries with new waves (vs. 10 days ago)")
#hide
lower_trans = (
(rate_diff < -0.02) &
(df_cur['Cases.new.est'] > 100) &
(rate_diff.abs() > df_data['transmission_rate_std']) &
(df_data['transmission_rate'] != 0) # countries reporting infrequently
)
slowing_outbreaks = rate_diff[lower_trans].sort_values().index
#hide_input
Markdown(f"## 🟢 Good news: slowing waves {emoji_flags(slowing_outbreaks)}")
# > Large decrease in transmission rate vs. 10 days ago, that might mean a slowing down / effective control measures.
#
# - Countries are sorted by size of change in transmission rate.
# - Includes only countries that were previously active (more than 100 estimated new cases).
# - "Large decrease" = at least -2% change.
#hide_input
style_news_infections(df_data.loc[slowing_outbreaks])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
#hide_input
infected_plots(slowing_outbreaks, "Countries with slowing waves (vs. 10 days ago)")
# # ICU need
# hide
def style_news_icu(df):
cols = {
'needICU.per100k': '<i>Current:</i><br>Estimated<br>ICU need<br>per 100k<br>population',
'needICU.per100k_past': f'<i>{day_diff} days ago:</i><br>Estimated<br>ICU need<br>per 100k<br>population',
'Cases.new.est': 'Estimated<br><i>recent</i> cases<br> in last 5 days',
'transmission_rate': 'Estimated<br>daily<br>transmission<br>rate',
'affected_ratio.est': 'Estimated <br><i>total</i><br>affected<br>population<br>percentage',
}
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
return (df_show.style
.bar(subset=cols['needICU.per100k'], color='#b21e3e', vmin=0, vmax=10)
.bar(subset=cols['needICU.per100k_past'], color='#c67f8e', vmin=0, vmax=10)
.bar(subset=cols['Cases.new.est'], color='#b57b17', vmin=0)
.bar(subset=cols['affected_ratio.est'], color='#5dad64', vmin=0, vmax=1.0)
.apply(stylers.add_bar, color='#f49d5a',
s_v=df['transmission_rate']/df['transmission_rate'].max(),
subset=cols['transmission_rate'])
.format('<b>{:.2f}</b>', subset=[cols['needICU.per100k'], cols['needICU.per100k_past']])
.format('<b>{:,.0f}</b>', subset=cols['Cases.new.est'])
.format('<b>{:.1%}</b>', subset=[cols['affected_ratio.est'],
cols['transmission_rate']]))
# hide
icu_diff = df_cur['needICU.per100k'] - df_past['needICU.per100k']
icu_increase = icu_diff[icu_diff > 0.2].sort_values(ascending=False).index
# hide_input
Markdown(f"## ⭕ Bad news: higher ICU need {emoji_flags(icu_increase)}")
# > Large increases in need for ICU beds per 100k population vs. 10 days ago.
#
# - Only countries for which the ICU need increased by more than 0.2 (per 100k).
# hide_input
style_news_icu(df_data.loc[icu_increase])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(icu_increase, "Countries with Higher ICU need (vs. 10 days ago)")
# hide
icu_decrease = icu_diff[icu_diff < -0.1].sort_values().index
# hide_input
Markdown(f"## 🟢 Good news: lower ICU need {emoji_flags(icu_decrease)}")
# > Large decreases in need for ICU beds per 100k population vs. 10 days ago.
#
# - Only countries for which the ICU need decreased by more than 0.1 (per 100k).
# hide_input
style_news_icu(df_data.loc[icu_decrease])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(icu_decrease, "Countries with Lower ICU need (vs. 10 days ago)")
# # New cases and deaths:
# hide
new_entries = df_cur.index[~df_cur.index.isin(df_past.index)]
# hide_input
Markdown(f"## ⭕ Bad news: new first significant outbreaks {emoji_flags(new_entries)}")
# > Countries that have started their first significant outbreak (crossed 1000 total reported cases or 20 deaths) vs. 10 days ago.
# hide_input
style_news_infections(df_data.loc[new_entries])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(new_entries, "Countries with first large outbreak (vs. 10 days ago)")
# hide
def style_no_news(df):
cols = {
'Cases.total.est': 'Estimated<br>total<br>cases',
'Deaths.total': 'Total<br>reported<br>deaths',
'last_case_date': 'Date<br>of last<br>reported case',
'last_death_date': 'Date<br>of last<br>reported death',
}
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
return (df_show.style
.format('<b>{:,.0f}</b>', subset=[cols['Cases.total.est'], cols['Deaths.total']]))
#hide
significant_past = ((df_past['Cases.total.est'] > 1000) & (df_past['Deaths.total'] > 10))
active_in_past = ((df_past['Cases.new'] > 0) | (df_past['Deaths.new'] > 0))
no_cases_filt = ((df_cur['Cases.total'] - df_past['Cases.total']) == 0)
no_deaths_filt = ((df_cur['Deaths.total'] - df_past['Deaths.total']) == 0)
no_cases_and_deaths = df_cur.loc[no_cases_filt & no_deaths_filt &
significant_past & active_in_past].index
# hide_input
Markdown(f"## 🟢 Good news: no new cases or deaths {emoji_flags(no_cases_and_deaths)}")
# > New countries with no new cases or deaths vs. 10 days ago.
#
# - Only considering countries that had at least 1000 estimated total cases and at least 10 total deaths and had an active outbreak previously.
# hide_input
style_no_news(df_data.loc[no_cases_and_deaths])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(no_cases_and_deaths, "New countries with no new cases or deaths (vs. 10 days ago)")
# hide
no_deaths = df_cur.loc[no_deaths_filt & (~no_cases_filt) &
significant_past & active_in_past].index
# hide_input
Markdown(f"## Mixed news: no new deaths, only new cases {emoji_flags(no_deaths)}")
# > New countries with no new deaths (only new cases) vs. 10 days ago.
#
# - Only considering countries that had at least 1000 estimated total cases and at least 10 total deaths and had an active outbreak previously.
# hide_input
style_news_infections(df_data.loc[no_deaths])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(no_deaths, "Countries with only new cases (vs. 10 days ago)")
# hide
not_active = df_cur.loc[no_cases_filt & significant_past & ~active_in_past].index
# hide_input
Markdown(f"## No news: continuously inactive countries {emoji_flags(not_active)}")
# > Countries that had no new cases or deaths 10 days ago or now.
#
# - Only considering countries that had at least 1000 estimated total cases and at least 10 total deaths.
# - Caveat: these countries may have stopped reporting data like [Tanzania](https://en.wikipedia.org/wiki/COVID-19_pandemic_in_Tanzania).
# hide_input
style_no_news(df_data.loc[not_active])
# > Tip: Click country name in legend to switch countries. Use mouse wheel to zoom Y axis.
# hide_input
infected_plots(not_active, "Continuously inactive countries (now and 10 days ago)")
# # Deaths burden:
# hide
def style_death_burden(df):
cols = {
'Deaths.new.per100k': f'<i>Current</i>:<br>{cur_data.PREV_LAG} day<br>death<br>burden<br>per 100k',
'Deaths.new.per100k.past': f'<i>{day_diff} days ago</i>:<br>{cur_data.PREV_LAG} day<br>death<br>burden<br>per 100k',
'Deaths.total.diff': f'New<br>reported deaths<br>since {day_diff}<br>days ago',
'needICU.per100k': 'Estimated<br>current<br>ICU need<br>per 100k<br>population',
'affected_ratio.est': 'Estimated <br><i>total</i><br>affected<br>population<br>percentage',
}
df_show = stylers.country_index_emoji_link(df)[cols.keys()].rename(columns=cols)
death_norm = max(df['Deaths.new.per100k'].max(), df['Deaths.new.per100k.past'].max())
return (df_show.style
.bar(subset=cols['needICU.per100k'], color='#b21e3e', vmin=0, vmax=10)
.bar(subset=cols['Deaths.new.per100k'], color='#7b7a7c', vmin=0, vmax=death_norm)
.bar(subset=cols['Deaths.new.per100k.past'], color='#918f93', vmin=0, vmax=death_norm)
.bar(subset=cols['Deaths.total.diff'], color='#6b595d', vmin=0)
.bar(subset=cols['affected_ratio.est'], color='#5dad64', vmin=0, vmax=1.0)
.format('<b>{:.0f}</b>', subset=[cols['Deaths.total.diff'],
])
.format('<b>{:.1f}</b>', subset=cols['needICU.per100k'])
.format('<b>{:.2f}</b>', subset=[cols['Deaths.new.per100k'],
cols['Deaths.new.per100k.past']])
.format('<b>{:.1%}</b>', subset=[cols['affected_ratio.est']], na_rep="-"))
# hide
death_change_ratio = df_data['Deaths.new.per100k'] / df_data['Deaths.new.per100k.past']
filt = (
(df_data['Deaths.new'] > 10) &
(df_data['Deaths.new.past'] > 10) &
(df_data['Deaths.new.per100k'] > 0.1) &
(death_change_ratio > 2))
higher_death_burden = df_data[filt]['Deaths.diff.per100k'].sort_values(ascending=False).index
# hide_input
Markdown(f"## ⭕ Bad news: higher death burden {emoji_flags(higher_death_burden)}")
# > Countries with significantly higher recent death burden per 100k population vs. 10 days ago.
#
# - "Significantly higher" = 100% more.
# - Only considering countries that had at least 10 recent deaths in both timeframes, and death burden of at least 0.1 per 100k.
# hide_input
style_death_burden(df_data.loc[higher_death_burden])
# hide_input
infected_plots(higher_death_burden, "Countries with higher death burden (vs. 10 days ago)")
# hide
filt = (
(df_data['Deaths.new'] > 10) &
(df_data['Deaths.new.past'] > 10) &
(df_data['Deaths.new.per100k.past'] > 0.1) &
(death_change_ratio < 0.5))
lower_death_burden = df_data[filt]['Deaths.diff.per100k'].sort_values(ascending=False).index
# hide_input
Markdown(f"## 🟢 Good news: lower death burden {emoji_flags(lower_death_burden)}")
# > Countries with significantly lower recent death burden per 100k population vs. 10 days ago.
#
# - "Significantly lower" = 50% less
# - Only considering countries that had at least 10 recent deaths in both timeframes, and death burden of at least 0.1 per 100k.
# hide_input
style_death_burden(df_data.loc[lower_death_burden])
# hide_input
infected_plots(lower_death_burden, "Countries with lower death burden (vs. 10 days ago)")
# # Appendix:
# > Note: For interactive map, per country details, projections, and modeling methodology see [Projections of ICU need by Country dashboard](/covid-progress-projections/)
# > Warning: the visualisation below contains the results of a predictive model that was not built by an epidemiologist.
# ## Future model projections plots per country
# > For countries in any of the above groups.
# > Tip: Choose country from the drop-down below the graph.
#hide_input
all_news = (new_waves, slowing_outbreaks,
icu_increase, icu_decrease,
higher_death_burden, lower_death_burden,
not_active, no_deaths, no_cases_and_deaths, new_entries)
news_countries = [c for g in all_news for c in g]
df_alt_filt = df_alt_all[(df_alt_all['day'] > -60) &
(df_alt_all['country'].isin(news_countries))]
covid_helpers.altair_sir_plot(df_alt_filt, new_waves[0])
#hide
df_tot = df_alt_all.rename(columns={'country': cur_data.COL_REGION}
).set_index(cur_data.COL_REGION)
df_tot['population'] = df_cur_all['population']
for c in df_tot.columns[df_alt_all.dtypes == float]:
df_tot[c + '-total'] = df_tot[c] * df_tot['population']
df_tot = df_tot.reset_index()
df_tot.columns = [c.replace('.', '-') for c in df_tot.columns]
#hide_input
df_now = df_tot[df_tot['day'] == 0]
pop = df_now['population'].sum()
s_now = df_now['Susceptible-total'].sum() / pop
i_now = df_now['Infected-total'].sum() / pop
r_now = df_now['Removed-total'].sum() / pop
Markdown("## World totals:\n"
f"Infected 😷: **{i_now:.1%}**, "
f"Removed 😔: **{r_now:.1%}**, "
f"Susceptible 😟: **{s_now:.1%}**")
# ## Future World projections (all countries stacked)
# The outputs of the models for all countries in stacked plots.
# > Tip: Hover the mouse over the area to see which country is which and the countries' S/I/R ratios at that point.
#
# > Tip: The plots are zoomable and draggable.
# +
#hide
# filter by days
days = 30
df_tot = df_tot[df_tot['day'].between(-days, days) | (df_tot['day'] % 10 == 0)]
# filter out noisy countries for actively infected plot:
df_tot_filt = df_tot[df_tot[cur_data.COL_REGION].isin(df_cur.index.unique())]
# -
# ### World total estimated actively infected
# +
#hide_input
import altair as alt
alt.data_transformers.disable_max_rows()
# today
today_line = (alt.Chart(pd.DataFrame({'x': [0]}))
.mark_rule(color='orange')
.encode(x='x', size=alt.value(1)))
# make plot
max_y = (df_tot_filt[df_tot_filt['day'].between(-days, days)]
.groupby('day')['Infected-total'].sum().max())
stacked_inf = alt.Chart(df_tot_filt).mark_area().encode(
x=alt.X('day:Q',
title=f'days relative to today ({cur_data.cur_date})',
scale=alt.Scale(domain=(-days, days))),
y=alt.Y("Infected-total:Q", stack=True, title="Number of people",
scale=alt.Scale(domain=(0, max_y))),
color=alt.Color("Country/Region:N", legend=None),
tooltip=['Country/Region', 'Susceptible', 'Infected', 'Removed'],
)
(stacked_inf + today_line).interactive()\
.properties(width=650, height=340)\
.properties(title='Actively infected')\
.configure_title(fontSize=20)
# -
# ### World total estimated recovered or dead
# +
#hide_input
max_y = df_tot_filt[df_tot_filt['day']==days]['Removed-total'].sum()
stacked_rem = alt.Chart(df_tot_filt).mark_area().encode(
x=alt.X('day:Q',
title=f'days relative to today ({cur_data.cur_date})',
scale=alt.Scale(domain=(-days, days))),
y=alt.Y("Removed-total:Q", stack=True, title="Number of people",
scale=alt.Scale(domain=(0, max_y))),
color=alt.Color("Country/Region:N", legend=None),
tooltip=['Country/Region', 'Susceptible', 'Infected', 'Removed']
)
(stacked_rem + today_line).interactive()\
.properties(width=650, height=340)\
.properties(title='Recovered or dead')\
.configure_title(fontSize=20)
# -
# <a id='methodology'></a>
# ## Methodology
# - I'm not an epidemiologist. This is an attempt to understand what's happening, and what the future looks like if current trends remain unchanged.
# - Everything is approximated and depends heavily on underlying assumptions.
# - Transmission rate calculation:
# - Growth rate is calculated over the past 5 days by averaging the daily growth rates.
# - Confidence bounds are calculated from the weighted standard deviation of the growth rate over the last 5 days. Model predictions are calculated for growth rates within 1 STD of the weighted mean. The maximum and minimum values for each day are used as confidence bands.
# Countries with highly noisy transmission rates are excluded from the transmission rate change tables ("new waves", "slowing waves").
# - Transmission rate and its STD are calculated from the growth rate and its STD using the active cases estimation.
# - For projections (into the future), very noisy projections (with broad confidence bounds) are not shown in the tables.
# - Where the rate estimated from [Total Outstanding Cases](https://covid19dashboards.com/outstanding_cases/#Appendix:-Methodology-of-Predicting-Recovered-Cases) is too high (on down-slopes), a recovery probability of 1/20 is used (equivalent to 20 days to recover).
# - Total cases are estimated from the reported deaths for each country:
# - Each country has a different testing policy and capacity and cases are under-reported in some countries. Using an estimated IFR (fatality rate) we can estimate the number of cases some time ago by using the total deaths until today.
# - IFRs for each country are estimated using the age-adjusted IFRs from [May 1 New York paper](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3590771) and [UN demographic data for 2020](https://population.un.org/wpp/Download/Standard/Population/). These IFRs can be found in the `df['age_adjusted_ifr']` column. Some examples: US - 0.98%, UK - 1.1%, Qatar - 0.25%, Italy - 1.4%, Japan - 1.6%.
# - The fatality lag is assumed to be 8 days on average for a case to go from being confirmed positive (after incubation + testing lag) to death. This is the same figure used by ["Estimating The Infected Population From Deaths"](https://covid19dashboards.com/covid-infected/).
# - Testing bias adjustment: the actual lagged fatality rate is then divided by the IFR to estimate the testing bias in a country. The estimated testing bias then multiplies the reported case numbers to estimate the *true* case numbers (*=case numbers if testing coverage was as comprehensive as in the heavily tested countries*). A simplified sketch of this estimation appears after this list.
# - ICU need is calculated and age-adjusted as follows:
# - UK ICU ratio was reported as [4.4% of active reported cases](https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf).
# - Using the UK's ICU ratio, the UK's testing bias, and IFRs corrected for age demographics, we can estimate each country's ICU ratio (the number of cases requiring ICU hospitalisation).
# 
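# > Note: The cell below is a simplified, illustrative sketch of the deaths-to-cases estimation described above. It is not the actual implementation used by `covid_helpers`; the function name and the pre-lagged case totals argument are assumptions made only for illustration.
#hide
FATALITY_LAG_DAYS = 8  # assumed average lag from confirmed positive case to death (see above)
def sketch_estimate_true_cases(deaths_total, cases_total_now, cases_total_lagged, age_adjusted_ifr):
    """Illustrative only: estimate 'true' cases from reported deaths and an age-adjusted IFR."""
    lagged_fatality_rate = deaths_total / cases_total_lagged  # observed CFR vs. cases from FATALITY_LAG_DAYS ago
    testing_bias = lagged_fatality_rate / age_adjusted_ifr  # > 1 where testing misses many cases
    return cases_total_now * testing_bias  # scale reported cases up to the estimated true count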
|
the-stack_0_5238 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common.exceptions import db
from st2common.models.system.common import ResourceReference
def get_ref_from_model(model):
if model is None:
raise ValueError('Model has None value.')
model_id = getattr(model, 'id', None)
if model_id is None:
raise db.StackStormDBObjectMalformedError('model %s must contain id.' % str(model))
reference = {'id': str(model_id),
'name': getattr(model, 'name', None)}
return reference
def get_model_from_ref(db_api, reference):
if reference is None:
raise db.StackStormDBObjectNotFoundError('No reference supplied.')
model_id = reference.get('id', None)
if model_id is not None:
return db_api.get_by_id(model_id)
model_name = reference.get('name', None)
if model_name is None:
raise db.StackStormDBObjectNotFoundError('Both name and id are None.')
return db_api.get_by_name(model_name)
def get_model_by_resource_ref(db_api, ref):
"""
Retrieve a DB model based on the resource reference.
:param db_api: Class of the object to retrieve.
:type db_api: ``object``
:param ref: Resource reference.
:type ref: ``str``
:return: Retrieved object.
"""
ref_obj = ResourceReference.from_string_reference(ref=ref)
result = db_api.query(name=ref_obj.name, pack=ref_obj.pack).first()
return result
def get_resource_ref_from_model(model):
"""
Return a ResourceReference given db_model.
:param model: DB model that contains name and pack.
:type model: ``object``
:return: ResourceReference.
"""
try:
name = model.name
pack = model.pack
except AttributeError:
raise Exception('Cannot build ResourceReference for model: %s. Name or pack missing.',
model)
return ResourceReference(name=name, pack=pack)
def get_str_resource_ref_from_model(model):
"""
Return a resource reference as string given db_model.
:param model: DB model that contains name and pack.
:type model: ``object``
:return: String representation of ResourceReference.
"""
return get_resource_ref_from_model(model).ref
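# Illustrative usage sketch (not part of the original module): building a pack-scoped
# reference and parsing its string form back. The 'examples' pack and 'my_action' name
# are assumptions used only for illustration.
def _example_reference_round_trip():
    ref = ResourceReference(name='my_action', pack='examples')
    ref_string = ref.ref  # string form of the reference, e.g. 'examples.my_action'
    parsed = ResourceReference.from_string_reference(ref=ref_string)
    return parsed.pack, parsed.name  # expected to give back ('examples', 'my_action')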
|
the-stack_0_5239 | from random import randint as r
from time import sleep as s
print("This is a story about an ant")
sugar = 0
bank = 5_000_000
chocolate = 0
chocoxplode = 0
cost = 0
def intro(sugar, bank, chocolate, chocoxplode, cost):
s(2)
print("\nIt is a true story\n")
s(2)
print(f'Your sugar balance is {sugar}')
s(2)
choice = input("\nHow much sugar would you like?")
s(1)
choice = int(choice)
if choice < 10000:
sugar += choice
bank -= choice
sugarbank(sugar, bank, chocolate, chocoxplode, cost)
else:
print('You greedy monkey! No sugar for you.')
sugarbank(sugar, bank, chocolate, chocoxplode, cost)
def sugarbank(sugar, bank, chocolate, chocoxplode, cost):
s(2)
print(f'\n\nYour sugar balance is {sugar}')
print('''
Welcome to the sugar bank!
    Would you like to make a deposit or a withdrawal?
Type CAT for deposit
    Type SNAKEFACE for withdrawal
Type HAMSTER to rob the bank
Type MANGO to light your sugar on fire
Type GORILLA to give the bank your money
Type RAVEN to trade some sugar for chocolate
Type LADYBUG to play the lottery
    Type WOLF to make chocoxplode (exploding chocolate with enhanced flavor)
''')
choice = input("Please pick an option")
if choice == 'CAT':
choice = input("How many sugars do you want to deposit")
choice = int(choice)
        print(f"You deposit {choice} sugars")
sugar -= choice
bank += choice
elif choice == 'HAMSTER':
print("You rob the bank")
print("You now have a huge amount of sugar")
sugar += 50000
bank -= 50000
elif choice == 'MANGO':
print("You light your sugar on fire")
sugar = 0
print("You now have no sugar.")
print("I hope you feel the pain of sugar poverty!\n")
elif choice == 'GORILLA':
bank += sugar
sugar = 0
print("You give all your sugar to the bank. The bank will remember your kindness.")
s(2)
print("But a gorilla hears you calling it so it rampages and nearly kills you. ")
s(2)
print("But it kills you. You die. The end,.")
s(5)
print("Just kidding! The bank remembers your kindness and gives the gorilla 100 sugar. It agrees to leave you alone. ")
bank -= 100
elif choice == 'LADYBUG':
if sugar < 5:
print("You can't afford the lottery!")
else:
winning = []
winning.append(r(1,10))
winning.append(r(1,10))
winning.append(r(1,10))
lottery = []
print("You decide to play the lottery")
            print(f"The lottery costs {cost} sugar")
sugar -= cost
cost *= 1.4
for i in range(3):
choice = input("Pick a number ")
choice = int(choice)
lottery.append(choice)
print(f'The winning lottery numbers are:')
for number in winning:
print(number)
            if lottery == winning:  # all three picks match the winning numbers in order
print("You won the lottery!")
sugar += 10*number
elif choice == 'BREAD':
print("You go to the bread bank")
url = open("bread.txt")
html = url.read()
print(html)
elif choice == 'RAVEN':
print("1 chocobar => 50 sugars (chocolate is better than sugar!)")
chocolate = input("How many chocobars would you like?")
sugar -= int(chocolate)*50
elif choice == 'WOLF':
print("10 chocolates + 100 sugars => chocoxplode")
chocoxplode += int(input("How many chocoxplodes would you like?"))
chocolate -= 10*int(chocoxplode)
sugar -= 100*int(chocoxplode)
else:
print("You take 100 sugars")
sugar += 100
bank -= 100
print(f'\nYour sugar balance is {sugar}\n')
print(f'\nYour chocolate balance is {chocolate}\n')
print(f'\nYour chocoxplode balance is {chocoxplode}\n')
s(2)
choice = input("Do you want to play again? y/n ")
s(1)
if choice == 'y':
intro(sugar, bank, chocolate, chocoxplode, cost)
intro(sugar, bank, chocolate, chocoxplode, cost)
print("Thanks for playing, game over")
#I had 97093 Sugar on 4/20/2020
|
the-stack_0_5240 | from backend.improvements.seastead import Seastead
import pytest
@pytest.fixture(scope="function")
def setup_improvement():
imp = Seastead()
return imp
# Init
testdata = [
('food', 2),
('production', 0),
('gold', 0),
('science', 0),
('culture', 0),
('faith', 0),
('housing', 2),
('appeal', 0),
('power', 0),
('acceptable_terrain', [
'lake',
'coast',
'ocean',
]),
('acceptable_features', None),
('resources', None),
]
@pytest.mark.parametrize("resource, value", testdata)
def test_init(setup_improvement, resource, value):
test_improvement = setup_improvement
assert getattr(test_improvement, resource) == value
|
the-stack_0_5241 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as logging
import paste.urlmap
try:
from urllib.request import parse_http_list # pylint: disable=E0611
except ImportError:
from urllib2 import parse_http_list # Python 2
from cinder.api.openstack import wsgi
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = logging.getLogger(__name__)
def unquote_header_value(value):
"""Unquotes a header value.
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
return value
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in parse_http_list(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_options_header(value):
"""Parse 'Content-Type'-like header into a tuple.
Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value)
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
class Accept(object):
def __init__(self, value):
self._content_types = [parse_options_header(v) for v in
parse_list_header(value)]
def best_match(self, supported_content_types):
# FIXME: Should we have a more sophisticated matching algorithm that
# takes into account the version as well?
best_quality = -1
best_content_type = None
best_params = {}
best_match = '*/*'
for content_type in supported_content_types:
for content_mask, params in self._content_types:
try:
quality = float(params.get('q', 1))
except ValueError:
continue
if quality < best_quality:
continue
elif best_quality == quality:
if best_match.count('*') <= content_mask.count('*'):
continue
if self._match_mask(content_mask, content_type):
best_quality = quality
best_content_type = content_type
best_params = params
best_match = content_mask
return best_content_type, best_params
def content_type_params(self, best_content_type):
"""Find parameters in Accept header for given content type."""
for content_type, params in self._content_types:
if best_content_type == content_type:
return params
return {}
def _match_mask(self, mask, content_type):
if '*' not in mask:
return content_type == mask
if mask == '*/*':
return True
mask_major = mask[:-2]
content_type_major = content_type.split('/', 1)[0]
return content_type_major == mask_major
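# Illustrative usage sketch (not part of the original module): choosing the best
# matching content type for a request. The Accept header value below is an example only.
def _example_accept_usage():
    accept = Accept('application/json;q=0.8;version=1.1, application/xml;q=0.2')
    # Returns ('application/json', {'q': '0.8', 'version': '1.1'}) for this header,
    # since application/json carries the higher quality value.
    return accept.best_match(['application/json', 'application/xml'])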
def urlmap_factory(loader, global_conf, **local_conf):
if 'not_found_app' in local_conf:
not_found_app = local_conf.pop('not_found_app')
else:
not_found_app = global_conf.get('not_found_app')
if not_found_app:
not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
urlmap = URLMap(not_found_app=not_found_app)
for path, app_name in local_conf.items():
path = paste.urlmap.parse_path_expression(path)
app = loader.get_app(app_name, global_conf=global_conf)
urlmap[path] = app
return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return mime_type, app
def __call__(self, environ, start_response):
host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
if ':' in host:
host, port = host.split(':', 1)
else:
if environ['wsgi.url_scheme'] == 'http':
port = '80'
else:
port = '443'
path_info = environ['PATH_INFO']
path_info = self.normalize_url(path_info, False)[1]
# The MIME type for the response is determined in one of two ways:
# 1) URL path suffix (eg /servers/detail.json)
# 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2)
# The API version is determined in one of three ways:
# 1) URL path prefix (eg /v1.1/tenant/servers/detail)
# 2) Content-Type header (eg application/json;version=1.1)
# 3) Accept header (eg application/json;q=0.8;version=1.1)
supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)
mime_type, app, app_url = self._path_strategy(host, port, path_info)
# Accept application/atom+xml for the index query of each API
# version mount point as well as the root index
if (app_url and app_url + '/' == path_info) or path_info == '/':
supported_content_types.append('application/atom+xml')
if not app:
app = self._content_type_strategy(host, port, environ)
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:
app = possible_app
if not mime_type:
mime_type = 'application/json'
if not app:
# Didn't match a particular version, probably matches default
app, app_url = self._match(host, port, path_info)
if app:
app = self._munge_path(app, path_info, app_url)
if app:
environ['cinder.best_content_type'] = mime_type
return app(environ, start_response)
environ['paste.urlmap_object'] = self
return self.not_found_application(environ, start_response)
|
the-stack_0_5243 | import os
import os.path
import sys
import unittest
from subprocess import Popen
from subprocess import PIPE
class TestRun(unittest.TestCase):
maxDiff = None
def setUp(self):
self.cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
def tearDown(self):
os.chdir(self.cwd)
def _run(self, script):
env = os.environ.copy()
env['PYTHONWARNINGS'] = 'ignore'
args = [sys.executable, '-m', 'gevent.monkey', script, 'patched']
p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
gout, gerr = p.communicate()
self.assertEqual(0, p.returncode, (gout, gerr))
args = [sys.executable, script, 'stdlib']
p = Popen(args, stdout=PIPE, stderr=PIPE)
pout, perr = p.communicate()
self.assertEqual(0, p.returncode, (pout, perr))
glines = gout.decode("utf-8").splitlines()
plines = pout.decode('utf-8').splitlines()
self.assertEqual(glines, plines)
self.assertEqual(gerr, perr)
return glines, gerr
def test_run_simple(self):
self._run(os.path.join('monkey_package', 'script.py'))
def test_run_package(self):
# Run a __main__ inside a package.
lines, _ = self._run('monkey_package')
self.assertTrue(lines[0].endswith('__main__.py'), lines[0])
self.assertEqual(lines[1], '__main__')
def test_issue_302(self):
lines, _ = self._run(os.path.join('monkey_package', 'issue302monkey.py'))
self.assertEqual(lines[0], 'True')
lines[1] = lines[1].replace('\\', '/') # windows path
self.assertEqual(lines[1], 'monkey_package/issue302monkey.py')
self.assertEqual(lines[2], 'True', lines)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5244 | '''
Coding our First Game in PyGame
-
Cheat Codes and HomeScreen in PyGame
'''
import pygame
import random
pygame.init()
# print(x) # All 6 pygame modules successfully imported
# Colors
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
# Creating Game Window
screen_width = 900
screen_height = 600
gameWindow = pygame.display.set_mode((screen_width, screen_height)) # Game Window of 1200x500
pygame.display.set_caption("Snake - by Anubhav Madhav") # Title of the Game, which appears at the top of the window
pygame.display.update() # We need to update our display each and everytime we make a change
clock = pygame.time.Clock()
font = pygame.font.SysFont(None, 55)
def text_screen(text, color, x, y):
screen_text = font.render(text, True, color)
gameWindow.blit(screen_text, [x,y])
def plot_snake(gameWindow, color, snk_list, snake_size):
for x,y in snk_list:
pygame.draw.rect(gameWindow, color, [x, y, snake_size, snake_size])
def welcome(): # For Home Screen
exit_game = False
while not exit_game:
gameWindow.fill((233, 220, 229))
text_screen("Welcome to Snakes", black, 260, 250)
text_screen("Developed by Anubhav Madhav", black, 170, 300)
text_screen("Press Space Bar to Play", black, 220, 400)
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gameloop()
pygame.display.update()
clock.tick(30) # hardcoded 'fps' because 'fps' is local variable of gameloop(), therefore we cannot use it in welcome()
# Creating a Game Loop
def gameloop():
# Game Specific Variables
exit_game = False
game_over = False
snake_x = 45 # Initial Position of Snake
snake_y = 55 # Initial Position of Snake
snake_size = 30
init_velocity = 5
velocity_x = 0
velocity_y = 0
food_x = random.randint(20, screen_width / 2)
food_y = random.randint(20, screen_height / 2)
food_size = 30
score = 0
fps = 30 # frames per second
snk_list = []
snk_length = 1
with open("highscore.txt", "r") as f:
hiscore = f.read()
while not exit_game:
if game_over:
gameWindow.fill(white)
text_screen("Game Over!! Press Enter to Continue", red, 100, 250)
with open("highscore.txt", "w") as f:
f.write(str(hiscore))
for event in pygame.event.get(): # This gets all the events which a user can perform in a game, like mouse hover, mouse click, pressing a certain key etc.
# print(event)
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
welcome()
else:
for event in pygame.event.get(): # This gets all the events which a user can perform in a game, like mouse hover, mouse click, pressing a certain key etc.
# print(event)
if event.type == pygame.QUIT:
exit_game = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
velocity_x = init_velocity
velocity_y = 0
if event.key == pygame.K_LEFT:
velocity_x = - init_velocity
velocity_y = 0
if event.key == pygame.K_UP:
velocity_y = - init_velocity
velocity_x = 0
if event.key == pygame.K_DOWN:
velocity_y = init_velocity
velocity_x = 0
if event.key == pygame.K_q: # cheatcode
score += 10
snake_x = snake_x + velocity_x
snake_y = snake_y + velocity_y
if abs(snake_x - food_x)<6 and abs(snake_y - food_y)<6: # condition when snake eats the food
score += 10
# print("Score: ", score )
food_x = random.randint(20, screen_width / 2) # to change the position of food after eating
food_y = random.randint(20, screen_height / 2)
snk_length += 5
if score>int(hiscore):
hiscore = score
gameWindow.fill(white) # Setting background color as white
pygame.draw.rect(gameWindow, red, [food_x, food_y, food_size, food_size]) # Making Food for Snake using Rectangle
text_screen("Score: " + str(score ) + " HighScore: " + str(hiscore), red, 5, 5)
head = [] # for the starting of the game
head.append(snake_x)
head.append(snake_y)
snk_list.append(head)
if len(snk_list)>snk_length:
del snk_list[0]
if head in snk_list[:-1]:
game_over = True
        if snake_x<0 or snake_x>screen_width or snake_y<0 or snake_y>screen_height:
game_over = True
# print("Game Over!!")
# pygame.draw.rect(gameWindow, black, [snake_x, snake_y, snake_size, snake_size]) # Making Head of Snake using Rectangle
plot_snake(gameWindow, black, snk_list, snake_size)
pygame.display.update() # Need to update display cause we have made changes to gameWindow
clock.tick(fps)
pygame.quit()
quit()
welcome() |
the-stack_0_5246 | import cv2
import numpy as np
from skimage.measure import compare_ssim as ssim
def get_mse_psnr(x, y):
if x.ndim == 4:
mse_list = []
psnr_list = []
data_len = len(x)
for k in range(data_len):
img = x[k]
ref = y[k]
mse = np.mean((img - ref) ** 2)
psnr = 10 * np.log10(1. / mse)
mse_list.append(mse)
psnr_list.append(psnr)
return np.asarray(mse_list), np.asarray(psnr_list)
elif x.ndim == 3:
mse = np.mean((x - y) ** 2)
psnr = 10 * np.log10(1. / mse)
return mse, psnr
else:
raise ValueError('Invalid data!')
def get_ssim(x, y):
"""
:param x: input
:param y: reference
:return: SSIM
"""
if x.ndim == 4:
ssim_list = []
data_len = len(x)
for k in range(data_len):
img = x[k]
ref = y[k]
ssim_value = ssim(ref, img, data_range=img.max()-img.min(),
multichannel=True)
ssim_list.append(ssim_value)
return np.asarray(ssim_list)
elif x.ndim == 3:
ssim_value = ssim(y, x, data_range=x.max() - x.min(),
multichannel=True)
        return ssim_value
    else:
        raise ValueError('Invalid data!')
def patch2image(patches, shape):
""" turn patches into an image
:param patches: patches of shape N x H x W x C
:param shape: a tuple (N_H, N_W)
:return: image of shape (N_H x H) x (N_W x W) x C
"""
num_patch = len(patches)
assert num_patch > 0
if num_patch < shape[0] * shape[1]:
patches_shape = patches.shape
patches_zero = np.zeros(
(shape[0] * shape[1] - num_patch,) + patches_shape[1:]
)
patches = np.concatenate((patches, patches_zero))
image = patches.reshape(tuple(shape) + patches.shape[1:])
image = np.swapaxes(image, axis1=1, axis2=2)
img_shape = image.shape
out_shape = (img_shape[0] * img_shape[1], img_shape[2] * img_shape[3],
img_shape[4])
image = image.reshape(out_shape)
return image
def create_patch_mask(size, edge):
""" create a map with all ones, pixels near edge approach 0 linearly
:param size: (h, w)
:param edge: (eh, ew)
:return: a edge_map of size (h, w)
"""
h, w = size
eh, ew = edge
    assert eh <= h//2 and ew <= w//2  # both edge widths must fit within half the patch
edge_map = np.ones((h, w), dtype=np.float32)
for idx_h in range(eh):
edge_map[idx_h, :] = 1. * (idx_h + 1) / (eh + 1)
edge_map[-1 - idx_h, :] = 1. * (idx_h + 1) / (eh + 1)
for idx_w in range(ew):
temp_column = np.ones((h, ), dtype=np.float32) * (idx_w + 1) / (ew + 1)
edge_map[:, idx_w] = np.minimum(edge_map[:, idx_w], temp_column)
edge_map[:, -1 - idx_w] = np.minimum(
edge_map[:, -1 - idx_w], temp_column
)
return edge_map
def whole2patch(img, size, stride, is_mask=True):
""" split a whole image to overlapped patches
:param img: an input color image
:param size: (h, w), size of each patch
:param stride: (sh, sw), stride of each patch
:param is_mask: use edge mask or not
:return: (patches, positions, count_map)
"""
h, w = size
sh, sw = stride
H, W, C = img.shape
assert sh <= h <= H and sw <= w <= W and C==3
count_map = np.zeros((H, W), dtype=np.float32)
if is_mask:
eh = (h - sh) // 2
ew = (w - sw) // 2
mask = create_patch_mask((h, w), (eh, ew))
# crop
patches = []
positions = []
h_list = list(range(0, H-h, sh)) + [H-h]
w_list = list(range(0, W-w, sw)) + [W-w]
for idx_h in h_list:
for idx_w in w_list:
# position
positions.append([idx_h, idx_w])
# count map
if is_mask:
count_map[idx_h: idx_h + h, idx_w: idx_w + w] += mask
else:
count_map[idx_h: idx_h + h, idx_w: idx_w + w] += 1
# patches
patches.append(img[idx_h: idx_h + h, idx_w: idx_w + w, :])
positions = np.asarray(positions)
patches = np.asarray(patches)
return patches, positions, count_map
def patch2whole(patches, positions, count_map, stride, is_mask=True):
""" this is the inverse function of `whole2patch`
:param patches: cropped patches
:param positions: position for each cropped patch
:param count_map: how many times each pixel is counted
:param stride: (sw, sh)
:param is_mask: whether the count map is calculated with edge mask
:return: image
"""
H, W = count_map.shape # image shape
h, w = patches.shape[1:3] # patch shape
if is_mask:
sh, sw = stride
eh = (h - sh) // 2
ew = (w - sw) // 2
mask = create_patch_mask((h, w), (eh, ew))
mask = np.repeat(np.expand_dims(mask, axis=2), 3, axis=2)
image = np.zeros((H, W, 3), dtype=np.float32)
for patch, pos in zip(patches, positions):
idx_h, idx_w = pos
if is_mask:
image[idx_h: idx_h + h, idx_w: idx_w + w, :] += patch * mask
else:
image[idx_h: idx_h + h, idx_w: idx_w + w, :] += patch
image /= np.repeat(np.expand_dims(count_map, axis=2), 3, axis=2)
return image
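# Illustrative usage sketch (not part of the original module): split an image into
# overlapping patches, process each patch, and merge the results back. The patch
# size, stride, and identity "processing" step are assumptions for illustration.
def _example_patch_round_trip(img):
    """img: float RGB image of shape (H, W, 3) with H, W >= 64, values in [0, 1]."""
    patches, positions, count_map = whole2patch(img, size=(64, 64), stride=(48, 48))
    processed = patches  # replace with per-patch enhancement (e.g. a denoising model)
    return patch2whole(processed, positions, count_map, stride=(48, 48))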
def load_images(list_in, list_gt, size=63):
""" load images
:param list_in: input image list
:param list_gt: label image list
:param size: image size
:return: (input, label), RGB images
"""
assert len(list_in) == len(list_gt)
img_num = len(list_in)
imgs_in = np.zeros([img_num, size, size, 3])
imgs_gt = np.zeros([img_num, size, size, 3])
for k in range(img_num):
imgs_in[k, ...] = cv2.imread(list_in[k])[:, :, ::-1] / 255.
imgs_gt[k, ...] = cv2.imread(list_gt[k])[:, :, ::-1] / 255.
return imgs_in, imgs_gt
|
the-stack_0_5247 | import os
import socket
def getIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 1))
return s.getsockname()[0]
lsquic_dir = os.path.expanduser('~/oqs/lsquic')
#key_crt_dir = os.path.expanduser('~/SERVER_RSA_FILES/key_crt.pem')
#key_srv_dir = os.path.expanduser('~/SERVER_RSA_FILES/key_srv.pem')
key_crt_dir = f'{lsquic_dir}/certs/secp128r1/key_crt.pem'
key_srv_dir = f'{lsquic_dir}/certs/secp128r1/key_srv.pem'
print(f'Key CRT Directory: {key_crt_dir}')
print(f'Key SRV Directory: {key_srv_dir}')
server_ip = getIP()
print()
print(f'Server IP: {server_ip}')
print()
#myCmd= f'{lsquic_dir}/build/bin/./http_server -L debug -c www.example.com,{key_crt_dir},{key_srv_dir} -s {server_ip}:4433 -p /'
myCmd= f'{lsquic_dir}/build/bin/./http_server -c www.example.com,{key_crt_dir},{key_srv_dir} -s {server_ip}:4433 -p /'
os.system(myCmd)
|
the-stack_0_5249 | """Support for deCONZ lights."""
from __future__ import annotations
from pydeconz.group import DeconzGroup as Group
from pydeconz.light import (
ALERT_LONG,
ALERT_SHORT,
EFFECT_COLOR_LOOP,
EFFECT_NONE,
Light,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
COLOR_MODE_XY,
DOMAIN,
EFFECT_COLORLOOP,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.color import color_hs_to_xy
from .const import DOMAIN as DECONZ_DOMAIN, NEW_GROUP, NEW_LIGHT, POWER_PLUGS
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
DECONZ_GROUP = "is_deconz_group"
EFFECT_TO_DECONZ = {EFFECT_COLORLOOP: EFFECT_COLOR_LOOP, "None": EFFECT_NONE}
FLASH_TO_DECONZ = {FLASH_SHORT: ALERT_SHORT, FLASH_LONG: ALERT_LONG}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ lights and groups from a config entry."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_light(lights=gateway.api.lights.values()):
"""Add light from deCONZ."""
entities = []
for light in lights:
if (
isinstance(light, Light)
and light.type not in POWER_PLUGS
and light.unique_id not in gateway.entities[DOMAIN]
):
entities.append(DeconzLight(light, gateway))
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_LIGHT), async_add_light
)
)
@callback
def async_add_group(groups=gateway.api.groups.values()):
"""Add group from deCONZ."""
if not gateway.option_allow_deconz_groups:
return
entities = []
for group in groups:
if not group.lights:
continue
known_groups = set(gateway.entities[DOMAIN])
new_group = DeconzGroup(group, gateway)
if new_group.unique_id not in known_groups:
entities.append(new_group)
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_GROUP), async_add_group
)
)
async_add_light()
async_add_group()
class DeconzBaseLight(DeconzDevice, LightEntity):
"""Representation of a deCONZ light."""
TYPE = DOMAIN
def __init__(self, device, gateway):
"""Set up light."""
super().__init__(device, gateway)
self._attr_supported_color_modes = set()
if device.color_temp is not None:
self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
if device.hue is not None and device.saturation is not None:
self._attr_supported_color_modes.add(COLOR_MODE_HS)
if device.xy is not None:
self._attr_supported_color_modes.add(COLOR_MODE_XY)
if not self._attr_supported_color_modes and device.brightness is not None:
self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
if not self._attr_supported_color_modes:
self._attr_supported_color_modes.add(COLOR_MODE_ONOFF)
if device.brightness is not None:
self._attr_supported_features |= SUPPORT_FLASH
self._attr_supported_features |= SUPPORT_TRANSITION
if device.effect is not None:
self._attr_supported_features |= SUPPORT_EFFECT
self._attr_effect_list = [EFFECT_COLORLOOP]
@property
def color_mode(self) -> str:
"""Return the color mode of the light."""
if self._device.color_mode == "ct":
color_mode = COLOR_MODE_COLOR_TEMP
elif self._device.color_mode == "hs":
color_mode = COLOR_MODE_HS
elif self._device.color_mode == "xy":
color_mode = COLOR_MODE_XY
elif self._device.brightness is not None:
color_mode = COLOR_MODE_BRIGHTNESS
else:
color_mode = COLOR_MODE_ONOFF
return color_mode
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._device.brightness
@property
def color_temp(self):
"""Return the CT color value."""
return self._device.color_temp
@property
def hs_color(self) -> tuple:
"""Return the hs color value."""
return (self._device.hue / 65535 * 360, self._device.saturation / 255 * 100)
@property
def xy_color(self) -> tuple | None:
"""Return the XY color value."""
return self._device.xy
@property
def is_on(self):
"""Return true if light is on."""
return self._device.state
async def async_turn_on(self, **kwargs):
"""Turn on light."""
data = {"on": True}
if ATTR_BRIGHTNESS in kwargs:
data["brightness"] = kwargs[ATTR_BRIGHTNESS]
if ATTR_COLOR_TEMP in kwargs:
data["color_temperature"] = kwargs[ATTR_COLOR_TEMP]
if ATTR_HS_COLOR in kwargs:
if COLOR_MODE_XY in self._attr_supported_color_modes:
data["xy"] = color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
else:
data["hue"] = int(kwargs[ATTR_HS_COLOR][0] / 360 * 65535)
data["saturation"] = int(kwargs[ATTR_HS_COLOR][1] / 100 * 255)
if ATTR_XY_COLOR in kwargs:
data["xy"] = kwargs[ATTR_XY_COLOR]
if ATTR_TRANSITION in kwargs:
data["transition_time"] = int(kwargs[ATTR_TRANSITION] * 10)
elif "IKEA" in self._device.manufacturer:
data["transition_time"] = 0
if (alert := FLASH_TO_DECONZ.get(kwargs.get(ATTR_FLASH))) is not None:
data["alert"] = alert
del data["on"]
if (effect := EFFECT_TO_DECONZ.get(kwargs.get(ATTR_EFFECT))) is not None:
data["effect"] = effect
await self._device.set_state(**data)
async def async_turn_off(self, **kwargs):
"""Turn off light."""
if not self._device.state:
return
data = {"on": False}
if ATTR_TRANSITION in kwargs:
data["brightness"] = 0
data["transition_time"] = int(kwargs[ATTR_TRANSITION] * 10)
if (alert := FLASH_TO_DECONZ.get(kwargs.get(ATTR_FLASH))) is not None:
data["alert"] = alert
del data["on"]
await self._device.set_state(**data)
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return {DECONZ_GROUP: isinstance(self._device, Group)}
class DeconzLight(DeconzBaseLight):
"""Representation of a deCONZ light."""
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._device.max_color_temp or super().max_mireds
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._device.min_color_temp or super().min_mireds
class DeconzGroup(DeconzBaseLight):
"""Representation of a deCONZ group."""
def __init__(self, device, gateway):
"""Set up group and create an unique id."""
self._unique_id = f"{gateway.bridgeid}-{device.deconz_id}"
super().__init__(device, gateway)
@property
def unique_id(self):
"""Return a unique identifier for this device."""
return self._unique_id
@property
def device_info(self):
"""Return a device description for device registry."""
return {
"identifiers": {(DECONZ_DOMAIN, self.unique_id)},
"manufacturer": "Dresden Elektronik",
"model": "deCONZ group",
"name": self._device.name,
"via_device": (DECONZ_DOMAIN, self.gateway.api.config.bridge_id),
}
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attributes = dict(super().extra_state_attributes)
attributes["all_on"] = self._device.all_on
return attributes
|
the-stack_0_5250 | from typing import List, Tuple
from mock import Mock
from synapse.events import EventBase
from synapse.federation.sender import PerDestinationQueue, TransactionManager
from synapse.federation.units import Edu
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from tests.test_utils import event_injection, make_awaitable
from tests.unittest import FederatingHomeserverTestCase, override_config
class FederationCatchUpTestCases(FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
]
def make_homeserver(self, reactor, clock):
return self.setup_test_homeserver(
federation_transport_client=Mock(spec=["send_transaction"]),
)
def prepare(self, reactor, clock, hs):
# stub out get_current_hosts_in_room
state_handler = hs.get_state_handler()
# This mock is crucial for destination_rooms to be populated.
state_handler.get_current_hosts_in_room = Mock(
return_value=make_awaitable(["test", "host2"])
)
# whenever send_transaction is called, record the pdu data
self.pdus = []
self.failed_pdus = []
self.is_online = True
self.hs.get_federation_transport_client().send_transaction.side_effect = (
self.record_transaction
)
async def record_transaction(self, txn, json_cb):
if self.is_online:
data = json_cb()
self.pdus.extend(data["pdus"])
return {}
else:
data = json_cb()
self.failed_pdus.extend(data["pdus"])
raise IOError("Failed to connect because this is a test!")
def get_destination_room(self, room: str, destination: str = "host2") -> dict:
"""
Gets the destination_rooms entry for a (destination, room_id) pair.
Args:
room: room ID
destination: what destination, default is "host2"
Returns:
Dictionary of { event_id: str, stream_ordering: int }
"""
event_id, stream_ordering = self.get_success(
self.hs.get_datastore().db_pool.execute(
"test:get_destination_rooms",
None,
"""
SELECT event_id, stream_ordering
FROM destination_rooms dr
JOIN events USING (stream_ordering)
WHERE dr.destination = ? AND dr.room_id = ?
""",
destination,
room,
)
)[0]
return {"event_id": event_id, "stream_ordering": stream_ordering}
@override_config({"send_federation": True})
def test_catch_up_destination_rooms_tracking(self):
"""
Tests that we populate the `destination_rooms` table as needed.
"""
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room = self.helper.create_room_as("u1", tok=u1_token)
self.get_success(
event_injection.inject_member_event(self.hs, room, "@user:host2", "join")
)
event_id_1 = self.helper.send(room, "wombats!", tok=u1_token)["event_id"]
row_1 = self.get_destination_room(room)
event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"]
row_2 = self.get_destination_room(room)
# check: events correctly registered in order
self.assertEqual(row_1["event_id"], event_id_1)
self.assertEqual(row_2["event_id"], event_id_2)
self.assertEqual(row_1["stream_ordering"], row_2["stream_ordering"] - 1)
@override_config({"send_federation": True})
def test_catch_up_last_successful_stream_ordering_tracking(self):
"""
Tests that we populate the `destination_rooms` table as needed.
"""
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room = self.helper.create_room_as("u1", tok=u1_token)
# take the remote offline
self.is_online = False
self.get_success(
event_injection.inject_member_event(self.hs, room, "@user:host2", "join")
)
self.helper.send(room, "wombats!", tok=u1_token)
self.pump()
lsso_1 = self.get_success(
self.hs.get_datastore().get_destination_last_successful_stream_ordering(
"host2"
)
)
self.assertIsNone(
lsso_1,
"There should be no last successful stream ordering for an always-offline destination",
)
# bring the remote online
self.is_online = True
event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"]
lsso_2 = self.get_success(
self.hs.get_datastore().get_destination_last_successful_stream_ordering(
"host2"
)
)
row_2 = self.get_destination_room(room)
self.assertEqual(
self.pdus[0]["content"]["body"],
"rabbits!",
"Test fault: didn't receive the right PDU",
)
self.assertEqual(
row_2["event_id"],
event_id_2,
"Test fault: destination_rooms not updated correctly",
)
self.assertEqual(
lsso_2,
row_2["stream_ordering"],
"Send succeeded but not marked as last_successful_stream_ordering",
)
@override_config({"send_federation": True}) # critical to federate
def test_catch_up_from_blank_state(self):
"""
Runs an overall test of federation catch-up from scratch.
Further tests will focus on more narrow aspects and edge-cases, but I
hope to provide an overall view with this test.
"""
# bring the other server online
self.is_online = True
# let's make some events for the other server to receive
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room_1 = self.helper.create_room_as("u1", tok=u1_token)
room_2 = self.helper.create_room_as("u1", tok=u1_token)
# also critical to federate
self.get_success(
event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
)
self.get_success(
event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join")
)
self.helper.send_state(
room_1, event_type="m.room.topic", body={"topic": "wombat"}, tok=u1_token
)
# check: PDU received for topic event
self.assertEqual(len(self.pdus), 1)
self.assertEqual(self.pdus[0]["type"], "m.room.topic")
# take the remote offline
self.is_online = False
# send another event
self.helper.send(room_1, "hi user!", tok=u1_token)
# check: things didn't go well since the remote is down
self.assertEqual(len(self.failed_pdus), 1)
self.assertEqual(self.failed_pdus[0]["content"]["body"], "hi user!")
# let's delete the federation transmission queue
# (this pretends we are starting up fresh.)
self.assertFalse(
self.hs.get_federation_sender()
._per_destination_queues["host2"]
.transmission_loop_running
)
del self.hs.get_federation_sender()._per_destination_queues["host2"]
# let's also clear any backoffs
self.get_success(
self.hs.get_datastore().set_destination_retry_timings("host2", None, 0, 0)
)
# bring the remote online and clear the received pdu list
self.is_online = True
self.pdus = []
# now we need to initiate a federation transaction somehow…
# to do that, let's send another event (because it's simple to do)
# (do it to another room otherwise the catch-up logic decides it doesn't
# need to catch up room_1 — something I overlooked when first writing
# this test)
self.helper.send(room_2, "wombats!", tok=u1_token)
# we should now have received both PDUs
self.assertEqual(len(self.pdus), 2)
self.assertEqual(self.pdus[0]["content"]["body"], "hi user!")
self.assertEqual(self.pdus[1]["content"]["body"], "wombats!")
def make_fake_destination_queue(
self, destination: str = "host2"
) -> Tuple[PerDestinationQueue, List[EventBase]]:
"""
Makes a fake per-destination queue.
"""
transaction_manager = TransactionManager(self.hs)
per_dest_queue = PerDestinationQueue(self.hs, transaction_manager, destination)
results_list = []
async def fake_send(
destination_tm: str,
pending_pdus: List[EventBase],
_pending_edus: List[Edu],
) -> bool:
assert destination == destination_tm
results_list.extend(pending_pdus)
return True # success!
transaction_manager.send_new_transaction = fake_send
return per_dest_queue, results_list
@override_config({"send_federation": True})
def test_catch_up_loop(self):
"""
Tests the behaviour of _catch_up_transmission_loop.
"""
# ARRANGE:
# - a local user (u1)
# - 3 rooms which u1 is joined to (and remote user @user:host2 is
# joined to)
# - some events (1 to 5) in those rooms
# we have 'already sent' events 1 and 2 to host2
per_dest_queue, sent_pdus = self.make_fake_destination_queue()
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room_1 = self.helper.create_room_as("u1", tok=u1_token)
room_2 = self.helper.create_room_as("u1", tok=u1_token)
room_3 = self.helper.create_room_as("u1", tok=u1_token)
self.get_success(
event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
)
self.get_success(
event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join")
)
self.get_success(
event_injection.inject_member_event(self.hs, room_3, "@user:host2", "join")
)
# create some events
self.helper.send(room_1, "you hear me!!", tok=u1_token)
event_id_2 = self.helper.send(room_2, "wombats!", tok=u1_token)["event_id"]
self.helper.send(room_3, "Matrix!", tok=u1_token)
event_id_4 = self.helper.send(room_2, "rabbits!", tok=u1_token)["event_id"]
event_id_5 = self.helper.send(room_3, "Synapse!", tok=u1_token)["event_id"]
# destination_rooms should already be populated, but let us pretend that we already
# sent (successfully) up to and including event id 2
event_2 = self.get_success(self.hs.get_datastore().get_event(event_id_2))
# also fetch event 5 so we know its last_successful_stream_ordering later
event_5 = self.get_success(self.hs.get_datastore().get_event(event_id_5))
self.get_success(
self.hs.get_datastore().set_destination_last_successful_stream_ordering(
"host2", event_2.internal_metadata.stream_ordering
)
)
# ACT
self.get_success(per_dest_queue._catch_up_transmission_loop())
# ASSERT, noticing in particular:
# - event 3 not sent out, because event 5 replaces it
# - order is least recent first, so event 5 comes after event 4
# - catch-up is completed
self.assertEqual(len(sent_pdus), 2)
self.assertEqual(sent_pdus[0].event_id, event_id_4)
self.assertEqual(sent_pdus[1].event_id, event_id_5)
self.assertFalse(per_dest_queue._catching_up)
self.assertEqual(
per_dest_queue._last_successful_stream_ordering,
event_5.internal_metadata.stream_ordering,
)
@override_config({"send_federation": True})
def test_catch_up_on_synapse_startup(self):
"""
Tests the behaviour of get_catch_up_outstanding_destinations and
_wake_destinations_needing_catchup.
"""
# list of sorted server names (note that there are more servers than the batch
# size used in get_catch_up_outstanding_destinations).
server_names = ["server%02d" % number for number in range(42)] + ["zzzerver"]
# ARRANGE:
# - a local user (u1)
# - a room which u1 is joined to (and remote users @user:serverXX are
# joined to)
# mark the remotes as online
self.is_online = True
self.register_user("u1", "you the one")
u1_token = self.login("u1", "you the one")
room_id = self.helper.create_room_as("u1", tok=u1_token)
for server_name in server_names:
self.get_success(
event_injection.inject_member_event(
self.hs, room_id, "@user:%s" % server_name, "join"
)
)
# create an event
self.helper.send(room_id, "deary me!", tok=u1_token)
# ASSERT:
# - All servers are up to date so none should have outstanding catch-up
outstanding_when_successful = self.get_success(
self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
)
self.assertEqual(outstanding_when_successful, [])
# ACT:
# - Make the remote servers unreachable
self.is_online = False
# - Mark zzzerver as being backed-off from
now = self.clock.time_msec()
self.get_success(
self.hs.get_datastore().set_destination_retry_timings(
"zzzerver", now, now, 24 * 60 * 60 * 1000 # retry in 1 day
)
)
# - Send an event
self.helper.send(room_id, "can anyone hear me?", tok=u1_token)
# ASSERT (get_catch_up_outstanding_destinations):
# - all remotes are outstanding
# - they are returned in batches of 25, in order
outstanding_1 = self.get_success(
self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
)
self.assertEqual(len(outstanding_1), 25)
self.assertEqual(outstanding_1, server_names[0:25])
outstanding_2 = self.get_success(
self.hs.get_datastore().get_catch_up_outstanding_destinations(
outstanding_1[-1]
)
)
self.assertNotIn("zzzerver", outstanding_2)
self.assertEqual(len(outstanding_2), 17)
self.assertEqual(outstanding_2, server_names[25:-1])
# ACT: call _wake_destinations_needing_catchup
# patch wake_destination to just count the destinations instead
woken = []
def wake_destination_track(destination):
woken.append(destination)
self.hs.get_federation_sender().wake_destination = wake_destination_track
# cancel the pre-existing timer for _wake_destinations_needing_catchup
# this is because we are calling it manually rather than waiting for it
# to be called automatically
self.hs.get_federation_sender()._catchup_after_startup_timer.cancel()
self.get_success(
self.hs.get_federation_sender()._wake_destinations_needing_catchup(), by=5.0
)
# ASSERT (_wake_destinations_needing_catchup):
# - all remotes are woken up, save for zzzerver
self.assertNotIn("zzzerver", woken)
# - all destinations are woken exactly once; they appear once in woken.
self.assertCountEqual(woken, server_names[:-1])
|
the-stack_0_5251 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descriptor wallet function."""
from test_framework.test_framework import VIDCoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error
)
class WalletDescriptorTest(VIDCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-keypool=100']]
self.wallet_names = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def run_test(self):
# Make a legacy wallet and check it is BDB
self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False)
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'bdb')
self.nodes[0].unloadwallet("legacy1")
# Make a descriptor wallet
self.log.info("Making a descriptor wallet")
self.nodes[0].createwallet(wallet_name="desc1", descriptors=True)
# A descriptor wallet should have 100 addresses * 3 types = 300 keys
self.log.info("Checking wallet info")
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'sqlite')
assert_equal(wallet_info['keypoolsize'], 300)
assert_equal(wallet_info['keypoolsize_hd_internal'], 300)
assert 'keypoololdest' not in wallet_info
# Check that getnewaddress works
self.log.info("Test that getnewaddress and getrawchangeaddress work")
addr = self.nodes[0].getnewaddress("", "legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/0/0')
# Check that getrawchangeaddress works
addr = self.nodes[0].getrawchangeaddress("legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/1/0')
# Make a wallet to receive coins at
self.nodes[0].createwallet(wallet_name="desc2", descriptors=True)
recv_wrpc = self.nodes[0].get_wallet_rpc("desc2")
send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
# Generate some coins
send_wrpc.generatetoaddress(101, send_wrpc.getnewaddress())
# Make transactions
self.log.info("Test sending and receiving")
addr = recv_wrpc.getnewaddress()
send_wrpc.sendtoaddress(addr, 10)
# Make sure things are disabled
self.log.info("Test disabled RPCs")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importmulti, [])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.sethdseed)
self.log.info("Test encryption")
# Get the master fingerprint before encrypt
info1 = send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
# Encrypt wallet 0
send_wrpc.encryptwallet('pass')
send_wrpc.walletpassphrase('pass', 10)
addr = send_wrpc.getnewaddress()
info2 = send_wrpc.getaddressinfo(addr)
assert info1['hdmasterfingerprint'] != info2['hdmasterfingerprint']
send_wrpc.walletlock()
assert 'hdmasterfingerprint' in send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
info3 = send_wrpc.getaddressinfo(addr)
assert_equal(info2['desc'], info3['desc'])
self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet")
for _ in range(500):
send_wrpc.getnewaddress()
self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet")
send_wrpc.walletpassphrase('pass', 10)
send_wrpc.importdescriptors([{
"desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n",
"timestamp": "now",
"range": [0,10],
"active": True
}])
send_wrpc.walletlock()
# Exhaust keypool of 100
for _ in range(100):
send_wrpc.getnewaddress(address_type='bech32')
# This should now error
assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32')
self.log.info("Test born encrypted wallets")
self.nodes[0].createwallet('desc_enc', False, False, 'pass', False, True)
enc_rpc = self.nodes[0].get_wallet_rpc('desc_enc')
enc_rpc.getnewaddress() # Makes sure that we can get a new address from a born encrypted wallet
self.log.info("Test blank descriptor wallets")
self.nodes[0].createwallet(wallet_name='desc_blank', blank=True, descriptors=True)
blank_rpc = self.nodes[0].get_wallet_rpc('desc_blank')
assert_raises_rpc_error(-4, 'This wallet has no available keys', blank_rpc.getnewaddress)
self.log.info("Test descriptor wallet with disabled private keys")
self.nodes[0].createwallet(wallet_name='desc_no_priv', disable_private_keys=True, descriptors=True)
nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv')
assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress)
if __name__ == '__main__':
    WalletDescriptorTest().main()
|
the-stack_0_5256 | from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("meet-me", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("meet-me", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
|
the-stack_0_5258 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class View(nn.Module):
def __init__(self, size):
super(View, self).__init__()
self.size = size
def forward(self, tensor):
return tensor.view(self.size)
class VAE(nn.Module):
"""Encoder-Decoder architecture for both WAE-MMD and WAE-GAN."""
def __init__(self, z_dim=32, nc=3):
super(VAE, self).__init__()
self.z_dim = z_dim
self.nc = nc
self.encoder = nn.Sequential(
            nn.Conv2d(nc, 128, 4, 2, 1, bias=False),              # B, 128, 16, 16
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.Conv2d(128, 256, 4, 2, 1, bias=False),             # B, 256, 8, 8
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),             # B, 512, 4, 4
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.Conv2d(512, 1024, 4, 2, 1, bias=False),            # B, 1024, 2, 2
            nn.BatchNorm2d(1024),
            nn.ReLU(True),
            View((-1, 1024*2*2)),                                 # B, 1024*2*2
)
self.fc_mu = nn.Linear(1024*2*2, z_dim) # B, z_dim
self.fc_logvar = nn.Linear(1024*2*2, z_dim) # B, z_dim
self.decoder = nn.Sequential(
            nn.Linear(z_dim, 1024*4*4),                           # B, 1024*4*4
            View((-1, 1024, 4, 4)),                               # B, 1024, 4, 4
            nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias=False),   # B, 512, 8, 8
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),    # B, 256, 16, 16
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),    # B, 128, 32, 32
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, nc, 1),                       # B, nc, 32, 32
)
self.weight_init()
def weight_init(self):
for block in self._modules:
try:
for m in self._modules[block]:
kaiming_init(m)
except:
kaiming_init(block)
def forward(self, x):
z = self._encode(x)
mu, logvar = self.fc_mu(z), self.fc_logvar(z)
z = self.reparameterize(mu, logvar)
x_recon = self._decode(z)
return x_recon, z, mu, logvar
def reparameterize(self, mu, logvar):
stds = (0.5 * logvar).exp()
epsilon = torch.randn(*mu.size())
if mu.is_cuda:
stds, epsilon = stds.cuda(), epsilon.cuda()
latents = epsilon * stds + mu
return latents
def _encode(self, x):
return self.encoder(x)
def _decode(self, z):
return self.decoder(z)
class Discriminator(nn.Module):
"""Adversary architecture(Discriminator) for WAE-GAN."""
def __init__(self, z_dim=10, n_classes=1):
super(Discriminator, self).__init__()
self.z_dim = z_dim
self.net = nn.Sequential(
nn.Linear(z_dim, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, n_classes),
nn.Softmax()
)
self.weight_init()
def weight_init(self):
for block in self._modules:
for m in self._modules[block]:
kaiming_init(m)
def forward(self, z):
return self.net(z)
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
def normal_init(m, mean, std):
if isinstance(m, (nn.Linear, nn.Conv2d)):
m.weight.data.normal_(mean, std)
if m.bias.data is not None:
m.bias.data.zero_()
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
m.weight.data.fill_(1)
if m.bias.data is not None:
m.bias.data.zero_()
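
# Minimal smoke test (not part of the original module). The encoder's View((-1, 1024*2*2))
# implies 32x32 inputs, so that spatial size is assumed here.
if __name__ == '__main__':
    vae = VAE(z_dim=32, nc=3)
    disc = Discriminator(z_dim=32, n_classes=1)
    x = torch.randn(4, 3, 32, 32)            # batch of 4 RGB images (assumed 32x32)
    x_recon, z, mu, logvar = vae(x)
    print(x_recon.shape, z.shape)            # expected: (4, 3, 32, 32) and (4, 32)
    print(disc(z).shape)                     # expected: (4, 1)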
|
the-stack_0_5260 | try:
import matplotlib.pyplot as plt
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot_no_player_aces(q):
fig, axis = plt.subplots(3, 6, sharey=True)
fig.suptitle('Player Hand Values (No Aces) vs Dealer First Card Value', fontsize=10)
size = 5
for index, player_start in enumerate(range(4, 22)):
p = frozenset(set([player_start]))
hits = list()
stays = list()
for dealer_start in range(1, 12):
d = frozenset(set([dealer_start]))
if dealer_start == 1 or dealer_start == 11:
d = frozenset([1, 11])
if p in q and d in q[p]:
values = q[p][d]
hits.append(values[0])
stays.append(values[1])
else:
hits.append(None)
stays.append(None)
plt.subplot(3, 6, 1 + index)
plt.title('player hand value: ' + str(player_start), fontsize=6)
ax = plt.gca()
ax.set_xlabel('dealer first card', fontsize=5)
ax.set_ylabel('reward', fontsize=5)
ax.tick_params(axis='both', which='major', labelsize=5)
ax.tick_params(axis='both', which='minor', labelsize=10)
plt.plot(range(1, len(hits) + 1), hits, 'g-', label='hits')
plt.plot(range(1, len(stays) + 1), stays, 'r-', label='stays')
if player_start == 21:
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='lower right')
plt.tight_layout(pad=0.2, h_pad=0.4, w_pad=-1.4)
plt.subplots_adjust(top=0.9, bottom=0.2)
plt.show()
def print_no_player_aces(q):
for player_start in range(4, 22):
for dealer_start in range(1, 12):
p = frozenset(set([player_start]))
d = frozenset(set([dealer_start]))
if dealer_start == 1 or dealer_start == 11:
d = frozenset([1, 11])
if p in q and d in q[p]:
values = q[p][d]
hit = str(round(values[0], 3)).ljust(6, '0')
stay = str(round(values[1], 3)).ljust(6, '0')
else:
hit = "-"
stay = "-"
print('{} / {}: [{}, {}]'.format(str(player_start).rjust(2, ' '), str(dealer_start).rjust(2, ' '), hit, stay))
def print_one_player_ace(q):
print('fit, one player ace, dealer ace = 1 or 11:')
for player_start in range(2, 21):
for dealer_start in range(1, 12):
p = frozenset(set([player_start + 1, player_start + 11]))
d = frozenset(set([dealer_start]))
if dealer_start == 1 or dealer_start == 11:
d = frozenset([1, 11])
if p in q and d in q[p]:
values = q[p][d]
hit = str(round(values[0], 3)).ljust(6, '0')
stay = str(round(values[1], 3)).ljust(6, '0')
else:
hit = "-"
stay = "-"
print('{} / {}: [{}, {}]'.format(str([player_start + 1, player_start + 11]).rjust(8, ' '), str(dealer_start).rjust(2, ' '), hit, stay))
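
# Usage sketch (assumption: q is a nested dict keyed by frozensets of possible hand
# values, mapping to [expected_hit_reward, expected_stay_reward], as the lookups above imply).
if __name__ == '__main__':
    toy_q = {
        frozenset({16}): {frozenset({10}): [-0.54, -0.48]},  # made-up values
    }
    print_no_player_aces(toy_q)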
|
the-stack_0_5262 | import math
tcase = int(input())
while tcase > 0:
    num = int(input())
    lst = []
    while num != 0:
        lst.append(num % 10)
        num = math.floor(num / 10)
print(lst[0] + lst[-1])
tcase -= 1
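# Example (hypothetical input): for one test case with the number 2345, the digits are
# collected in reverse order as [5, 4, 3, 2], so lst[0] + lst[-1] prints 5 + 2 = 7,
# i.e. the sum of the last and first digits.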
|
the-stack_0_5263 | # Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import logging
from typing import Dict, List
import torch
from detectron2.config import configurable
from detectron2.layers import ShapeSpec, batched_nms_rotated, cat
from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
from detectron2.utils.memory import retry_if_cuda_oom
from ..box_regression import Box2BoxTransformRotated
from .build import PROPOSAL_GENERATOR_REGISTRY
from .proposal_utils import _is_tracing
from .rpn import RPN
logger = logging.getLogger(__name__)
def find_top_rrpn_proposals(
proposals,
pred_objectness_logits,
image_sizes,
nms_thresh,
pre_nms_topk,
post_nms_topk,
min_box_size,
training,
):
"""
For each feature map, select the `pre_nms_topk` highest scoring proposals,
apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
highest scoring proposals among all the feature maps if `training` is True,
otherwise, returns the highest `post_nms_topk` scoring proposals for each
feature map.
Args:
proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5).
All proposal predictions on the feature maps.
pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
image_sizes (list[tuple]): sizes (h, w) for each image
nms_thresh (float): IoU threshold to use for NMS
pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
When RRPN is run on multiple feature maps (as in FPN) this number is per
feature map.
post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
When RRPN is run on multiple feature maps (as in FPN) this number is total,
over all feature maps.
min_box_size(float): minimum proposal box side length in pixels (absolute units wrt
input images).
training (bool): True if proposals are to be used in training, otherwise False.
            This arg exists only to support a legacy bug; see the comment about the
            Detectron1 behavior in the per-image NMS loop below.
Returns:
proposals (list[Instances]): list of N Instances. The i-th Instances
stores post_nms_topk object proposals for image i.
"""
num_images = len(image_sizes)
device = proposals[0].device
# 1. Select top-k anchor for every level and every image
topk_scores = [] # #lvl Tensor, each of shape N x topk
topk_proposals = []
level_ids = [] # #lvl Tensor, each of shape (topk,)
batch_idx = torch.arange(num_images, device=device)
for level_id, proposals_i, logits_i in zip(
itertools.count(), proposals, pred_objectness_logits
):
Hi_Wi_A = logits_i.shape[1]
if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing
num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
else:
num_proposals_i = min(Hi_Wi_A, pre_nms_topk)
topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
# each is N x topk
topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5
topk_proposals.append(topk_proposals_i)
topk_scores.append(topk_scores_i)
level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
# 2. Concat all levels together
topk_scores = cat(topk_scores, dim=1)
topk_proposals = cat(topk_proposals, dim=1)
level_ids = cat(level_ids, dim=0)
# 3. For each image, run a per-level NMS, and choose topk results.
results = []
for n, image_size in enumerate(image_sizes):
boxes = RotatedBoxes(topk_proposals[n])
scores_per_img = topk_scores[n]
lvl = level_ids
valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
if not valid_mask.all():
if training:
raise FloatingPointError(
"Predicted boxes or scores contain Inf/NaN. Training has diverged."
)
boxes = boxes[valid_mask]
scores_per_img = scores_per_img[valid_mask]
lvl = lvl[valid_mask]
boxes.clip(image_size)
# filter empty boxes
keep = boxes.nonempty(threshold=min_box_size)
if _is_tracing() or keep.sum().item() != len(boxes):
boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], lvl[keep])
keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
# In Detectron1, there was different behavior during training vs. testing.
# (https://github.com/facebookresearch/Detectron/issues/459)
# During training, topk is over the proposals from *all* images in the training batch.
# During testing, it is over the proposals for each image separately.
# As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
# This bug is addressed in Detectron2 to make the behavior independent of batch size.
keep = keep[:post_nms_topk]
res = Instances(image_size)
res.proposal_boxes = boxes[keep]
res.objectness_logits = scores_per_img[keep]
results.append(res)
return results
@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
"""
Rotated Region Proposal Network described in :paper:`RRPN`.
"""
@configurable
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.anchor_boundary_thresh >= 0:
raise NotImplementedError(
"anchor_boundary_thresh is a legacy option not implemented for RRPN."
)
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = super().from_config(cfg, input_shape)
ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
return ret
@torch.no_grad()
def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]):
"""
Args:
anchors (list[RotatedBoxes]): anchors for each feature map.
gt_instances: the ground-truth instances for each image.
Returns:
list[Tensor]:
List of #img tensors. i-th element is a vector of labels whose length is
the total number of anchors across feature maps. Label values are in {-1, 0, 1},
with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
list[Tensor]:
i-th element is a Nx5 tensor, where N is the total number of anchors across
feature maps. The values are the matched gt boxes for each anchor.
Values are undefined for those anchors not labeled as 1.
"""
anchors = RotatedBoxes.cat(anchors)
gt_boxes = [x.gt_boxes for x in gt_instances]
del gt_instances
gt_labels = []
matched_gt_boxes = []
for gt_boxes_i in gt_boxes:
"""
gt_boxes_i: ground-truth boxes for i-th image
"""
match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors)
matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
# Matching is memory-expensive and may result in CPU tensors. But the result is small
gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
# A vector of labels (-1, 0, 1) for each anchor
gt_labels_i = self._subsample_labels(gt_labels_i)
if len(gt_boxes_i) == 0:
# These values won't be used anyway since the anchor is labeled as background
matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
else:
# TODO wasted indexing computation for ignored boxes
matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
gt_labels.append(gt_labels_i) # N,AHW
matched_gt_boxes.append(matched_gt_boxes_i)
return gt_labels, matched_gt_boxes
@torch.no_grad()
def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes):
pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
return find_top_rrpn_proposals(
pred_proposals,
pred_objectness_logits,
image_sizes,
self.nms_thresh,
self.pre_nms_topk[self.training],
self.post_nms_topk[self.training],
self.min_box_size,
self.training,
)
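
# Rough usage sketch for find_top_rrpn_proposals (illustrative only, not called anywhere):
# shapes follow the docstring above -- one feature level, N=2 images, Hi*Wi*A=1000 anchors,
# 5-dof rotated boxes (x_ctr, y_ctr, w, h, angle).
def _demo_find_top_rrpn_proposals():
    torch.manual_seed(0)
    proposals = [torch.rand(2, 1000, 5) * 100]
    objectness = [torch.randn(2, 1000)]
    image_sizes = [(128, 128), (128, 128)]
    return find_top_rrpn_proposals(
        proposals,
        objectness,
        image_sizes,
        nms_thresh=0.7,
        pre_nms_topk=500,
        post_nms_topk=100,
        min_box_size=0.0,
        training=False,
    )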
|
the-stack_0_5265 | from spaceone.core.service import *
from spaceone.identity.manager import DomainManager
from spaceone.identity.manager.domain_secret_manager import DomainSecretManager
from spaceone.identity.model import Domain
@authentication_handler(exclude=['create', 'list', 'get_public_key'])
@authorization_handler(exclude=['create', 'list', 'get_public_key'])
@mutation_handler(exclude=['create', 'list', 'get_public_key'])
@event_handler
class DomainService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
self.domain_mgr: DomainManager = self.locator.get_manager('DomainManager')
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['name'])
def create(self, params):
""" Create domain
Args:
params (dict): {
'name': 'str',
'config': 'dict',
'plugin_info': 'dict',
'tags': 'list'
}
Returns:
domain_vo (object)
"""
# Create Domain
domain_vo: Domain = self.domain_mgr.create_domain(params)
# Create domain secret
domain_secret_mgr: DomainSecretManager = self._get_domain_secret_manager()
domain_secret_mgr.create_domain_secret(domain_vo.domain_id)
return domain_vo
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def update(self, params):
""" Update domain
Args:
params (dict): {
'domain_id': 'str',
'config': 'dict',
'tags': 'list'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.update_domain(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def delete(self, params):
""" Delete domain
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
None
"""
self.domain_mgr.delete_domain(params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def enable(self, params):
""" Enable domain
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.enable_domain(params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def disable(self, params):
""" Disable domain
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.disable_domain(params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
def get(self, params):
""" Disable domain
Args:
params (dict): {
'domain_id': 'str',
'only': 'list'
}
Returns:
domain_vo (object)
"""
return self.domain_mgr.get_domain(params['domain_id'], params.get('only'))
@transaction(append_meta={'auth.scope': 'SYSTEM'})
@check_required(['domain_id'])
def get_public_key(self, params):
""" Get domain's public key for authentication
Args:
params (dict): {
'domain_id': 'str'
}
Returns:
result (dict): {
'pub_jwk': 'str',
'domain_id': 'str'
}
"""
domain_id = params['domain_id']
domain_secret_mgr: DomainSecretManager = self._get_domain_secret_manager()
pub_jwk = domain_secret_mgr.get_domain_public_key(domain_id=domain_id)
return {
'pub_jwk': pub_jwk,
'domain_id': domain_id
}
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@append_query_filter(['domain_id', 'name'])
@change_tag_filter('tags')
@append_keyword_filter(['domain_id', 'name'])
def list(self, params):
""" List api keys
Args:
params (dict): {
'domain_id': 'str',
'name': 'str',
'query': 'dict (spaceone.api.core.v1.Query)'
}
Returns:
results (list): 'list of domain_vo'
total_count (int)
"""
query = params.get('query', {})
return self.domain_mgr.list_domains(query)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query'])
@change_tag_filter('tags')
@append_keyword_filter(['domain_id', 'name'])
def stat(self, params):
"""
Args:
params (dict): {
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list): 'list of statistics data'
total_count (int)
"""
query = params.get('query', {})
return self.domain_mgr.stat_domains(query)
def _get_domain_secret_manager(self):
return self.locator.get_manager('DomainSecretManager')
|
the-stack_0_5270 | #!/usr/bin/env python
# -*- coding: utf-8
# Functions dealing with image cropping
import logging
import numpy as np
from .image import Image, zeros_like
logger = logging.getLogger(__name__)
class BoundingBox(object):
def __init__(self, xmin=None, xmax=None, ymin=None, ymax=None, zmin=None, zmax=None):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.zmin = zmin
self.zmax = zmax
def get_minmax(self, img=None):
"""
Get voxel-based bounding box from coordinates. Replaces '-1' with max dim along each axis, '-2' with max dim
minus 1, etc.
:param img: Image object to get dimensions
:return:
"""
def _get_min_value(input):
if input is None:
return 0
else:
return input
def _get_max_value(input, dim):
# If empty, return dim+1 (corresponds to the maximum of the given dimension, e.g. nx)
if input is None:
return dim + 1
# If negative sign, return dim+1 if -1, dim if -2, dim-1 if -3, etc.
elif np.sign(input) == -1:
return input + dim + 1
# If user specified a non-negative value, use that
else:
return input
xyz_to_num = {'x': 0, 'y': 1, 'z': 2}
bbox_voxel = BoundingBox()
for attr, value in self.__dict__.items():
if attr[-3:] == 'min':
bbox_voxel.__setattr__(attr, _get_min_value(self.__getattribute__(attr)))
elif attr[-3:] == 'max':
bbox_voxel.__setattr__(attr, _get_max_value(self.__getattribute__(attr), img.dim[xyz_to_num[attr[0]]]))
else:
raise Exception(ValueError)
return bbox_voxel
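
# Example of the semantics above (hypothetical dimensions): for an image with nx=100,
# xmin=None resolves to 0, xmax=None to 101, xmax=-1 to 100 and xmax=-2 to 99.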
class ImageCropper(object):
def __init__(self, img_in, mask=None, bbox=BoundingBox(), ref=None):
"""
:param img_in:
:param mask:
:param bbox: BoundingBox object with min and max values for each dimension, used for cropping.
:param ref:
"""
self.img_in = img_in
self.mask = mask
self.bbox = bbox
self.ref = ref
def crop(self, background=None):
"""
Crop image (change dimension)
:param background: int: If set, the output image will not be cropped. Instead, voxels outside the bounding
box will be set to the value specified by this parameter.
:return Image: img_out
"""
bbox = self.bbox
logger.info("Bounding box: x=[{}, {}], y=[{}, {}], z=[{}, {}]"
.format(bbox.xmin, bbox.xmax+1, bbox.ymin, bbox.ymax+1, bbox.zmin, bbox.zmax+1))
# Crop the image
if background is None:
logger.info("Cropping the image...")
data_crop = self.img_in.data[bbox.xmin:bbox.xmax+1, bbox.ymin:bbox.ymax+1, bbox.zmin:bbox.zmax+1]
img_out = Image(param=data_crop, hdr=self.img_in.hdr)
# adapt the origin in the sform and qform matrix
new_origin = np.dot(img_out.hdr.get_qform(), [bbox.xmin, bbox.ymin, bbox.zmin, 1])
img_out.hdr.structarr['qoffset_x'] = new_origin[0]
img_out.hdr.structarr['qoffset_y'] = new_origin[1]
img_out.hdr.structarr['qoffset_z'] = new_origin[2]
img_out.hdr.structarr['srow_x'][-1] = new_origin[0]
img_out.hdr.structarr['srow_y'][-1] = new_origin[1]
img_out.hdr.structarr['srow_z'][-1] = new_origin[2]
# Set voxels outside the bbox to the value 'background'
else:
logger.info("Setting voxels outside the bounding box to: {}".format(background))
img_out = self.img_in.copy()
img_out.data[:] = background
img_out.data[bbox.xmin:bbox.xmax+1, bbox.ymin:bbox.ymax+1, bbox.zmin:bbox.zmax+1] = \
self.img_in.data[bbox.xmin:bbox.xmax+1, bbox.ymin:bbox.ymax+1, bbox.zmin:bbox.zmax+1]
return img_out
def get_bbox_from_minmax(self, bbox=None):
"""
Get voxel bounding box from xmin, xmax, ymin, ymax, zmin, zmax user input
"""
self.bbox = bbox.get_minmax(img=self.img_in)
def get_bbox_from_mask(self, img_mask):
"""
Get bounding box from input binary mask, by looking at min/max values of the binary object in each dimension.
"""
data_nonzero = np.nonzero(img_mask.data)
# find min and max boundaries of the mask
dim = len(data_nonzero)
self.bbox.xmin, self.bbox.ymin, self.bbox.zmin = [min(data_nonzero[i]) for i in range(dim)]
self.bbox.xmax, self.bbox.ymax, self.bbox.zmax = [max(data_nonzero[i]) for i in range(dim)]
def get_bbox_from_ref(self, img_ref):
"""
Get bounding box from input reference image, by looking at min/max indices in each dimension.
img_ref and self.img_in should have the same dimensions.
"""
from spinalcordtoolbox.resampling import resample_nib
# Check that img_ref has the same length as img_in
if not len(img_ref.data.shape) == len(self.img_in.data.shape):
logger.error("Inconsistent dimensions: n_dim(img_ref)={}; n_dim(img_in)={}"
.format(len(img_ref.data.shape), len(self.img_in.data.shape)))
raise Exception(ValueError)
# Fill reference data with ones
img_ref.data[:] = 1
# Resample new image (in reference coordinates) into input image
img_ref_r = resample_nib(img_ref, image_dest=self.img_in, interpolation='nn', mode='constant')
# img_ref_r.save('test.nii') # for debug
# Get bbox from this resampled mask
self.get_bbox_from_mask(img_ref_r)
def get_bbox_from_gui(self):
"""
Launch a GUI. The medial sagittal plane of the image is shown. User selects two points: top-left and bottom-
right of the cropping window.
Note: There is no cropping along the right-left direction.
:return:
"""
from spinalcordtoolbox.gui import base
from spinalcordtoolbox.gui.sagittal import launch_sagittal_dialog
# Change orientation to SAL (for displaying sagittal view in the GUI)
native_orientation = self.img_in.orientation
self.img_in.change_orientation('SAL')
# Launch GUI
params = base.AnatomicalParams()
params.vertebraes = [1, 2] # TODO: Have user draw a sliding rectangle instead (more intuitive)
params.subtitle = "Click on the top-left (Label 1) and bottom-right (Label 2) of the image to select your " \
"cropping window."
img_labels = zeros_like(self.img_in)
launch_sagittal_dialog(self.img_in, img_labels, params)
# Extract coordinates
img_labels.change_orientation(native_orientation)
cropping_coord = img_labels.getNonZeroCoordinates(sorting='value')
# Since there is no cropping along the R-L direction, xmin/xmax are based on image dimension
self.bbox.xmin, self.bbox.ymin, self.bbox.zmin = (
0,
min(cropping_coord[0].y, cropping_coord[1].y),
min(cropping_coord[0].z, cropping_coord[1].z),
)
self.bbox.xmax, self.bbox.ymax, self.bbox.zmax = (
img_labels.dim[0],
max(cropping_coord[0].y, cropping_coord[1].y),
max(cropping_coord[0].z, cropping_coord[1].z),
)
# Put back input image in native orientation
self.img_in.change_orientation(native_orientation)
|
the-stack_0_5272 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import time
from google.cloud.environment_vars import CREDENTIALS as TEST_CREDENTIALS
# From shell environ. May be None.
CREDENTIALS = os.getenv(TEST_CREDENTIALS)
ENVIRON_ERROR_MSG = """\
To run the system tests, you need to set some environment variables.
Please check the CONTRIBUTING guide for instructions.
"""
class EmulatorCreds(object):
"""A mock credential object.
Used to avoid unnecessary token refreshing or reliance on the network
while an emulator is running.
"""
@staticmethod
def create_scoped_required():
return False
def check_environ():
err_msg = None
if CREDENTIALS is None:
err_msg = '\nMissing variables: ' + TEST_CREDENTIALS
elif not os.path.isfile(CREDENTIALS):
err_msg = '\nThe %s path %r is not a file.' % (TEST_CREDENTIALS,
CREDENTIALS)
if err_msg is not None:
msg = ENVIRON_ERROR_MSG + err_msg
print(msg, file=sys.stderr)
sys.exit(1)
def unique_resource_id(delimiter='_'):
"""A unique identifier for a resource.
Intended to help locate resources created in particular
testing environments and at particular times.
"""
build_id = os.getenv('TRAVIS_BUILD_ID', '')
if build_id == '':
return '%s%d' % (delimiter, 1000 * time.time())
else:
return '%s%s%s%d' % (delimiter, build_id,
delimiter, time.time())
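
# Example output (illustrative values): without TRAVIS_BUILD_ID this returns something
# like '_1612345678901' (milliseconds); with TRAVIS_BUILD_ID=987 it returns '_987_1612345678'.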
|
the-stack_0_5273 | # Copyright (c) Chris Choy ([email protected]) and Wei Dong ([email protected])
#
# Please cite the following papers if you use any part of the code.
# - Christopher Choy, Wei Dong, Vladlen Koltun, Deep Global Registration, CVPR 2020
# - Christopher Choy, Jaesik Park, Vladlen Koltun, Fully Convolutional Geometric Features, ICCV 2019
# - Christopher Choy, JunYoung Gwak, Silvio Savarese, 4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks, CVPR 2019
import copy
import numpy as np
import math
import open3d as o3d
from submodule.DeepGlobalRegistration.core.knn import find_knn_cpu
def make_open3d_point_cloud(xyz, color=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if color is not None:
if len(color) != len(xyz):
color = np.tile(color, (len(xyz), 1))
pcd.colors = o3d.utility.Vector3dVector(color)
return pcd
def make_open3d_feature(data, dim, npts):
feature = o3d.registration.Feature()
feature.resize(dim, npts)
feature.data = data.cpu().numpy().astype('d').transpose()
return feature
def make_open3d_feature_from_numpy(data):
assert isinstance(data, np.ndarray)
assert data.ndim == 2
feature = o3d.registration.Feature()
feature.resize(data.shape[1], data.shape[0])
feature.data = data.astype('d').transpose()
return feature
def pointcloud_to_spheres(pcd, voxel_size, color, sphere_size=0.6):
spheres = o3d.geometry.TriangleMesh()
s = o3d.geometry.TriangleMesh.create_sphere(radius=voxel_size * sphere_size)
s.compute_vertex_normals()
s.paint_uniform_color(color)
if isinstance(pcd, o3d.geometry.PointCloud):
pcd = np.array(pcd.points)
for i, p in enumerate(pcd):
si = copy.deepcopy(s)
trans = np.identity(4)
trans[:3, 3] = p
si.transform(trans)
# si.paint_uniform_color(pcd.colors[i])
spheres += si
return spheres
def prepare_single_pointcloud(pcd, voxel_size):
pcd.estimate_normals(o3d.KDTreeSearchParamHybrid(radius=voxel_size * 2.0, max_nn=30))
return pcd
def prepare_pointcloud(filename, voxel_size):
pcd = o3d.io.read_point_cloud(filename)
T = get_random_transformation(pcd)
pcd.transform(T)
pcd_down = pcd.voxel_down_sample(voxel_size)
return pcd_down, T
def compute_overlap_ratio(pcd0, pcd1, trans, voxel_size):
pcd0_down = pcd0.voxel_down_sample(voxel_size)
pcd1_down = pcd1.voxel_down_sample(voxel_size)
matching01 = get_matching_indices(pcd0_down, pcd1_down, trans, voxel_size, 1)
matching10 = get_matching_indices(pcd1_down, pcd0_down, np.linalg.inv(trans),
voxel_size, 1)
overlap0 = len(matching01) / len(pcd0_down.points)
overlap1 = len(matching10) / len(pcd1_down.points)
return max(overlap0, overlap1)
def get_matching_indices(source, target, trans, search_voxel_size, K=None):
source_copy = copy.deepcopy(source)
target_copy = copy.deepcopy(target)
source_copy.transform(trans)
pcd_tree = o3d.geometry.KDTreeFlann(target_copy)
match_inds = []
for i, point in enumerate(source_copy.points):
[_, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
match_inds.append((i, j))
return match_inds
def evaluate_feature(pcd0, pcd1, feat0, feat1, trans_gth, search_voxel_size):
match_inds = get_matching_indices(pcd0, pcd1, trans_gth, search_voxel_size)
pcd_tree = o3d.geometry.KDTreeFlann(feat1)
dist = []
for ind in match_inds:
k, idx, _ = pcd_tree.search_knn_vector_xd(feat0.data[:, ind[0]], 1)
dist.append(
np.clip(np.power(pcd1.points[ind[1]] - pcd1.points[idx[0]], 2),
a_min=0.0,
a_max=1.0))
return np.mean(dist)
def valid_feat_ratio(pcd0, pcd1, feat0, feat1, trans_gth, thresh=0.1):
pcd0_copy = copy.deepcopy(pcd0)
pcd0_copy.transform(trans_gth)
inds = find_knn_cpu(feat0, feat1)
dist = np.sqrt(((np.array(pcd0_copy.points) - np.array(pcd1.points)[inds])**2).sum(1))
return np.mean(dist < thresh)
def evaluate_feature_3dmatch(pcd0, pcd1, feat0, feat1, trans_gth, inlier_thresh=0.1):
r"""Return the hit ratio (ratio of inlier correspondences and all correspondences).
    inlier_thresh is the inlier threshold in meters.
"""
if len(pcd0.points) < len(pcd1.points):
hit = valid_feat_ratio(pcd0, pcd1, feat0, feat1, trans_gth, inlier_thresh)
else:
hit = valid_feat_ratio(pcd1, pcd0, feat1, feat0, np.linalg.inv(trans_gth),
inlier_thresh)
return hit
def get_matching_matrix(source, target, trans, voxel_size, debug_mode):
source_copy = copy.deepcopy(source)
target_copy = copy.deepcopy(target)
source_copy.transform(trans)
pcd_tree = o3d.geometry.KDTreeFlann(target_copy)
matching_matrix = np.zeros((len(source_copy.points), len(target_copy.points)))
for i, point in enumerate(source_copy.points):
[k, idx, _] = pcd_tree.search_radius_vector_3d(point, voxel_size * 1.5)
if k >= 1:
            matching_matrix[i, idx[0]] = 1  # TODO: only the closest?
return matching_matrix
def get_random_transformation(pcd_input):
def rot_x(x):
out = np.zeros((3, 3))
c = math.cos(x)
s = math.sin(x)
out[0, 0] = 1
out[1, 1] = c
out[1, 2] = -s
out[2, 1] = s
out[2, 2] = c
return out
def rot_y(x):
out = np.zeros((3, 3))
c = math.cos(x)
s = math.sin(x)
out[0, 0] = c
out[0, 2] = s
out[1, 1] = 1
out[2, 0] = -s
out[2, 2] = c
return out
def rot_z(x):
out = np.zeros((3, 3))
c = math.cos(x)
s = math.sin(x)
out[0, 0] = c
out[0, 1] = -s
out[1, 0] = s
out[1, 1] = c
out[2, 2] = 1
return out
pcd_output = copy.deepcopy(pcd_input)
mean = np.mean(np.asarray(pcd_output.points), axis=0).transpose()
xyz = np.random.uniform(0, 2 * math.pi, 3)
R = np.dot(np.dot(rot_x(xyz[0]), rot_y(xyz[1])), rot_z(xyz[2]))
T = np.zeros((4, 4))
T[:3, :3] = R
T[:3, 3] = np.dot(-R, mean)
T[3, 3] = 1
return T
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
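
# Small usage sketch (synthetic data; an identity transform is assumed for illustration).
def _demo_overlap_ratio(voxel_size=0.05):
    xyz = np.random.rand(2000, 3)
    pcd0 = make_open3d_point_cloud(xyz, color=[1, 0.706, 0])
    pcd1 = make_open3d_point_cloud(xyz + np.array([0.02, 0.0, 0.0]), color=[0, 0.651, 0.929])
    # With a small offset and identity alignment the two clouds should overlap almost fully.
    return compute_overlap_ratio(pcd0, pcd1, np.identity(4), voxel_size)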
|
the-stack_0_5274 | import sys
import socket
import threading
import time
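# Expected invocation (the script name is illustrative):
#   python rtt_client.py <server_ip> <server_port> <concurrent_users> <test_label>
# e.g. python rtt_client.py 192.168.1.10 5000 8 lan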
server_ip_address = sys.argv[1]
server_port = int(sys.argv[2])
users = int(sys.argv[3])
type_of_test = sys.argv[4]
tests = 32
output_array = []
output = open(type_of_test+"_"+str(tests)+"_"+str(users)+".txt", 'w')
def writer(output_array):
for line in range(len(output_array)):
output.writelines(output_array[line]+"\n")
def worker(test, user, server_ip_address, server_port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
global output_array
try:
start = time.perf_counter()
s.connect((server_ip_address, server_port))
# Increments ms
s.close()
RTT = (time.perf_counter() - start) * 1000
output_array.append(str(RTT))
except:
output_array.append("failed")
if len(output_array)==tests*users:
writer(output_array)
return
for test in range(tests):
threads = []
for user in range(users):
threads.append(threading.Thread(target = worker, args=(test, user, server_ip_address, server_port,)))
for thrd in range(len(threads)):
threads[thrd].start()
for thrd in range(len(threads)):
threads[thrd].join()
|
the-stack_0_5277 | """This module contains the general information for FabricFcEstcCloud ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FabricFcEstcCloudConsts:
pass
class FabricFcEstcCloud(ManagedObject):
"""This is FabricFcEstcCloud class."""
consts = FabricFcEstcCloudConsts()
naming_props = set([])
mo_meta = MoMeta("FabricFcEstcCloud", "fabricFcEstcCloud", "fc-estc", VersionMeta.Version141i, "InputOutput", 0x1f, [], ["admin", "ext-san-config", "ext-san-policy"], [u'fabricEp'], [u'fabricBHVlan', u'fabricFcEstc', u'fabricFcZoneProfile', u'fabricVsan', u'statsThresholdPolicy'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "FabricFcEstcCloud", parent_mo_or_dn, **kwargs)
|
the-stack_0_5279 | """
File-operation helpers:
    ● recursively collect all file paths under a directory into a list
    ● recursively remove empty directories
    ● delete a batch of files
"""
import os
def get_all_files(targetDir):
"""
递归读取所有文件目录形成列表
:param targetDir:
:return:
"""
files = []
listFiles = os.listdir(targetDir)
for i in range(0, len(listFiles)):
path = os.path.join(targetDir, listFiles[i])
if os.path.isdir(path):
files.extend(get_all_files(path))
elif os.path.isfile(path):
files.append(path)
return files
def remove_empty_dir(path):
"""
递归删除空目录
:param path:
:return:
"""
for root, dirs, files in os.walk(path, topdown=False):
if not files and not dirs:
os.rmdir(root)
def delete_files(delete_list: list):
"""
批量删除文件
:param delete_list:
:return:
"""
for file_path in delete_list:
try:
os.remove(file_path)
        except FileNotFoundError:
pass
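
if __name__ == '__main__':
    # Minimal usage sketch; '/tmp/example_dir' is a placeholder path.
    found = get_all_files('/tmp/example_dir')
    print('found %d files' % len(found))
    delete_files(found)                    # remove every file that was found
    remove_empty_dir('/tmp/example_dir')   # then prune the now-empty directories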
|
the-stack_0_5280 | #!/usr/bin/env python
# coding=utf-8
# Copyright (C) 2018 Copter Express Technologies
#
# Author: Oleg Kalachev <[email protected]>
#
# Distributed under MIT License (available at https://opensource.org/licenses/MIT).
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import math
import subprocess
import re
from collections import OrderedDict
import traceback
from threading import Event
import numpy
import rospy
import tf2_ros
import tf2_geometry_msgs
from pymavlink import mavutil
from std_srvs.srv import Trigger
from sensor_msgs.msg import BatteryState, Image, CameraInfo, NavSatFix, Imu, Range
from mavros_msgs.msg import State, OpticalFlowRad, Mavlink
from mavros_msgs.srv import ParamGet
from geometry_msgs.msg import PoseStamped, TwistStamped, PoseWithCovarianceStamped, Vector3Stamped
from visualization_msgs.msg import MarkerArray as VisualizationMarkerArray
import tf.transformations as t
from aruco_pose.msg import MarkerArray
from mavros import mavlink
# TODO: check attitude is present
# TODO: disk free space
# TODO: map, base_link, body
# TODO: rc service
# TODO: perform commander check, ekf2 status on PX4
# TODO: check if FCU params setter succeed
# TODO: selfcheck ROS service (with blacklists for checks)
rospy.init_node('selfcheck')
tf_buffer = tf2_ros.Buffer()
tf_listener = tf2_ros.TransformListener(tf_buffer)
failures = []
infos = []
current_check = None
def failure(text, *args):
msg = text % args
rospy.logwarn('%s: %s', current_check, msg)
failures.append(msg)
def info(text, *args):
msg = text % args
rospy.loginfo('%s: %s', current_check, msg)
infos.append(msg)
def check(name):
def inner(fn):
def wrapper(*args, **kwargs):
failures[:] = []
infos[:] = []
global current_check
current_check = name
try:
fn(*args, **kwargs)
except Exception as e:
traceback.print_exc()
rospy.logerr('%s: exception occurred', name)
return
if not failures and not infos:
rospy.loginfo('%s: OK', name)
return wrapper
return inner
param_get = rospy.ServiceProxy('mavros/param/get', ParamGet)
def get_param(name):
try:
res = param_get(param_id=name)
except rospy.ServiceException as e:
failure('%s: %s', name, str(e))
return None
if not res.success:
failure('unable to retrieve PX4 parameter %s', name)
else:
if res.value.integer != 0:
return res.value.integer
return res.value.real
recv_event = Event()
link = mavutil.mavlink.MAVLink('', 255, 1)
mavlink_pub = rospy.Publisher('mavlink/to', Mavlink, queue_size=1)
mavlink_recv = ''
def mavlink_message_handler(msg):
global mavlink_recv
if msg.msgid == 126:
mav_bytes_msg = mavlink.convert_to_bytes(msg)
mav_msg = link.decode(mav_bytes_msg)
mavlink_recv += ''.join(chr(x) for x in mav_msg.data[:mav_msg.count])
if 'nsh>' in mavlink_recv:
# Remove the last line, including newline before prompt
mavlink_recv = mavlink_recv[:mavlink_recv.find('nsh>') - 1]
recv_event.set()
mavlink_sub = rospy.Subscriber('mavlink/from', Mavlink, mavlink_message_handler)
# FIXME: things still break unless we sleep here (the publisher likely needs time to connect)
rospy.sleep(0.5)
def mavlink_exec(cmd, timeout=3.0):
global mavlink_recv
mavlink_recv = ''
recv_event.clear()
if not cmd.endswith('\n'):
cmd += '\n'
msg = mavutil.mavlink.MAVLink_serial_control_message(
device=mavutil.mavlink.SERIAL_CONTROL_DEV_SHELL,
flags=mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND | mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
mavutil.mavlink.SERIAL_CONTROL_FLAG_MULTI,
timeout=3,
baudrate=0,
count=len(cmd),
data=map(ord, cmd.ljust(70, '\0')))
msg.pack(link)
ros_msg = mavlink.convert_to_rosmsg(msg)
mavlink_pub.publish(ros_msg)
recv_event.wait(timeout)
return mavlink_recv
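# Example: mavlink_exec('ver all') returns the nsh output of the command as one string
# (an empty string if nothing came back before the timeout); check_fcu() below parses it.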
BOARD_ROTATIONS = {
0: 'no rotation',
1: 'yaw 45°',
2: 'yaw 90°',
3: 'yaw 135°',
4: 'yaw 180°',
5: 'yaw 225°',
6: 'yaw 270°',
7: 'yaw 315°',
8: 'roll 180°',
9: 'roll 180°, yaw 45°',
10: 'roll 180°, yaw 90°',
11: 'roll 180°, yaw 135°',
12: 'pitch 180°',
13: 'roll 180°, yaw 225°',
14: 'roll 180°, yaw 270°',
15: 'roll 180°, yaw 315°',
16: 'roll 90°',
17: 'roll 90°, yaw 45°',
18: 'roll 90°, yaw 90°',
19: 'roll 90°, yaw 135°',
20: 'roll 270°',
21: 'roll 270°, yaw 45°',
22: 'roll 270°, yaw 90°',
23: 'roll 270°, yaw 135°',
24: 'pitch 90°',
25: 'pitch 270°',
26: 'roll 270°, yaw 270°',
27: 'roll 180°, pitch 270°',
    28: 'pitch 90°, yaw 180°',
29: 'pitch 90°, roll 90°',
30: 'yaw 293°, pitch 68°, roll 90°',
31: 'pitch 90°, roll 270°',
32: 'pitch 9°, yaw 180°',
33: 'pitch 45°',
34: 'pitch 315°',
}
@check('FCU')
def check_fcu():
try:
state = rospy.wait_for_message('mavros/state', State, timeout=3)
if not state.connected:
failure('no connection to the FCU (check wiring)')
return
# Make sure the console is available to us
mavlink_exec('\n')
version_str = mavlink_exec('ver all')
if version_str == '':
info('no version data available from SITL')
r = re.compile(r'^FW (git tag|version): (v?\d\.\d\.\d.*)$')
is_clover_firmware = False
for ver_line in version_str.split('\n'):
match = r.search(ver_line)
if match is not None:
field, version = match.groups()
info('firmware %s: %s' % (field, version))
if 'clover' in version or 'clever' in version:
is_clover_firmware = True
if not is_clover_firmware:
failure('not running Clover PX4 firmware, https://clover.coex.tech/firmware')
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
info('selected estimator: LPE')
fuse = get_param('LPE_FUSION')
if fuse & (1 << 4):
info('LPE_FUSION: land detector fusion is enabled')
else:
info('LPE_FUSION: land detector fusion is disabled')
if fuse & (1 << 7):
info('LPE_FUSION: barometer fusion is enabled')
else:
info('LPE_FUSION: barometer fusion is disabled')
mag_yaw_w = get_param('ATT_W_MAG')
if mag_yaw_w == 0:
info('magnetometer weight (ATT_W_MAG) is zero, better for indoor flights')
else:
info('magnetometer weight (ATT_W_MAG) is non-zero (%.2f), better for outdoor flights', mag_yaw_w)
elif est == 2:
info('selected estimator: EKF2')
else:
failure('unknown selected estimator: %s', est)
rot = get_param('SENS_BOARD_ROT')
if rot is not None:
try:
info('board rotation: %s', BOARD_ROTATIONS[rot])
except KeyError:
failure('unknown board rotation %s', rot)
cbrk_usb_chk = get_param('CBRK_USB_CHK')
if cbrk_usb_chk != 197848:
failure('set parameter CBRK_USB_CHK to 197848 for flying with USB connected')
try:
battery = rospy.wait_for_message('mavros/battery', BatteryState, timeout=3)
if not battery.cell_voltage:
failure('cell voltage is not available, https://clover.coex.tech/power')
else:
cell = battery.cell_voltage[0]
if cell > 4.3 or cell < 3.0:
failure('incorrect cell voltage: %.2f V, https://clover.coex.tech/power', cell)
elif cell < 3.7:
failure('critically low cell voltage: %.2f V, recharge battery', cell)
except rospy.ROSException:
failure('no battery state')
except rospy.ROSException:
failure('no MAVROS state (check wiring)')
def describe_direction(v):
if v.x > 0.9:
return 'forward'
elif v.x < - 0.9:
return 'backward'
elif v.y > 0.9:
return 'left'
elif v.y < -0.9:
return 'right'
elif v.z > 0.9:
return 'upward'
elif v.z < -0.9:
return 'downward'
else:
return None
def check_camera(name):
try:
img = rospy.wait_for_message(name + '/image_raw', Image, timeout=1)
except rospy.ROSException:
failure('%s: no images (is the camera connected properly?)', name)
return
try:
camera_info = rospy.wait_for_message(name + '/camera_info', CameraInfo, timeout=1)
except rospy.ROSException:
failure('%s: no calibration info', name)
return
if img.width != camera_info.width:
failure('%s: calibration width doesn\'t match image width (%d != %d)', name, camera_info.width, img.width)
if img.height != camera_info.height:
        failure('%s: calibration height doesn\'t match image height (%d != %d)', name, camera_info.height, img.height)
try:
optical = Vector3Stamped()
optical.header.frame_id = img.header.frame_id
optical.vector.z = 1
cable = Vector3Stamped()
cable.header.frame_id = img.header.frame_id
cable.vector.y = 1
optical = describe_direction(tf_buffer.transform(optical, 'base_link').vector)
cable = describe_direction(tf_buffer.transform(cable, 'base_link').vector)
if not optical or not cable:
info('%s: custom camera orientation detected', name)
else:
info('camera is oriented %s, cable from camera goes %s', optical, cable)
except tf2_ros.TransformException:
failure('cannot transform from base_link to camera frame')
@check('Main camera')
def check_main_camera():
check_camera('main_camera')
def is_process_running(binary, exact=False, full=False):
try:
args = ['pgrep']
if exact:
args.append('-x') # match exactly with the command name
if full:
args.append('-f') # use full process name to match
args.append(binary)
subprocess.check_output(args)
return True
except subprocess.CalledProcessError:
return False
@check('ArUco markers')
def check_aruco():
if is_process_running('aruco_detect', full=True):
try:
info('aruco_detect/length = %g m', rospy.get_param('aruco_detect/length'))
except KeyError:
failure('aruco_detect/length parameter is not set')
known_tilt = rospy.get_param('aruco_detect/known_tilt', '')
if known_tilt == 'map':
known_tilt += ' (ALL markers are on the floor)'
elif known_tilt == 'map_flipped':
known_tilt += ' (ALL markers are on the ceiling)'
        info('aruco_detect/known_tilt = %s', known_tilt)
try:
rospy.wait_for_message('aruco_detect/markers', MarkerArray, timeout=1)
except rospy.ROSException:
failure('no markers detection')
return
else:
info('aruco_detect is not running')
return
if is_process_running('aruco_map', full=True):
known_tilt = rospy.get_param('aruco_map/known_tilt', '')
if known_tilt == 'map':
known_tilt += ' (marker\'s map is on the floor)'
elif known_tilt == 'map_flipped':
known_tilt += ' (marker\'s map is on the ceiling)'
info('aruco_map/known_tilt = %s', known_tilt)
try:
visualization = rospy.wait_for_message('aruco_map/visualization', VisualizationMarkerArray, timeout=1)
info('map has %s markers', len(visualization.markers))
except:
failure('cannot read aruco_map/visualization topic')
try:
rospy.wait_for_message('aruco_map/pose', PoseWithCovarianceStamped, timeout=1)
except rospy.ROSException:
failure('no map detection')
else:
info('aruco_map is not running')
@check('Vision position estimate')
def check_vpe():
vis = None
try:
vis = rospy.wait_for_message('mavros/vision_pose/pose', PoseStamped, timeout=1)
except rospy.ROSException:
try:
vis = rospy.wait_for_message('mavros/mocap/pose', PoseStamped, timeout=1)
except rospy.ROSException:
failure('no VPE or MoCap messages')
# check if vpe_publisher is running
try:
subprocess.check_output(['pgrep', '-x', 'vpe_publisher'])
except subprocess.CalledProcessError:
return # it's not running, skip following checks
# check PX4 settings
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
ext_yaw = get_param('ATT_EXT_HDG_M')
if ext_yaw != 1:
failure('vision yaw is disabled, change ATT_EXT_HDG_M parameter')
vision_yaw_w = get_param('ATT_W_EXT_HDG')
if vision_yaw_w == 0:
failure('vision yaw weight is zero, change ATT_W_EXT_HDG parameter')
else:
info('Vision yaw weight: %.2f', vision_yaw_w)
fuse = get_param('LPE_FUSION')
if not fuse & (1 << 2):
failure('vision position fusion is disabled, change LPE_FUSION parameter')
delay = get_param('LPE_VIS_DELAY')
if delay != 0:
failure('LPE_VIS_DELAY parameter is %s, but it should be zero', delay)
info('LPE_VIS_XY is %.2f m, LPE_VIS_Z is %.2f m', get_param('LPE_VIS_XY'), get_param('LPE_VIS_Z'))
elif est == 2:
fuse = get_param('EKF2_AID_MASK')
if not fuse & (1 << 3):
failure('vision position fusion is disabled, change EKF2_AID_MASK parameter')
if not fuse & (1 << 4):
failure('vision yaw fusion is disabled, change EKF2_AID_MASK parameter')
delay = get_param('EKF2_EV_DELAY')
if delay != 0:
failure('EKF2_EV_DELAY is %.2f, but it should be zero', delay)
info('EKF2_EVA_NOISE is %.3f, EKF2_EVP_NOISE is %.3f',
get_param('EKF2_EVA_NOISE'),
get_param('EKF2_EVP_NOISE'))
if not vis:
return
# check vision pose and estimated pose inconsistency
try:
pose = rospy.wait_for_message('mavros/local_position/pose', PoseStamped, timeout=1)
except:
return
horiz = math.hypot(vis.pose.position.x - pose.pose.position.x, vis.pose.position.y - pose.pose.position.y)
if horiz > 0.5:
failure('horizontal position inconsistency: %.2f m', horiz)
vert = vis.pose.position.z - pose.pose.position.z
if abs(vert) > 0.5:
failure('vertical position inconsistency: %.2f m', vert)
op = pose.pose.orientation
ov = vis.pose.orientation
yawp, _, _ = t.euler_from_quaternion((op.x, op.y, op.z, op.w), axes='rzyx')
yawv, _, _ = t.euler_from_quaternion((ov.x, ov.y, ov.z, ov.w), axes='rzyx')
yawdiff = yawp - yawv
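    # wrap the yaw difference into [-180, 180) degrees before comparing against the limit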
    yawdiff = math.degrees((yawdiff + math.pi) % (2 * math.pi) - math.pi)
if abs(yawdiff) > 8:
failure('yaw inconsistency: %.2f deg', yawdiff)
@check('Simple offboard node')
def check_simpleoffboard():
try:
rospy.wait_for_service('navigate', timeout=3)
rospy.wait_for_service('get_telemetry', timeout=3)
rospy.wait_for_service('land', timeout=3)
except rospy.ROSException:
failure('no simple_offboard services')
@check('IMU')
def check_imu():
try:
rospy.wait_for_message('mavros/imu/data', Imu, timeout=1)
except rospy.ROSException:
failure('no IMU data (check flight controller calibration)')
@check('Local position')
def check_local_position():
try:
pose = rospy.wait_for_message('mavros/local_position/pose', PoseStamped, timeout=1)
o = pose.pose.orientation
_, pitch, roll = t.euler_from_quaternion((o.x, o.y, o.z, o.w), axes='rzyx')
MAX_ANGLE = math.radians(2)
if abs(pitch) > MAX_ANGLE:
failure('pitch is %.2f deg; place copter horizontally or redo level horizon calib',
math.degrees(pitch))
if abs(roll) > MAX_ANGLE:
failure('roll is %.2f deg; place copter horizontally or redo level horizon calib',
math.degrees(roll))
except rospy.ROSException:
failure('no local position')
@check('Velocity estimation')
def check_velocity():
try:
velocity = rospy.wait_for_message('mavros/local_position/velocity_local', TwistStamped, timeout=1)
horiz = math.hypot(velocity.twist.linear.x, velocity.twist.linear.y)
vert = velocity.twist.linear.z
if abs(horiz) > 0.1:
failure('horizontal velocity estimation is %.2f m/s; is copter staying still?' % horiz)
if abs(vert) > 0.1:
failure('vertical velocity estimation is %.2f m/s; is copter staying still?' % vert)
velocity = rospy.wait_for_message('mavros/local_position/velocity_body', TwistStamped, timeout=1)
angular = velocity.twist.angular
ANGULAR_VELOCITY_LIMIT = 0.1
        if abs(angular.x) > ANGULAR_VELOCITY_LIMIT:
            failure('roll rate estimation is %.2f rad/s (%.2f deg/s); is copter staying still?',
                    angular.x, math.degrees(angular.x))
if abs(angular.y) > ANGULAR_VELOCITY_LIMIT:
failure('pitch rate estimation is %.2f rad/s (%.2f deg/s); is copter staying still?',
angular.y, math.degrees(angular.y))
        if abs(angular.z) > ANGULAR_VELOCITY_LIMIT:
            failure('yaw rate estimation is %.2f rad/s (%.2f deg/s); is copter staying still?',
                    angular.z, math.degrees(angular.z))
except rospy.ROSException:
failure('no velocity estimation')
@check('Global position (GPS)')
def check_global_position():
try:
rospy.wait_for_message('mavros/global_position/global', NavSatFix, timeout=1)
except rospy.ROSException:
info('no global position')
@check('Optical flow')
def check_optical_flow():
# TODO:check FPS!
try:
rospy.wait_for_message('mavros/px4flow/raw/send', OpticalFlowRad, timeout=0.5)
# check PX4 settings
rot = get_param('SENS_FLOW_ROT')
if rot != 0:
failure('SENS_FLOW_ROT parameter is %s, but it should be zero', rot)
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
fuse = get_param('LPE_FUSION')
if not fuse & (1 << 1):
failure('optical flow fusion is disabled, change LPE_FUSION parameter')
            if not fuse & (1 << 6):
failure('flow gyro compensation is disabled, change LPE_FUSION parameter')
scale = get_param('LPE_FLW_SCALE')
if not numpy.isclose(scale, 1.0):
failure('LPE_FLW_SCALE parameter is %.2f, but it should be 1.0', scale)
info('LPE_FLW_QMIN is %s, LPE_FLW_R is %.4f, LPE_FLW_RR is %.4f, SENS_FLOW_MINHGT is %.3f, SENS_FLOW_MAXHGT is %.3f',
get_param('LPE_FLW_QMIN'),
get_param('LPE_FLW_R'),
get_param('LPE_FLW_RR'),
get_param('SENS_FLOW_MINHGT'),
get_param('SENS_FLOW_MAXHGT'))
elif est == 2:
fuse = get_param('EKF2_AID_MASK')
if not fuse & (1 << 1):
failure('optical flow fusion is disabled, change EKF2_AID_MASK parameter')
delay = get_param('EKF2_OF_DELAY')
if delay != 0:
failure('EKF2_OF_DELAY is %.2f, but it should be zero', delay)
info('EKF2_OF_QMIN is %s, EKF2_OF_N_MIN is %.4f, EKF2_OF_N_MAX is %.4f, SENS_FLOW_MINHGT is %.3f, SENS_FLOW_MAXHGT is %.3f',
get_param('EKF2_OF_QMIN'),
get_param('EKF2_OF_N_MIN'),
get_param('EKF2_OF_N_MAX'),
get_param('SENS_FLOW_MINHGT'),
get_param('SENS_FLOW_MAXHGT'))
except rospy.ROSException:
failure('no optical flow data (from Raspberry)')
@check('Rangefinder')
def check_rangefinder():
# TODO: check FPS!
rng = False
try:
rospy.wait_for_message('rangefinder/range', Range, timeout=4)
rng = True
except rospy.ROSException:
failure('no rangefinder data from Raspberry')
try:
rospy.wait_for_message('mavros/distance_sensor/rangefinder', Range, timeout=4)
rng = True
except rospy.ROSException:
failure('no rangefinder data from PX4')
if not rng:
return
est = get_param('SYS_MC_EST_GROUP')
if est == 1:
fuse = get_param('LPE_FUSION')
if not fuse & (1 << 5):
info('"pub agl as lpos down" in LPE_FUSION is disabled, NOT operating over flat surface')
else:
info('"pub agl as lpos down" in LPE_FUSION is enabled, operating over flat surface')
elif est == 2:
hgt = get_param('EKF2_HGT_MODE')
if hgt != 2:
info('EKF2_HGT_MODE != Range sensor, NOT operating over flat surface')
else:
info('EKF2_HGT_MODE = Range sensor, operating over flat surface')
aid = get_param('EKF2_RNG_AID')
if aid != 1:
info('EKF2_RNG_AID != 1, range sensor aiding disabled')
else:
info('EKF2_RNG_AID = 1, range sensor aiding enabled')
@check('Boot duration')
def check_boot_duration():
output = subprocess.check_output('systemd-analyze')
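    # systemd-analyze typically ends its summary with the total boot time,
    # e.g. 'Startup finished in ... = 14.256s' (illustrative output)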
r = re.compile(r'([\d\.]+)s\s*$', flags=re.MULTILINE)
duration = float(r.search(output).groups()[0])
if duration > 15:
failure('long Raspbian boot duration: %ss (systemd-analyze for analyzing)', duration)
@check('CPU usage')
def check_cpu_usage():
WHITELIST = 'nodelet',
CMD = "top -n 1 -b -i | tail -n +8 | awk '{ printf(\"%-8s\\t%-8s\\t%-8s\\n\", $1, $9, $12); }'"
output = subprocess.check_output(CMD, shell=True)
processes = output.split('\n')
for process in processes:
if not process:
continue
pid, cpu, cmd = process.split('\t')
if cmd.strip() not in WHITELIST and float(cpu) > 30:
failure('high CPU usage (%s%%) detected: %s (PID %s)',
cpu.strip(), cmd.strip(), pid.strip())
@check('clover.service')
def check_clover_service():
try:
output = subprocess.check_output('systemctl show -p ActiveState --value clover.service'.split(),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
failure('systemctl returned %s: %s', e.returncode, e.output)
return
if 'inactive' in output:
failure('service is not running, try sudo systemctl restart clover')
return
elif 'failed' in output:
failure('service failed to run, check your launch-files')
r = re.compile(r'^(.*)\[(FATAL|ERROR)\] \[\d+.\d+\]: (.*?)(\x1b(.*))?$')
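    # the regex matches rosout-style lines such as
    # '[ERROR] [1617000000.123456]: some node message' (illustrative example)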
error_count = OrderedDict()
try:
for line in open('/tmp/clover.err', 'r'):
node_error = r.search(line)
if node_error:
msg = node_error.groups()[1] + ': ' + node_error.groups()[2]
if msg in error_count:
error_count[msg] += 1
else:
error_count.update({msg: 1})
else:
error_count.update({line.strip(): 1})
for error in error_count:
if error_count[error] == 1:
failure(error)
else:
failure('%s (%d)', error, error_count[error])
except IOError as e:
failure('%s', e)
@check('Image')
def check_image():
try:
info('version: %s', open('/etc/clover_version').read().strip())
except IOError:
info('no /etc/clover_version file, not the Clover image?')
@check('Preflight status')
def check_preflight_status():
# Make sure the console is available to us
mavlink_exec('\n')
cmdr_output = mavlink_exec('commander check')
if cmdr_output == '':
failure('no data from FCU')
return
cmdr_lines = cmdr_output.split('\n')
r = re.compile(r'^(.*)(Preflight|Prearm) check: (.*)')
for line in cmdr_lines:
if 'WARN' in line:
failure(line[line.find(']') + 2:])
continue
match = r.search(line)
if match is not None:
check_status = match.groups()[2]
if check_status != 'OK':
failure(' '.join([match.groups()[1], 'check:', check_status]))
@check('Network')
def check_network():
ros_hostname = os.environ.get('ROS_HOSTNAME', '').strip()
if not ros_hostname:
failure('no ROS_HOSTNAME is set')
elif ros_hostname.endswith('.local'):
# using mdns hostname
hosts = open('/etc/hosts', 'r')
for line in hosts:
parts = line.split()
if len(parts) < 2:
continue
ip = parts.pop(0).split('.')
if ip[0] == '127': # loopback ip
if ros_hostname in parts:
break
else:
failure('not found %s in /etc/hosts, ROS will malfunction if network interfaces are down, https://clover.coex.tech/hostname', ros_hostname)
@check('RPi health')
def check_rpi_health():
# `vcgencmd get_throttled` output codes taken from
# https://github.com/raspberrypi/documentation/blob/JamesH65-patch-vcgencmd-vcdbg-docs/raspbian/applications/vcgencmd.md#get_throttled
# TODO: support more base platforms?
FLAG_UNDERVOLTAGE_NOW = 0x1
FLAG_FREQ_CAP_NOW = 0x2
FLAG_THROTTLING_NOW = 0x4
FLAG_THERMAL_LIMIT_NOW = 0x8
FLAG_UNDERVOLTAGE_OCCURRED = 0x10000
FLAG_FREQ_CAP_OCCURRED = 0x20000
FLAG_THROTTLING_OCCURRED = 0x40000
FLAG_THERMAL_LIMIT_OCCURRED = 0x80000
FLAG_DESCRIPTIONS = (
(FLAG_THROTTLING_NOW, 'system throttled to prevent damage'),
(FLAG_THROTTLING_OCCURRED, 'your system is susceptible to throttling'),
(FLAG_UNDERVOLTAGE_NOW, 'not enough power for onboard computer, flight inadvisable'),
(FLAG_UNDERVOLTAGE_OCCURRED, 'power supply cannot provide enough power'),
(FLAG_FREQ_CAP_NOW, 'CPU reached thermal limit and is throttled now'),
(FLAG_FREQ_CAP_OCCURRED, 'CPU may overheat during drone operation, consider additional cooling'),
(FLAG_THERMAL_LIMIT_NOW, 'CPU reached soft thermal limit, frequency reduced'),
(FLAG_THERMAL_LIMIT_OCCURRED, 'CPU may reach soft thermal limit, consider additional cooling'),
)
try:
# vcgencmd outputs a single string in a form of
# <parameter>=<value>
# In case of `get_throttled`, <value> is a hexadecimal number
# with some of the FLAGs OR'ed together
output = subprocess.check_output(['vcgencmd', 'get_throttled'])
except OSError:
failure('could not call vcgencmd binary; not a Raspberry Pi?')
return
throttle_mask = int(output.split('=')[1], base=16)
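    # illustrative example: output 'throttled=0x50005' decodes to mask 0x50005, i.e.
    # under-voltage now/occurred plus throttling now/occurred per the flags above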
for flag_description in FLAG_DESCRIPTIONS:
if throttle_mask & flag_description[0]:
failure(flag_description[1])
@check('Board')
def check_board():
try:
info('%s', open('/proc/device-tree/model').readline())
except IOError:
info('could not open /proc/device-tree/model, not a Raspberry Pi?')
def selfcheck():
check_image()
check_board()
check_clover_service()
check_network()
check_fcu()
check_imu()
check_local_position()
check_velocity()
check_global_position()
check_preflight_status()
check_main_camera()
check_aruco()
check_simpleoffboard()
check_optical_flow()
check_vpe()
check_rangefinder()
check_rpi_health()
check_cpu_usage()
check_boot_duration()
if __name__ == '__main__':
rospy.loginfo('Performing selfcheck...')
selfcheck()
|
the-stack_0_5281 | #!/usr/bin/env python
# Copyright 2012 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Uses different APIs to touch a file."""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(
__file__.decode(sys.getfilesystemencoding())))
def main():
print('Only look if a file exists but do not open it.')
assert len(sys.argv) == 2
path = os.path.join(BASE_DIR, 'test_file.txt')
command = sys.argv[1]
if command == 'access':
return not os.access(path, os.R_OK)
if command == 'isfile':
return not os.path.isfile(path)
if command == 'stat':
return not os.stat(path).st_size
return 1
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_5282 | """
Copyright (c) 2019-2022, Zihao Ding/Carnegie Mellon University
All rights reserved.
********************************************************************
Project: imgprocess.py
MODULE: util
Author: Zihao Ding, Carnegie Mellon University
Brief:
-------------
Image processing func
Date:
-------------
2022/03/17 ZD 1.0 public version
"""
from copy import deepcopy
from math import exp, floor, log10, ceil
import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy
from PIL import Image
# Adaptive histogram equalization
def clahe(img, limit=10, tileGridSize=(10, 10)):
'''
Contrast Limited Adaptive Histogram Equalization \\
Refer to https://docs.opencv.org/3.4/d5/daf/tutorial_py_histogram_equalization.html
Input:
------
img: images, 3darray, shape: (n, h, w)
limit: clipLimit, int, default: 10
tileGridSize: grid size, (int, int), default: (10, 10)
Output:
------
img: images after applying clahe, 3darray, shape: (n, h, w)
'''
clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=tileGridSize)
temp = np.zeros_like(img)
for i in range(img.shape[0]):
temp[i] = clahe.apply(img[i])
return temp
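# Illustrative usage sketch (not part of the original module); `frames` is an assumed
# uint8 grayscale stack of shape (n, h, w):
#   frames_eq = clahe(frames, limit=10, tileGridSize=(10, 10))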
# circular mask
def circularmask(img):
'''
Apply largest circular mask
Input:
------
img: images, 3darray, shape: (n, h, w)
Output:
------
img: images with circular mask applied, 3darray, shape: (n, h, w)
'''
center = [int(img.shape[2]/2), int(img.shape[1]/2)]
radius = min(center[0], center[1], img.shape[2]-center[0], img.shape[1]-center[1])
Y, X = np.ogrid[:img.shape[1], :img.shape[2]]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask_array = dist_from_center <= radius
temp = img
temp[:,~mask_array] = 0
return temp
# square mask
def squaremask(img):
'''
Apply largest square mask inside image with circular mask \\
Will change the input size
Input:
------
img: images, 3darray, shape: (n, h, w)
Output:
------
img: images with square mask applied, 3darray, shape: (n, 0.707*min(h, w), 0.707*min(h, w))
'''
n = img.shape[1]
start = ceil(n*0.5*(1.-0.5**0.5))
end = floor(n-n*0.5*(1.-0.5**0.5))
return img[:,start-1:end,start-1:end]
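# Illustrative usage sketch (assumes `frames` is a uint8 stack of shape (n, h, w)):
#   masked = circularmask(frames)   # zero out pixels outside the largest inscribed circle
#   cropped = squaremask(masked)    # crop to the largest square inside that circle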
def poisson_noise(img, c=1.):
'''
Apply Poisson noise
Input:
------
img: images, 3darray, shape: (n, h, w)
c: inverse of noise level (smaller c brings higher noise level), float, default: 1.
Output:
------
img: images with Poisson noise
'''
temp = np.zeros_like(img)
for i in range(img.shape[0]):
vals = len(np.unique(img[i]))
vals = 2 ** np.ceil(np.log2(vals))
temp[i] = np.random.poisson(img[i] * c * vals) / float(vals) / c
return temp
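# Illustrative usage sketch (hypothetical values): a smaller `c` means stronger noise,
# e.g. poisson_noise(frames, c=0.1) is noticeably noisier than poisson_noise(frames, c=1.0).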
def bac(img, a=1, b=0):
'''
Adjust brightness and contrast
Input:
------
img: images, 3darray, shape: (n, h, w)
a: contrast ratio, float, default: 1
b: brightness offset, float, default: 0
Output:
------
img: images, 3darray, shape: (n, h, w)
'''
temp = np.clip(a*img+b, 0., 255.)
temp = temp.astype(np.uint8)
return temp
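# Illustrative usage sketch (hypothetical values): bac(frames, a=1.2, b=10) raises
# contrast by 20% and adds 10 gray levels, clipped to the [0, 255] range.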
def gamma_trans(img, gamma):
'''
Apply Gamma correction \\
    If gamma < 1, the whole image becomes brighter and the contrast of dark
    regions increases; if gamma > 1, the opposite holds.
Input:
------
img: images, 3darray, shape: (n, h, w)
gamma: Gamma value, float
Output:
------
img: images, 3darray, shape: (n, h, w)
'''
gamma_table = [np.power(x/255.0, gamma)*255.0 for x in range(256)]
gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
temp = np.zeros_like(img)
for i in range(img.shape[0]):
temp[i] = cv2.LUT(img[i], gamma_table)
temp = temp.astype(np.uint8)
return temp
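# Illustrative usage sketch: gamma_trans(frames, 0.5) brightens dark regions, while
# autogamma() below picks the gamma value automatically from the mean gray level.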
def autogamma(img):
'''
Apply automatic Gamma correction (gamma = 2.2)
Input:
------
img: images, 3darray, shape: (n, h, w)
Output:
------
img: images, 3darray, shape: (n, h, w)
'''
meanGrayVal = np.sum(img) / np.prod(img.shape)
gamma = log10(1/2.2) / log10(meanGrayVal/255.0)
return gamma_trans(img, gamma) |
the-stack_0_5283 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittests for fake_pathlib.
As most of fake_pathlib is a wrapper around fake_filesystem methods, the tests
are there mostly to ensure basic functionality.
Note that many of the tests are directly taken from examples in the
python docs.
"""
import errno
import os
import pathlib
import stat
import sys
import unittest
from pyfakefs.fake_filesystem import is_root
from pyfakefs import fake_pathlib, fake_filesystem
from pyfakefs.helpers import IS_PYPY
from pyfakefs.tests.test_utils import RealFsTestCase
is_windows = sys.platform == 'win32'
class RealPathlibTestCase(RealFsTestCase):
def __init__(self, methodName='runTest'):
super(RealPathlibTestCase, self).__init__(methodName)
self.pathlib = pathlib
self.path = None
def setUp(self):
super().setUp()
if not self.use_real_fs():
self.pathlib = fake_pathlib.FakePathlibModule(self.filesystem)
self.path = self.pathlib.Path
class FakePathlibInitializationTest(RealPathlibTestCase):
def test_initialization_type(self):
"""Make sure tests for class type will work"""
path = self.path('/test')
if is_windows:
self.assertTrue(isinstance(path, self.pathlib.WindowsPath))
self.assertTrue(isinstance(path, self.pathlib.PureWindowsPath))
self.assertTrue(self.pathlib.PurePosixPath())
# in fake fs, we allow to use the other OS implementation
if self.use_real_fs():
with self.assertRaises(NotImplementedError):
self.pathlib.PosixPath()
else:
self.assertTrue(self.pathlib.PosixPath())
else:
self.assertTrue(isinstance(path, self.pathlib.PosixPath))
self.assertTrue(isinstance(path, self.pathlib.PurePosixPath))
self.assertTrue(self.pathlib.PureWindowsPath())
if self.use_real_fs():
with self.assertRaises(NotImplementedError):
self.pathlib.WindowsPath()
else:
self.assertTrue(self.pathlib.WindowsPath())
def test_init_with_segments(self):
"""Basic initialization tests - taken from pathlib.Path documentation
"""
self.assertEqual(self.path('/', 'foo', 'bar', 'baz'),
self.path('/foo/bar/baz'))
self.assertEqual(self.path(), self.path('.'))
self.assertEqual(self.path(self.path('foo'), self.path('bar')),
self.path('foo/bar'))
self.assertEqual(self.path('/etc') / 'init.d' / 'reboot',
self.path('/etc/init.d/reboot'))
def test_init_collapse(self):
"""Tests for collapsing path during initialization.
Taken from pathlib.PurePath documentation.
"""
self.assertEqual(self.path('foo//bar'), self.path('foo/bar'))
self.assertEqual(self.path('foo/./bar'), self.path('foo/bar'))
self.assertNotEqual(self.path('foo/../bar'), self.path('foo/bar'))
self.assertEqual(self.path('/etc', '/usr', 'lib64'),
self.path('/usr/lib64'))
def test_path_parts(self):
sep = self.os.path.sep
path = self.path(sep + self.os.path.join('foo', 'bar', 'setup.py'))
self.assertEqual(path.parts, (sep, 'foo', 'bar', 'setup.py'))
self.assertEqual(path.drive, '')
self.assertEqual(path.root, sep)
self.assertEqual(path.anchor, sep)
self.assertEqual(path.name, 'setup.py')
self.assertEqual(path.stem, 'setup')
self.assertEqual(path.suffix, '.py')
self.assertEqual(path.parent,
self.path(sep + self.os.path.join('foo', 'bar')))
self.assertEqual(path.parents[0],
self.path(sep + self.os.path.join('foo', 'bar')))
self.assertEqual(path.parents[1], self.path(sep + 'foo'))
self.assertEqual(path.parents[2], self.path(sep))
@unittest.skipIf(is_windows, 'POSIX specific behavior')
def test_is_absolute_posix(self):
self.assertTrue(self.path('/a/b').is_absolute())
self.assertFalse(self.path('a/b').is_absolute())
self.assertFalse(self.path('d:/b').is_absolute())
@unittest.skipIf(not is_windows, 'Windows specific behavior')
def test_is_absolute_windows(self):
self.assertFalse(self.path('/a/b').is_absolute())
self.assertFalse(self.path('a/b').is_absolute())
self.assertTrue(self.path('d:/b').is_absolute())
class RealPathlibInitializationTest(FakePathlibInitializationTest):
def use_real_fs(self):
return True
@unittest.skipIf(not is_windows, 'Windows specific behavior')
class FakePathlibInitializationWithDriveTest(RealPathlibTestCase):
def test_init_with_segments(self):
"""Basic initialization tests - taken from pathlib.Path
documentation"""
self.assertEqual(self.path('c:/', 'foo', 'bar', 'baz'),
self.path('c:/foo/bar/baz'))
self.assertEqual(self.path(), self.path('.'))
self.assertEqual(self.path(self.path('foo'), self.path('bar')),
self.path('foo/bar'))
self.assertEqual(self.path('c:/Users') / 'john' / 'data',
self.path('c:/Users/john/data'))
def test_init_collapse(self):
"""Tests for collapsing path during initialization.
Taken from pathlib.PurePath documentation.
"""
self.assertEqual(self.path('c:/Windows', 'd:bar'),
self.path('d:bar'))
self.assertEqual(self.path('c:/Windows', '/Program Files'),
self.path('c:/Program Files'))
def test_path_parts(self):
path = self.path(
self.os.path.join('d:', 'python scripts', 'setup.py'))
self.assertEqual(path.parts, ('d:', 'python scripts', 'setup.py'))
self.assertEqual(path.drive, 'd:')
self.assertEqual(path.root, '')
self.assertEqual(path.anchor, 'd:')
self.assertEqual(path.name, 'setup.py')
self.assertEqual(path.stem, 'setup')
self.assertEqual(path.suffix, '.py')
self.assertEqual(path.parent,
self.path(
self.os.path.join('d:', 'python scripts')))
self.assertEqual(path.parents[0],
self.path(
self.os.path.join('d:', 'python scripts')))
self.assertEqual(path.parents[1], self.path('d:'))
    @unittest.skipIf(not is_windows, 'Windows-specific behavior')
def test_is_absolute(self):
self.assertTrue(self.path('c:/a/b').is_absolute())
self.assertFalse(self.path('/a/b').is_absolute())
self.assertFalse(self.path('c:').is_absolute())
self.assertTrue(self.path('//some/share').is_absolute())
class RealPathlibInitializationWithDriveTest(
FakePathlibInitializationWithDriveTest):
def use_real_fs(self):
return True
class FakePathlibPurePathTest(RealPathlibTestCase):
"""Tests functionality present in PurePath class."""
@unittest.skipIf(is_windows, 'POSIX specific behavior')
def test_is_reserved_posix(self):
self.assertFalse(self.path('/dev').is_reserved())
self.assertFalse(self.path('/').is_reserved())
self.assertFalse(self.path('COM1').is_reserved())
self.assertFalse(self.path('nul.txt').is_reserved())
@unittest.skipIf(not is_windows, 'Windows specific behavior')
def test_is_reserved_windows(self):
self.check_windows_only()
self.assertFalse(self.path('/dev').is_reserved())
self.assertFalse(self.path('/').is_reserved())
self.assertTrue(self.path('COM1').is_reserved())
self.assertTrue(self.path('nul.txt').is_reserved())
def test_joinpath(self):
self.assertEqual(self.path('/etc').joinpath('passwd'),
self.path('/etc/passwd'))
self.assertEqual(self.path('/etc').joinpath(self.path('passwd')),
self.path('/etc/passwd'))
self.assertEqual(self.path('/foo').joinpath('bar', 'baz'),
self.path('/foo/bar/baz'))
def test_joinpath_drive(self):
self.check_windows_only()
self.assertEqual(self.path('c:').joinpath('/Program Files'),
self.path('c:/Program Files'))
def test_match(self):
self.assertTrue(self.path('a/b.py').match('*.py'))
self.assertTrue(self.path('/a/b/c.py').match('b/*.py'))
self.assertFalse(self.path('/a/b/c.py').match('a/*.py'))
self.assertTrue(self.path('/a.py').match('/*.py'))
self.assertFalse(self.path('a/b.py').match('/*.py'))
def test_relative_to(self):
self.assertEqual(self.path('/etc/passwd').relative_to('/'),
self.path('etc/passwd'))
self.assertEqual(self.path('/etc/passwd').relative_to('/'),
self.path('etc/passwd'))
with self.assertRaises(ValueError):
self.path('passwd').relative_to('/usr')
@unittest.skipIf(sys.version_info < (3, 9),
'is_relative_to new in Python 3.9')
def test_is_relative_to(self):
path = self.path('/etc/passwd')
self.assertTrue(path.is_relative_to('/etc'))
self.assertFalse(path.is_relative_to('/src'))
def test_with_name(self):
self.check_windows_only()
self.assertEqual(
self.path('c:/Downloads/pathlib.tar.gz').with_name('setup.py'),
self.path('c:/Downloads/setup.py'))
with self.assertRaises(ValueError):
self.path('c:/').with_name('setup.py')
def test_with_suffix(self):
self.assertEqual(
self.path('c:/Downloads/pathlib.tar.gz').with_suffix('.bz2'),
self.path('c:/Downloads/pathlib.tar.bz2'))
self.assertEqual(self.path('README').with_suffix('.txt'),
self.path('README.txt'))
class RealPathlibPurePathTest(FakePathlibPurePathTest):
def use_real_fs(self):
return True
class FakePathlibFileObjectPropertyTest(RealPathlibTestCase):
def setUp(self):
super(FakePathlibFileObjectPropertyTest, self).setUp()
self.file_path = self.make_path('home', 'jane', 'test.py')
self.create_file(self.file_path, contents=b'a' * 100)
self.create_dir(self.make_path('home', 'john'))
try:
self.skip_if_symlink_not_supported()
except unittest.SkipTest:
return
self.create_symlink(self.make_path('john'),
self.make_path('home', 'john'))
self.file_link_path = self.make_path('test.py')
self.create_symlink(self.file_link_path, self.file_path)
self.create_symlink(self.make_path('broken_dir_link'),
self.make_path('home', 'none'))
self.create_symlink(self.make_path('broken_file_link'),
self.make_path('home', 'none', 'test.py'))
def test_exists(self):
self.skip_if_symlink_not_supported()
self.assertTrue(self.path(self.file_path).exists())
self.assertTrue(self.path(
self.make_path('home', 'jane')).exists())
self.assertFalse(self.path(
self.make_path('home', 'jane', 'test')).exists())
self.assertTrue(self.path(
self.make_path('john')).exists())
self.assertTrue(self.path(
self.file_link_path).exists())
self.assertFalse(self.path(
self.make_path('broken_dir_link')).exists())
self.assertFalse(self.path(
self.make_path('broken_file_link')).exists())
def test_is_dir(self):
self.skip_if_symlink_not_supported()
self.assertFalse(self.path(
self.file_path).is_dir())
self.assertTrue(self.path(
self.make_path('home/jane')).is_dir())
self.assertTrue(self.path(
self.make_path('john')).is_dir())
self.assertFalse(self.path(
self.file_link_path).is_dir())
self.assertFalse(self.path(
self.make_path('broken_dir_link')).is_dir())
self.assertFalse(self.path(
self.make_path('broken_file_link')).is_dir())
def test_is_file(self):
self.skip_if_symlink_not_supported()
self.assertTrue(self.path(
self.make_path('home/jane/test.py')).is_file())
self.assertFalse(self.path(
self.make_path('home/jane')).is_file())
self.assertFalse(self.path(
self.make_path('john')).is_file())
self.assertTrue(self.path(
self.file_link_path).is_file())
self.assertFalse(self.path(
self.make_path('broken_dir_link')).is_file())
self.assertFalse(self.path(
self.make_path('broken_file_link')).is_file())
def test_is_symlink(self):
self.skip_if_symlink_not_supported()
self.assertFalse(self.path(
self.make_path('home/jane/test.py')).is_symlink())
self.assertFalse(self.path(
self.make_path('home/jane')).is_symlink())
self.assertTrue(self.path(
self.make_path('john')).is_symlink())
self.assertTrue(self.path(
self.file_link_path).is_symlink())
self.assertTrue(self.path(
self.make_path('broken_dir_link')).is_symlink())
self.assertTrue(self.path(
self.make_path('broken_file_link')).is_symlink())
def test_stat(self):
self.skip_if_symlink_not_supported()
file_stat = self.os.stat(self.file_path)
stat_result = self.path(self.file_link_path).stat()
self.assertFalse(stat_result.st_mode & stat.S_IFDIR)
self.assertTrue(stat_result.st_mode & stat.S_IFREG)
self.assertEqual(stat_result.st_ino, file_stat.st_ino)
self.assertEqual(stat_result.st_size, 100)
self.assertEqual(stat_result.st_mtime, file_stat.st_mtime)
self.assertEqual(stat_result[stat.ST_MTIME],
int(file_stat.st_mtime))
def check_lstat(self, expected_size):
self.skip_if_symlink_not_supported()
link_stat = self.os.lstat(self.file_link_path)
stat_result = self.path(self.file_link_path).lstat()
self.assertTrue(stat_result.st_mode & stat.S_IFREG)
self.assertTrue(stat_result.st_mode & stat.S_IFLNK)
self.assertEqual(stat_result.st_ino, link_stat.st_ino)
self.assertEqual(stat_result.st_size, expected_size)
self.assertEqual(stat_result.st_mtime, link_stat.st_mtime)
@unittest.skipIf(is_windows, 'POSIX specific behavior')
def test_lstat_posix(self):
self.check_lstat(len(self.file_path))
@unittest.skipIf(not is_windows, 'Windows specific behavior')
def test_lstat_windows(self):
self.skip_if_symlink_not_supported()
self.check_lstat(0)
@unittest.skipIf(is_windows, 'Linux specific behavior')
def test_chmod(self):
self.check_linux_only()
file_stat = self.os.stat(self.file_path)
self.assertEqual(file_stat.st_mode, stat.S_IFREG | 0o666)
link_stat = self.os.lstat(self.file_link_path)
# we get stat.S_IFLNK | 0o755 under MacOs
self.assertEqual(link_stat.st_mode, stat.S_IFLNK | 0o777)
def test_lchmod(self):
self.skip_if_symlink_not_supported()
file_stat = self.os.stat(self.file_path)
link_stat = self.os.lstat(self.file_link_path)
if not hasattr(os, "lchmod"):
with self.assertRaises(NotImplementedError):
self.path(self.file_link_path).lchmod(0o444)
else:
self.path(self.file_link_path).lchmod(0o444)
self.assertEqual(file_stat.st_mode, stat.S_IFREG | 0o666)
# the exact mode depends on OS and Python version
self.assertEqual(link_stat.st_mode & 0o777700,
stat.S_IFLNK | 0o700)
@unittest.skipIf(sys.version_info < (3, 10),
"follow_symlinks argument new in Python 3.10")
def test_chmod_no_followsymlinks(self):
self.skip_if_symlink_not_supported()
file_stat = self.os.stat(self.file_path)
link_stat = self.os.lstat(self.file_link_path)
if os.chmod not in os.supports_follow_symlinks or IS_PYPY:
with self.assertRaises(NotImplementedError):
self.path(self.file_link_path).chmod(0o444,
follow_symlinks=False)
else:
self.path(self.file_link_path).chmod(0o444, follow_symlinks=False)
self.assertEqual(file_stat.st_mode, stat.S_IFREG | 0o666)
# the exact mode depends on OS and Python version
self.assertEqual(link_stat.st_mode & 0o777700,
stat.S_IFLNK | 0o700)
def test_resolve(self):
self.create_dir(self.make_path('antoine', 'docs'))
self.create_file(self.make_path('antoine', 'setup.py'))
self.os.chdir(self.make_path('antoine'))
# use real path to handle symlink /var to /private/var in MacOs
self.assert_equal_paths(self.path().resolve(),
self.path(self.os.path.realpath(
self.make_path('antoine'))))
self.assert_equal_paths(
self.path(
self.os.path.join('docs', '..', 'setup.py')).resolve(),
self.path(
self.os.path.realpath(
self.make_path('antoine', 'setup.py'))))
def test_stat_file_in_unreadable_dir(self):
self.check_posix_only()
dir_path = self.make_path('some_dir')
file_path = self.os.path.join(dir_path, 'some_file')
self.create_file(file_path)
self.os.chmod(dir_path, 0o000)
if not is_root():
self.assert_raises_os_error(
errno.EACCES, self.path(file_path).stat)
else:
self.assertEqual(0, self.path(file_path).stat().st_size)
def test_iterdir_in_unreadable_dir(self):
self.check_posix_only()
dir_path = self.make_path('some_dir')
file_path = self.os.path.join(dir_path, 'some_file')
self.create_file(file_path)
self.os.chmod(dir_path, 0o000)
iter = self.path(dir_path).iterdir()
if not is_root():
self.assert_raises_os_error(errno.EACCES, list, iter)
else:
path = str(list(iter)[0])
self.assertTrue(path.endswith('some_file'))
def test_resolve_nonexisting_file(self):
path = self.path(
self.make_path('/path', 'to', 'file', 'this can not exist'))
self.assertEqual(path, path.resolve())
def test_cwd(self):
dir_path = self.make_path('jane')
self.create_dir(dir_path)
self.os.chdir(dir_path)
self.assert_equal_paths(self.path.cwd(),
self.path(self.os.path.realpath(dir_path)))
def test_expanduser(self):
if is_windows:
self.assertEqual(self.path('~').expanduser(),
self.path(
os.environ['USERPROFILE'].replace('\\',
'/')))
else:
self.assertEqual(self.path('~').expanduser(),
self.path(os.environ['HOME']))
def test_home(self):
if is_windows:
self.assertEqual(self.path(
os.environ['USERPROFILE'].replace('\\', '/')),
self.path.home())
else:
self.assertEqual(self.path(os.environ['HOME']),
self.path.home())
class RealPathlibFileObjectPropertyTest(FakePathlibFileObjectPropertyTest):
def use_real_fs(self):
return True
class FakePathlibPathFileOperationTest(RealPathlibTestCase):
"""Tests methods related to file and directory handling."""
def test_exists(self):
self.skip_if_symlink_not_supported()
self.create_file(self.make_path('home', 'jane', 'test.py'))
self.create_dir(self.make_path('home', 'john'))
self.create_symlink(
self.make_path('john'), self.make_path('home', 'john'))
self.create_symlink(
self.make_path('none'), self.make_path('home', 'none'))
self.assertTrue(
self.path(self.make_path('home', 'jane', 'test.py')).exists())
self.assertTrue(self.path(self.make_path('home', 'jane')).exists())
self.assertTrue(self.path(self.make_path('john')).exists())
self.assertFalse(self.path(self.make_path('none')).exists())
self.assertFalse(
self.path(self.make_path('home', 'jane', 'test')).exists())
def test_open(self):
self.create_dir(self.make_path('foo'))
with self.assertRaises(OSError):
self.path(self.make_path('foo', 'bar.txt')).open()
self.path(self.make_path('foo', 'bar.txt')).open('w').close()
self.assertTrue(
self.os.path.exists(self.make_path('foo', 'bar.txt')))
def test_read_text(self):
self.create_file(self.make_path('text_file'), contents='foo')
file_path = self.path(self.make_path('text_file'))
self.assertEqual(file_path.read_text(), 'foo')
def test_read_text_with_encoding(self):
self.create_file(self.make_path('text_file'),
contents='ерунда', encoding='cyrillic')
file_path = self.path(self.make_path('text_file'))
self.assertEqual(file_path.read_text(encoding='cyrillic'),
'ерунда')
def test_write_text(self):
path_name = self.make_path('text_file')
file_path = self.path(path_name)
file_path.write_text(str('foo'))
self.assertTrue(self.os.path.exists(path_name))
self.check_contents(path_name, 'foo')
def test_write_text_with_encoding(self):
path_name = self.make_path('text_file')
file_path = self.path(path_name)
file_path.write_text('ανοησίες', encoding='greek')
self.assertTrue(self.os.path.exists(path_name))
self.check_contents(path_name, 'ανοησίες'.encode('greek'))
@unittest.skipIf(sys.version_info < (3, 10),
"newline argument new in Python 3.10")
def test_write_with_newline_arg(self):
path = self.path(self.make_path('some_file'))
path.write_text('1\r\n2\n3\r4', newline='')
self.check_contents(path, b'1\r\n2\n3\r4')
path.write_text('1\r\n2\n3\r4', newline='\n')
self.check_contents(path, b'1\r\n2\n3\r4')
path.write_text('1\r\n2\n3\r4', newline='\r\n')
self.check_contents(path, b'1\r\r\n2\r\n3\r4')
path.write_text('1\r\n2\n3\r4', newline='\r')
self.check_contents(path, b'1\r\r2\r3\r4')
def test_read_bytes(self):
path_name = self.make_path('binary_file')
self.create_file(path_name, contents=b'Binary file contents')
file_path = self.path(path_name)
self.assertEqual(file_path.read_bytes(), b'Binary file contents')
def test_write_bytes(self):
path_name = self.make_path('binary_file')
file_path = self.path(path_name)
file_path.write_bytes(b'Binary file contents')
self.assertTrue(self.os.path.exists(path_name))
self.check_contents(path_name, b'Binary file contents')
def test_rename(self):
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name, contents='test')
new_file_name = self.make_path('foo', 'baz.txt')
self.path(file_name).rename(new_file_name)
self.assertFalse(self.os.path.exists(file_name))
self.check_contents(new_file_name, 'test')
def test_replace(self):
self.create_file(self.make_path('foo', 'bar.txt'), contents='test')
self.create_file(self.make_path('bar', 'old.txt'),
contents='replaced')
self.path(self.make_path('bar', 'old.txt')).replace(
self.make_path('foo', 'bar.txt'))
self.assertFalse(
self.os.path.exists(self.make_path('bar', 'old.txt')))
self.check_contents(self.make_path('foo', 'bar.txt'), 'replaced')
def test_unlink(self):
file_path = self.make_path('foo', 'bar.txt')
self.create_file(file_path, contents='test')
self.assertTrue(self.os.path.exists(file_path))
self.path(file_path).unlink()
self.assertFalse(self.os.path.exists(file_path))
def test_touch_non_existing(self):
self.create_dir(self.make_path('foo'))
file_name = self.make_path('foo', 'bar.txt')
self.path(file_name).touch(mode=0o444)
self.check_contents(file_name, '')
self.assertTrue(self.os.stat(file_name).st_mode,
stat.S_IFREG | 0o444)
self.os.chmod(file_name, mode=0o666)
def test_touch_existing(self):
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name, contents='test')
file_path = self.path(file_name)
self.assert_raises_os_error(
errno.EEXIST, file_path.touch, exist_ok=False)
file_path.touch()
self.check_contents(file_name, 'test')
def test_samefile(self):
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
file_name2 = self.make_path('foo', 'baz.txt')
self.create_file(file_name2)
with self.assertRaises(OSError):
self.path(self.make_path('foo', 'other')).samefile(
self.make_path('foo', 'other.txt'))
path = self.path(file_name)
other_name = self.make_path('foo', 'other.txt')
with self.assertRaises(OSError):
path.samefile(other_name)
with self.assertRaises(OSError):
path.samefile(self.path(other_name))
self.assertFalse(path.samefile(file_name2))
self.assertFalse(path.samefile(self.path(file_name2)))
self.assertTrue(
path.samefile(self.make_path('foo', '..', 'foo', 'bar.txt')))
self.assertTrue(path.samefile(
self.path(self.make_path('foo', '..', 'foo', 'bar.txt'))))
def test_symlink_to(self):
self.skip_if_symlink_not_supported()
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
link_name = self.make_path('link_to_bar')
path = self.path(link_name)
path.symlink_to(file_name)
self.assertTrue(self.os.path.exists(link_name))
self.assertTrue(path.is_symlink())
@unittest.skipIf(sys.version_info < (3, 8),
'link_to new in Python 3.8')
def test_link_to(self):
self.skip_if_symlink_not_supported()
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
self.assertEqual(1, self.os.stat(file_name).st_nlink)
link_name = self.make_path('link_to_bar')
path = self.path(file_name)
path.link_to(link_name)
self.assertTrue(self.os.path.exists(link_name))
self.assertFalse(path.is_symlink())
self.assertEqual(2, self.os.stat(file_name).st_nlink)
@unittest.skipIf(sys.version_info < (3, 10),
'hardlink_to new in Python 3.10')
def test_hardlink_to(self):
self.skip_if_symlink_not_supported()
file_name = self.make_path('foo', 'bar.txt')
self.create_file(file_name)
self.assertEqual(1, self.os.stat(file_name).st_nlink)
link_path = self.path(self.make_path('link_to_bar'))
path = self.path(file_name)
link_path.hardlink_to(path)
self.assertTrue(self.os.path.exists(link_path))
self.assertFalse(path.is_symlink())
self.assertEqual(2, self.os.stat(file_name).st_nlink)
@unittest.skipIf(sys.version_info < (3, 9),
'readlink new in Python 3.9')
def test_readlink(self):
self.skip_if_symlink_not_supported()
link_path = self.make_path('foo', 'bar', 'baz')
target = self.make_path('tarJAY')
self.create_symlink(link_path, target)
path = self.path(link_path)
self.assert_equal_paths(path.readlink(), self.path(target))
def test_mkdir(self):
dir_name = self.make_path('foo', 'bar')
self.assert_raises_os_error(errno.ENOENT,
self.path(dir_name).mkdir)
self.path(dir_name).mkdir(parents=True)
self.assertTrue(self.os.path.exists(dir_name))
self.assert_raises_os_error(errno.EEXIST,
self.path(dir_name).mkdir)
def test_mkdir_exist_ok(self):
dir_name = self.make_path('foo', 'bar')
self.create_dir(dir_name)
self.path(dir_name).mkdir(exist_ok=True)
file_name = self.os.path.join(dir_name, 'baz')
self.create_file(file_name)
self.assert_raises_os_error(errno.EEXIST,
self.path(file_name).mkdir,
exist_ok=True)
def test_rmdir(self):
dir_name = self.make_path('foo', 'bar')
self.create_dir(dir_name)
self.path(dir_name).rmdir()
self.assertFalse(self.os.path.exists(dir_name))
self.assertTrue(self.os.path.exists(self.make_path('foo')))
self.create_file(self.make_path('foo', 'baz'))
with self.assertRaises(OSError):
self.path(self.make_path('foo')).rmdir()
self.assertTrue(self.os.path.exists(self.make_path('foo')))
def test_iterdir(self):
self.create_file(self.make_path('foo', 'bar', 'file1'))
self.create_file(self.make_path('foo', 'bar', 'file2'))
self.create_file(self.make_path('foo', 'bar', 'file3'))
path = self.path(self.make_path('foo', 'bar'))
contents = [entry for entry in path.iterdir()]
self.assertEqual(3, len(contents))
self.assertIn(self.path(self.make_path('foo', 'bar', 'file2')),
contents)
def test_glob(self):
self.create_file(self.make_path('foo', 'setup.py'))
self.create_file(self.make_path('foo', 'all_tests.py'))
self.create_file(self.make_path('foo', 'README.md'))
self.create_file(self.make_path('foo', 'setup.pyc'))
path = self.path(self.make_path('foo'))
self.assertEqual(sorted(path.glob('*.py')),
[self.path(self.make_path('foo', 'all_tests.py')),
self.path(self.make_path('foo', 'setup.py'))])
@unittest.skipIf(not is_windows, 'Windows specific test')
def test_glob_case_windows(self):
self.create_file(self.make_path('foo', 'setup.py'))
self.create_file(self.make_path('foo', 'all_tests.PY'))
self.create_file(self.make_path('foo', 'README.md'))
self.create_file(self.make_path('foo', 'example.Py'))
path = self.path(self.make_path('foo'))
self.assertEqual(sorted(path.glob('*.py')),
[self.path(self.make_path('foo', 'all_tests.PY')),
self.path(self.make_path('foo', 'example.Py')),
self.path(self.make_path('foo', 'setup.py'))])
@unittest.skipIf(is_windows, 'Posix specific test')
def test_glob_case_posix(self):
self.check_posix_only()
self.create_file(self.make_path('foo', 'setup.py'))
self.create_file(self.make_path('foo', 'all_tests.PY'))
self.create_file(self.make_path('foo', 'README.md'))
self.create_file(self.make_path('foo', 'example.Py'))
path = self.path(self.make_path('foo'))
self.assertEqual(sorted(path.glob('*.py')),
[self.path(self.make_path('foo', 'setup.py'))])
class RealPathlibPathFileOperationTest(FakePathlibPathFileOperationTest):
def use_real_fs(self):
return True
@unittest.skipIf(sys.version_info < (3, 6),
'path-like objects new in Python 3.6')
class FakePathlibUsageInOsFunctionsTest(RealPathlibTestCase):
"""Test that many os / os.path functions accept a path-like object
since Python 3.6. The functionality of these functions is tested
    elsewhere; here we just check that they accept a fake path object as an
argument.
"""
def test_join(self):
dir1 = 'foo'
dir2 = 'bar'
dir = self.os.path.join(dir1, dir2)
self.assertEqual(dir, self.os.path.join(self.path(dir1), dir2))
self.assertEqual(dir, self.os.path.join(dir1, self.path(dir2)))
self.assertEqual(dir,
self.os.path.join(self.path(dir1),
self.path(dir2)))
def test_normcase(self):
dir1 = self.make_path('Foo', 'Bar', 'Baz')
self.assertEqual(self.os.path.normcase(dir1),
self.os.path.normcase(self.path(dir1)))
def test_normpath(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.normpath(dir1),
self.os.path.normpath(self.path(dir1)))
def test_realpath(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.realpath(dir1),
self.os.path.realpath(self.path(dir1)))
def test_relpath(self):
path_foo = self.make_path('path', 'to', 'foo')
path_bar = self.make_path('path', 'to', 'bar')
rel_path = self.os.path.relpath(path_foo, path_bar)
self.assertEqual(rel_path,
self.os.path.relpath(self.path(path_foo),
path_bar))
self.assertEqual(rel_path,
self.os.path.relpath(path_foo,
self.path(path_bar)))
self.assertEqual(rel_path,
self.os.path.relpath(self.path(path_foo),
self.path(path_bar)))
def test_split(self):
dir1 = self.make_path('Foo', 'Bar', 'Baz')
self.assertEqual(self.os.path.split(dir1),
self.os.path.split(self.path(dir1)))
def test_splitdrive(self):
dir1 = self.make_path('C:', 'Foo', 'Bar', 'Baz')
self.assertEqual(self.os.path.splitdrive(dir1),
self.os.path.splitdrive(self.path(dir1)))
def test_abspath(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.abspath(dir1),
self.os.path.abspath(self.path(dir1)))
def test_exists(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.exists(dir1),
self.os.path.exists(self.path(dir1)))
def test_lexists(self):
dir1 = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.lexists(dir1),
self.os.path.lexists(self.path(dir1)))
def test_expanduser(self):
dir1 = self.os.path.join('~', 'foo')
self.assertEqual(self.os.path.expanduser(dir1),
self.os.path.expanduser(self.path(dir1)))
def test_getmtime(self):
self.skip_real_fs()
dir1 = self.make_path('foo', 'bar1.txt')
path_obj = self.filesystem.create_file(dir1)
path_obj._st_mtime = 24
self.assertEqual(self.os.path.getmtime(dir1),
self.os.path.getmtime(self.path(dir1)))
def test_getctime(self):
self.skip_real_fs()
dir1 = self.make_path('foo', 'bar1.txt')
path_obj = self.filesystem.create_file(dir1)
path_obj.st_ctime = 42
self.assertEqual(self.os.path.getctime(dir1),
self.os.path.getctime(self.path(dir1)))
def test_getatime(self):
self.skip_real_fs()
dir1 = self.make_path('foo', 'bar1.txt')
path_obj = self.filesystem.create_file(dir1)
path_obj.st_atime = 11
self.assertEqual(self.os.path.getatime(dir1),
self.os.path.getatime(self.path(dir1)))
def test_getsize(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, contents='1234567')
self.assertEqual(self.os.path.getsize(path),
self.os.path.getsize(self.path(path)))
def test_isabs(self):
path = self.make_path('foo', 'bar', '..', 'baz')
self.assertEqual(self.os.path.isabs(path),
self.os.path.isabs(self.path(path)))
def test_isfile(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path)
self.assertEqual(self.os.path.isfile(path),
self.os.path.isfile(self.path(path)))
def test_isfile_not_readable(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, perm=0)
self.assertEqual(self.os.path.isfile(path),
self.os.path.isfile(self.path(path)))
def test_islink(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path)
self.assertEqual(self.os.path.islink(path),
self.os.path.islink(self.path(path)))
def test_isdir(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path)
self.assertEqual(self.os.path.isdir(path),
self.os.path.isdir(self.path(path)))
def test_ismount(self):
path = self.os.path.sep
self.assertEqual(self.os.path.ismount(path),
self.os.path.ismount(self.path(path)))
def test_access(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, contents='1234567')
self.assertEqual(self.os.access(path, os.R_OK),
self.os.access(self.path(path), os.R_OK))
def test_chdir(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_dir(path)
self.os.chdir(self.path(path))
# use real path to handle symlink /var to /private/var in MacOs
self.assert_equal_paths(self.os.path.realpath(path), self.os.getcwd())
def test_chmod(self):
path = self.make_path('some_file')
self.create_file(path)
self.os.chmod(self.path(path), 0o444)
self.assertEqual(stat.S_IMODE(0o444),
stat.S_IMODE(self.os.stat(path).st_mode))
self.os.chmod(self.path(path), 0o666)
def test_link(self):
self.skip_if_symlink_not_supported()
file1_path = self.make_path('test_file1')
file2_path = self.make_path('test_file2')
self.create_file(file1_path)
self.os.link(self.path(file1_path), file2_path)
self.assertTrue(self.os.path.exists(file2_path))
self.os.unlink(file2_path)
self.os.link(self.path(file1_path), self.path(file2_path))
self.assertTrue(self.os.path.exists(file2_path))
self.os.unlink(file2_path)
self.os.link(file1_path, self.path(file2_path))
self.assertTrue(self.os.path.exists(file2_path))
def test_listdir(self):
path = self.make_path('foo', 'bar')
self.create_dir(path)
self.create_file(path + 'baz.txt')
self.assertEqual(self.os.listdir(path),
self.os.listdir(self.path(path)))
def test_mkdir(self):
path = self.make_path('foo')
self.os.mkdir(self.path(path))
self.assertTrue(self.os.path.exists(path))
def test_makedirs(self):
path = self.make_path('foo', 'bar')
self.os.makedirs(self.path(path))
self.assertTrue(self.os.path.exists(path))
@unittest.skipIf(is_windows and sys.version_info < (3, 8),
                     'os.readlink does not support path-like objects '
'under Windows before Python 3.8')
def test_readlink(self):
self.skip_if_symlink_not_supported()
link_path = self.make_path('foo', 'bar', 'baz')
target = self.make_path('tarJAY')
self.create_symlink(link_path, target)
self.assert_equal_paths(self.os.readlink(self.path(link_path)), target)
@unittest.skipIf(is_windows and sys.version_info < (3, 8),
                     'os.readlink does not support path-like objects '
'under Windows before Python 3.8')
def test_readlink_bytes(self):
self.skip_if_symlink_not_supported()
link_path = self.make_path(b'foo', b'bar', b'baz')
target = self.make_path(b'tarJAY')
self.create_symlink(link_path, target)
self.assert_equal_paths(self.os.readlink(self.path(link_path)), target)
def test_remove(self):
path = self.make_path('test.txt')
self.create_file(path)
self.os.remove(self.path(path))
self.assertFalse(self.os.path.exists(path))
def test_rename(self):
path1 = self.make_path('test1.txt')
path2 = self.make_path('test2.txt')
self.create_file(path1)
self.os.rename(self.path(path1), path2)
self.assertTrue(self.os.path.exists(path2))
self.os.rename(self.path(path2), self.path(path1))
self.assertTrue(self.os.path.exists(path1))
def test_replace(self):
path1 = self.make_path('test1.txt')
path2 = self.make_path('test2.txt')
self.create_file(path1)
self.os.replace(self.path(path1), path2)
self.assertTrue(self.os.path.exists(path2))
self.os.replace(self.path(path2), self.path(path1))
self.assertTrue(self.os.path.exists(path1))
def test_rmdir(self):
path = self.make_path('foo', 'bar')
self.create_dir(path)
self.os.rmdir(self.path(path))
self.assertFalse(self.os.path.exists(path))
def test_scandir(self):
directory = self.make_path('xyzzy', 'plugh')
self.create_dir(directory)
self.create_file(self.os.path.join(directory, 'test.txt'))
dir_entries = [entry for entry in
self.os.scandir(self.path(directory))]
self.assertEqual(1, len(dir_entries))
def test_symlink(self):
self.skip_if_symlink_not_supported()
file_path = self.make_path('test_file1')
link_path = self.make_path('link')
self.create_file(file_path)
self.os.symlink(self.path(file_path), link_path)
self.assertTrue(self.os.path.exists(link_path))
self.os.remove(link_path)
self.os.symlink(self.path(file_path), self.path(link_path))
self.assertTrue(self.os.path.exists(link_path))
def test_stat(self):
path = self.make_path('foo', 'bar', 'baz')
self.create_file(path, contents='1234567')
self.assertEqual(self.os.stat(path), self.path(path).stat())
@unittest.skipIf(sys.version_info < (3, 10), "New in Python 3.10")
def test_stat_follow_symlinks(self):
self.check_posix_only()
directory = self.make_path('foo')
base_name = 'bar'
file_path = self.path(self.os.path.join(directory, base_name))
link_path = self.path(self.os.path.join(directory, 'link'))
contents = "contents"
self.create_file(file_path, contents=contents)
self.create_symlink(link_path, base_name)
self.assertEqual(len(contents),
link_path.stat(follow_symlinks=True)[stat.ST_SIZE])
self.assertEqual(len(base_name),
link_path.stat(follow_symlinks=False)[stat.ST_SIZE])
def test_utime(self):
path = self.make_path('some_file')
self.create_file(path, contents='test')
self.os.utime(self.path(path), times=(1, 2))
st = self.os.stat(path)
self.assertEqual(1, st.st_atime)
self.assertEqual(2, st.st_mtime)
def test_truncate(self):
path = self.make_path('some_file')
self.create_file(path, contents='test_test')
self.os.truncate(self.path(path), length=4)
st = self.os.stat(path)
self.assertEqual(4, st.st_size)
@unittest.skipIf(sys.platform == 'win32',
'no pwd and grp modules in Windows')
def test_owner_and_group_posix(self):
self.check_posix_only()
path = self.make_path('some_file')
self.create_file(path)
self.assertTrue(self.path(path).owner())
self.assertTrue(self.path(path).group())
def test_owner_and_group_windows(self):
self.check_windows_only()
path = self.make_path('some_file')
self.create_file(path)
with self.assertRaises(NotImplementedError):
self.path(path).owner()
with self.assertRaises(NotImplementedError):
self.path(path).group()
class RealPathlibUsageInOsFunctionsTest(FakePathlibUsageInOsFunctionsTest):
def use_real_fs(self):
return True
@unittest.skipIf(sys.version_info < (3, 6),
'Path-like objects new in Python 3.6')
class FakeFilesystemPathLikeObjectTest(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(
path_separator='/')
self.pathlib = fake_pathlib.FakePathlibModule(self.filesystem)
self.os = fake_filesystem.FakeOsModule(self.filesystem)
def test_create_dir_with_pathlib_path(self):
dir_path_string = 'foo/bar/baz'
dir_path = self.pathlib.Path(dir_path_string)
self.filesystem.create_dir(dir_path)
self.assertTrue(self.os.path.exists(dir_path_string))
self.assertEqual(stat.S_IFDIR,
self.os.stat(
dir_path_string).st_mode & stat.S_IFDIR)
def test_create_file_with_pathlib_path(self):
file_path_string = 'foo/bar/baz'
file_path = self.pathlib.Path(file_path_string)
self.filesystem.create_file(file_path)
self.assertTrue(self.os.path.exists(file_path_string))
self.assertEqual(stat.S_IFREG,
self.os.stat(
file_path_string).st_mode & stat.S_IFREG)
def test_create_symlink_with_pathlib_path(self):
file_path = self.pathlib.Path('foo/bar/baz')
link_path_string = 'foo/link'
link_path = self.pathlib.Path(link_path_string)
self.filesystem.create_symlink(link_path, file_path)
self.assertTrue(self.os.path.lexists(link_path_string))
self.assertEqual(stat.S_IFLNK,
self.os.lstat(link_path_string).st_mode &
stat.S_IFLNK)
def test_add_existing_real_file_with_pathlib_path(self):
real_file_path_string = os.path.abspath(__file__)
real_file_path = self.pathlib.Path(real_file_path_string)
self.filesystem.add_real_file(real_file_path)
fake_filepath_string = real_file_path_string.replace(
os.sep, self.os.sep)
self.assertTrue(self.os.path.exists(fake_filepath_string))
self.assertEqual(stat.S_IFREG, self.os.stat(
fake_filepath_string).st_mode & stat.S_IFREG)
def test_add_existing_real_directory_with_pathlib_path(self):
real_dirpath_string = os.path.dirname(os.path.abspath(__file__))
real_dir_path = self.pathlib.Path(real_dirpath_string)
self.filesystem.add_real_directory(real_dir_path)
fake_dirpath_string = real_dirpath_string.replace(
os.sep, self.os.sep)
self.assertTrue(self.os.path.exists(fake_dirpath_string))
self.assertEqual(stat.S_IFDIR, self.os.stat(
fake_dirpath_string).st_mode & stat.S_IFDIR)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
the-stack_0_5285 | # -*- coding: utf-8 -*-
import functools
from mock import Mock
from bravado_core.model import _post_process_spec
from bravado_core.spec import Spec
def test_empty():
swagger_spec = Spec({})
callback = Mock()
_post_process_spec(
spec_dict=swagger_spec.spec_dict,
spec_resolver=swagger_spec.resolver,
on_container_callbacks=[callback],
)
assert callback.call_count == 0
def test_single_key():
spec_dict = {'definitions': {}}
swagger_spec = Spec(spec_dict)
callback = Mock()
_post_process_spec(
spec_dict=swagger_spec.spec_dict,
spec_resolver=swagger_spec.resolver,
on_container_callbacks=[callback],
)
assert callback.call_count == 1
callback.assert_called_once_with(spec_dict, '#/definitions')
def test_visits_refs_only_once():
# bar should only be de-reffed once even though there are two refs to it
spec_dict = {
'ref_one': {'$ref': '#/bar'},
'ref_two': {'$ref': '#/bar'},
'bar': 'baz'
}
swagger_spec = Spec(spec_dict)
# Yech! mock doesn't make this easy
mutable = {'cnt': 0}
def callback(container, json_reference, mutable):
# Bump the mutable counter every time bar is de-reffed
if json_reference.endswith('/bar'):
mutable['cnt'] += 1
_post_process_spec(
spec_dict=swagger_spec.spec_dict,
spec_resolver=swagger_spec.resolver,
on_container_callbacks=[
functools.partial(
callback,
mutable=mutable,
),
],
)
assert mutable['cnt'] == 1
|
the-stack_0_5286 | import unittest
from mock import patch, Mock
import pytest
from nose.tools import * # noqa (PEP8 asserts)
from admin.rdm_addons.utils import get_rdm_addon_option
from osf_tests.factories import (
fake_email,
AuthUserFactory,
InstitutionFactory,
ExternalAccountFactory,
UserFactory,
ProjectFactory
)
from addons.dropboxbusiness.models import NodeSettings
from admin_tests.rdm_addons import factories as rdm_addon_factories
pytestmark = pytest.mark.django_db
class DropboxBusinessAccountFactory(ExternalAccountFactory):
provider = 'dropboxbusiness'
FILEACCESS_NAME = 'dropboxbusiness'
MANAGEMENT_NAME = 'dropboxbusiness_manage'
DBXBIZ = 'addons.dropboxbusiness'
class TestDropboxBusiness(unittest.TestCase):
def setUp(self):
super(TestDropboxBusiness, self).setUp()
self.institution = InstitutionFactory()
self.user = UserFactory()
self.user.eppn = fake_email()
self.user.affiliated_institutions.add(self.institution)
self.user.save()
self.f_option = get_rdm_addon_option(self.institution.id,
FILEACCESS_NAME)
self.m_option = get_rdm_addon_option(self.institution.id,
MANAGEMENT_NAME)
f_account = ExternalAccountFactory(provider=FILEACCESS_NAME)
m_account = ExternalAccountFactory(provider=MANAGEMENT_NAME)
self.f_option.external_accounts.add(f_account)
self.m_option.external_accounts.add(m_account)
def _new_project(self):
with patch(DBXBIZ + '.utils.TeamInfo') as mock1, \
patch(DBXBIZ + '.utils.get_current_admin_group_and_sync') as mock2, \
patch(DBXBIZ + '.utils.get_current_admin_dbmid') as mock3, \
patch(DBXBIZ + '.utils.create_team_folder') as mock4:
mock2.return_value = (Mock(), Mock())
mock3.return_value = 'dbmid:dummy'
mock4.return_value = ('dbtid:dummy', 'g:dummy')
self.project = ProjectFactory(creator=self.user)
def _allowed(self):
self.f_option.is_allowed = True
self.f_option.save()
def test_dropboxbusiness_default_is_not_allowed(self):
assert_false(self.f_option.is_allowed)
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_no_eppn(self):
self.user.eppn = None
self.user.save()
self._allowed()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_no_institution(self):
self.user.affiliated_institutions.clear()
self._allowed()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_no_addon_option(self):
self.f_option.delete()
self._allowed()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_equal(result, None)
def test_dropboxbusiness_automount(self):
self.f_option.is_allowed = True
self.f_option.save()
self._new_project()
result = self.project.get_addon('dropboxbusiness')
assert_true(isinstance(result, NodeSettings))
assert_equal(result.admin_dbmid, 'dbmid:dummy')
assert_equal(result.team_folder_id, 'dbtid:dummy')
assert_equal(result.group_id, 'g:dummy')
|
the-stack_0_5288 | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Print information about the current rez context, or a given context file.
'''
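# Illustrative invocations (a hedged sketch; the exact command name depends
# on how rez exposes this subcommand, but the flags are the ones defined in
# setup_parser() below):
#
#   rez context                   # summary of the current resolved context
#   rez context --tools           # list executables available in the context
#   rez context --res --su        # resolved packages, printed as URIs
#   rez context -i -f bash        # interpret the context as shell code
#   rez context --diff other.rxt  # diff the current context against a file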
from __future__ import print_function
# Disable the following:
# - context tracking
# - package caching
#
# Use of rez-context isn't really 'using' the context, so much as inspecting it.
# Since features such as context tracking are related to context use only, we
# disable them in this tool.
#
import os
import json
import sys
os.environ.update({
"REZ_CONTEXT_TRACKING_HOST": '',
"REZ_WRITE_PACKAGE_CACHE": "False"
})
from rez.rex import OutputStyle # noqa
def setup_parser(parser, completions=False):
from rez.system import system
from rez.shells import get_shell_types
formats = get_shell_types() + ['dict', 'table']
if json is not None:
formats.append('json')
output_styles = [e.name for e in OutputStyle]
parser.add_argument(
"--req", "--print-request", dest="print_request",
action="store_true",
help="print only the request list (not including implicits)")
parser.add_argument(
"--res", "--print-resolve", dest="print_resolve",
action="store_true",
help="print only the resolve list. Use with --su to print package URIs")
parser.add_argument(
"--so", "--source-order", dest="source_order", action="store_true",
help="print resolved packages in order they are sorted, rather than "
"alphabetical order")
parser.add_argument(
"--su", "--show-uris", dest="show_uris", action="store_true",
help="list resolved package's URIs, rather than the default 'root' "
"filepath")
parser.add_argument(
"-t", "--tools", action="store_true",
help="print a list of the executables available in the context")
parser.add_argument(
"--which", type=str, metavar="CMD",
help="locate a program within the context")
parser.add_argument(
"-g", "--graph", action="store_true",
help="display the resolve graph as an image")
parser.add_argument(
"-d", "--dependency-graph", action="store_true",
help="display the (simpler) dependency graph. Works in combination "
"with other graph options")
parser.add_argument(
"--pg", "--print-graph", dest="print_graph", action="store_true",
help="print the resolve graph as a string")
parser.add_argument(
"--wg", "--write-graph", dest="write_graph", type=str,
metavar='FILE', help="write the resolve graph to FILE")
parser.add_argument(
"--pp", "--prune-package", dest="prune_pkg", metavar="PKG",
type=str, help="prune the graph down to PKG")
parser.add_argument(
"-i", "--interpret", action="store_true",
help="interpret the context and print the resulting code")
parser.add_argument(
"-f", "--format", type=str, choices=formats, default=system.shell,
help="print interpreted output in the given format. Ignored if "
"--interpret is not present (default: %(default)s). If one of "
"table, dict or json, the environ dict is printed.")
parser.add_argument(
"-s", "--style", type=str, default="file", choices=output_styles,
help="Set code output style. Ignored if --interpret is not present "
"(default: %(default)s)")
parser.add_argument(
"--no-env", dest="no_env", action="store_true",
help="interpret the context in an empty environment")
diff_action = parser.add_argument(
"--diff", type=str, metavar="RXT",
help="diff the current context against the given context")
parser.add_argument(
"--fetch", action="store_true",
help="diff the current context against a re-resolved copy of the "
"current context")
RXT_action = parser.add_argument(
"RXT", type=str, nargs='?',
help="rez context file (current context if not supplied). Use '-' to "
"read the context from stdin")
if completions:
from rez.cli._complete_util import FilesCompleter
rxt_completer = FilesCompleter(dirs=False, file_patterns=["*.rxt"])
RXT_action.completer = rxt_completer
diff_action.completer = rxt_completer
def command(opts, parser, extra_arg_groups=None):
from rez.cli._util import print_items
from rez.status import status
from rez.utils.formatting import columnise, PackageRequest
from rez.resolved_context import ResolvedContext
from rez.utils.graph_utils import save_graph, view_graph, prune_graph
from pprint import pformat
rxt_file = opts.RXT if opts.RXT else status.context_file
if not rxt_file:
print("not in a resolved environment context.", file=sys.stderr)
sys.exit(1)
if rxt_file == '-': # read from stdin
rc = ResolvedContext.read_from_buffer(sys.stdin, 'STDIN')
else:
rc = ResolvedContext.load(rxt_file)
def _graph():
if rc.has_graph:
if opts.dependency_graph:
return rc.get_dependency_graph(as_dot=True)
else:
return rc.graph(as_dot=True)
else:
print("The context does not contain a graph.", file=sys.stderr)
sys.exit(1)
parent_env = {} if opts.no_env else None
if not opts.interpret:
if opts.print_request:
print_items(rc.requested_packages(False))
elif opts.print_resolve:
if opts.show_uris:
print_items(x.uri for x in rc.resolved_packages)
else:
print_items(x.qualified_package_name for x in rc.resolved_packages)
elif opts.tools:
rc.print_tools()
elif opts.diff:
rc_other = ResolvedContext.load(opts.diff)
rc.print_resolve_diff(rc_other, True)
elif opts.fetch:
rc_new = ResolvedContext(rc.requested_packages(),
package_paths=rc.package_paths,
verbosity=opts.verbose)
rc.print_resolve_diff(rc_new, heading=("current", "updated"))
elif opts.which:
cmd = opts.which
path = rc.which(cmd, parent_environ=parent_env)
if path:
print(path)
else:
print("'%s' not found in the context" % cmd, file=sys.stderr)
elif opts.print_graph:
gstr = _graph()
print(gstr)
elif opts.graph or opts.dependency_graph or opts.write_graph:
gstr = _graph()
if opts.prune_pkg:
req = PackageRequest(opts.prune_pkg)
gstr = prune_graph(gstr, req.name)
func = view_graph if (opts.graph or opts.dependency_graph) else save_graph
func(gstr, dest_file=opts.write_graph)
else:
rc.print_info(verbosity=opts.verbose,
source_order=opts.source_order,
show_resolved_uris=opts.show_uris)
return
if opts.format in ("dict", "table", "json"):
env = rc.get_environ(parent_environ=parent_env)
if opts.format == 'table':
rows = [x for x in sorted(env.items())]
print('\n'.join(columnise(rows)))
elif opts.format == 'dict':
print(pformat(env))
else: # json
print(json.dumps(env, sort_keys=True, indent=4))
else:
code = rc.get_shell_code(shell=opts.format,
parent_environ=parent_env,
style=OutputStyle[opts.style])
print(code)
|
the-stack_0_5289 | # Copyright 2010 Chet Luther <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
version = '1.0.4'
setup(
name='snmposter',
version=version,
description="SNMP Agent Simulator",
long_description="""
""",
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
keywords='snmp agent simulator snmpwalk',
author='Chet Luther',
author_email='[email protected]',
url='http://github.com/cluther/snmposter',
license='Apache 2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'Twisted==12.0.0',
#'TwistedSNMP', Not currently installable via PyPI.
#'pysnmp-se', Not currently installable via PyPI.
],
entry_points={
'console_scripts': [
'snmposter = snmposter.scripts:launcher',
]
},
)
|
the-stack_0_5291 | # This is how we set compiler options globally - we define them here, import
# this variable in every build file that has C code, and then set copts on every
# target manually. This is clumsy, but it's also very simple. There will
# probably be an easier way to do this in the future.
#
# See Bazel discussion: https://github.com/bazelbuild/bazel/issues/5198
COPTS_BASE = [
# Ubuntu 18 LTS uses GCC 7.4, but c17 is not supported until GCC 8.
"-std=c11",
# "-D_DEFAULT_SOURCE",
]
_CWARN = [
"-Wall",
"-Wextra",
"-Wpointer-arith",
"-Wwrite-strings",
"-Wmissing-prototypes",
"-Wdouble-promotion",
"-Werror=implicit-function-declaration",
"-Winit-self",
"-Wstrict-prototypes",
]
COPTS = (
COPTS_BASE +
select({
"//tools:warnings_off": [],
"//tools:warnings_on": _CWARN,
"//tools:warnings_error": _CWARN + ["-Werror"],
})
)
CXXOPTS_BASE = [
"-std=c++17",
]
_CXXWARN = [
"-Wall",
"-Wextra",
]
CXXOPTS = (
CXXOPTS_BASE +
select({
"//tools:warnings_off": [],
"//tools:warnings_on": _CXXWARN,
"//tools:warnings_error": _CXXWARN + ["-Werror"],
})
)
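# Hedged usage sketch from a consuming BUILD file; the load label and target
# below are assumptions for illustration, not targets defined in this repo:
#
#   load("//tools:copts.bzl", "COPTS", "CXXOPTS")
#
#   cc_library(
#       name = "example",
#       srcs = ["example.c"],
#       copts = COPTS,
#   )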
|
the-stack_0_5293 | import os
import sys
sys.path.insert(0, os.getcwd())
import time
import glob
import numpy as np
import random
import torch
import darts.cnn.utils as utils
import logging
import torch.nn as nn
import darts.cnn.genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from collections import namedtuple
from darts.cnn.model import NetworkCIFAR as Network
class Train:
def __init__(self):
self.data='./data'
self.batch_size= 96
self.learning_rate= 0.025
self.momentum= 0.9
self.weight_decay = 3e-4
self.load_weights = 0
self.report_freq = 500
self.gpu = 0
self.epochs = 50
self.init_channels = 32
self.layers = 8
self.auxiliary = True
self.auxiliary_weight = 0.4
self.cutout = True
self.cutout_length = 16
self.drop_path_prob = 0.2
self.save = 'EXP'
self.seed = 0
self.grad_clip = 5
self.train_portion = 0.9
self.validation_set = True
self.CIFAR_CLASSES = 10
self.count = 1
def main(self, seed, arch, epochs=50, gpu=0, load_weights=False, train_portion=0.9, save='model_search'):
# Set up save file and logging
self.save = save
self.save = '{}'.format(self.save)
if not os.path.exists(self.save):
utils.create_exp_dir(self.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(self.save, 'log-seed{}.txt'.format(seed)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
else:
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(self.save, 'log-seed{}.txt'.format(seed)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
self.arch = arch
self.epochs = epochs
self.load_weights = load_weights
self.gpu = gpu
self.train_portion = train_portion
if self.train_portion == 1:
self.validation_set = False
self.seed = seed
# cpu-gpu switch
if not torch.cuda.is_available():
torch.manual_seed(self.seed)
device = torch.device('cpu')
else:
torch.cuda.manual_seed_all(self.seed)
random.seed(self.seed)
torch.manual_seed(self.seed)
device = torch.device(self.gpu)
cudnn.benchmark = False
cudnn.enabled=True
cudnn.deterministic=True
genotype = arch
logging.info('counter: {} genotypes: {}'.format(self.count, genotype))
model = Network(self.init_channels, self.CIFAR_CLASSES, self.layers, self.auxiliary, genotype)
model = model.to(device)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Model total parameters: {}'.format(total_params))
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
optimizer = torch.optim.SGD(
model.parameters(),
self.learning_rate,
momentum=self.momentum,
weight_decay=self.weight_decay
)
train_transform, test_transform = utils._data_transforms_cifar10(self.cutout, self.cutout_length)
train_data = dset.CIFAR10(root=self.data, train=True, transform=train_transform)
test_data = dset.CIFAR10(root=self.data, train=False, transform=test_transform)
num_train = len(train_data)
indices = list(range(num_train))
if self.validation_set:
split = int(np.floor(self.train_portion * num_train))
else:
split = num_train
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=self.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=4)
if self.validation_set:
valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=self.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=4)
test_queue = torch.utils.data.DataLoader(
test_data, batch_size=self.batch_size, shuffle=False, pin_memory=True, num_workers=4)
if self.load_weights:
logging.info('loading saved weights')
ml = 'cuda:{}'.format(self.gpu) if torch.cuda.is_available() else 'cpu'
model.load_state_dict(torch.load('weights.pt', map_location = ml))
logging.info('loaded saved weights')
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(self.epochs))
valid_accs = []
test_accs = []
for epoch in range(self.epochs):
logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
model.drop_path_prob = self.drop_path_prob * epoch / self.epochs
train_acc, train_obj = self.train(train_queue, model, criterion, optimizer)
if self.validation_set:
valid_acc, valid_obj = self.infer(valid_queue, model, criterion)
else:
valid_acc, valid_obj = 0, 0
test_acc, test_obj = self.infer(test_queue, model, criterion, test_data=True)
logging.info('train_acc: {:.4f}, valid_acc: {:.4f}, test_acc: {:.4f}'.format(train_acc, valid_acc, test_acc))
if epoch in list(range(max(0, epochs - 5), epochs)):
valid_accs.append((epoch, valid_acc))
test_accs.append((epoch, test_acc))
scheduler.step()
self.count += 1
return valid_accs, test_accs
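    # Hedged usage sketch; the genotype below is a placeholder and assumes the
    # standard DARTS genotype definitions are importable from
    # darts.cnn.genotypes:
    #
    #   from darts.cnn.genotypes import DARTS_V2
    #   trainer = Train()
    #   valid_accs, test_accs = trainer.main(seed=0, arch=DARTS_V2, epochs=50,
    #                                        gpu=0, save='model_search')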
def train(self, train_queue, model, criterion, optimizer):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.train()
for step, (input, target) in enumerate(train_queue):
device = torch.device('cuda:{}'.format(self.gpu) if torch.cuda.is_available() else 'cpu')
input = input.to(device)
target = target.to(device)
optimizer.zero_grad()
logits, logits_aux = model(input)
loss = criterion(logits, target)
if self.auxiliary:
loss_aux = criterion(logits_aux, target)
loss += self.auxiliary_weight*loss_aux
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
return top1.avg, objs.avg
def infer(self, valid_queue, model, criterion, test_data=False):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
device = torch.device('cuda:{}'.format(self.gpu) if torch.cuda.is_available() else 'cpu')
for step, (input, target) in enumerate(valid_queue):
with torch.no_grad():
input = input.to(device)
target = target.to(device)
logits, _ = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
return top1.avg, objs.avg |
the-stack_0_5294 | # -*- coding: utf-8 -*-
"""Utility methods for list objects.
AUTHORS:
- Thomas McTavish
"""
# While this software is under the permissive MIT License,
# (http://www.opensource.org/licenses/mit-license.php)
# We ask that you cite the neuronpy package (or tools used in this package)
# in any publications and contact the author with your referenced publication.
#
# Format:
# McTavish, T.S. NeuronPy library, version 0.1, http://bitbucket.org/tommctavish/neuronpy
#
# Copyright (c) 2010 Thomas S. McTavish
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class ListEmptyError(Exception):
"""Alert when a list is empty"""
def __init__(self, list_name=None):
self.list_name = list_name
def __str__(self):
the_list_name=''
if self.list_name is not None:
the_list_name = '\''+self.list_name+'\''
errstr='List %s is empty.'%the_list_name
return repr(errstr)
def nonempty_copy(list):
"""
Makes a copy of the list and then removes any empty elements.
"""
list_copy=list[:]
remove_empties(list_copy)
return list_copy
def remove_empties(list):
"""
Removes any empty elements from the list.
"""
contains_empties = True
while contains_empties is True:
# We may have to re-loop if there are adjacent empty elements.
contains_empties = False
for i in iter(list):
            if len(i) == 0:
contains_empties=True
list.remove(i)
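# A hedged behavioural example (not part of the original module); see also
# flatten_from_2d() defined below:
#
#   >>> data = [[1, 2], [], [3]]
#   >>> remove_empties(data)          # mutates the list in place
#   >>> data
#   [[1, 2], [3]]
#   >>> flatten_from_2d([[1, 2], [], [3]])
#   [1, 2, 3]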
def flatten_from_2d(list_of_lists):
"""
Returns a 1d, flattened version of a 2d array or list of lists.
This also removes any empty elements.
"""
vec = []
for row in iter(list_of_lists):
vec.extend(row)
return vec |
the-stack_0_5296 | # Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>,
# Apoorv Vyas <[email protected]>
#
# Modifications Copyright (c) 2021 Kazuki Irie
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load
# Just in time import
# https://pytorch.org/tutorials/advanced/cpp_extens
# The extra arg `extra_cuda_cflags=['--ftemplate-depth=1024']` solves:
# ```
# pybind11/detail/common.h(461):
# error: excessive recursion at instantiation of class
# ```
mod_causal_dot_product_cuda = load(
extra_cuda_cflags=['--ftemplate-depth=1024'],
name="fast_lstm_v3_forward",
sources=["utils/fast_lstm_v3/fast_lstm_v3_cuda.cu"], verbose=True)
mod_causal_dot_backward_cuda = load(
extra_cuda_cflags=['--ftemplate-depth=1024'],
name="fast_lstm_v3_backward",
sources=["utils/fast_lstm_v3/fast_lstm_v3_cuda.cu"], verbose=True)
causal_dot_product_cuda = mod_causal_dot_product_cuda.fast_lstm_v3_forward
causal_dot_backward_cuda = mod_causal_dot_backward_cuda.fast_lstm_v3_backward
class FastLSTMv3(torch.autograd.Function):
"""Fast LSTM with the FWM update rule."""
dot = {
# "cpu": causal_dot_product_cpu,
"cuda": causal_dot_product_cuda
}
dot_backward = {
# "cpu": causal_dot_backward_cpu,
"cuda": causal_dot_backward_cuda
}
@staticmethod
def forward(ctx,
Zi, Ki, Vi, bi, Wi,
Zu, Ku, Vu, bu, Wu,
Zo, Ko, Vo, bo, Wo, h0, c0):
# Computations:
# fast weights with sum update rule: R_t = R_t-1 + v_t (x) k_t
# output: h_t = tanh(R_t * h_t-1 + z_t)
# z_t is the output of a feed-forward fast weight layer.
# h0 is the initial RNN state.
# E = M.
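        # Hedged per-step sketch of what the kernel computes for each gate
        # stream g in {i, u, o} (mirroring the pure-PyTorch reference loop in
        # __main__ below; the sum-update/tanh comment above appears to
        # describe an earlier variant):
        #
        #   v_old = W_g @ k_g                              # retrieve old value
        #   W_g   = W_g + beta_g * (v_g - v_old) (x) k_g   # delta-rule update
        #   g_t   = sigmoid(W_g @ softmax(h_{t-1}) + z_g)
        #   c_t   = i_t * u_t + (1 - i_t) * c_{t-1}
        #   h_t   = o_t * c_t + z_u_t                      # skip connection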
# Create the output tensor
device = Zi.device
N, H, L, _ = Zi.shape
_, _, _, M = Vi.shape
assert Ki.shape == (N, H, L, M)
assert Vi.shape == (N, H, L, M)
assert h0.shape == (N, H, 1, M)
assert Wi.shape == (N, H, M, M)
rnn_out = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
cell_out = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
gate_i = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
update_u = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
gate_o = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
out_del_nmz = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
h_init = h0.detach().clone()
c_init = c0.detach().clone()
V_old_i = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
V_old_u = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
V_old_o = torch.zeros((N, H, L, M), device=device, dtype=Zi.dtype)
# W = torch.zeros((N, H, E, M), device=device, dtype=Z.dtype)
# h0 = torch.zeros((N, H, M), device=device, dtype=Z.dtype)
# Actually perform the dot product
FastLSTMv3.dot[device.type](
Zi.data, # input gate
Ki.data,
Vi.data,
bi.data,
Zu.data, # update candidate
Ku.data,
Vu.data,
bu.data,
Zo.data, # ouput gate
Ko.data,
Vo.data,
bo.data,
h0.data, # init hidden states
c0.data, # init cell states
Wi,
Wu,
Wo,
rnn_out,
out_del_nmz,
cell_out,
gate_i,
update_u,
gate_o,
V_old_i,
V_old_u,
V_old_o
)
ctx.save_for_backward(rnn_out, out_del_nmz,
cell_out, gate_i, update_u, gate_o,
Zi, Ki, Vi, Wi, bi,
Zu, Ku, Vu, Wu, bu,
Zo, Ko, Vo, Wo, bo,
V_old_i, V_old_u, V_old_o,
h_init, c_init)
return rnn_out, cell_out
@staticmethod
def backward(ctx, grad_out, grad_cell):
# Extract the saved tensors
(rnn_out, rnn_out_delayed, cell_out, gate_i, update_u, gate_o,
Zi, Ki, Vi, Wi, bi,
Zu, Ku, Vu, Wu, bu,
Zo, Ko, Vo, Wo, bo,
V_old_i, V_old_u, V_old_o,
h0, c0) = ctx.saved_tensors
# Allocate memory for the gradients
grad_Zi = torch.zeros_like(Zi)
grad_Ki = torch.zeros_like(Ki)
grad_Vi = torch.zeros_like(Vi)
grad_bi = torch.zeros_like(bi)
grad_Zu = torch.zeros_like(Zu)
grad_Ku = torch.zeros_like(Ku)
grad_Vu = torch.zeros_like(Vu)
grad_bu = torch.zeros_like(bu)
grad_Zo = torch.zeros_like(Zo)
grad_Ko = torch.zeros_like(Ko)
grad_Vo = torch.zeros_like(Vo)
grad_bo = torch.zeros_like(bo)
# Prepare delayed RNN outputs
# shape of rnn_out: N, H, L, M
# dim2 is the time dim.
# shape of h0: N, H, 1, M
# rnn_out_delayed = torch.cat([h0, rnn_out[:, :, :-1]], dim=2)
c_out_delayed = torch.cat([c0, cell_out[:, :, :-1]], dim=2)
# In the backward pass, we need u_t - cell_{t-1} and not delayed cell.
u_minus_c = update_u - c_out_delayed
# Compute the gradients
FastLSTMv3.dot_backward[Zi.device.type](
grad_out,
Ki.data,
Vi.data,
bi.data,
Ku.data,
Vu.data,
bu.data,
Ko.data,
Vo.data,
bo.data,
V_old_i.data,
V_old_u.data,
V_old_o.data,
rnn_out,
rnn_out_delayed,
cell_out,
u_minus_c,
gate_i,
update_u,
gate_o,
Wi.data,
Wu.data,
Wo.data,
grad_Zi,
grad_Ki,
grad_Vi,
grad_bi,
grad_Zu,
grad_Ku,
grad_Vu,
grad_bu,
grad_Zo,
grad_Ko,
grad_Vo,
grad_bo,
)
return (grad_Zi, grad_Ki, grad_Vi, grad_bi, None,
grad_Zu, grad_Ku, grad_Vu, grad_bu, None,
grad_Zo, grad_Ko, grad_Vo, grad_bo, None,
None, None)
# Alias the autograd functions to python style snake case naming
fast_lstm_v3 = FastLSTMv3.apply
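# Hedged shape reminder for callers (taken from the asserts in forward() and
# the test below): per-gate tensors z/k/v are (batch, heads, seq_len, dim),
# betas are (batch, heads, seq_len, 1), fast weights are
# (batch, heads, dim, dim), and h0/c0 are (batch, heads, 1, dim). A call
# looks like:
#
#   rnn_out, cell_out = fast_lstm_v3(z_i, k_i, v_i, b_i, W_i,
#                                    z_u, k_u, v_u, b_u, W_u,
#                                    z_o, k_o, v_o, b_o, W_o,
#                                    h0, c0)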
if __name__ == '__main__':
import torch
torch.manual_seed(111)
# Tests pass if the relative difference compared with
# the corresponding torch autograd computation
# is smaller than a threshold.
# Ideally should be tested with double...
rel_threshold = 1e-2
# from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
def max_relative_error(a, b, eps=1e-8):
return float(torch.abs((b - a) / (torch.abs(b) + eps)).max().item())
print('##########################')
print('# Test forward pass')
print('##########################')
bsz, n_head, slen, d_head = 3, 5, 11, 18
v_dim = d_head
h0 = torch.zeros(bsz, n_head, 1, v_dim, device='cuda')
c0 = torch.zeros(bsz, n_head, 1, v_dim, device='cuda')
# (B, H, len, dim)
k0i = torch.rand(bsz, n_head, slen, d_head, device='cuda')
v0i = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
z0i = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
b0i = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
k0u = torch.rand(bsz, n_head, slen, d_head, device='cuda')
v0u = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
z0u = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
b0u = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
k0o = torch.rand(bsz, n_head, slen, d_head, device='cuda')
v0o = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
z0o = torch.rand(bsz, n_head, slen, v_dim, device='cuda')
b0o = torch.sigmoid(torch.rand(bsz, n_head, slen, 1, device='cuda'))
# key sum norm
k0i = F.softmax(k0i, dim=-1)
k0u = F.softmax(k0u, dim=-1)
k0o = F.softmax(k0o, dim=-1)
k1i = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v1i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z1i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b1i = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# update candidate
k1u = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v1u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z1u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b1u = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# output gate
k1o = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v1o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z1o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b1o = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# q1.data = q0.data
k1i.data = k0i.data
v1i.data = v0i.data
b1i.data = b0i.data
z1i.data = z0i.data
k1u.data = k0u.data
v1u.data = v0u.data
b1u.data = b0u.data
z1u.data = z0u.data
k1o.data = k0o.data
v1o.data = v0o.data
b1o.data = b0o.data
z1o.data = z0o.data
W1i = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')
W1u = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')
W1o = torch.zeros(bsz, n_head, d_head, v_dim, device='cuda')
# h0 = torch.zeros(n_head, d_head, v_dim, device='cuda')
print("Forwarding custom kernel...")
out1, _ = fast_lstm_v3(z1i, k1i, v1i, b1i, W1i,
z1u, k1u, v1u, b1u, W1u,
z1o, k1o, v1o, b1o, W1o,
h0, c0)
print("done.")
# compute using torch
k2i = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v2i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z2i = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b2i = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# update candidate
k2u = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v2u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z2u = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b2u = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# output gate
k2o = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
v2o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
z2o = torch.zeros(
bsz, n_head, slen, v_dim, requires_grad=True, device='cuda')
b2o = torch.zeros(
bsz, n_head, slen, 1, requires_grad=True, device='cuda')
# q1.data = q0.data
k2i.data = k0i.data
v2i.data = v0i.data
b2i.data = b0i.data
z2i.data = z0i.data
k2u.data = k0u.data
v2u.data = v0u.data
b2u.data = b0u.data
z2u.data = z0u.data
k2o.data = k0o.data
v2o.data = v0o.data
b2o.data = b0o.data
z2o.data = z0o.data
# (len, B, H, dim)
# input
z_2i = z2i.permute(2, 0, 1, 3)
slen, bsz, n_head, d_head = z_2i.shape
z_2i = z_2i.reshape(slen, bsz * n_head, d_head)
k_2i = k2i.permute(2, 0, 1, 3)
k_2i = k_2i.reshape(slen, bsz * n_head, d_head)
v_2i = v2i.permute(2, 0, 1, 3)
v_2i = v_2i.reshape(slen, bsz * n_head, v_dim)
b_2i = b2i.permute(2, 0, 1, 3)
b_2i = b_2i.reshape(slen, bsz * n_head, 1)
# update
z_2u = z2u.permute(2, 0, 1, 3)
z_2u = z_2u.reshape(slen, bsz * n_head, d_head)
k_2u = k2u.permute(2, 0, 1, 3)
k_2u = k_2u.reshape(slen, bsz * n_head, d_head)
v_2u = v2u.permute(2, 0, 1, 3)
v_2u = v_2u.reshape(slen, bsz * n_head, v_dim)
b_2u = b2u.permute(2, 0, 1, 3)
b_2u = b_2u.reshape(slen, bsz * n_head, 1)
# output gate
z_2o = z2o.permute(2, 0, 1, 3)
z_2o = z_2o.reshape(slen, bsz * n_head, d_head)
k_2o = k2o.permute(2, 0, 1, 3)
k_2o = k_2o.reshape(slen, bsz * n_head, d_head)
v_2o = v2o.permute(2, 0, 1, 3)
v_2o = v_2o.reshape(slen, bsz * n_head, v_dim)
b_2o = b2o.permute(2, 0, 1, 3)
b_2o = b_2o.reshape(slen, bsz * n_head, 1)
Wi = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
Wu = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
Wo = torch.zeros(bsz * n_head, v_dim, d_head, device='cuda')
h = torch.zeros(bsz * n_head, d_head, device='cuda')
cell = torch.zeros(bsz * n_head, d_head, device='cuda')
out_list = []
print("Forwarding PyTorch code...")
for pos in range(slen):
# get old values
v_old_i = torch.bmm(Wi, k_2i[pos].unsqueeze(2)).squeeze()
v_old_u = torch.bmm(Wu, k_2u[pos].unsqueeze(2)).squeeze()
v_old_o = torch.bmm(Wo, k_2o[pos].unsqueeze(2)).squeeze()
v_insert_i = b_2i[pos] * (v_2i[pos] - v_old_i)
v_insert_u = b_2u[pos] * (v_2u[pos] - v_old_u)
v_insert_o = b_2o[pos] * (v_2o[pos] - v_old_o)
# update fast weights
Wi = Wi + torch.bmm(v_insert_i.unsqueeze(2), k_2i[pos].unsqueeze(1))
Wu = Wu + torch.bmm(v_insert_u.unsqueeze(2), k_2u[pos].unsqueeze(1))
Wo = Wo + torch.bmm(v_insert_o.unsqueeze(2), k_2o[pos].unsqueeze(1))
h = F.softmax(h, dim=-1)
gate_i = torch.sigmoid(
torch.bmm(Wi, h.unsqueeze(2)).squeeze() + z_2i[pos])
rec_u = torch.sigmoid(
torch.bmm(Wu, h.unsqueeze(2)).squeeze() + z_2u[pos])
gate_o = torch.sigmoid(
torch.bmm(Wo, h.unsqueeze(2)).squeeze() + z_2o[pos])
cell = gate_i * rec_u + (1. - gate_i) * cell
# modified LSTM for FWM update rule.
# skip connection
# h = cell * gate_o + z_2u[pos]
h = cell * gate_o + z_2u[pos]
out_list.append(h.clone())
print("done.")
out2 = torch.stack(out_list)
out2 = out2.view(slen, bsz, n_head, v_dim)
out1 = out1.permute(2, 0, 1, 3)
for s in range(slen):
for b in range(bsz):
for h in range(n_head):
print(f"forward: s={s} b={b} h={h}")
print(f"out: {out1[s][b][h]}")
print(f"ref: {out2[s][b][h]}")
assert max_relative_error(
out1[s][b][h], out2[s][b][h]) < rel_threshold
print("pass!")
print('##########################')
print('# Test Backward pass')
print('##########################')
# grad
loss1 = out1.sum()
z1i.retain_grad()
k1i.retain_grad()
v1i.retain_grad()
b1i.retain_grad()
z1u.retain_grad()
k1u.retain_grad()
v1u.retain_grad()
b1u.retain_grad()
z1o.retain_grad()
k1o.retain_grad()
v1o.retain_grad()
b1o.retain_grad()
loss1.backward()
loss2 = out2.sum()
z2i.retain_grad()
k2i.retain_grad()
v2i.retain_grad()
b2i.retain_grad()
z2u.retain_grad()
k2u.retain_grad()
v2u.retain_grad()
b2u.retain_grad()
z2o.retain_grad()
k2o.retain_grad()
v2o.retain_grad()
b2o.retain_grad()
loss2.backward()
thr = 1e-6
for s in reversed(range(slen)):
for b in reversed(range(bsz)):
for h in range(n_head):
print(f" === backward: s={s}, b={b}, h={h} ===")
# Output gate
print("Output gate ---")
print(f"grad input out: {z1o.grad[b][h][s]}")
print(f"grad input ref: {z2o.grad[b][h][s]}")
assert max_relative_error(
z1o.grad[b][h][s], z2o.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad key out: {k1o.grad[b][h][s]}")
print(f"grad key ref: {k2o.grad[b][h][s]}")
assert max_relative_error(
k1o.grad[b][h][s], k2o.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad beta out: {b1o.grad[b][h][s]}")
print(f"grad beta ref: {b2o.grad[b][h][s]}")
assert max_relative_error(
b1o.grad[b][h][s], b2o.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad value out: {v1o.grad[b][h][s]}")
print(f"grad value ref: {v2o.grad[b][h][s]}")
assert max_relative_error(
v1o.grad[b][h][s], v2o.grad[b][h][s]) < rel_threshold
print("pass!")
# Update term
print("Update candidate ---")
print(f"grad input out: {z1u.grad[b][h][s]}")
print(f"grad input ref: {z2u.grad[b][h][s]}")
assert max_relative_error(
z1u.grad[b][h][s], z2u.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad key out: {k1u.grad[b][h][s]}")
print(f"grad key ref: {k2u.grad[b][h][s]}")
assert max_relative_error(
k1u.grad[b][h][s], k2u.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad value out: {v1u.grad[b][h][s]}")
print(f"grad value ref: {v2u.grad[b][h][s]}")
assert max_relative_error(
v1u.grad[b][h][s], v2u.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad beta out: {b1u.grad[b][h][s]}")
print(f"grad beta ref: {b2u.grad[b][h][s]}")
assert max_relative_error(
b1u.grad[b][h][s], b2u.grad[b][h][s]) < rel_threshold
print("pass!")
# Input gate
print("Input gate ---")
print(f"grad input out: {z1i.grad[b][h][s]}")
print(f"grad input ref: {z2i.grad[b][h][s]}")
assert max_relative_error(
z1i.grad[b][h][s], z2i.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad key out: {k1i.grad[b][h][s]}")
print(f"grad key ref: {k2i.grad[b][h][s]}")
assert max_relative_error(
k1i.grad[b][h][s], k2i.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad value out: {v1i.grad[b][h][s]}")
print(f"grad value ref: {v2i.grad[b][h][s]}")
assert max_relative_error(
v1i.grad[b][h][s], v2i.grad[b][h][s]) < rel_threshold
print("pass!")
print(f"grad beta out: {b1i.grad[b][h][s]}")
print(f"grad beta ref: {b2i.grad[b][h][s]}")
assert max_relative_error(
b1i.grad[b][h][s], b2i.grad[b][h][s]) < rel_threshold
print("pass!")
print("All tests pass.")
|
the-stack_0_5297 | # Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
import cinder.volume.drivers.netapp.common as na_common
import cinder.volume.drivers.netapp.dataontap.fc_cmode as fc_cmode
import cinder.volume.drivers.netapp.utils as na_utils
class NetAppDriverFactoryTestCase(test.TestCase):
def setUp(self):
super(NetAppDriverFactoryTestCase, self).setUp()
self.mock_object(na_common, 'LOG')
def test_new(self):
self.mock_object(na_utils.OpenStackInfo, 'info',
return_value='fake_info')
mock_create_driver = self.mock_object(na_common.NetAppDriver,
'create_driver')
config = na_fakes.create_configuration()
config.netapp_storage_family = 'fake_family'
config.netapp_storage_protocol = 'fake_protocol'
kwargs = {'configuration': config}
na_common.NetAppDriver(**kwargs)
kwargs['app_version'] = 'fake_info'
mock_create_driver.assert_called_with('fake_family', 'fake_protocol',
*(), **kwargs)
def test_new_missing_config(self):
self.mock_object(na_utils.OpenStackInfo, 'info')
self.mock_object(na_common.NetAppDriver, 'create_driver')
self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **{})
def test_new_missing_family(self):
self.mock_object(na_utils.OpenStackInfo, 'info')
self.mock_object(na_common.NetAppDriver, 'create_driver')
config = na_fakes.create_configuration()
config.netapp_storage_protocol = 'fake_protocol'
config.netapp_storage_family = None
kwargs = {'configuration': config}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver,
**kwargs)
def test_new_missing_protocol(self):
self.mock_object(na_utils.OpenStackInfo, 'info')
self.mock_object(na_common.NetAppDriver, 'create_driver')
config = na_fakes.create_configuration()
config.netapp_storage_family = 'fake_family'
kwargs = {'configuration': config}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver,
**kwargs)
def test_create_driver(self):
def get_full_class_name(obj):
return obj.__module__ + '.' + obj.__class__.__name__
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY
for family in registry:
for protocol, full_class_name in registry[family].items():
driver = na_common.NetAppDriver.create_driver(
family, protocol, **kwargs)
self.assertEqual(full_class_name, get_full_class_name(driver))
def test_create_driver_case_insensitive(self):
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
driver = na_common.NetAppDriver.create_driver('ONTAP_CLUSTER', 'FC',
**kwargs)
self.assertIsInstance(driver, fc_cmode.NetAppCmodeFibreChannelDriver)
def test_create_driver_invalid_family(self):
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver.create_driver,
'kardashian', 'iscsi', **kwargs)
def test_create_driver_invalid_protocol(self):
kwargs = {
'configuration': na_fakes.create_configuration(),
'app_version': 'fake_info',
'host': 'fakehost@fakebackend',
}
self.assertRaises(exception.InvalidInput,
na_common.NetAppDriver.create_driver,
'ontap_7mode', 'carrier_pigeon', **kwargs)
|
the-stack_0_5300 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Harmonic calculations for frequency representations'''
import numpy as np
import scipy.interpolate
import scipy.signal
from ..util.exceptions import ParameterError
__all__ = ['salience', 'interp_harmonics']
def salience(S, freqs, h_range, weights=None, aggregate=None,
filter_peaks=True, fill_value=np.nan, kind='linear', axis=0):
"""Harmonic salience function.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input time frequency magnitude representation (stft, ifgram, etc).
Must be real-valued and non-negative.
freqs : np.ndarray, shape=(S.shape[axis])
The frequency values corresponding to S's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to include in salience computation. The first harmonic (1)
corresponds to `S` itself. Values less than one (e.g., 1/2) correspond
to sub-harmonics.
weights : list-like
The weight to apply to each harmonic in the summation. (default:
uniform weights). Must be the same length as `harmonics`.
aggregate : function
aggregation function (default: `np.average`)
If `aggregate=np.average`, then a weighted average is
computed per-harmonic according to the specified weights.
For all other aggregation functions, all harmonics
are treated equally.
filter_peaks : bool
If true, returns harmonic summation only on frequencies of peak
magnitude. Otherwise returns harmonic summation over the full spectrum.
Defaults to True.
fill_value : float
The value to fill non-peaks in the output representation. (default:
np.nan) Only used if `filter_peaks == True`.
kind : str
Interpolation type for harmonic estimation.
See `scipy.interpolate.interp1d`.
axis : int
The axis along which to compute harmonics
Returns
-------
    S_sal : np.ndarray, shape=S.shape
        `S_sal` will have the same shape as `S`, and measure
        the overall harmonic energy at each frequency.
See Also
--------
interp_harmonics
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> S = np.abs(librosa.stft(y))
>>> freqs = librosa.core.fft_frequencies(sr)
>>> harms = [1, 2, 3, 4]
>>> weights = [1.0, 0.5, 0.33, 0.25]
>>> S_sal = librosa.salience(S, freqs, harms, weights, fill_value=0)
>>> print(S_sal.shape)
(1025, 646)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(S_sal,
... ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Salience spectrogram')
>>> plt.tight_layout()
"""
if aggregate is None:
aggregate = np.average
if weights is None:
weights = np.ones((len(h_range), ))
else:
weights = np.array(weights, dtype=float)
S_harm = interp_harmonics(S, freqs, h_range, kind=kind, axis=axis)
if aggregate is np.average:
S_sal = aggregate(S_harm, axis=0, weights=weights)
else:
S_sal = aggregate(S_harm, axis=0)
if filter_peaks:
S_peaks = scipy.signal.argrelmax(S, axis=0)
S_out = np.empty(S.shape)
S_out.fill(fill_value)
S_out[S_peaks[0], S_peaks[1]] = S_sal[S_peaks[0], S_peaks[1]]
S_sal = S_out
return S_sal
def interp_harmonics(x, freqs, h_range, kind='linear', fill_value=0, axis=0):
'''Compute the energy at harmonics of time-frequency representation.
Given a frequency-based energy representation such as a spectrogram
or tempogram, this function computes the energy at the chosen harmonics
of the frequency axis. (See examples below.)
The resulting harmonic array can then be used as input to a salience
computation.
Parameters
----------
x : np.ndarray
The input energy
freqs : np.ndarray, shape=(X.shape[axis])
The frequency values corresponding to X's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
Returns
-------
x_harm : np.ndarray, shape=(len(h_range), [x.shape])
`x_harm[i]` will have the same shape as `x`, and measure
the energy at the `h_range[i]` harmonic of each frequency.
See Also
--------
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr)
>>> plt.yticks(0.5 + np.arange(len(h_range)),
... ['{:.3g}'.format(_) for _ in h_range])
>>> plt.ylabel('Harmonic')
>>> plt.xlabel('Tempo (BPM)')
>>> plt.tight_layout()
We can also compute frequency harmonics for spectrograms.
To calculate sub-harmonic energy, use values < 1.
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> plt.figure()
>>> for i, _sh in enumerate(S_harm, 1):
... plt.subplot(3, 2, i)
... librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log')
... plt.title('h={:.3g}'.format(h_range[i-1]))
... plt.yticks([])
>>> plt.tight_layout()
'''
# X_out will be the same shape as X, plus a leading
# axis that has length = len(h_range)
out_shape = [len(h_range)]
out_shape.extend(x.shape)
x_out = np.zeros(out_shape, dtype=x.dtype)
if freqs.ndim == 1 and len(freqs) == x.shape[axis]:
harmonics_1d(x_out, x, freqs, h_range,
kind=kind, fill_value=fill_value,
axis=axis)
elif freqs.ndim == 2 and freqs.shape == x.shape:
harmonics_2d(x_out, x, freqs, h_range,
kind=kind, fill_value=fill_value,
axis=axis)
else:
raise ParameterError('freqs.shape={} does not match '
'input shape={}'.format(freqs.shape, x.shape))
return x_out
def harmonics_1d(harmonic_out, x, freqs, h_range, kind='linear',
fill_value=0, axis=0):
'''Populate a harmonic tensor from a time-frequency representation.
Parameters
----------
harmonic_out : np.ndarray, shape=(len(h_range), X.shape)
The output array to store harmonics
X : np.ndarray
The input energy
freqs : np.ndarray, shape=(x.shape[axis])
The frequency values corresponding to x's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
harmonics
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr)
>>> plt.yticks(0.5 + np.arange(len(h_range)),
... ['{:.3g}'.format(_) for _ in h_range])
>>> plt.ylabel('Harmonic')
>>> plt.xlabel('Tempo (BPM)')
>>> plt.tight_layout()
We can also compute frequency harmonics for spectrograms.
To calculate subharmonic energy, use values < 1.
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> plt.figure()
>>> for i, _sh in enumerate(S_harm, 1):
... plt.subplot(3,2,i)
... librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log')
... plt.title('h={:.3g}'.format(h_range[i-1]))
... plt.yticks([])
>>> plt.tight_layout()
'''
# Note: this only works for fixed-grid, 1d interpolation
f_interp = scipy.interpolate.interp1d(freqs, x,
kind=kind,
axis=axis,
copy=False,
bounds_error=False,
fill_value=fill_value)
idx_out = [slice(None)] * harmonic_out.ndim
# Compute the output index of the interpolated values
interp_axis = 1 + (axis % x.ndim)
# Iterate over the harmonics range
for h_index, harmonic in enumerate(h_range):
idx_out[0] = h_index
# Iterate over frequencies
for f_index, frequency in enumerate(freqs):
# Offset the output axis by 1 to account for the harmonic index
idx_out[interp_axis] = f_index
# Estimate the harmonic energy at this frequency across time
harmonic_out[tuple(idx_out)] = f_interp(harmonic * frequency)
def harmonics_2d(harmonic_out, x, freqs, h_range, kind='linear', fill_value=0,
axis=0):
'''Populate a harmonic tensor from a time-frequency representation with
time-varying frequencies.
Parameters
----------
harmonic_out : np.ndarray
The output array to store harmonics
x : np.ndarray
The input energy
freqs : np.ndarray, shape=x.shape
The frequency values corresponding to each element of `x`
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself. Values less than one (e.g., 1/2) correspond to
sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
harmonics
harmonics_1d
'''
idx_in = [slice(None)] * x.ndim
idx_freq = [slice(None)] * x.ndim
idx_out = [slice(None)] * harmonic_out.ndim
# This is the non-interpolation axis
ni_axis = (1 + axis) % x.ndim
# For each value in the non-interpolated axis, compute its harmonics
for i in range(x.shape[ni_axis]):
idx_in[ni_axis] = slice(i, i + 1)
idx_freq[ni_axis] = i
idx_out[1 + ni_axis] = idx_in[ni_axis]
harmonics_1d(harmonic_out[idx_out], x[idx_in], freqs[idx_freq],
h_range, kind=kind, fill_value=fill_value,
axis=axis)
|
the-stack_0_5302 | import logging
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection import SelectKBest, chi2
import numpy as np
from sklearn.feature_selection.univariate_selection import _clean_nans
from discoutils.thesaurus_loader import Vectors
from eval.utils.misc import calculate_log_odds, update_dict_according_to_mask
__author__ = 'mmb28'
class VectorBackedSelectKBest(SelectKBest):
"""
    An extension of sklearn's SelectKBest, which also contains a VectorStore. Feature selection is done
    in two optional steps:
    1: Remove all features that are not contained in the vector store
    2: Remove any remaining low-scoring features to ensure a maximum of k features are left after fitting
    Additionally, this class stores a vocabulary (like a vectorizer), which maps features to their corresponding columns
in the feature vector matrix. This is so that a FeatureVectorsCsvDumper can be placed after this object in a
pipeline.
Also, this object assumes its input is not a matrix X (as in SelectKBest), but a tuple (X, vocabulary). The
vocabulary is provided by ThesaurusVectorizer, which comes before this object in a pipeline and represents the
mapping of features to columns in X before any feature selection is done.
"""
def __init__(self, score_func=chi2, k='all', must_be_in_thesaurus=False, min_log_odds_score=0, **kwargs):
"""
:param min_log_odds_score: any feature with a log odds score between -min_log_odds_score and
min_log_odds_score will be removed. Assumes the classification problem is binary.
"""
if not score_func:
score_func = chi2
self.k = k
self.must_be_in_thesaurus = must_be_in_thesaurus
self.min_log_odds_score = min_log_odds_score
self.vocabulary_ = None
super(VectorBackedSelectKBest, self).__init__(score_func=score_func, k=k)
def fit(self, X, y, vector_source=None, clusters=None, **kwargs):
if vector_source is None and clusters is None and self.must_be_in_thesaurus:
logging.error('You requested feature selection based on vector presence '
'but did not provide a vector source.')
raise ValueError('sector source (vectors or clusters) required with must_be_in_thesaurus')
if self.must_be_in_thesaurus:
self.vector_source = vector_source if vector_source else set(clusters.index)
# Vectorizer also returns its vocabulary, store it and work with the rest
X, self.vocabulary_ = X
if self.k == 'all' or int(self.k) >= X.shape[1]:
# do not bother calculating feature informativeness if all features will be used anyway
self.scores_ = np.ones((X.shape[1],))
else:
super(VectorBackedSelectKBest, self).fit(X, y)
self.vectors_mask = self._zero_score_of_oot_feats() \
if self.must_be_in_thesaurus else np.ones(X.shape[1], dtype=bool)
self.log_odds_mask = self._zero_score_of_low_log_odds_features(X, y) \
            if self.min_log_odds_score > 0 else np.ones(X.shape[1], dtype=bool)
return self
def transform(self, X):
# Vectorizer also returns its vocabulary, remove it
if self.vocabulary_:
return super(VectorBackedSelectKBest, self).transform(X[0]), self.vocabulary_
else:
            # Sometimes the training set contains no features. We don't want this to break the experiment,
            # so let it slide
logging.error('Empty vocabulary')
return X[0], self.vocabulary_
def _zero_score_of_oot_feats(self):
mask = np.ones(self.scores_.shape, dtype=bool)
for feature, index in self.vocabulary_.items():
if feature not in self.vector_source:
mask[index] = False
if np.count_nonzero(mask) == 0:
logging.error('Feature selector removed all features')
raise ValueError('Empty vocabulary')
return mask
def _zero_score_of_low_log_odds_features(self, X, y):
if self.min_log_odds_score <= 0:
# we don't want to use log odds score, return an all-true mask
            return np.ones(X.shape[1], dtype=bool)
if len(set(y)) != 2:
raise ValueError('Calculating a log odds score requires a binary classification task')
log_odds = calculate_log_odds(X, y)
return (log_odds > self.min_log_odds_score) | (log_odds < -self.min_log_odds_score)
def _get_support_mask(self):
k = self.k
chi2_scores = self.scores_
chi2_mask = np.ones(chi2_scores.shape, dtype=bool)
if k != 'all' and k < len(chi2_scores):
# we don't want all features to be kept, and the number we want is less than the number available
chi2_scores = _clean_nans(chi2_scores)
            # keep only the k highest-scoring features: mask out everything below the top k
            selected_indices = np.argsort(chi2_scores)[:-k]
chi2_mask[selected_indices] = False
mask = chi2_mask & self.vectors_mask & self.log_odds_mask
logging.info('%d/%d features survived feature selection', np.count_nonzero(mask), len(mask))
# Only keep the scores of the features that survived. This array is used to check the
# input data shape at train and decode time matches. However, because the post-feature-selections
# vocabulary is passed back into the vectorizer, at decode time the input will likely be smaller. This is
# like doing feature selection in the vectorizer.
self.scores_ = self.scores_[mask]
self.log_odds_mask = self.log_odds_mask[mask]
self.vectors_mask = self.vectors_mask[mask]
self.vocabulary_ = update_dict_according_to_mask(self.vocabulary_, mask)
return mask
class MetadataStripper(BaseEstimator, TransformerMixin):
"""
The current implementation of ThesaurusVectorizer's fit() returns not just a data matrix, but also some
metadata (its vocabulary). This class is meant to sit in a pipeline behind the vectorizer to remove that
metadata, so that it doesn't break other items in the pipeline.
Currently several other pipeline elements can make use of this data ( VectorBackedSelectKBest and
FeatureVectorsCsvDumper). This class must come after these in a pipeline as they do not have any
defensive checks
"""
def fit(self, X, y, vector_source=None, strategy='linear', k=None, **kwargs):
        matrix, self.voc = X # store voc, may be handy for debugging
self.vector_source = vector_source
if isinstance(self.vector_source, Vectors):
# the vector source can be either a Thesaurus or Vectors. Both can provide nearest neighbours,
# but the latter needs this method to be called first
if not k:
k = 10
self.vector_source.init_sims([str(foo) for foo in self.voc.keys()],
strategy=strategy, n_neighbors=k)
return self
def transform(self, X, **kwargs):
# if X is a tuple, strip metadata, otherwise let it be
        return X[0] if isinstance(X, tuple) else X
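# --- Illustrative usage of VectorBackedSelectKBest (a hypothetical sketch; the
# real pipeline feeds it the (matrix, vocabulary) tuple produced by
# ThesaurusVectorizer, which is not shown here) ---
#
#   from scipy.sparse import csr_matrix
#   X = csr_matrix(np.random.rand(20, 4) > 0.5, dtype=float)
#   y = np.array([0, 1] * 10)
#   vocab = {'feat_a': 0, 'feat_b': 1, 'feat_c': 2, 'feat_d': 3}
#   fs = VectorBackedSelectKBest(k=2, must_be_in_thesaurus=False)
#   fs.fit((X, vocab), y)
#   X_reduced, new_vocab = fs.transform((X, vocab))
#   X_plain = MetadataStripper().fit((X_reduced, new_vocab), y).transform((X_reduced, new_vocab))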
|
the-stack_0_5304 | import sys,tweepy,csv,re
from textblob import TextBlob
import matplotlib.pyplot as plt
#alteration
class SentimentAnalysis:
def __init__(self):
self.tweets = []
self.tweetText = []
def DownloadData(self):
# authenticating
consumerKey = 'qBIngtySLGxbyw6eo4Ihqxz2K'
consumerSecret = '6Eu8Ax7QqjLR7uTEMLavaj8KfXCTqzy7W6Ap8JZQQu8HMyu3LZ'
accessToken = '2482155314-176f8Yno4FQiMRTM8YFlXYDQ4m7SsCw0DojEgAy'
accessTokenSecret = 'yofigXgWTivDeQxa6AKtdL7cGUR7Sblp0jBAX7f9xvXVg'
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
# input for term to be searched and how many tweets to search
searchTerm = input("Enter Keyword/Tag to search about: ")
NoOfTerms = int(input("Enter how many tweets to search: "))
# searching for tweets
self.tweets = tweepy.Cursor(api.search, q=searchTerm, lang = "en").items(NoOfTerms)
# Open/create a file to append data to
csvFile = open('result.csv', 'a')
# Use csv writer
csvWriter = csv.writer(csvFile)
# creating some variables to store info
polarity = 0
positive = 0
wpositive = 0
spositive = 0
negative = 0
wnegative = 0
snegative = 0
neutral = 0
# iterating through tweets fetched
for tweet in self.tweets:
#Append to temp so that we can store in csv later. I use encode UTF-8
self.tweetText.append(self.cleanTweet(tweet.text).encode('utf-8'))
# print (tweet.text.translate(non_bmp_map)) #print tweet's text
analysis = TextBlob(tweet.text)
# print(analysis.sentiment) # print tweet's polarity
polarity += analysis.sentiment.polarity # adding up polarities to find the average later
if (analysis.sentiment.polarity == 0): # adding reaction of how people are reacting to find average later
neutral += 1
elif (analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <= 0.3):
wpositive += 1
elif (analysis.sentiment.polarity > 0.3 and analysis.sentiment.polarity <= 0.6):
positive += 1
elif (analysis.sentiment.polarity > 0.6 and analysis.sentiment.polarity <= 1):
spositive += 1
elif (analysis.sentiment.polarity > -0.3 and analysis.sentiment.polarity <= 0):
wnegative += 1
elif (analysis.sentiment.polarity > -0.6 and analysis.sentiment.polarity <= -0.3):
negative += 1
elif (analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <= -0.6):
snegative += 1
# Write to csv and close csv file
csvWriter.writerow(self.tweetText)
csvFile.close()
# finding average of how people are reacting
positive = self.percentage(positive, NoOfTerms)
wpositive = self.percentage(wpositive, NoOfTerms)
spositive = self.percentage(spositive, NoOfTerms)
negative = self.percentage(negative, NoOfTerms)
wnegative = self.percentage(wnegative, NoOfTerms)
snegative = self.percentage(snegative, NoOfTerms)
neutral = self.percentage(neutral, NoOfTerms)
# finding average reaction
polarity = polarity / NoOfTerms
# printing out data
print("How people are reacting on " + searchTerm + " by analyzing " + str(NoOfTerms) + " tweets.")
print()
print("General Report: ")
if (polarity == 0):
print("Neutral")
elif (polarity > 0 and polarity <= 0.3):
print("Weakly Positive")
elif (polarity > 0.3 and polarity <= 0.6):
print("Positive")
elif (polarity > 0.6 and polarity <= 1):
print("Strongly Positive")
elif (polarity > -0.3 and polarity <= 0):
print("Weakly Negative")
elif (polarity > -0.6 and polarity <= -0.3):
print("Negative")
elif (polarity > -1 and polarity <= -0.6):
print("Strongly Negative")
print()
print("Detailed Report: ")
print(str(positive) + "% people thought it was positive")
print(str(wpositive) + "% people thought it was weakly positive")
print(str(spositive) + "% people thought it was strongly positive")
print(str(negative) + "% people thought it was negative")
print(str(wnegative) + "% people thought it was weakly negative")
print(str(snegative) + "% people thought it was strongly negative")
print(str(neutral) + "% people thought it was neutral")
self.plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, NoOfTerms)
def cleanTweet(self, tweet):
# Remove Links, Special Characters etc from tweet
        # strip @mentions, non-alphanumeric characters and URLs
        return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
# function to calculate percentage
def percentage(self, part, whole):
temp = 100 * float(part) / float(whole)
return format(temp, '.2f')
def plotPieChart(self, positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, noOfSearchTerms):
labels = ['Positive [' + str(positive) + '%]', 'Weakly Positive [' + str(wpositive) + '%]','Strongly Positive [' + str(spositive) + '%]', 'Neutral [' + str(neutral) + '%]',
'Negative [' + str(negative) + '%]', 'Weakly Negative [' + str(wnegative) + '%]', 'Strongly Negative [' + str(snegative) + '%]']
sizes = [positive, wpositive, spositive, neutral, negative, wnegative, snegative]
colors = ['yellowgreen','lightgreen','darkgreen', 'gold', 'red','lightsalmon','darkred']
patches, texts = plt.pie(sizes, colors=colors, startangle=90)
plt.legend(patches, labels, loc="best")
plt.title('How people are reacting on ' + searchTerm + ' by analyzing ' + str(noOfSearchTerms) + ' Tweets.')
plt.axis('equal')
plt.tight_layout()
plt.show()
if __name__== "__main__":
sa = SentimentAnalysis()
sa.DownloadData()
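# --- Example session (illustrative only; requires valid Twitter API credentials,
# and the script filename is hypothetical) ---
#
#   $ python sentiment_analysis.py
#   Enter Keyword/Tag to search about: python
#   Enter how many tweets to search: 100
#
# The script then prints the general and detailed sentiment reports defined above,
# shows a pie chart of the polarity distribution and appends the cleaned tweets
# to result.csv.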
|
the-stack_0_5306 | # manage.py
import os
import unittest
import coverage
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
COV = coverage.coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/server/config.py',
'project/server/*/__init__.py'
]
)
COV.start()
from project.server import app, db, models
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def cov():
"""Runs the unit tests with coverage."""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
return 0
return 1
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
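# Typical invocations from the command line (flask_script exposes each
# @manager.command function under its function name):
#   python manage.py create_db   # create the database tables
#   python manage.py test        # run the unit tests
#   python manage.py cov         # run the tests and print a coverage report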
if __name__ == '__main__':
manager.run() |
the-stack_0_5307 | # pylint: disable=unused-import
"""
UBX Protocol Input payload definitions
THESE ARE THE PAYLOAD DEFINITIONS FOR _SET_ MESSAGES _TO_ THE RECEIVER
(e.g. configuration and calibration commands; AssistNow payloads)
Created on 27 Sep 2020
Information sourced from u-blox Interface Specifications © 2013-2021, u-blox AG
:author: semuadmin
"""
# pylint: disable=too-many-lines, line-too-long, duplicate-code
from pyubx2.ubxtypes_core import (
C2,
C6,
C32,
I1,
I2,
I4,
R4,
R8,
U1,
U2,
U3,
U4,
U5,
U6,
U7,
U8,
U9,
U12,
U22,
U40,
U64,
X1,
X2,
X4,
)
UBX_PAYLOADS_SET = {
# AssistNow Aiding Messages: i.e. Ephemeris, Almanac, other A-GPS data input.
# Messages in the AID class are used to send GPS aiding data to the receiver
# AID messages are deprecated in favour of MGA messages in >=Gen8
"AID-ALM": {"svid": U4, "week": U4, "optBlock": ("None", {"dwrd": U4})},
"AID-AOP": {"gnssId": U1, "svId": U1, "reserved1": U2, "data": U64},
"AID-EPH": {
"svid": U4,
"how": U4,
"optBlock": (
"None",
{
"sf1d1": U4,
"sf1d2": U4,
"sf1d3": U4,
"sf1d4": U4,
"sf1d5": U4,
"sf1d6": U4,
"sf1d7": U4,
"sf1d8": U4,
"sf2d1": U4,
"sf2d2": U4,
"sf2d3": U4,
"sf2d4": U4,
"sf2d5": U4,
"sf2d6": U4,
"sf2d7": U4,
"sf2d8": U4,
"sf3d1": U4,
"sf3d2": U4,
"sf3d3": U4,
"sf3d4": U4,
"sf3d5": U4,
"sf3d6": U4,
"sf3d7": U4,
"sf3d8": U4,
},
),
},
"AID-HUI": {
"health": X4,
"utcA0": R8,
"utcA1": R8,
"utcTOW": I4,
"utcWNT": I2,
"utcLS": I2,
"utcWNF": I2,
"utcDNs": I2,
"utcLSF": I2,
"utcSpare": I2,
"klobA0": R4,
"klobA1": R4,
"klobA2": R4,
"klobA3": R4,
"klobB0": R4,
"klobB1": R4,
"klobB2": R4,
"klobB3": R4,
"flags": X4,
},
"AID-INI": {
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"posAcc": U4,
"tmCfg": X2,
"wn": U2,
"tow": U4,
"towNs": I4,
"tAccMs": U4,
"tAccNs": U4,
"clkDOrFreq": I4,
"clkDAccOrFreqAcc": U4,
"flags": X4,
},
# ********************************************************************
# Configuration Input Messages: i.e. Set Dynamic Model, Set DOP Mask, Set Baud Rate, etc..
# Messages in the CFG class are used to configure the receiver and read out current configuration values. Any
# messages in the CFG class sent to the receiver are either acknowledged (with message UBX-ACK-ACK) if
# processed successfully or rejected (with message UBX-ACK-NAK) if processing unsuccessfully.
"CFG-ANT": {
"flags": (
X2,
{
"svcs": U1,
"scd": U1,
"ocd": U1,
"pdwnOnSCD": U1,
"recovery": U1,
},
),
"pins": (
X2,
{
"pinSwitch": U5,
"pinSCD": U5,
"pinOCD": U5,
"reconfig": U1,
},
),
},
"CFG-BATCH": {
"version": U1,
"flags": (
X1,
{
"enable": U1,
"reserved1": U1,
"extraPvt": U1,
"extraOdo": U1,
"reserved2": U1,
"pioEnable": U1,
"pioActiveLow": U1,
},
),
"bufSize": U2,
"notifThrs": U2,
"pioId": U1,
"reserved0": U1,
},
"CFG-CFG": {
"clearMask": X4,
"saveMask": X4,
"loadMask": X4,
"deviceMask": (
X1,
{
"devBBR": U1,
"devFlash": U1,
"devEEPROM": U1,
"reserved1": U1,
"devSpiFlash": U1,
},
),
},
"CFG-DAT": {
"datumNum": U2,
"datumName": C6,
"majA": R8,
"flat": R8,
"dX": R4,
"dY": R4,
"dZ": R4,
"rotX": R4,
"rotY": R4,
"rotZ": R4,
"scale": R4,
},
"CFG-DGNSS": {
"dgnssMode": U1,
"reserved0": U3,
},
"CFG-DOSC": {
"version": U1,
"numOsc": U1,
"reserved1": U2,
"group": (
"numOsc",
{ # repeating group * numOsc
"oscId": U1,
"reserved2": U1,
"flags": (
X2,
{
"isCalibrated": U1,
"controlIf": U4,
},
),
"freq": U4,
"phaseOffset": I4,
"withTemp": U4,
"withAge": U4,
"timeToTemp": U2,
"reserved3": U2,
"gainVco": I4,
"gainUncertainty": U1,
"reserved4": U3,
},
),
},
"CFG-DYNSEED": {"version": U1, "reserved1": U3, "seedHi": U4, "seedLo": U4},
"CFG-ESFALG": {
"bitfield": U4,
"yaw": U4,
"pitch": I2,
"roll": I2,
},
"CFG-ESFA": {
"version": U1,
"reserved1": U9,
"accelRmsThdl": U1,
"frequency": U1,
"latency": U2,
"accuracy": U2,
"reserved2": U4,
},
"CFG-ESFG": {
"version": U1,
"reserved1": U7,
"tcTableSaveRate": U2,
"gyroRmsThdl": U1,
"frequency": U1,
"latency": U2,
"accuracy": U2,
"reserved2": U4,
},
"CFG-ESFWT": {
"version": U1,
"flags1": (
X1,
{
"combineTicks": U1,
"reserved3": U3,
"useWtSpeed": U1,
"dirPinPol": U1,
"useWtPin": U1,
},
),
"flags2": (
X1,
{
"autoWtCountMaxOff": U1,
"autoDirPinPolOff": U1,
"autoSoftwareWtOff": U1,
"autoUseWtSpeedOff": U1,
},
),
"reserved1": U1,
"wtFactor": U4,
"wtQuantError": U4,
"wtCountMax": U4,
"wtLatency": U2,
"wtFrequency": U1,
"flags3": (
X1,
{
"reserved3": U4,
"cntBothEdges": U1,
},
),
"speedDeadBand": U2,
"reserved2": U1,
},
"CFG-ESRC": {
"version": U1,
"numSources": U1,
"reserved1": U2,
"group": (
"numSources",
{ # repeating group * numSources
"extInt": U1,
"flags": (
X2,
{
"polarity": U1,
"gnssUtc": U1,
},
),
"freq": U4,
"reserved2": U4,
"withTemp": U4,
"withAge": U4,
"timeToTemp": U2,
"maxDevLifeTim": U2,
"offset": I4,
"offsetUncertainty": U4,
"jitter": U4,
},
),
},
"CFG-FIXSEED": {
"version": U1,
"length": U1,
"reserved1": U2,
"seedHi": U4,
"seedLo": U4,
"group": ("length", {"classId": U1, "msgId": U1}), # repeating group * length
},
"CFG-GEOFENCE": {
"version": U1,
"numFences": U1,
"confLvl": U1,
"reserved1": U1,
"pioEnabled": U1,
"pinPolarity": U1,
"pin": U1,
"reserved2": U1,
"group": (
"numFences",
{"lat": I4, "lon": I4, "radius": U4}, # repeating group * numFences
),
},
"CFG-GNSS": {
"msgVer": U1,
"numTrkChHw": U1,
"numTrkChUse": U1,
"numConfigBlocks": U1,
"group": (
"numConfigBlocks",
{ # repeating group * numConfigBlocks
"gnssId": U1,
"resTrkCh": U1,
"maxTrkCh": U1,
"reserved0": U1,
"flags": (
X4,
{
"enable": U1,
"reserved1": U8,
"reserved2": U7,
"sigCfMask": U8,
"reserved3": U8,
},
),
},
),
},
"CFG-HNR": {
"highNavRate": U1,
"reserved1": U3,
},
"CFG-INF": {
"protocolID": U1,
"reserved0": U3,
"infMaskGroup": (
6,
{
"infMsgMask": (
X1,
{
"enableError": U1,
"enableWarning": U1,
"enableNotice": U1,
"enableTest": U1,
"enableDebug": U1,
},
),
},
),
},
"CFG-ITFM": {
"config": (
X4,
{
"bbThreshold": U4,
"cwThreshold": U5,
"algorithmBits": U22,
"enable": U1,
},
),
"config2": (
X4,
{
"generalBits": U12,
"antSetting": U2,
"enable2": U1,
},
),
},
"CFG-LOGFILTER": {
"version": U1,
"flags": (
X1,
{
"recordEnabled": U1,
"psmOncePerWakupEnabled": U1,
"applyAllFilterSettings": U1,
},
),
"minInterval": U2,
"timeThreshold": U2,
"speedThreshold": U2,
"positionThreshold": U4,
},
"CFG-MSG": {
"msgClass": U1,
"msgID": U1,
"rateDDC": U1,
"rateUART1": U1,
"rateUART2": U1,
"rateUSB": U1,
"rateSPI": U1,
"reserved": U1,
},
"CFG-NAV5": {
"mask": (
X2,
{
"dyn": U1,
"minEl": U1,
"posFixMode": U1,
"drLim": U1,
"posMask": U1,
"timeMask": U1,
"staticHoldMask": U1,
"dgpsMask": U1,
"cnoThreshold": U1,
"reserved0": U1,
"utc": U1,
},
),
"dynModel": U1,
"fixMode": U1,
"fixedAlt": I4,
"fixedAltVar": U4,
"minElev": I1,
"drLimit": U1,
"pDop": U2,
"tDop": U2,
"pAcc": U2,
"tAcc": U2,
"staticHoldThresh": U1,
"dgpsTimeOut": U1,
"reserved2": U4,
"reserved3": U4,
"reserved4": U4,
},
"CFG-NAVX5": {
"version": U2,
"mask1": (
X2,
{
"reserved9": U2,
"minMax": U1,
"minCno": U1,
"reserved10": U2,
"initial3dfix": U1,
"reserved11": U2,
"wknRoll": U1,
"ackAid": U1,
"reserved12": U2,
"ppp": U1,
"aop": U1,
},
),
"mask2": (
X4,
{
"reserved13": U6,
"useAdr": U1,
"sigAttenComp": U1,
},
),
"reserved0": U2,
"minSVs": U1,
"maxSVs": U1,
"minCNO": U1,
"reserved1": U1,
"iniFix3D": U1,
"reserved2": U2,
"ackAiding": U1,
"wknRollover": U2,
"sigAttenCompMode": U1,
"reserved3": U1,
"reserved4": U2,
"reserved5": U2,
"usePPP": U1,
"aopCfg": U1,
"reserved6": U2,
"aopOrbMaxErr": U2,
"reserved7": U4,
"reserved8": U3,
"useAdr": U1,
},
"CFG-NMEAvX": { # deprecated length 4
"filter": (
X1,
{
"posFilt": U1,
"mskPosFilt": U1,
"timeFilt": U1,
"dateFilt": U1,
"gpsOnlyFilter": U1,
"trackFilt": U1,
},
),
"nmeaVersion": U1,
"numSV": U1,
"flags": (
X1,
{
"compat": U1,
"consider": U1,
"limit82": U1,
"highPrec": U1,
},
),
},
"CFG-NMEAv0": { # v0 deprecated length 12
"filter": (
X1,
{
"posFilt": U1,
"mskPosFilt": U1,
"timeFilt": U1,
"dateFilt": U1,
"gpsOnlyFilter": U1,
"trackFilt": U1,
},
),
"nmeaVersion": U1,
"numSV": U1,
"flags": (
X1,
{
"compat": U1,
"consider": U1,
"limit82": U1,
"highPrec": U1,
},
),
"gnssToFilter": (
X4,
{
"disableGps": U1,
"disableSbas": U1,
"disableGalileo": U1,
"reserved2": U1,
"disableQzss": U1,
"disableGlonass": U1,
"disableBeidou": U1,
},
),
"svNumbering": U1,
"mainTalkerId": U1,
"gsvTalkerId": U1,
"version": U1,
},
"CFG-NMEA": { # preferred version length 20
"filter": (
X1,
{
"posFilt": U1,
"mskPosFilt": U1,
"timeFilt": U1,
"dateFilt": U1,
"gpsOnlyFilter": U1,
"trackFilt": U1,
},
),
"nmeaVersion": U1,
"numSV": U1,
"flags": (
X1,
{
"compat": U1,
"consider": U1,
"limit82": U1,
"highPrec": U1,
},
),
"gnssToFilter": (
X4,
{
"disableGps": U1,
"disableSbas": U1,
"disableGalileo": U1,
"reserved2": U1,
"disableQzss": U1,
"disableGlonass": U1,
"disableBeidou": U1,
},
),
"svNumbering": U1,
"mainTalkerId": U1,
"gsvTalkerId": U1,
"version": U1,
"bdsTalkerId": C2,
"reserved1": U6,
},
"CFG-ODO": {
"version": U1,
"reserved0": U3,
"flags": (
X1,
{
"useODO": U1,
"useCOG": U1,
"outLPVel": U1,
"outLPCog": U1,
},
),
"odoCfg": (
X1,
{
"profile": U3,
},
),
"reserved1": U6,
"cogMaxSpeed": U1,
"cogMaxPosAcc": U1,
"reserved2": U2,
"velLpGain": U1,
"cogLpGain": U1,
"reserved3": U2,
},
"CFG-PM2": {
"version": U1,
"reserved0": U1,
"maxStartupStateDur": U1,
"reserved1": U1,
"flags": (
X4,
{
"optTarget": U3,
"extintSel": U1,
"extintWake": U1,
"extintBackup": U1,
"extintInactive": U1,
"limitPeakCurr": U2,
"waitTimeFix": U1,
"updateRTC": U1,
"updateEPH": U1,
"reserved12": U3,
"doNotEnterOff": U1,
"operationMode": U2,
},
),
"updatePeriod": U4,
"searchPeriod": U4,
"gridOffset": U4,
"onTime": U2,
"minAcqTime": U2,
"reserved4": U2,
"reserved5": U2,
"reserved6": U4,
"reserved7": U4,
"reserved8": U1,
"reserved9": U1,
"reserved10": U2,
"reserved11": U4,
},
"CFG-PMS": {
"version": U1,
"powerSetupValue": U1,
"period": U2,
"onTime": U2,
"reserved1": U2,
},
"CFG-PRT": {
"portID": U1,
"reserved0": U1,
"txReady": (
X2,
{
"enable": U1,
"pol": U1,
"pin": U5,
"thres": U9,
},
),
"UARTmode": (
X4,
{
"reserved6": U6,
"charLen": U2,
"reserved7": U1,
"parity": U3,
"nStopBits": U2,
},
),
"baudRate": U4,
"inProtoMask": (
X2,
{
"inUBX": U1,
"inNMEA": U1,
"inRTCM": U1,
"reserved8": U2,
"inRTCM3": U1,
},
),
"outProtoMask": (
X2,
{
"outUBX": U1,
"outNMEA": U1,
"reserved9": U3,
"outRTCM3": U1,
},
),
"flags": (
X2,
{
"reserved10": U1,
"extendedTxTimeout": U1,
},
),
"reserved5": U2,
},
"CFG-PWR": {"version": U1, "reserved1": U3, "state": U4},
"CFG-RATE": {"measRate": U2, "navRate": U2, "timeRef": U2},
"CFG-RINV": {
"flags": (
X1,
{
"dump": U1,
"binary": U1,
},
),
"group": ("None", {"data": U1}),
}, # repeating group
"CFG-RST": {
"navBbrMask": (
X2,
{
"eph": U1,
"alm": U1,
"health": U1,
"klob": U1,
"pos": U1,
"clkd": U1,
"osc": U1,
"utc": U1,
"rtc": U1,
"reserved2": U6,
"aop": U1,
},
),
"resetMode": U1,
"reserved1": U1,
},
"CFG-RXM": {"reserved0": U1, "lpMode": U1},
"CFG-SBAS": {
"mode": (
X1,
{
"enabled": U1,
"test": U1,
},
),
"usage": (
X1,
{
"range": U1,
"diffCorr": U1,
"integrity": U1,
},
),
"maxSBAS": U1,
"scanmode2": (
X1,
{
"PRN152": U1,
"PRN153": U1,
"PRN154": U1,
"PRN155": U1,
"PRN156": U1,
"PRN157": U1,
"PRN158": U1,
},
),
"scanmode1": (
X4,
{
"PRN120": U1,
"PRN121": U1,
"PRN122": U1,
"PRN123": U1,
"PRN124": U1,
"PRN125": U1,
"PRN126": U1,
"PRN127": U1,
"PRN128": U1,
"PRN129": U1,
"PRN130": U1,
"PRN131": U1,
"PRN132": U1,
"PRN133": U1,
"PRN134": U1,
"PRN135": U1,
"PRN136": U1,
"PRN137": U1,
"PRN138": U1,
"PRN139": U1,
"PRN140": U1,
"PRN141": U1,
"PRN142": U1,
"PRN143": U1,
"PRN144": U1,
"PRN145": U1,
"PRN146": U1,
"PRN147": U1,
"PRN148": U1,
"PRN149": U1,
"PRN150": U1,
"PRN151": U1,
},
),
},
"CFG-SENIF": {
"type": U1,
"version": U1,
"flags": (
X2,
{
"senConn": U1,
},
),
"pioConf": X2,
},
"CFG-SLAS": {
"mode": (
X1,
{
"enabled": U1,
"test": U1,
"raim": U1,
},
),
"reserved1": U3,
},
"CFG-SMGR": {
"minGNSSFix": U1,
"maxFreqChange": U2,
"maxPhaseCorrRate": U2,
"reserved1": U2,
"freqTolerance": U2,
"timeTolerance": U2,
"messageCfg": (
X2,
{
"measInternal": U1,
"measGNSS": U1,
"measEXTINT0": U1,
"measEXTINT1": U1,
},
),
"maxSlewRate": U2,
"flags": (
X4,
{
"disableInternal": U1,
"disableExternal": U1,
"preferenceMode": U1,
"enableGNSS": U1,
"enableEXTINT0": U1,
"enableEXTINT1": U1,
"enableHostMeasInt": U1,
"enableHostMeasExt": U1,
"reserved1": U2,
"useAnyFix": U1,
"disableMaxSlewRate": U1,
"issueFreqWarning": U1,
"issueTimeWarning": U1,
"TPCoherent": U2,
"disableOffset": U1,
},
),
},
"CFG-SPT": {
"version": U1,
"reserved0": U1,
"sensorId": U2,
"reserved1": U8,
},
"CFG-TMODE2": {
"timeMode": U1,
"reserved1": U1,
"flags": (
X2,
{
"lla": U1,
"altInv": U1,
},
),
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"fixedPosAcc": U4,
"svinMinDur": U4,
"svinAccLimit": U4,
},
"CFG-TMODE3": {
"version": U1,
"reserved0": U1,
"flags": (
X2,
{
"rcvrMode": U8,
"lla": U1,
},
),
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"ecefXOrLatHP": I1,
"ecefYOrLonHP": I1,
"ecefZOrAltHP": I1,
"reserved1": U1,
"fixedPosAcc": U4,
"svinMinDur": U4,
"svinAccLimit": U4,
"reserved2": U8,
},
"CFG-TP5": {
"tpIdx": U1,
"reserved0": U1,
"reserved1": U2,
"antCableDelay": I2,
"rfGroupDelay": I2,
"freqPeriod": U4,
"freqPeriodLock": U4,
"pulseLenRatio": U4,
"pulseLenRatioLock": U4,
"userConfigDelay": I4,
"flags": (
X4,
{
"active": U1,
"lockGnssFreq": U1,
"lockedOtherSet": U1,
"isFreq": U1,
"isLength": U1,
"alignToTow": U1,
"polarity": U1,
"gridUtcGnss": U4,
"syncMode": U3,
},
),
},
"CFG-TXSLOT": {
"version": U1,
"enable": (
X1,
{
"enableDDC": U1,
"enableUART1": U1,
"enableUART2": U1,
"enableUSB": U1,
"enableSPI": U1,
},
),
"refTp": U1,
"reserved1": U1,
"end1": U4,
"end2": U4,
"end3": U4,
},
"CFG-USB": {
"vendorID": U2,
"productID": U2,
"reserved1": U2,
"reserved2": U2,
"powerConsumpt": U2,
"flags": (
X2,
{
"reEnum": U1,
"powerMode": U1,
},
),
"vendorString": C32,
"productString": C32,
"serialNumber": C32,
},
"CFG-VALDEL": {
"version": U1, # = 0 no transaction, 1 with transaction
"layers": X1,
"transaction": X1, # if version = 1, else reserved
"reserved0": U1,
"group": ("None", {"keys": U4}), # repeating group
},
"CFG-VALSET": {
"version": U1, # = 0 no transaction, 1 with transaction
"layers": X1,
"transaction": U1, # if version = 1, else reserved
"reserved0": U1,
"group": ("None", {"cfgData": U1}), # repeating group
},
# ********************************************************************
# External Sensor Fusion Messages: i.e. External Sensor Measurements and Status Information.
# Messages in the ESF class are used to output external sensor fusion information from the receiver.
"ESF-MEAS": { # this version used when bit 3 of flags = 0
"timeTag": U4,
"flags": X2,
"id": U2,
"group": (
"None",
{ # repeating group * numMeas, which is bits 11..15 in flags
"data": X4,
},
),
},
"ESF-MEAS-CT": { # this version used when bit 3 of flags = 1
"timeTag": U4,
"flags": X2,
"id": U2,
"group": (
"ESF-MEAS-CT",
{ # repeating group * numMeas, which is bits 11..15 of flags
"data": X4,
},
),
"calibTtag": U4,
},
# ********************************************************************
# Logging Messages: i.e. Log creation, deletion, info and retrieval.
# Messages in the LOG class are used to configure and report status information of the logging feature.
"LOG-CREATE": {
"version": U1,
"logCfg": X1,
"reserved1": U1,
"logSize": U1,
"userDefinedSize": U4,
},
"LOG-ERASE": {},
"LOG-FINDTIME": {
"version": U1,
"type": U1,
"year": U2,
"month": U1,
"day": U1,
"hour": U1,
"minute": U1,
"second": U1,
"reserved1": U1,
},
"LOG-RETRIEVE": {
"startNumber": U4,
"entryCount": U4,
"version": U1,
"reserved": U3,
},
"LOG-RETRIEVEBATCH": {
"version": U1,
"flags": X1,
"reserved0": U2,
},
"LOG-STRING": {"group": ("None", {"bytes": U1})}, # repeating group
# ********************************************************************
# Multiple GNSS Assistance Messages: i.e. Assistance data for various GNSS.
# Messages in the MGA class are used for GNSS aiding information from and to the receiver.
"MGA-ANO": {
"type": U1,
"version": U1,
"svId": U1,
"gnssId": U1,
"year": U1,
"month": U1,
"day": U1,
"reserved1": U1,
"data": U64,
"reserved2": U4,
},
"MGA-BDS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"SatH1": U1,
"IODC": U1,
"a2": I2,
"a1": I4,
"a0": I4,
"toc": U4,
"TGD1": I2,
"URAI": U1,
"IODE": U1,
"toe": U4,
"sqrtA": U4,
"e": U4,
"omega": I4,
"Deltan": I2,
"IDOT": I2,
"M0": I4,
"Omega0": I4,
"OmegaDot": I4,
"i0": I4,
"Cuc": I4,
"Cus": I4,
"Crc": I4,
"Crs": I4,
"Cic": I4,
"Cis": I4,
"reserved2": U4,
},
"MGA-BDS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"Wna": U1,
"toa": U1,
"deltaI": I2,
"sqrtA": U4,
"e": U4,
"omega": I4,
"M0": I4,
"Omega0": I4,
"omegaDot": I4,
"a0": I2,
"a1": I2,
"reserved2": U4,
},
"MGA-BDS-HEALTH": {
"type": U1, # 0x04
"version": U1,
"reserved0": U2,
"grouphealthcode": (
30,
{
"healthCode": U2,
},
), # repeating group * 30
"reserved1": U4,
},
"MGA-BDS-UTC": {
"type": U1, # 0x05
"version": U1,
"reserved1": U2,
"a0UTC": I4,
"a1UTC": I4,
"dtLS": I1,
"reserved2": U1,
"wnRec": U1,
"wnLSF": U1,
"dN": U1,
"dtLSF": I1,
"reserved3": U2,
},
"MGA-BDS-IONO": {
"type": U1, # 0x06
"version": U1,
"reserved1": U2,
"alpha0": I1,
"alpha1": I1,
"alpha2": I1,
"alpha3": I1,
"beta0": I1,
"beta1": I1,
"beta2": I1,
"beta3": I1,
"reserved2": U4,
},
"MGA-FLASH-DATA": {
"type": U1,
"version": U1,
"sequence": U2,
"size": U2,
"group": ("size", {"data": U1}), # repeating group * size
},
"MGA-FLASH-STOP": {"type": U1, "version": U1},
"MGA-GAL-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"iodNav": U2,
"deltaN": I2,
"m0": I4,
"e": U4,
"sqrtA": U4,
"omega0": I4,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"iDot": I2,
"cuc": I2,
"cus": I2,
"crc": I2,
"crs": I2,
"cic": I2,
"cis": I2,
"toe": U2,
"af0": I4,
"af1": I4,
"af2": I1,
"sisaIndexE1E5b": U1,
"toc": U2,
"bgdE1E5b": I2,
"reserved2": U2,
"healthE1B": U1,
"dataValidityE1B": U1,
"healthE5b": U1,
"dataValidityE5b": U1,
"reserved3": U4,
},
"MGA-GAL-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"ioda": U1,
"almWNa": U1,
"toa": U2,
"deltaSqrtA": I2,
"e": U2,
"deltaI": I2,
"omega0": I2,
"omegaDot": I2,
"omega": I2,
"m0": I2,
"af0": I2,
"af1": I2,
"healthE1B": U1,
"healthE5b": U1,
"reserved2": U4,
},
"MGA-GAL-TIMEOFFSET": {
"type": U1,
"version": U1,
"reserved1": U2,
"a0G": I2,
"a1G": I2,
"t0G": U1,
"wn0G": U1,
"reserved2": U2,
},
"MGA-GAL-UTC": {
"type": U1,
"version": U1,
"reserved1": U2,
"a0": I4,
"a1": I4,
"dtLS": I1,
"tot": U1,
"wnt": U1,
"wnLSF": U1,
"dN": U1,
"dTLSF": I1,
"reserved2": U2,
},
"MGA-GLO-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"FT": U1,
"B": U1,
"M": U1,
"H": I1,
"x": I4,
"y": I4,
"z": I4,
"dx": I4,
"dy": I4,
"dz": I4,
"ddx": I1,
"ddy": I1,
"ddz": I1,
"tb": U1,
"gamma": I2,
"E": U1,
"deltaTau": I1,
"tau": I4,
"reserved2": U4,
},
"MGA-GLO-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"N": U2,
"M": U1,
"C": U1,
"tau": I2,
"epsilon": U2,
"lambda": I4,
"deltaI": I4,
"tLambda": U4,
"deltaT": I4,
"deltaDT": I1,
"H": I1,
"omega": I2,
"reserved2": U4,
},
"MGA-GLO-TIMEOFFSET": {
"type": U1,
"version": U1,
"N": U2,
"tauC": I4,
"tauGps": I4,
"B1": I2,
"B2": I2,
"reserved1": U4,
},
"MGA-GPS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"fitInterval": U1,
"uraIndex": U1,
"svHealth": U1,
"tgd": I1,
"iodc": U2,
"toc": U2,
"reserved2": U1,
"af2": I1,
"af1": I2,
"af0": I4,
"crs": I2,
"deltaN": I2,
"m0": I4,
"cuc": I2,
"cus": I2,
"e": U4,
"sqrtA": U4,
"toe": U2,
"cic": I2,
"omega0": I4,
"cis": I2,
"crc": I2,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"idot": I2,
"reserved3": U4,
},
"MGA-GPS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"svHealth": U1,
"e": U2,
"almWNa": U1,
"toa": U1,
"deltaI": I2,
"omegaDot": I2,
"sqrtA": U4,
"omega0": I4,
"omega": I4,
"m0": I4,
"af0": I2,
"af1": I2,
"reserved1": U4,
},
"MGA-GPS-HEALTH": {
"type": U1,
"version": U1,
"reserved0": U2,
"grouphealthcode": (
32,
{
"healthCode": U1,
},
), # repeating group * 32
"reserved1": U4,
},
"MGA-GPS-UTC": {
"type": U1,
"version": U1,
"reserved1": U2,
"utcA0": I4,
"utcA1": I4,
"utcDtLS": I1,
"utcTot": U1,
"utcWNt": U1,
"utcWNlsf": U1,
"utcDn": U1,
"utcDtLSF": I1,
"reserved2": U2,
},
"MGA-GPS-IONO": {
"type": U1,
"version": U1,
"reserved1": U2,
"ionoAlpha0": I1,
"ionoAlpha1": I1,
"ionoAlpha2": I1,
"ionoAlpha3": I1,
"ionoBeta0": I1,
"ionoBeta1": I1,
"ionoBeta2": I1,
"ionoBeta3": I1,
"reserved2": U4,
},
"MGA-INI-POS_XYZ": {
"type": U1,
"version": U1,
"reserved1": U2,
"ecefX": I4,
"ecefY": I4,
"ecefZ": I4,
"posAcc": U4,
},
"MGA-INI-POS_LLH": {
"type": U1,
"version": U1,
"reserved1": U2,
"lat": I4,
"lon": I4,
"alt": I4,
"posAcc": U4,
},
"MGA-INI-TIME_UTC": {
"type": U1,
"version": U1,
"ref": X1,
"leapSecs": I1,
"year": U2,
"month": U1,
"day": U1,
"hour": U1,
"minute": U1,
"second": U1,
"reserved1": U1,
"ns": U4,
"tAccS": U2,
"reserved2": U2,
"tAccNs": U4,
},
"MGA-INI-TIME_GNSS": {
"type": U1,
"version": U1,
"ref": X1,
"gnssId": U1,
"reserved1": U2,
"week": U2,
"tow": U4,
"ns": U4,
"tAccS": U2,
"reserved2": U2,
"tAccNs": U4,
},
"MGA-INI-CLKD": {
"type": U1,
"version": U1,
"reserved1": U2,
"clkD": I4,
"clkDAcc": U4,
},
"MGA-INI-FREQ": {
"type": U1,
"version": U1,
"reserved1": U1,
"flags": X1,
"freq": I4,
"freqAcc": U4,
},
"MGA-INI-EOP": {
"type": U1,
"version": U1,
"reserved1": U2,
"d2kRef": U2,
"d2kMax": U2,
"xpP0": I4,
"xpP1": I4,
"ypP0": I4,
"ypP1": I4,
"dUT1": I4,
"ddUT1": I4,
"reserved2": U40,
},
"MGA-QZSS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"fitInterval": U1,
"uraIndex": U1,
"svHealth": U1,
"tgd": I1,
"iodc": U2,
"toc": U2,
"reserved2": U1,
"af2": I1,
"af1": I2,
"af0": I4,
"crs": I2,
"deltaN": I2,
"m0": I4,
"cuc": I2,
"cus": I2,
"e": U4,
"sqrtA": U4,
"toe": U2,
"cic": I2,
"omega0": I4,
"cis": I2,
"crc": I2,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"idot": I2,
"reserved3": U2,
},
"MGA-QZSS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"svHealth": U1,
"e": U2,
"almWNa": U1,
"toa": U1,
"deltaI": I2,
"omegaDot": I2,
"sqrtA": U4,
"omega0": I4,
"omega": I4,
"m0": I4,
"af0": I2,
"af1": I2,
"reserved1": U4,
},
"MGA-QZSS-HEALTH": {
"type": U1,
"version": U1,
"reserved0": U2,
"grouphealthcode": (
5,
{
"healthCode": U1,
},
), # repeating group * 5
"reserved1": U3,
},
# ********************************************************************
# Navigation Results Messages: i.e. Position, Speed, Time, Acceleration, Heading, DOP, SVs used.
# Messages in the NAV class are used to output navigation data such as position, altitude and velocity in a
# number of formats. Additionally, status flags and accuracy figures are output. The messages are generated with
# the configured navigation/measurement rate.
"NAV-RESETODO": {},
# ********************************************************************
# Receiver Manager Messages: i.e. Satellite Status, RTC Status.
# Messages in the RXM class are used to output status and result data from the Receiver Manager. The output
# rate is not bound to the navigation/measurement rate and messages can also be generated on events.
"RXM-PMREQ-S": {
"duration": U4,
"flags": X4,
}, # this appears to be a deprecated version
"RXM-PMREQ": {
"version": U1, # 0x00
"reserved0": U3,
"duration": U4,
"flags": X4,
"wakeupSources": X4,
},
# ********************************************************************
# Timing Messages: i.e. Time Pulse Output, Time Mark Results.
# Messages in the TIM class are used to output timing information from the receiver, like Time Pulse and Time
# Mark measurements.
"TIM-HOC": {"version": U1, "oscId": U1, "flags": U1, "reserved1": U1, "value": I4},
"TIM-VCOCAL": {
"type": U1,
"version": U1,
"oscId": U1,
"reserved1": U3,
"gainUncertainty": U2,
"gainVco": I4,
},
# ********************************************************************
# Firmware Update Messages: i.e. Memory/Flash erase/write, Reboot, Flash identification, etc..
# Messages in the UPD class are used to update the firmware and identify any attached flash device.
"UPD-SOS": {"cmd": U1, "reserved1": U3}, # Create or clear backup in flash
}
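# --- Illustrative use of these SET payload definitions (a minimal sketch; it
# assumes pyubx2's UBXMessage constructor accepts the payload fields above as
# keyword arguments, which is the usual way SET messages are built) ---
#
#   from pyubx2 import UBXMessage, SET
#   msg = UBXMessage("CFG", "CFG-MSG", SET,
#                    msgClass=0xf1, msgID=0x00,          # example target message
#                    rateDDC=0, rateUART1=1, rateUART2=0,
#                    rateUSB=1, rateSPI=0, reserved=0)
#   serial_port.write(msg.serialize())                   # hypothetical serial port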
|
the-stack_0_5308 | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import random
import uuid
import numpy as np
from mars.errors import StorageFull
from mars.graph import DAG
from mars.utils import get_next_port, serialize_graph
from mars.scheduler import ChunkMetaActor
from mars.scheduler.utils import SchedulerClusterInfoActor
from mars.tests.core import patch_method
from mars.worker import WorkerDaemonActor, DispatchActor, StorageManagerActor, \
CpuCalcActor, IORunnerActor, PlasmaKeyMapActor, SharedHolderActor, \
InProcHolderActor, QuotaActor, MemQuotaActor, StatusActor
from mars.worker.storage import DataStorageDevice
from mars.worker.storage.sharedstore import PlasmaSharedStore
from mars.worker.tests.base import WorkerCase
from mars.worker.utils import build_quota_key, WorkerClusterInfoActor
class Test(WorkerCase):
@contextlib.contextmanager
def _start_calc_pool(self):
mock_addr = f'127.0.0.1:{get_next_port()}'
with self.create_pool(n_process=1, backend='gevent', address=mock_addr) as pool:
pool.create_actor(SchedulerClusterInfoActor, [mock_addr],
uid=SchedulerClusterInfoActor.default_uid())
pool.create_actor(WorkerClusterInfoActor, [mock_addr],
uid=WorkerClusterInfoActor.default_uid())
pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
pool.create_actor(StatusActor, mock_addr, uid=StatusActor.default_uid())
pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
pool.create_actor(StorageManagerActor, uid=StorageManagerActor.default_uid())
pool.create_actor(IORunnerActor)
pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
shared_holder_ref = pool.create_actor(
SharedHolderActor, uid=SharedHolderActor.default_uid())
pool.create_actor(InProcHolderActor)
pool.create_actor(CpuCalcActor, uid=CpuCalcActor.default_uid())
with self.run_actor_test(pool) as test_actor:
try:
yield pool, test_actor
finally:
shared_holder_ref.destroy()
@staticmethod
def _build_test_graph(data_list):
from mars.tensor.fetch import TensorFetch
from mars.tensor.arithmetic import TensorTreeAdd
inputs = []
for idx, d in enumerate(data_list):
chunk_key = f'chunk-{random.randint(0, 999)}-{idx}'
fetch_chunk = TensorFetch(to_fetch_key=chunk_key, dtype=d.dtype) \
.new_chunk([], shape=d.shape, _key=chunk_key)
inputs.append(fetch_chunk)
add_chunk = TensorTreeAdd(data_list[0].dtype).new_chunk(inputs, shape=data_list[0].shape)
exec_graph = DAG()
exec_graph.add_node(add_chunk)
for input_chunk in inputs:
exec_graph.add_node(input_chunk)
exec_graph.add_edge(input_chunk, add_chunk)
return exec_graph, inputs, add_chunk
def testCpuCalcSingleFetches(self):
import gc
with self._start_calc_pool() as (_pool, test_actor):
quota_ref = test_actor.promise_ref(MemQuotaActor.default_uid())
calc_ref = test_actor.promise_ref(CpuCalcActor.default_uid())
session_id = str(uuid.uuid4())
data_list = [np.random.random((10, 10)) for _ in range(3)]
exec_graph, fetch_chunks, add_chunk = self._build_test_graph(data_list)
storage_client = test_actor.storage_client
for fetch_chunk, d in zip(fetch_chunks, data_list):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
self.assertEqual(list(storage_client.get_data_locations(session_id, [fetch_chunks[0].key])[0]),
[(0, DataStorageDevice.SHARED_MEMORY)])
quota_batch = {
build_quota_key(session_id, add_chunk.key, add_chunk.op.key): data_list[0].nbytes,
}
for idx in [1, 2]:
quota_batch[build_quota_key(session_id, fetch_chunks[idx].key, add_chunk.op.key)] \
= data_list[idx].nbytes
self.waitp(
storage_client.copy_to(session_id, [fetch_chunks[idx].key], [DataStorageDevice.DISK])
.then(lambda *_: storage_client.delete(
session_id, [fetch_chunks[idx].key], [DataStorageDevice.SHARED_MEMORY]))
)
self.assertEqual(
list(storage_client.get_data_locations(session_id, [fetch_chunks[idx].key])[0]),
[(0, DataStorageDevice.DISK)])
self.waitp(
quota_ref.request_batch_quota(quota_batch, _promise=True),
)
o_create = PlasmaSharedStore.create
def _mock_plasma_create(store, session_id, data_key, size):
if data_key == fetch_chunks[2].key:
raise StorageFull
return o_create(store, session_id, data_key, size)
id_type_set = set()
def _extract_value_ref(*_):
inproc_handler = storage_client.get_storage_handler((0, DataStorageDevice.PROC_MEMORY))
obj = inproc_handler.get_objects(session_id, [add_chunk.key])[0]
id_type_set.add((id(obj), type(obj)))
del obj
with patch_method(PlasmaSharedStore.create, _mock_plasma_create):
self.waitp(
calc_ref.calc(session_id, add_chunk.op.key, serialize_graph(exec_graph),
[add_chunk.key], _promise=True)
.then(_extract_value_ref)
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk.op.key, [add_chunk.key], None, _promise=True))
)
self.assertTrue(all((id(obj), type(obj)) not in id_type_set
for obj in gc.get_objects()))
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [fetch_chunks[0].key])[0]),
[(0, DataStorageDevice.SHARED_MEMORY)])
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [fetch_chunks[1].key])[0]),
[(0, DataStorageDevice.DISK)])
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [fetch_chunks[2].key])[0]),
[(0, DataStorageDevice.DISK)])
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [add_chunk.key])[0]),
[(0, DataStorageDevice.SHARED_MEMORY)])
def testCpuCalcErrorInRunning(self):
with self._start_calc_pool() as (_pool, test_actor):
calc_ref = test_actor.promise_ref(CpuCalcActor.default_uid())
session_id = str(uuid.uuid4())
data_list = [np.random.random((10, 10)) for _ in range(2)]
exec_graph, fetch_chunks, add_chunk = self._build_test_graph(data_list)
storage_client = test_actor.storage_client
for fetch_chunk, d in zip(fetch_chunks, data_list):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
def _mock_calc_results_error(*_, **__):
raise ValueError
with patch_method(CpuCalcActor._calc_results, _mock_calc_results_error), \
self.assertRaises(ValueError):
self.waitp(
calc_ref.calc(session_id, add_chunk.op.key, serialize_graph(exec_graph),
[add_chunk.key], _promise=True)
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk.op.key, [add_chunk.key], None, _promise=True))
)
def testDestroyCalcActor(self):
import gevent.event
with self._start_calc_pool() as (_pool, test_actor):
calc_ref = _pool.actor_ref(CpuCalcActor.default_uid())
calc_ref.mark_destroy()
gevent.sleep(0.8)
self.assertFalse(_pool.has_actor(calc_ref))
with self._start_calc_pool() as (_pool, test_actor):
calc_ref = test_actor.promise_ref(CpuCalcActor.default_uid())
session_id = str(uuid.uuid4())
data_list = [np.random.random((10, 10)) for _ in range(2)]
exec_graph, fetch_chunks, add_chunk = self._build_test_graph(data_list)
exec_graph2, fetch_chunks2, add_chunk2 = self._build_test_graph(data_list[::-1])
storage_client = test_actor.storage_client
for fetch_chunk, d in zip(fetch_chunks, data_list):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
for fetch_chunk2, d in zip(fetch_chunks2, data_list[::-1]):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk2.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
orig_calc_results = CpuCalcActor._calc_results
start_event = gevent.event.Event()
def _mock_calc_delayed(actor_obj, *args, **kwargs):
start_event.set()
gevent.sleep(1)
return orig_calc_results(actor_obj, *args, **kwargs)
with patch_method(CpuCalcActor._calc_results, _mock_calc_delayed):
p = calc_ref.calc(session_id, add_chunk.op.key, serialize_graph(exec_graph),
[add_chunk.key], _promise=True) \
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk.op.key, [add_chunk.key], None, _promise=True))
start_event.wait()
calc_ref.mark_destroy()
p2 = calc_ref.calc(session_id, add_chunk2.op.key, serialize_graph(exec_graph2),
[add_chunk2.key], _promise=True) \
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk2.op.key, [add_chunk2.key], None, _promise=True))
self.assertTrue(_pool.has_actor(calc_ref._ref))
self.waitp(p)
self.waitp(p2)
gevent.sleep(0.8)
self.assertFalse(_pool.has_actor(calc_ref._ref))
|
the-stack_0_5310 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.modules.events.cloning import EventCloner
from indico.modules.events.editing.models.file_types import EditingFileType
from indico.modules.events.editing.models.review_conditions import EditingReviewCondition
from indico.modules.events.editing.models.tags import EditingTag
from indico.modules.events.models.events import EventType
from indico.util.i18n import _
class EditingSettingsCloner(EventCloner):
name = 'editing_settings'
friendly_name = _('Editing (configured tags, file types, review conditions)')
new_event_only = True
@property
def is_visible(self):
return self.old_event.type_ == EventType.conference
@no_autoflush
def run(self, new_event, cloners, shared_data, event_exists=False):
self._filetype_map = {}
self._clone_tags(new_event)
self._clone_filetypes(new_event)
self._clone_review_conditions(new_event)
db.session.flush()
def _clone_tags(self, new_event):
attrs = get_simple_column_attrs(EditingTag)
for old_tag in self.old_event.editing_tags:
tag = EditingTag()
tag.populate_from_attrs(old_tag, attrs)
new_event.editing_tags.append(tag)
def _clone_filetypes(self, new_event):
attrs = get_simple_column_attrs(EditingFileType)
del new_event.editing_file_types[:]
db.session.flush()
for old_filetype in self.old_event.editing_file_types:
filetype = EditingFileType()
filetype.populate_from_attrs(old_filetype, attrs)
new_event.editing_file_types.append(filetype)
db.session.flush()
self._filetype_map[old_filetype] = filetype
def _clone_review_conditions(self, new_event):
old_conditions = EditingReviewCondition.query.with_parent(self.old_event).all()
for condition in old_conditions:
new_filetypes = {self._filetype_map[ft] for ft in condition.file_types}
new_condition = EditingReviewCondition(type=condition.type, file_types=new_filetypes)
new_event.editing_review_conditions.append(new_condition)
|
the-stack_0_5313 | #!/usr/bin/env python3
import argparse
import glob
import json
import logging
import os
import shlex
import subprocess
from pathlib import Path
def runBazelBuildForCompilationDatabase(bazel_options, bazel_targets):
query_targets = ' union '.join(bazel_targets)
query = ' union '.join(
q.format(query_targets) for q in [
'attr(include_prefix, ".+", kind(cc_library, deps({})))',
'attr(strip_include_prefix, ".+", kind(cc_library, deps({})))',
'attr(generator_function, ".*proto_library", kind(cc_.*, deps({})))',
])
build_targets = subprocess.check_output(["bazel", "query", "--notool_deps",
query]).decode().splitlines()
subprocess.check_call(["bazel", "build"] + bazel_options + build_targets)
# This method is equivalent to https://github.com/grailbio/bazel-compilation-database/blob/master/generate.sh
def generateCompilationDatabase(args):
# We need to download all remote outputs for generated source code. This option lives here to override those
# specified in bazelrc.
bazel_options = shlex.split(os.environ.get("BAZEL_BUILD_OPTIONS", "")) + [
"--config=compdb",
"--remote_download_outputs=all",
]
if args.keep_going:
bazel_options.append("-k")
if args.run_bazel_build:
try:
runBazelBuildForCompilationDatabase(bazel_options, args.bazel_targets)
except subprocess.CalledProcessError as e:
if not args.keep_going:
raise
else:
logging.warning("bazel build failed {}: {}".format(e.returncode, e.cmd))
subprocess.check_call(["bazel", "build"] + bazel_options + [
"--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect",
"--output_groups=compdb_files"
] + args.bazel_targets)
execroot = subprocess.check_output(["bazel", "info", "execution_root"] +
bazel_options).decode().strip()
compdb = []
for compdb_file in Path(execroot).glob("**/*.compile_commands.json"):
compdb.extend(json.loads("[" + compdb_file.read_text().replace("__EXEC_ROOT__", execroot) +
"]"))
return compdb
def isHeader(filename):
for ext in (".h", ".hh", ".hpp", ".hxx"):
if filename.endswith(ext):
return True
return False
def isCompileTarget(target, args):
filename = target["file"]
if not args.include_headers and isHeader(filename):
return False
if not args.include_genfiles:
if filename.startswith("bazel-out/"):
return False
if not args.include_external:
if filename.startswith("external/"):
return False
return True
def modifyCompileCommand(target, args):
cc, options = target["command"].split(" ", 1)
# Workaround for bazel added C++11 options, those doesn't affect build itself but
# clang-tidy will misinterpret them.
options = options.replace("-std=c++0x ", "")
options = options.replace("-std=c++11 ", "")
if args.vscode:
# Visual Studio Code doesn't seem to like "-iquote". Replace it with
# old-style "-I".
options = options.replace("-iquote ", "-I ")
if isHeader(target["file"]):
options += " -Wno-pragma-once-outside-header -Wno-unused-const-variable"
options += " -Wno-unused-function"
target["command"] = " ".join([cc, options])
return target
def fixCompilationDatabase(args, db):
db = [modifyCompileCommand(target, args) for target in db if isCompileTarget(target, args)]
with open("compile_commands.json", "w") as db_file:
json.dump(db, db_file, indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate JSON compilation database')
parser.add_argument('--run_bazel_build', action='store_true')
parser.add_argument('-k', '--keep_going', action='store_true')
parser.add_argument('--include_external', action='store_true')
parser.add_argument('--include_genfiles', action='store_true')
parser.add_argument('--include_headers', action='store_true')
parser.add_argument('--vscode', action='store_true')
parser.add_argument('bazel_targets',
nargs='*',
default=["//source/...", "//test/...", "//tools/..."])
args = parser.parse_args()
fixCompilationDatabase(args, generateCompilationDatabase(args))
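# Example invocation (assuming the script lives under tools/ in a Bazel
# workspace; targets default to //source/..., //test/... and //tools/...):
#   tools/gen_compilation_database.py --run_bazel_build --include_headers --vscode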
|
the-stack_0_5314 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 14:34:24 2022
@author: Manuel Huber
"""
import os.path
import multiprocessing
from multiprocessing import Process, Manager
import ee
import geemap
import numpy as np
Map = geemap.Map()
import matplotlib.pyplot as plt
from colour import Color
#from osgeo import gdal
import pandas as pd
import time
import os, glob
import progressbar
from osgeo import gdal
#########################################################################
def get_geotiff_gee(dataset,world,name, path, scale_x, name_save, tile_size):
sel_name = 'wld_rgn' #country_na'
conti = world.filter(ee.Filter.eq(sel_name, name)) # Select the right continent boundaries of the input name
sel_name = 'country_na'
features_country = np.unique(conti.aggregate_array(sel_name).getInfo()) # All countries in the selected continents/area
bar = progressbar.ProgressBar(maxval=len(features_country), \
widgets=[progressbar.Bar('=', '[', ']'), ' ', '{}'.format(name), progressbar.Percentage()])
bar.start()
    # Looping through all countries individually as there are limitations on the "coveringGrid" function, which needs to be put into a list:
for j in range(len(features_country)):
bar.update(j+1)
geometry = world.filter(ee.Filter.eq(sel_name, features_country[j]))
ROI = geometry.geometry()
data_pro = dataset.projection()
        features = ROI.coveringGrid(data_pro, tile_size)  # Set the tile size, which will depend on the initial resolution chosen
geometries_new = features.toList(5000)
for k in range(len(geometries_new.getInfo())):
roi =ee.Feature(geometries_new.getInfo()[k]).geometry()
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
dataset_2 = dataset.select('wat')
data = dataset_2.updateMask(dataset_2.eq(1)).clip(roi)
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
data_pro = data.projection(); # Select projection of the image
# Force the next reprojection to aggregate instead of resampling.
new_area_count = data.reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro,None, scale_x)
new_area_count_all = data.unmask().reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro, None ,scale_x)
scaled_pixels =new_area_count.divide(new_area_count_all.divide(100)) # ((Sum of selected pixels)/Total_Count_Pixels)*100 To get percent
rio_pixels = scaled_pixels.clip(roi)
            # Possibility to mask certain values etc.:
#imgUnmasked = rio_pixels.gt(0) #.select('b1')
#umasked_data = rio_pixels.updateMask(imgUnmasked)
if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
geemap.ee_export_image(rio_pixels , filename='{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k), scale= scale_x, region = ROI)
#print(name_save, features_country[j], k)
#else:
# print('This file already exists: ',name_save,k,features_country[j])
if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
file_object = open('{}Missing_Files.txt'.format(path), 'a')
file_object.write('{}, {}, {}, '.format(name_save, features_country[j], k))
file_object.write("\n")
# Close the file
file_object.close()
print(name_save, features_country[j], k, 'Is still missing - Download process failed - Will be downloaded in smaller patches')
            # Backup download in case there is a downloading issue with the set tile size
if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
features_2 = roi.coveringGrid(data_pro, 200000)
geometries_new_2 = features_2.toList(5000)#.map(func_yhi)
for p in range(len(geometries_new_2.getInfo())):
roi_2 =ee.Feature(geometries_new_2.getInfo()[p]).geometry()
rio_pixels_2 = rio_pixels.clip(roi_2)
geemap.ee_export_image(rio_pixels_2 , filename='{}/Image_Exported_Failed_Down_{}_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k,p), scale= scale_x, region = roi_2)
bar.finish()
##################### Start the mining process in Google Earth Engine ##############################
if __name__ == "__main__":
    ##### Input - user dependent #########################
    name = 'MERIT_Surface_Water' # Select the name under which the data will be stored
dataset = ee.Image("MERIT/Hydro/v1_0_1")
# Set path where the data should be saved
path_save = '/data/River_Density/New_River_Composition_Different_Res/'
# Name the folder:
folder_name = 'Test_Folder'
if os.path.exists('{}{}'.format(path_save,folder_name)) == False:
os.mkdir('{}{}'.format(path_save,folder_name))
path ='{}{}/'.format(path_save,folder_name)
# Set scale of the density map
scale_x= 25000 #In m ==> 25km
    # If downloading issues occur due to high-resolution maps, decrease the tile size
tile_size = 500000
# Set number of processors for the multiprocessing:
number_of_processors = 4
######################################################
world = ee.FeatureCollection("USDOS/LSIB_SIMPLE/2017") # Feature collection which gives boundaries for countries and continents
    sel_name = 'wld_rgn' # if interested in countries, select 'country_na'
    europe = world  # There is also the option to select individual countries or continents, e.g. .filter(ee.Filter.eq('wld_rgn', 'Europe'))
features_cont = np.array(['North America','Africa' , 'Australia', 'Caribbean' ,'Central America',
'Central Asia' ,'E Asia', 'Europe' ,'Indian Ocean', 'N Asia' ,
'Oceania', 'S Asia', 'S Atlantic' ,'SE Asia', 'SW Asia', 'South America'])
# To avoid spaces an addtional list of names has been created:
features_cont_name = np.array(['North_America','Africa' , 'Australia', 'Caribbean' ,'Central_America',
'Central_Asia' ,'E_Asia', 'Europe' ,'Indian_Ocean', 'N_Asia' ,
'Oceania', 'S_Asia', 'S_Atlantic' ,'SE_Asia', 'SW_Asia', 'South_America'])
# Creating a list to split the processes to the provided cores (this case 5 processes in parallel)
x = np.arange(len(features_cont))
split = np.array_split(x, number_of_processors) # Here the number of processors can be selected
print(split, len(split))
for s in range(len(split)):
#for s in range(1):
print('Split', s+1, 'out of ', len(split))
area_sel = features_cont[split[s]]
area_sel_name = features_cont_name[split[s]]
manager = multiprocessing.Manager()
print('entering the processing')
df_all = manager.list()
processes = []
for j in range(len(area_sel)):
name_save = area_sel_name[j]
name_inp = area_sel[j]
print(name_inp, 'is in the making')
p = Process(target=get_geotiff_gee, args=(dataset,world,name_inp, path, scale_x, name_save,tile_size,)) # Passing the list
p.start()
processes.append(p)
for p in processes:
p.join()
    print("Finished first part. Now it's time to look for the date line issue.")
    ####################### Downloading the areas along the date line separately to avoid feature crossover at -180/180!
geometry_miss_1 = ee.Geometry.Polygon(
[[[158.84159346653087, 73.96789885519699],
[158.84159346653087, 52.15339248067615],
[179.84745284153087, 52.15339248067615],
[179.84745284153087, 73.96789885519699]]])
geometry_miss_2 = ee.Geometry.Polygon(
[[[-165.56270340846913, 73.72336873420824],
[-165.56270340846913, 44.519635837378665],
[-139.01973465846913, 44.519635837378665],
[-139.01973465846913, 73.72336873420824]]])
geometry_miss_all = [geometry_miss_1, geometry_miss_2]
data_pro = dataset.projection()
for i in range(len(geometry_miss_all)):
ROI = ee.Feature(geometry_miss_all[i]).geometry()
features = ROI.coveringGrid(data_pro, 1000000)
geometries_new = features.toList(5000)#.map(func_yhi)
list_images = []
for k in range(len(geometries_new.getInfo())):
roi =ee.Feature(geometries_new.getInfo()[k]).geometry()
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
dataset_2 = dataset.select('wat')
data = dataset_2.updateMask(dataset_2.eq(1)).clip(roi)
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
data_pro = data.projection(); # Select projection of the image
# Force the next reprojection to aggregate instead of resampling.
new_area_count = data.reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro,None, scale_x)
new_area_count_all = data.unmask().reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro, None ,scale_x)
scaled_pixels =new_area_count.divide(new_area_count_all.divide(100)) # ((Sum of selected pixels)/Total_Count_Pixels)*100 To get percent
rio_pixels = scaled_pixels.clip(roi)
if os.path.exists('{}Image_Date_Line_Missing_{}_{}_{}_{}.tif'.format(path,scale_x,i,k,len(geometries_new.getInfo()))) == False:
geemap.ee_export_image(rio_pixels, filename='{}Image_Date_Line_Missing_{}_{}_{}_{}.tif'.format(path,scale_x,i,k,len(geometries_new.getInfo()) ), scale= scale_x, region = roi)
    print("All data is downloaded, it's time to start creating some maps.")
######################### Merging and Reprojecting the data ###########################
folder_name_2 = 'Reprojected_Files'
if os.path.exists('{}{}'.format(path,folder_name_2)) == False:
os.mkdir('{}{}'.format(path,folder_name_2))
path_repro ='{}{}/'.format(path,folder_name_2)
folder_name_3 = 'Final_Files'
if os.path.exists('{}{}'.format(path,folder_name_3)) == False:
os.mkdir('{}{}'.format(path,folder_name_3))
path_final ='{}{}/'.format(path,folder_name_3)
files_to_mosaic = glob.glob('{}/*.tif'.format(path))
print(len(files_to_mosaic))
files_string = " ".join(files_to_mosaic)
for i in range(len(files_to_mosaic)):
# Possibility to set projection
command ='gdalwarp {} {}Out_{}.tif -overwrite -t_srs "+proj=longlat +ellps=WGS84"'.format(files_to_mosaic[i], path_repro,i)
print(os.popen(command).read())
files_to_mosaic = np.array(glob.glob('{}*.tif'.format(path_repro)))
    long = np.array_split(range(len(files_to_mosaic)), 5) # Needed because gdal has a limit on the number of GeoTIFF files that can be processed at the same time
for f in range(len(long)):
files_ib = files_to_mosaic[long[f].astype(int)]
print(len(files_to_mosaic))
files_string = " ".join(files_ib)
command = "gdal_merge.py -o {}inbetween_{}.tif -of gtiff -n 0 ".format(path_repro,f) + files_string
print(os.popen(command).read())
# Merging the inbetween files together
files_to_mosaic = glob.glob('{}inbetween*.tif'.format(path_repro))
files_string = " ".join(files_to_mosaic)
command = "gdal_merge.py -o {}{}_{}.tif -of gtiff -n 0 ".format(path_final,scale_x,name) + files_string
print(os.popen(command).read())
command = "gdal_translate -scale -of KMLSUPEROVERLAY {}{}_{}.tif {}{}_{}.kmz".format(path_final,scale_x,name,path_final,scale_x,name)
print(os.popen(command).read())
|
the-stack_0_5316 | """Templates for the policy_sentry YML files.
These can be used for generating policies
"""
ACTIONS_TEMPLATE = """mode: actions
name: ''
actions:
- ''
"""
CRUD_TEMPLATE = """mode: crud
name: ''
# Specify resource ARNs
read:
- ''
write:
- ''
list:
- ''
tagging:
- ''
permissions-management:
- ''
# Skip resource constraint requirements by listing actions here.
skip-resource-constraints:
- ''
# Actions that do not support resource constraints
wildcard-only:
single-actions: # standalone actions
- ''
# Service-wide - like 's3' or 'ec2'
service-read:
- ''
service-write:
- ''
service-list:
- ''
service-tagging:
- ''
service-permissions-management:
- ''
"""
CRUD_TEMPLATE_DICT = {
"mode": "crud",
"name": "",
"read": [],
"write": [],
"list": [],
"tagging": [],
"permissions-management": [],
"skip-resource-constraints": [],
"wildcard-only": {
"single-actions": [],
"service-read": [],
"service-write": [],
"service-list": [],
"service-tagging": [],
"service-permissions-management": [],
},
}
ACTIONS_TEMPLATE_DICT = {"mode": "actions", "name": "", "actions": []}
def create_crud_template():
"""Generate the CRUD YML Template """
return CRUD_TEMPLATE
def create_actions_template():
"""Generate the Actions YML template"""
return ACTIONS_TEMPLATE
def get_crud_template_dict():
"""Generate the CRUD template in dict format"""
return CRUD_TEMPLATE_DICT
def get_actions_template_dict():
"""Get the Actions template in dict format."""
return ACTIONS_TEMPLATE_DICT
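# Sketch of typical use (hypothetical role name and ARN): start from the CRUD
# skeleton and fill in resource ARNs before rendering a policy.
#   template = get_crud_template_dict()
#   template['name'] = 'example-role'
#   template['read'].append('arn:aws:s3:::example-bucket')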
|
the-stack_0_5317 | from textstyle.en.stylometry.style_features import get_basic_style_features
def test_get_basic_style_features():
text_corpus = [
"I like to eat broccoli and bananas.",
"I ate a banana and spinach smoothie for breakfast.",
"Chinchillas and kittens are cute.",
"My sister adopted a kitten yesterday.",
"Look at this cute hamster munching on a piece of broccoli."
]
words_count = [7, 9, 5, 6, 11]
chars_count = [35, 50, 33, 37, 58]
capital_chars_count = [1, 1, 1, 1, 1]
lower_chars_count = [27, 40, 27, 30, 46]
punc_count = [1, 1, 1, 1, 1]
stopwords_count = [2, 3, 2, 1, 5]
nouns_count = [2, 4, 2, 3, 5]
verbs_count = [2, 1, 1, 1, 1]
pascal_case_count = [1, 1, 1, 1, 1]
all_capital_case_count = [1, 1, 0, 0, 0]
inter_count = [0, 0, 0, 0, 0]
results = get_basic_style_features(text_corpus)
assert len(results) == 12
assert results['words_count'] == words_count
assert results['chars_count'] == chars_count
assert results['capital_chars_count'] == capital_chars_count
assert results['lower_chars_count'] == lower_chars_count
assert results['punc_count'] == punc_count
assert results['stopwords_count'] == stopwords_count
assert results['nouns_count'] == nouns_count
assert results['verbs_count'] == verbs_count
assert results['pascal_case_count'] == pascal_case_count
assert results['all_capital_case_count'] == all_capital_case_count
assert results['interruptions_count'] == inter_count
|
the-stack_0_5322 | import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from matplotlib import pyplot as plt
from DataLoaders import MNIST_Loaders
from Network import Net
import utils
import AttackTools
import optimizers
import copy
import os #to make dir
def opt_setup(Otype, param):
if 'r' not in param:
param['r'] = 1.
if 'budget' not in param:
param['budget'] = 10000
if 'target_fval' not in param:
param['target_fval'] = None
if 'constrained' not in param:
param['constrained'] = False
if 'metric' not in param:
param['metric'] = 'inf'
if Otype.upper() == 'RING':
if 'useFIM' not in param:
param['useFIM'] = False
if 'm' not in param:
param['m'] = 8
if 'num_blocks' not in param:
param['num_blocks'] = 49*2
if 'BweightOption' not in param:
param['BweightOption'] = None
param['useFIM'] = True
opt = optimizers.RING(param, y0 = None,f = None)
elif Otype.upper() == 'SHIPS':
param['useFIM'] = False
opt = optimizers.RING(param, y0 = None,f = None) # param['useFIM'] = False
elif Otype.upper() == 'CARS' or Otype.upper() == 'ZOO':
opt = optimizers.CARS(param, y0 = None,f = None)
elif Otype.upper() == 'SQUARE':
opt = optimizers.SquareATK(param, y0 = None, f = None)
return opt
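# Example call (hypothetical parameter values; any keys left out fall back to the
# defaults filled in above):
#   opt = opt_setup('CARS', {'budget': 10000, 'metric': 'inf', 'constrained': False})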
def vec2Img(vec):
ImageSize = [28, 28] #image size for MNIST
return vec.reshape(ImageSize)
class result:
def __init__(self):
self.name = None # optimizer's name
self.isol = None # initial sol
self.ilbl = None # initial label
self.tsol = None # terminal sol
self.tlbl = None # terminal label
self.fevals = None # function evaluations
self.niter = None # number of iterations
self.status = None # status after test
self.performance_log = None # performance log
self.CARScounter = None # if available
def saveResult(self, name, opt, performance_log = None):
self.name = name
self.isol = opt.Atk.data
self.ilbl = opt.Atk.label
self.tsol = opt.ximg
self.tlbl = opt.Atk.curr_lbl(self.tsol)
self.fevals = opt.function_evals
self.niter = opt.t
self.status = opt.status
self.performance_log = performance_log
if hasattr(opt, 'CARScounter'):
self.CARScounter = opt.CARScounter
def showBriefResult(self):
if hasattr(self, 'CARScounter'):
print(f'opt = {self.name}, niter = {self.niter}, CARScounter = {self.CARScounter}')
print(f'\tfunction evaluations = {self.fevals}, status = {self.status}')
print(f'\tori lbl = {self.ilbl}, curr lbl = {self.tlbl}')
def plotResult(self, cmap = None):
# assume a proper subplot is already set
if cmap == None:
plt.imshow(vec2Img(self.tsol))
else:
plt.imshow(vec2Img(self.tsol), cmap = cmap)
plt.title(f'{self.tlbl}')
# plt.title(f'lbl ({self.name}) = {self.tlbl}')
def save2file(self, f, tid, delim = '\t'):
'''
saves only
opt name, testset_id, orig label, final label, num_evals
file name:
(opt_name).csv
content:
each row consists of:
[testset_id, orig_label, final_label, num_evals, status]
here status = 'B'(budget reached), 'T'(target reached), or 'S'(attack succeeded)
@param
f ..... file object (open, append mode)
t ..... testset_id
'''
f.write(f'{tid}{delim}{self.ilbl}{delim}{self.tlbl}{delim}{self.fevals}{delim}{self.status}\n')
class Tester:
'''
A test suite, containing the set of test data and set of optimizers
Usage:
1. Set data
2. Set optimizers
3. Run .run_single_test(label, target_label)
'''
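    # Minimal usage sketch (hypothetical data; assumes `atk_test_set`, `atk` and
    # `param` have been prepared as described in __init__ below):
    #   opts = {'CARS': opt_setup('CARS', param)}
    #   tester = Tester(atk_test_set, atk, opts=opts,
    #                   options={'normalize': True, 'metric': 'inf', 'constrained': False})
    #   tester.run_single_test(label=7)
    #   tester.display_single_res(label=7, show=True)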
def __init__(self, atk_test_set, atk, opts = None, options = None, tid = 0):
"""
(inputs)
atk_test_set .. dict
(*this)[label] = image data in a vector, whose label is "label"
atk ........... AttackTools object
opts .......... dict
a dict of optimizers to test
key = optimizer's name (possibly with options)
options ....... dict
['normalize'] --> images will be normalized to have pixel values btw 0 and 1
(attributes)
res ........... dict of dict of result class
res[optname][lbl] = Attack Results containing
- initial / terminal solutions
- initial / terminal labels
- function_evals
- number of iterations
- status
- CARScounter (if available)
- performance log
"""
self.atk = atk
self.atk_test_set = atk_test_set
if opts != None:
self.setopts(opts)
self.tid = tid
if 'normalize' in options:
if options['normalize']:
self.normalize_data()
if 'metric' in options:
self.metric = options['metric']
if 'constrained' in options:
self.constrained = options['constrained']
def setopts(self, opts):
self.opts = opts
self.res = {} # will be a dict(key: opt) of dict(key: labels) of status
for optname in opts:
self.res[optname] = {}
def addopts(self, added):
self.opts = {**self.opts, **added} # merge two dictionaries
for optname in added:
self.res[optname] = {}
def normalize_data(self):
# re-normalize the images to have pixel values in [0, 1]
max_pixel_val = max([ np.max(self.atk_test_set[lbl]) for lbl in self.atk_test_set])
min_pixel_val = min([ np.min(self.atk_test_set[lbl]) for lbl in self.atk_test_set])
for lbl in self.atk_test_set:
self.atk_test_set[lbl] = (self.atk_test_set[lbl]-min_pixel_val)/(max_pixel_val-min_pixel_val)
def run_single_test(self, label, selected_opts = None, target_lbl = None, verbal = 2):
# verbal option: 0 --> no output
# 1 --> output when started/finished
# 2 --> 1 + func vals
if label not in self.atk_test_set:
            print(f'{label} is not a valid label in this attack test set.')
self.atk.setdata(self.atk_test_set[label])
self.atk.setlabel(label)
self.atk.target_lbl = target_lbl
self.atk.metric = self.metric
self.atk.constrained = self.constrained
# self.atk.settargetlabel(target_lbl)
if selected_opts == None:
opts = self.opts
else:
opts = selected_opts
for oname in opts:
self.res[oname][label] = result()
# setup
            opt = copy.deepcopy(self.opts[oname]) # to reset every time
# otherwise the shallow copied opt may alter the original opt object
opt.setAtkAll( Atk = self.atk,
y0 = self.atk_test_set[label],
f = lambda x:self.atk(x) )
performance_log = []
status = None
# verbal
if verbal > 0:
if target_lbl != None:
                    print(f'start a targeted attack ({oname}) on lbl = {label}, target lbl = {self.atk.target_lbl}')
else:
print(f'\t[{label}]', end='\t')#start an untargeted atk ({oname}) on lbl = {label}')
# actual attack starts here
while status == None:
# one iteration
evals, _xfinal, status = opt.step()
# logging
performance_log.append( [evals, opt.fval])
# print
if verbal > 2:
if opt.t % int(10*np.log(opt.t+10)) == 0:
opt.report('f(x): %f F-evals: %d\n' %
(opt.fval, evals) )
# logging
self.res[oname][label].saveResult(oname, opt, performance_log)
if verbal>1:
if opt.t>0:
if opt.Otype != 'SQUARE':
print(f"CARS: {opt.CARScounter},\tCVX = {opt.cvx_counter/opt.t*100:.1f} %", end='\t')
# print(f"CVX counter: {opt.cvx_counter}")
# print(f"Final distortion: {opt.Atk.dist_from_orig(opt.x)}")
print(f"distortion (L2) = { np.sum((_xfinal-opt.xinit)**2) }", end = '\t')
print(f"(Linf) = {np.amax(_xfinal-opt.xinit):.2f}", end='\t')
if verbal > 0:
print( f"#iter = {opt.t} (#eval = {evals}),\t final status = {status}")
def display_single_res(self, label, opts_names_to_disp = None, cmap = None, title = None,
onlyImg = False, onlyLoss = False, save = None, show = False, logplot = True, savedir = None):
if title == None:
title = 'RING for MNIST ATK'
if onlyImg == False and onlyLoss == False:
plt.subplot(2,1,1)
plt.cla()
legends = []
if opts_names_to_disp == None: # default: display all
opts_names_to_disp = self.opts # only need the names (keys of the dict)
for oname in opts_names_to_disp:
if logplot:
plt.plot(np.array(self.res[oname][label].performance_log)[:,0],
np.log10(np.array(self.res[oname][label].performance_log)[:,1]), linewidth=1, label = oname)
else:
plt.plot(np.array(self.res[oname][label].performance_log)[:,0],
(np.array(self.res[oname][label].performance_log)[:,1]), linewidth=1, label = oname)
legends.append(oname)
plt.title(title)
plt.xlabel('function evaluations')
plt.ylabel('$log($ f $)$')
plt.legend(legends)
nopts = len(opts_names_to_disp)
plotnum = 1
# show original image
plt.subplot(2,nopts+1,nopts+2)
if cmap == None:
plt.imshow(vec2Img(self.atk_test_set[label]))
else:
plt.imshow(vec2Img(self.atk_test_set[label]), cmap = cmap)
plt.title(f'original label = {label}')
# show attacked
for oname in opts_names_to_disp:
plt.subplot(2, nopts+1, nopts+2 + plotnum)
self.res[oname][label].showBriefResult()
self.res[oname][label].plotResult(cmap)
plotnum += 1
plt.tight_layout(pad = 1.0)
if show:
plt.show()
elif onlyImg == True: # plot only images
nopts = len(opts_names_to_disp)
# show original image
# set number of rows
if nopts < 8:
nr = 2
elif nopts < 12:
nr = 3
else:
nr = 4
nc = int(np.ceil((nopts+1)/nr))
plt.subplot(nr,nc,1)
if cmap == None:
plt.imshow(vec2Img(self.atk_test_set[label]))
else:
plt.imshow(vec2Img(self.atk_test_set[label]), cmap = cmap)
plt.title(f'original label = {label}')
# show attacked
plotnum = 2
for oname in opts_names_to_disp:
plt.subplot(nr, nc, plotnum)
self.res[oname][label].showBriefResult()
self.res[oname][label].plotResult(cmap)
plotnum += 1
plt.tight_layout(pad = 1.0)
if show:
plt.show()
elif onlyLoss == True: # plot only loss
legends = []
if opts_names_to_disp == None: # default: display all
opts_names_to_disp = self.opts # only need the names (keys of the dict)
for oname in opts_names_to_disp:
plt.plot(np.array(self.res[oname][label].performance_log)[:,0],
np.log10(np.array(self.res[oname][label].performance_log)[:,1]), linewidth=1, label = oname)
legends.append(oname)
plt.title(title)
plt.xlabel('function evaluations')
plt.ylabel('$log($ f $)$')
plt.legend(legends)
if show:
plt.show()
if save != None:
if savedir != None:
if not os.path.exists(savedir):
os.makedirs(savedir)
save = savedir+save # add directory to the file name
plt.savefig(save)
def save_res_simple(self, testset_id, label, opts_names_to_save = None, subdir = 'Res'):
'''
        results are saved in the subdir folder (the folder will be created if it does not exist)
1. saves the brief result
file name: (opt_name).csv
content: each row consists of:
[testset_id, orig_label, final_label, num_evals]
2. also saves the original/attacked images as 28*28 numpy array
original img name: (label)_(testset_id).npy
attacked img name: (label)_(testset_id)_(opt_name)_(final_label).npy
'''
if opts_names_to_save == None: # default: save all
opts_names_to_save = self.opts # only need the names (keys of the dict)
for oname in opts_names_to_save:
save_orig = True
res = self.res[oname][label]
if not os.path.exists(subdir):
os.makedirs(subdir)
fname = subdir + f'/{oname}.csv'
f = open(fname, 'a')
res.save2file(f=f, tid = testset_id, delim='\t')
f.close()
if save_orig:
subdir_img = 'img_'+ subdir
if not os.path.exists(subdir_img):
os.makedirs(subdir_img)
fname = subdir_img + f'/{label}_{testset_id}.npy'
np.save( fname, vec2Img(res.isol))
save_orig = False
fname = subdir_img + f'/{label}_{testset_id}_{oname}_{res.tlbl}.npy'
np.save( fname, vec2Img(res.tsol))
|
the-stack_0_5323 | from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser(
description='Kube-Hunter - hunts for security '
'weaknesses in Kubernetes clusters')
parser.add_argument(
'--list',
action="store_true",
help="Displays all tests in kubehunter "
"(add --active flag to see active tests)")
parser.add_argument(
'--interface',
action="store_true",
help="Set hunting on all network interfaces")
parser.add_argument(
'--pod',
action="store_true",
help="Set hunter as an insider pod")
parser.add_argument(
'--quick',
action="store_true",
help="Prefer quick scan (subnet 24)")
parser.add_argument(
'--include-patched-versions',
action="store_true",
help="Don't skip patched versions when scanning")
parser.add_argument(
'--cidr',
type=str,
help="Set an ip range to scan, example: 192.168.0.0/16")
parser.add_argument(
'--mapping',
action="store_true",
help="Outputs only a mapping of the cluster's nodes")
parser.add_argument(
'--remote',
nargs='+',
metavar="HOST",
default=list(),
help="One or more remote ip/dns to hunt")
parser.add_argument(
'--active',
action="store_true",
help="Enables active hunting")
parser.add_argument(
'--log',
type=str,
metavar="LOGLEVEL",
default='INFO',
help="Set log level, options are: debug, info, warn, none")
parser.add_argument(
'--report',
type=str,
default='plain',
help="Set report type, options are: plain, yaml, json")
parser.add_argument(
'--dispatch',
type=str,
default='stdout',
help="Where to send the report to, options are: "
"stdout, http (set KUBEHUNTER_HTTP_DISPATCH_URL and "
"KUBEHUNTER_HTTP_DISPATCH_METHOD environment variables to configure)")
parser.add_argument(
'--statistics',
action="store_true",
help="Show hunting statistics")
parser.add_argument(
'--network-timeout',
type=float,
default=5.0,
help="network operations timeout")
return parser.parse_args()
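# Example (using only flags defined above): parse_args() would parse a command
# line like `--remote 10.0.0.1 --active --report json` into an argparse.Namespace.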
|
the-stack_0_5324 |
"""
=========================================================================
BlockingCacheFL.py
=========================================================================
A function level cache model which only passes cache requests and
responses to the memory
Author: Eric Tang (et396), Xiaoyu Yan (xy97)
Date: 23 December 2019
"""
import math
from pymtl3 import *
from mem_ifcs.MemMsg import MemMsgType
# Assumes 32 bit address and 32 bit data
#-------------------------------------------------------------------------
# Make messages
#-------------------------------------------------------------------------
def req( CacheReqType, type_, opaque, addr, len, data ):
# type_ as string
if type_ == 'rd': type_ = MemMsgType.READ
elif type_ == 'wr': type_ = MemMsgType.WRITE
elif type_ == 'in': type_ = MemMsgType.WRITE_INIT
elif type_ == 'ad': type_ = MemMsgType.AMO_ADD
elif type_ == 'an': type_ = MemMsgType.AMO_AND
elif type_ == 'or': type_ = MemMsgType.AMO_OR
elif type_ == 'sw': type_ = MemMsgType.AMO_SWAP
elif type_ == 'mi': type_ = MemMsgType.AMO_MIN
elif type_ == 'mu': type_ = MemMsgType.AMO_MINU
elif type_ == 'mx': type_ = MemMsgType.AMO_MAX
elif type_ == 'xu': type_ = MemMsgType.AMO_MAXU
elif type_ == 'xo': type_ = MemMsgType.AMO_XOR
elif type_ == 'inv': type_ = MemMsgType.INV
elif type_ == 'fl': type_ = MemMsgType.FLUSH
return CacheReqType( type_, opaque, addr, len, data )
def resp( CacheRespType, type_, opaque, test, len, data ):
if type_ == 'rd': type_ = MemMsgType.READ
elif type_ == 'wr': type_ = MemMsgType.WRITE
elif type_ == 'in': type_ = MemMsgType.WRITE_INIT
elif type_ == 'ad': type_ = MemMsgType.AMO_ADD
elif type_ == 'an': type_ = MemMsgType.AMO_AND
elif type_ == 'or': type_ = MemMsgType.AMO_OR
elif type_ == 'sw': type_ = MemMsgType.AMO_SWAP
elif type_ == 'mi': type_ = MemMsgType.AMO_MIN
elif type_ == 'mu': type_ = MemMsgType.AMO_MINU
elif type_ == 'mx': type_ = MemMsgType.AMO_MAX
elif type_ == 'xu': type_ = MemMsgType.AMO_MAXU
elif type_ == 'xo': type_ = MemMsgType.AMO_XOR
elif type_ == 'inv': type_ = MemMsgType.INV
elif type_ == 'fl': type_ = MemMsgType.FLUSH
return CacheRespType( type_, opaque, test, len, data )
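# For example (sketch; actual field widths come from the CacheReqType/CacheRespType
# passed in): req( CacheReqType, 'rd', 0, 0x1000, 0, 0 ) builds a full-line read
# request, and resp( CacheRespType, 'rd', 0, 1, 0, 0xdeadbeef ) the matching
# response with the hit bit set in the test field.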
#-------------------------------------------------------------------------
# Define AMO functions
#-------------------------------------------------------------------------
AMO_FUNS = { MemMsgType.AMO_ADD : lambda m,a : m+a,
MemMsgType.AMO_AND : lambda m,a : m&a,
MemMsgType.AMO_OR : lambda m,a : m|a,
MemMsgType.AMO_SWAP : lambda m,a : a,
MemMsgType.AMO_MIN : lambda m,a : m if m.int() < a.int() else a,
MemMsgType.AMO_MINU : min,
MemMsgType.AMO_MAX : lambda m,a : m if m.int() > a.int() else a,
MemMsgType.AMO_MAXU : max,
MemMsgType.AMO_XOR : lambda m,a : m^a,
}
#----------------------------------------------------------------------
# Enhanced random tests
#----------------------------------------------------------------------
# This set of random tests uses a cache model that properly tracks
# hits and misses, and should completely accurately model eviction
# behavior. The model is split up into a hit/miss tracker, and a
# transaction generator, so that the hit/miss tracker can be reused
# in an FL model
class HitMissTracker:
def __init__(self, size, nways, nbanks, linesize):
# Compute various sizes
self.nways = nways
self.linesize = linesize
self.nlines = int(size // linesize)
self.nsets = int(self.nlines // self.nways)
self.nbanks = nbanks
# Compute how the address is sliced
self.offset_start = 0
self.offset_end = self.offset_start + int(math.log(linesize//8, 2))
self.bank_start = self.offset_end
if nbanks > 0:
self.bank_end = self.bank_start + int(math.log(nbanks, 2))
else:
self.bank_end = self.bank_start
self.idx_start = self.bank_end
self.idx_end = self.idx_start + int(math.log(self.nsets, 2))
self.tag_start = self.idx_end
self.tag_end = 32
# Initialize the tag and valid array
# Both arrays are of the form line[idx][way]
# Note that line[idx] is a one-element array for a direct-mapped cache
self.line = []
self.valid = []
for n in range(self.nlines):
self.line.insert(n, [Bits(32, 0) for x in range(nways)])
self.valid.insert(n, [False for x in range(nways)])
# Initialize the LRU array
# Implemented as an array for each set index
# lru[idx][0] is the most recently used
# lru[idx][-1] is the least recently used
self.lru = []
for n in range(self.nsets):
self.lru.insert(n, [x for x in range(nways)])
# Generate the components of an address
# Ignores the bank bits, since they don't affect the behavior
# (and may not even exist)
def split_address(self, addr):
addr = Bits(32, addr)
offset = addr[self.offset_start:self.offset_end]
idx = addr[self.idx_start:self.idx_end]
tag = addr[self.tag_start:self.tag_end]
return (tag, idx, offset)
# Update the LRU status, given that a hit just occurred
def lru_hit(self, idx, way):
self.lru[idx].remove(way)
self.lru[idx].insert(0, way)
# Get the least recently used way for an index
# The LRU is always the last element in the list
def lru_get(self, idx):
return self.lru[idx][-1]
# Perform a tag check, and update lru if a hit occurs
def tag_check(self, tag, idx):
for way in range(self.nways):
if self.valid[idx][way] and self.line[idx][way] == tag:
# Whenever tag check hits, update the set's lru array
self.lru_hit(idx, way)
return True
return False
# Update the tag array due to a value getting fetched from memory
def refill(self, tag, idx):
victim = self.lru_get(idx)
self.line[idx][victim] = tag
self.valid[idx][victim] = True
self.lru_hit(idx, victim)
# Simulate accessing an address. Returns True if a hit occurred,
# False on miss
def access_address(self, addr):
(tag, idx, offset) = self.split_address(addr)
hit = self.tag_check(tag, idx)
if not hit:
self.refill(tag, idx)
return hit
def lru_set(self, idx, way):
self.lru[idx].remove(way)
self.lru[idx].append(way)
def amo_req(self, addr):
(tag, idx, offset) = self.split_address(addr)
for way in range(self.nways):
if self.valid[idx][way] and self.line[idx][way] == tag:
self.valid[idx][way] = False
self.lru_set( idx, way )
break
def invalidate(self):
# invalidates all the cachelines
for way in range(self.nways):
for idx in range(self.nsets):
self.valid[idx][way] = False
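# Usage sketch (hypothetical geometry; sizes are in bits, matching how ModelCache
# below instantiates the tracker):
#   tracker = HitMissTracker(size=4096*8, nways=2, nbanks=0, linesize=128)
#   tracker.access_address(0x1000)   # cold miss  -> False
#   tracker.access_address(0x1000)   # now cached -> True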
class ModelCache:
def __init__(self, size, nways, nbanks, CacheReqType, CacheRespType, MemReqType, MemRespType, mem=None):
# The hit/miss tracker
self.mem_bitwidth_data = MemReqType.get_field_type("data").nbits
self.cache_bitwidth_data = CacheReqType.get_field_type("data").nbits
self.BitsData = mk_bits(self.cache_bitwidth_data)
size = size*8
self.tracker = HitMissTracker(size, nways, nbanks, self.mem_bitwidth_data)
# The transactions list contains the requests and responses for
# the stream of read/write calls on this model
self.transactions = []
self.opaque = 0
self.CacheReqType = CacheReqType
self.CacheRespType = CacheRespType
self.MemReqType = MemReqType
self.MemRespType = MemRespType
self.nlines = int(size // self.mem_bitwidth_data)
self.nsets = int(self.nlines // nways)
# Compute how the address is sliced
self.offset_start = 0
self.offset_end = self.offset_start + int(math.log(self.mem_bitwidth_data//8, 2))
self.idx_start = self.offset_end
self.idx_end = self.idx_start + int(math.log(self.nsets, 2))
self.tag_start = self.idx_end
self.tag_end = 32
# Unpack any initial values of memory into a dict (has easier lookup)
#
# zip is used here to convert the mem array into an array of
# (addr, value) pairs (which it really should be in the first
# place)
self.mem = {}
if mem:
for addr, value in zip(mem[::2], mem[1::2]):
offset = int(Bits32(addr)[ self.offset_start : self.offset_end ])
addr = int(Bits32(addr)[ self.offset_end : 32 ])
if addr not in self.mem:
self.mem[addr] = Bits(self.mem_bitwidth_data, 0)
# assume word mem declarations
self.mem[addr][ offset*8 : (offset+4)*8 ] = value
def check_hit(self, addr):
# Tracker returns boolean, need to convert to 1 or 0 to use
# in the "test" field of the response
if self.tracker.access_address(addr):
return 1
else:
return 0
def read(self, addr, opaque, len_):
hit = self.check_hit(addr)
new_addr = int(addr[self.offset_end:32])
offset = int(addr[self.offset_start:self.offset_end])
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
if len_ == 0:
value = self.mem[new_addr][offset*8 : (offset+self.cache_bitwidth_data/8)*8]
else:
value = self.mem[new_addr][offset*8 : (offset + int(len_))*8 ]
value = zext(value, self.cache_bitwidth_data)
self.transactions.append(req (self.CacheReqType, 'rd', opaque, addr, len_, 0))
self.transactions.append(resp(self.CacheRespType,'rd', opaque, hit, len_, value))
self.opaque += 1
def write(self, addr, value, opaque, len_):
hit = self.check_hit(addr)
new_addr = int(addr[self.offset_end:32])
offset = int(addr[self.offset_start:self.offset_end])
value = Bits(self.cache_bitwidth_data, value)
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
if len_ == 0:
self.mem[new_addr][offset*8 : (offset+self.cache_bitwidth_data/8)*8] = value[0 : self.cache_bitwidth_data ]
else:
self.mem[new_addr][offset*8 : (offset + int(len_))*8] = value[0 : int(len_)*8 ]
self.transactions.append(req (self.CacheReqType, 'wr', opaque, addr, len_, value))
self.transactions.append(resp(self.CacheRespType,'wr', opaque, hit, len_, 0))
self.opaque += 1
def init(self, addr, value, opaque, len_):
hit = self.check_hit(addr)
new_addr = int(addr[self.offset_end:32])
offset = int(addr[self.offset_start:self.offset_end])
value = Bits(self.cache_bitwidth_data, value)
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
if len_ == 0:
self.mem[new_addr][offset*8 : (offset+self.cache_bitwidth_data/8)*8] = value
else:
self.mem[new_addr][offset*8 : (offset + int(len_))*8 ] = value
self.transactions.append(req(self.CacheReqType,'in', opaque, addr, len_, value))
self.transactions.append(resp(self.CacheRespType,'in', opaque, 0, len_, 0))
self.opaque += 1
def amo(self, addr, value, opaque, len_, func):
# AMO operations are on the word level only
self.tracker.amo_req(addr)
new_addr = addr[self.offset_end:32]
offset = int(addr[self.offset_start:self.offset_end])
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
ret = self.mem[new_addr.int()][offset * 8 : (offset + 4) * 8]
value = trunc(value, 32)
amo_out = AMO_FUNS[ int(func) ]( ret, value )
ret = zext( ret, self.cache_bitwidth_data )
value = zext( value, self.cache_bitwidth_data )
self.mem[new_addr.int()][offset * 8 : (offset + 4) * 8] = amo_out
self.transactions.append(req (self.CacheReqType, func, opaque, addr, len_, value))
self.transactions.append(resp(self.CacheRespType,func, opaque, 0, len_, ret))
self.opaque += 1
def invalidate(self, opaque):
self.tracker.invalidate()
self.transactions.append(req (self.CacheReqType, 'inv', opaque, 0, 0, 0))
self.transactions.append(resp(self.CacheRespType, 'inv', opaque, 0, 0, 0))
self.opaque += 1
def flush(self, opaque):
self.transactions.append(req (self.CacheReqType, 'fl', opaque, 0, 0, 0))
self.transactions.append(resp(self.CacheRespType, 'fl', opaque, 0, 0, 0))
self.opaque += 1
def get_transactions(self):
return self.transactions
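# Typical use in a test (sketch; the constructor arguments are whatever the test
# harness provides):
#   model = ModelCache(size, nways, nbanks, CacheReqType, CacheRespType,
#                      MemReqType, MemRespType, mem=mem_image)
#   model.write(Bits32(0x1000), 0xdeadbeef, opaque=0, len_=0)
#   model.read(Bits32(0x1000), opaque=1, len_=0)
#   reference_msgs = model.get_transactions()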
|
the-stack_0_5325 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
skeleton code for k-means clustering mini-project
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
### plot each cluster with a different color--add more colors for
### drawing more than 4 clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "rb") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
temp = [data_dict[key]['salary'] for key in data_dict.keys()]
salaries = [x for x in temp if x!='NaN']
print('max salary:',max(salaries))
print('min salary:',min(salaries))
temp = [data_dict[key]['exercised_stock_options'] for key in data_dict.keys()]
stock_options = [x for x in temp if x!='NaN']
print('max stock_options:',max(stock_options))
print('min stock_options:',min(stock_options))
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
feature_3 = "total_payments"
poi = "poi"
features_list = [poi, feature_1, feature_2,feature_3]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### for the "clustering with 3 features" part of the mini-project,
### the loop below has already been changed to unpack three features
### per data point (f1, f2 and the unused third feature):
for f1, f2, _ in finance_features:
plt.scatter( f1, f2 )
plt.show()
from sklearn.cluster import KMeans
features_list = ["poi", feature_1, feature_2,feature_3]
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2)
pred = clf.fit_predict( finance_features )
Draw(pred, finance_features, poi, name="clusters_before_scaling.pdf", f1_name=feature_1, f2_name=feature_2)
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print("no predictions object named pred found, no clusters to plot")
|
the-stack_0_5327 | import logging
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.events import event_document_type_edited
from mayan.apps.documents.models import Document, DocumentType
from mayan.apps.documents.permissions import (
permission_document_type_edit, permission_document_view
)
from mayan.apps.documents.views.document_views import DocumentListView
from mayan.apps.views.generics import (
AddRemoveView, SingleObjectCreateView, SingleObjectDeleteView,
SingleObjectEditView, SingleObjectListView
)
from mayan.apps.views.mixins import ExternalObjectMixin
from .events import event_smart_link_edited
from .forms import SmartLinkConditionForm, SmartLinkForm
from .icons import icon_smart_link_setup, icon_smart_link_condition
from .links import link_smart_link_create, link_smart_link_condition_create
from .models import ResolvedSmartLink, SmartLink, SmartLinkCondition
from .permissions import (
permission_smart_link_create, permission_smart_link_delete,
permission_smart_link_edit, permission_smart_link_view
)
logger = logging.getLogger(name=__name__)
class DocumentTypeSmartLinksView(AddRemoveView):
main_object_method_add = 'smart_link_add'
main_object_method_remove = 'smart_link_remove'
main_object_permission = permission_document_type_edit
main_object_model = DocumentType
main_object_pk_url_kwarg = 'document_type_id'
secondary_object_model = SmartLink
secondary_object_permission = permission_smart_link_edit
list_available_title = _('Available smart links')
list_added_title = _('Smart links enabled')
related_field = 'smart_links'
def action_add(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.smart_links.add(obj)
event_smart_link_edited.commit(
actor=_user, action_object=self.main_object, target=obj
)
def action_remove(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.smart_links.remove(obj)
event_smart_link_edited.commit(
actor=_user, action_object=self.main_object, target=obj
)
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Smart links to enable for document type: %s'
) % self.main_object,
}
class ResolvedSmartLinkView(ExternalObjectMixin, DocumentListView):
external_object_class = Document
external_object_permission = permission_document_view
external_object_pk_url_kwarg = 'document_id'
def dispatch(self, request, *args, **kwargs):
self.smart_link = self.get_smart_link()
return super(
ResolvedSmartLinkView, self
).dispatch(request, *args, **kwargs)
def get_document_queryset(self):
try:
queryset = self.smart_link.get_linked_document_for(
document=self.external_object
)
except Exception as exception:
queryset = Document.objects.none()
# Check if the user has the smart link edit permission before
# showing the exception text.
try:
AccessControlList.objects.check_access(
obj=self.smart_link,
permissions=(permission_smart_link_edit,),
user=self.request.user
)
except PermissionDenied:
"""User doesn't have the required permission."""
else:
messages.error(
message=_('Smart link query error: %s' % exception),
request=self.request
)
return queryset
def get_extra_context(self):
dynamic_label = self.smart_link.get_dynamic_label(
document=self.external_object
)
if dynamic_label:
title = _('Documents in smart link: %s') % dynamic_label
else:
title = _(
'Documents in smart link "%(smart_link)s" as related to '
'"%(document)s"'
) % {
'document': self.external_object,
'smart_link': self.smart_link.label,
}
context = super(ResolvedSmartLinkView, self).get_extra_context()
context.update(
{
'object': self.external_object,
'title': title,
}
)
return context
def get_smart_link(self):
queryset = AccessControlList.objects.restrict_queryset(
permission=permission_smart_link_view,
queryset=SmartLink.objects.filter(enabled=True),
user=self.request.user
)
return get_object_or_404(
klass=queryset, pk=self.kwargs['smart_link_id']
)
class SmartLinkDocumentTypesView(AddRemoveView):
main_object_method_add = 'document_types_add'
main_object_method_remove = 'document_types_remove'
main_object_permission = permission_smart_link_edit
main_object_model = SmartLink
main_object_pk_url_kwarg = 'smart_link_id'
secondary_object_model = DocumentType
secondary_object_permission = permission_document_type_edit
list_available_title = _('Available document types')
list_added_title = _('Document types enabled')
related_field = 'document_types'
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Document type for which to enable smart link: %s'
) % self.main_object,
}
class SmartLinkListView(SingleObjectListView):
model = SmartLink
object_permission = permission_smart_link_view
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_smart_link_setup,
'no_results_main_link': link_smart_link_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Indexes group documents into units, usually with similar '
'properties and of equal or similar types. Smart links '
'allow defining relationships between documents even '
'if they are in different indexes and are of different '
'types.'
),
'no_results_title': _(
'There are no smart links'
),
'title': _('Smart links'),
}
class DocumentSmartLinkListView(ExternalObjectMixin, SmartLinkListView):
external_object_class = Document
external_object_permission = permission_document_view
external_object_pk_url_kwarg = 'document_id'
def get_extra_context(self):
return {
'document': self.external_object,
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_smart_link_setup,
'no_results_text': _(
'Smart links allow defining relationships between '
'documents even if they are in different indexes and '
'are of different types.'
),
'no_results_title': _(
'There are no smart links for this document'
),
'object': self.external_object,
'title': _('Smart links for document: %s') % self.external_object,
}
def get_source_queryset(self):
# Override SingleObjectListView source queryset from SmartLink to
# ResolvedSmartLink.
return ResolvedSmartLink.objects.get_for(
document=self.external_object
)
class SmartLinkCreateView(SingleObjectCreateView):
extra_context = {'title': _('Create new smart link')}
form_class = SmartLinkForm
post_action_redirect = reverse_lazy(
viewname='linking:smart_link_list'
)
view_permission = permission_smart_link_create
def get_save_extra_data(self):
return {'_user': self.request.user}
class SmartLinkDeleteView(SingleObjectDeleteView):
model = SmartLink
object_permission = permission_smart_link_delete
pk_url_kwarg = 'smart_link_id'
post_action_redirect = reverse_lazy(
viewname='linking:smart_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Delete smart link: %s') % self.object
}
class SmartLinkEditView(SingleObjectEditView):
form_class = SmartLinkForm
model = SmartLink
object_permission = permission_smart_link_edit
pk_url_kwarg = 'smart_link_id'
post_action_redirect = reverse_lazy(
viewname='linking:smart_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Edit smart link: %s') % self.object
}
def get_save_extra_data(self):
return {'_user': self.request.user}
class SmartLinkConditionListView(ExternalObjectMixin, SingleObjectListView):
external_object_class = SmartLink
external_object_permission = permission_smart_link_edit
external_object_pk_url_kwarg = 'smart_link_id'
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_smart_link_condition,
'no_results_main_link': link_smart_link_condition_create.resolve(
context=RequestContext(
request=self.request, dict_={
'object': self.external_object
}
)
),
'no_results_text': _(
'Conditions are small logic units that when combined '
'define how the smart link will behave.'
),
'no_results_title': _(
'There are no conditions for this smart link'
),
'object': self.external_object,
'title': _(
'Conditions for smart link: %s'
) % self.external_object,
}
def get_source_queryset(self):
return self.external_object.conditions.all()
class SmartLinkConditionCreateView(
ExternalObjectMixin, SingleObjectCreateView
):
external_object_class = SmartLink
external_object_permission = permission_smart_link_edit
external_object_pk_url_kwarg = 'smart_link_id'
form_class = SmartLinkConditionForm
def get_extra_context(self):
return {
'title': _(
'Add new conditions to smart link: "%s"'
) % self.external_object,
'object': self.external_object,
}
def get_instance_extra_data(self):
return {'smart_link': self.external_object}
def get_post_action_redirect(self):
return reverse(
viewname='linking:smart_link_condition_list', kwargs={
'smart_link_id': self.external_object.pk
}
)
def get_queryset(self):
return self.external_object.conditions.all()
class SmartLinkConditionDeleteView(SingleObjectDeleteView):
model = SmartLinkCondition
object_permission = permission_smart_link_edit
pk_url_kwarg = 'smart_link_condition_id'
def get_extra_context(self):
return {
'condition': self.object,
'navigation_object_list': ('object', 'condition'),
'object': self.object.smart_link,
'title': _(
'Delete smart link condition: "%s"?'
) % self.object,
}
def get_post_action_redirect(self):
return reverse(
viewname='linking:smart_link_condition_list', kwargs={
'smart_link_id': self.object.smart_link.pk
}
)
class SmartLinkConditionEditView(SingleObjectEditView):
form_class = SmartLinkConditionForm
model = SmartLinkCondition
object_permission = permission_smart_link_edit
pk_url_kwarg = 'smart_link_condition_id'
def get_extra_context(self):
return {
'condition': self.object,
'navigation_object_list': ('object', 'condition'),
'object': self.object.smart_link,
'title': _('Edit smart link condition'),
}
def get_post_action_redirect(self):
return reverse(
viewname='linking:smart_link_condition_list', kwargs={
'smart_link_id': self.object.smart_link.pk
}
)
|
the-stack_0_5328 | import face_detection.video_receiver as video_receiver
import face_detection.face_detector as face_detector
import configuration.general_settings as settings
from model.vgg_adapted_model import FaceAnalyserModel
def main():
# Initialize model
model = FaceAnalyserModel(settings.model_weights_path)
# Initialize video capture
capture = video_receiver.initializeVideoCapture()
    # Check if the user wants to quit
while(video_receiver.checkVideo()):
# Capture frame-by-frame
ret, frame = video_receiver.captureFrame(capture)
# Display the resulting frame
video_receiver.displayFrame(frame)
# Change image scale color
frame_gray = face_detector.changeColorScale(frame)
# Detect face
face_coordinates = face_detector.getFaceCoordinates(frame_gray)
# Check if face has been detected
if face_coordinates is not None:
# Preprocess image
preprocessed_frame = face_detector.preprocess(frame_gray, face_coordinates)
emotion_prob, emotion_index = model.detectEmotion(preprocessed_frame)
#print ("The worker is " + settings.detected_emotions[emotion_index])
print("ex:EmotionDetected rdf:type ewe-emodet:EmotionDetected. \
\n ex:EmotionDetected ewe-emodet:hasDetected onyx:Emotion. \
\n onyx:Emotion onyx:hasEmotionCategory wn-affect:" + settings.detected_emotions[emotion_index] + " . \
\n onyx:Emotion onyx:hasEmotionIntensity " + str(emotion_prob) + ".\n\n")
# Draw face rectangle
video_receiver.drawFace(frame, face_coordinates, settings.detected_emotions[emotion_index])
# When everything done, release the capture
capture.release()
video_receiver.stopVideoCapture()
if __name__ == '__main__':
main()
|
the-stack_0_5329 | from core.advbase import *
from slot.a import *
from slot.d import*
def module():
return Summer_Ranzal
class Summer_Ranzal(Adv):
a1 = ('lo',0.4)
a3 = ('primed_defense', 0.08)
conf = {}
conf['slots.a'] = Resounding_Rendition() + Breakfast_at_Valerios()
conf['slots.frostbite.a'] = Primal_Crisis() + His_Clever_Brother()
conf['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s3
`s4
`s2
"""
coab = ['Xander', 'Dagger', 'Tiki']
conf['afflict_res.bog'] = 100
share = ['Gala_Elisanne', 'Ranzal']
def init(self):
self.a3_iscding = 0
self.buff_class = Teambuff if self.condition('buff all team') else Selfbuff
@staticmethod
def prerun_skillshare(adv, dst):
adv.buff_class = Teambuff if adv.condition('buff all team') else Selfbuff
def s1_proc(self, e):
self.dmg_make(e.name,2.16)
self.afflics.bog.on(e.name, 100)
self.dmg_make(e.name,6.48)
def s2_proc(self, e):
self.buff_class(e.name,0.10,15).on()
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
the-stack_0_5331 | import os
from setuptools import find_packages, setup
from pufsim import version
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# load readme file
with open('README.rst') as readme:
README = readme.read()
# stamp the package prior to installation
version.stamp_directory(os.path.join(os.getcwd(), 'pufsim'))
setup(
name='django-pufsim',
version=version.get_version(),
packages=['pufsim'],
include_package_data=True,
license='MIT License',
description='Front-end app for puflib',
long_description=README,
url='https://github.com/gregschmit/django-pufsim',
author='Gregory N. Schmit',
author_email='[email protected]',
install_requires=['Django>=2', 'numpy', 'matplotlib', 'puflib',],
package_data={'pufsim': ['VERSION_STAMP']},
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.0',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
|
the-stack_0_5336 | import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
# local imports
from load_data import (
income_distribution_dropdown_values,
median_income_dropdown_values
)
# we use the Row and Col components to construct the sidebar header
# it consists of a title, and a toggle, the latter is hidden on large screens
sidebar_header = dbc.Row(
[
dbc.Col(html.H2("Page Menu", className="display-4")),
dbc.Col(
html.Button(
# use the Bootstrap navbar-toggler classes to style the toggle
html.Span(className="navbar-toggler-icon"),
className="navbar-toggler",
# the navbar-toggler classes don't set color, so we do it here
style={
"color": "rgba(0,0,0,.5)",
"border-color": "rgba(0,0,0,.1)",
},
id="toggle",
),
# the column containing the toggle will be only as wide as the
# toggle, resulting in the toggle being right aligned
width="auto",
# vertically align the toggle in the center
align="center",
),
]
)
sidebar = html.Div(
[
sidebar_header,
# we wrap the horizontal rule and short blurb in a div that can be
# hidden on a small screen
html.Div(
[
html.Hr(),
],
id="blurb",
),
# use the Collapse component to animate hiding / revealing links
dbc.Collapse(
dbc.Nav(
[
dbc.NavLink("About", href="/page-1", id="page-1-link"),
dbc.NavLink("Income distribution", href="/page-2", id="page-2-link"),
dbc.NavLink("Median income", href="/page-3", id="page-3-link"),
dbc.NavLink("References", href="/page-4", id="page-4-link"),
],
vertical=True,
pills=True,
),
id="collapse",
),
],
id="sidebar",
)
layout1 = html.Div([
html.H3("About"),
html.P("This aim of this site is to help make some of the personal \
income statistics from Statistics Canada more accessible to \
Canadians."),
html.H5("Income distribution"),
dcc.Markdown("The *Income distribution* page is based on 'total income' from \
income tax returns and includes [(ref)](https://www150.statcan.gc.ca/n1/en/catalogue/72-212-X):"),
dcc.Markdown("\
- employment income (salaries, commission), \n \
- self employment income, \n \
- pension income \
(OAS, CPP/QPP, registered pension plans, RRIFs), \n \
- investment income, \n \
- social benefit payments (EI, workers' compensation, \
social assistance), and \n \
- other income."),
dcc.Markdown("There is an important caveat about the definition of 'total income' \
that is relevant when interpreting these statistics. Let's consider two \
people with different income and pension benefits:"),
dcc.Markdown("\
- Person 1: $70,000 total income, no pension, contributes $10,000 of their \
income to RRSPs, \n \
- Person 2: $60,000 total income, receives a defined benefit \
pension worth $10,000"),
    dcc.Markdown("At the end of the day these individuals have the same disposable \
income, and presumably similar potential future income from their \
        pensions. Based on StatCan's 'total income' statistics, Person 1 has higher \
        'total income'. The employment income Person 1 uses \
to make RRSP and RPP contributions counts as 'total income' \
in the year it is earned, but also again when it \
is withdrawn as pension income (plus any appreciation from capital gains, \
dividends, and interest). For Person 2, the defined benefit pension \
is not counted as 'total income'. Defined benefit pensions are \
promises made by employers to pay employees in the future. \
Defined benefits pensions (and also employee contributions to \
workplace pensions) show up on T4's as a 'pension adjustment'. \
"),
html.H5("Median income"),
dcc.Markdown("The *Median income* page is based on statistics from the \
[Canadian Income Survey]\
(https://www23.statcan.gc.ca/imdb/p2SV.pl?Function=getSurvey&Id=1275662).\
For the 2018 CIS, the sample size was around \
56,000 households.")
])
################
# Layout 2: income distribution
################
def get_dropdown_options(items):
return [{'label': value, 'value': value} for value in items]
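# e.g. get_dropdown_options([2017, 2018]) returns
#      [{'label': 2017, 'value': 2017}, {'label': 2018, 'value': 2018}]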
layout2_dropdown_headers = dbc.Row([
dbc.Col(html.Div("Select year")),
dbc.Col(html.Div("Select location")),
dbc.Col(html.Div("Select age group")),
])
layout2_dropdown = dbc.Row([
dbc.Col(
dcc.Dropdown(
id='page2-year',
placeholder="Select year",
options=get_dropdown_options(
income_distribution_dropdown_values["year_values"]),
value=2018,
),
),
dbc.Col(
dcc.Dropdown(
id='page2-geo',
placeholder="Select location",
options=get_dropdown_options(
income_distribution_dropdown_values["geo_values"]),
value="Canada"
),
),
dbc.Col(
dcc.Dropdown(
id='page2-age',
placeholder="Select age group",
options=get_dropdown_options(
income_distribution_dropdown_values["age_values"]),
value='35 to 44 years',
),
),
])
layout2 = html.Div([
html.H3("Income distribution"),
layout2_dropdown_headers,
layout2_dropdown,
html.Div(
dcc.Loading(dcc.Graph(id="income-distribution"), type='circle'),
style={'width':'90%'}
),
html.Div(
dcc.Loading(dcc.Graph(id="cumulative-distribution"), type='circle'),
style={'width':'90%'}
)
])
################
# Layout 3: median income
################
page_3_dropdown_header = dbc.Row([
dbc.Col(html.Div("Select age group")),
dbc.Col(html.Div("Select sex")),
dbc.Col()
])
page3_dropdown_header_2 = dbc.Row([
dbc.Col(html.Div("Select region (hold ctrl for multiple selections)"))])
age_sex_dropdown = dbc.Row([
dbc.Col(
dcc.Dropdown(
id='page3-age',
placeholder="Select age group",
options=get_dropdown_options(median_income_dropdown_values["Age group"]),
value='35 to 44 years',
)
),
dbc.Col(
dcc.Dropdown(
id='page3-sex',
placeholder="Select sex",
options=get_dropdown_options(median_income_dropdown_values["Sex"]),
value=["Males", "Females"],
multi=True
)
),
dbc.Col()
])
region_dropdown = dbc.Row([
dbc.Col(
dcc.Dropdown(
id='page3-geo',
placeholder="Select location",
options=get_dropdown_options(
median_income_dropdown_values["GEO"]),
value=[
'Ottawa-Gatineau, Ontario/Quebec',
'Vancouver, British Columbia'],
multi=True
)
)
])
layout3 = html.Div([
html.H3("Median income"),
page_3_dropdown_header,
age_sex_dropdown,
page3_dropdown_header_2,
region_dropdown,
html.Div(
dcc.Loading(dcc.Graph(id="median-income"), type='circle'),
style={'width':'100%'})
])
layout4 = html.Div([
html.H3("References"),
html.Div(dcc.Markdown("\
- Statistics Canada \n\
- [Tax filers and dependants with income by total income, sex and age](https://doi.org/10.25318/1110000801-eng),\n\
- [Income of individuals by age group, sex and income source, Canada, provinces and selected census metropolitan areas](https://doi.org/10.25318/1110023901-eng)\n\
- Source code for this webpage is hosted on [Github](https://github.com/BlaneG/CAN-income-stats)\n\
"
))
]) |
the-stack_0_5338 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import tensorflow as tf
import time
# Dropout rate
RATE_DROPOUT = 0.5
def small_cnn(x, phase_train):
# Dense Layer
pool2_flat = tf.reshape(x, [-1, 4 * 4 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout = tf.layers.dropout(inputs=dense, rate=RATE_DROPOUT, training=phase_train)
# Logits Layer
logits = tf.layers.dense(inputs=dropout, units=10)
logits = tf.layers.dropout(inputs=logits, rate=RATE_DROPOUT, training=phase_train)
return logits
def wrap(x, m, n, stride, shape):
slicing = tf.TensorArray('float32', m * n)
for j in range(m):
for k in range(n):
slicing = slicing.write(
j * n + k, tf.slice(x, [0, j * stride, k * stride, 0],
shape))
sliced = tf.reshape(slicing.concat(), shape)
slicing.close().mark_used()
return sliced
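# Note: wrap() slices the (padded) feature map into m*n overlapping crops of size
# `shape`, stepping `stride` pixels between crops, and concatenates them along the
# batch axis; model() below then averages the small CNN's logits over all 36 crops.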
def model(x):
phase_train = tf.placeholder(tf.bool)
m = 5
n = 5
stride = 3
x = tf.reshape(x, [-1, 28, 28, 1])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(
inputs=x,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
conv1_dropout = tf.layers.dropout(inputs=conv1, rate=RATE_DROPOUT, training=phase_train)
# Convolutional Layer #2
conv2 = tf.layers.conv2d(
inputs=conv1_dropout,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
conv2_dropout = tf.layers.dropout(inputs=conv2, rate=RATE_DROPOUT, training=phase_train)
# Pooling Layer #1
pool1 = tf.layers.average_pooling2d(inputs=conv2_dropout, pool_size=[2, 2], strides=2)
# Convolutional Layer #3
conv3 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
conv3_dropout = tf.layers.dropout(inputs=conv3, rate=RATE_DROPOUT, training=phase_train)
# Pooling Layer #2
pool2 = tf.layers.average_pooling2d(inputs=conv3_dropout, pool_size=[2, 2], strides=2)
padding = tf.pad(pool2, [[0, 0], [1, 1], [1, 1], [0, 0]])
logits = small_cnn(wrap(padding, 6, 6, 1, [-1, 4, 4, 64]), phase_train)
logits = tf.reduce_mean(tf.reshape(logits, [36, -1, 10]), 0)
return logits, phase_train
def main(unused_argv):
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
input_data = tf.placeholder(tf.float32, [None, 784])
output_data = tf.placeholder(tf.int64, [None])
y_model, phase_train= model(input_data)
#Loss
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
labels=output_data, logits=y_model)
cross_entropy = tf.reduce_mean(cross_entropy)
#Optimizer
rate = tf.placeholder(tf.float32)
train_step = tf.train.AdamOptimizer(rate).minimize(cross_entropy)
#Accuracy
correct_prediction = tf.equal(tf.argmax(y_model, 1), output_data)
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
    #Config
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
t0 = time.clock()
rt = 1e-3
for i in range(60001):
# Get the data of next batch
batch = mnist.train.next_batch(100)
if (i % 600 == 0) and (i != 0):
if i == 30000:
rt = 3e-4
if i == 42000:
rt = 1e-4
if i == 48000:
rt = 3e-5
if i == 54000:
rt = 1e-5
# Print the accuracy
test_accuracy = 0
test_accuracy_once = 0
for index in range(200):
accuracy_batch = mnist.test.next_batch(50)
test_accuracy_once = sess.run(accuracy, feed_dict={
input_data: accuracy_batch[0], output_data: accuracy_batch[1],
phase_train: False})
test_accuracy += test_accuracy_once
test_accuracy_once = 0
print('%g, %g, %g' %
(i / 600, test_accuracy / 200, (time.clock() - t0)))
t0 = time.clock()
# Train
_ = sess.run(
train_step,
feed_dict={input_data: batch[0],
output_data: batch[1],
phase_train: True,
rate: rt})
if __name__ == "__main__":
tf.app.run() |
the-stack_0_5339 | # $Id: fact_base.py 081917d30609 2010-03-05 mtnyogi $
# coding=utf-8
#
# Copyright © 2007-2008 Bruce Frederiksen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
A fact_base is one of the kinds of knowledge_bases (see also, rule_base
and special).
>>> from pyke import knowledge_engine
>>> engine = knowledge_engine.engine()
>>> fb = fact_base(engine, 'fb_name')
>>> fb
<fact_base fb_name>
>>> fb.dump_universal_facts()
>>> fb.dump_specific_facts()
A fact_base is nothing more than a list of facts. Each fact has a name
and a tuple of arguments. These arguments are python data (not
patterns).
Fact_bases support two kinds of facts: universal facts (universally
true) and case specific facts (only true in a specific situation).
>>> fb.add_universal_fact('some_universal_fact', ('a', 2))
>>> fb.add_case_specific_fact('some_specific_fact', ('b', ('hi', 32)))
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
some_specific_fact('b', ('hi', 32))
The 'reset' method deletes all case specific facts, but leaves the
universal facts.
>>> fb.reset()
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
Normally, universal facts are established once at program
initialization time and case specific facts are established both just
prior to each invocation of the expert system as well as by assertions
in forward chaining rules.
>>> fb.assert_('some_fact', ('a', 2, ('hi', 'mom')))
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
>>> fb.assert_('some_fact', ('a', 3, ('hi', 'mom')))
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
>>> fb.assert_('some_other_fact', ())
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
some_other_fact()
Duplicate facts are not allowed and trying to assert a duplicate fact is
silently ignored.
>>> fb.assert_('some_fact', ('a', 2, ('hi', 'mom')))
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
some_other_fact()
'''
import itertools
import contextlib
from pyke import knowledge_base, contexts
class fact_base(knowledge_base.knowledge_base):
''' Not much to fact_bases. The real work is done in fact_list! '''
def __init__(self, engine, name, register = True):
super(fact_base, self).__init__(engine, name, fact_list, register)
def dump_universal_facts(self):
for fl_name in sorted(self.entity_lists.keys()):
self.entity_lists[fl_name].dump_universal_facts()
def dump_specific_facts(self):
for fl_name in sorted(self.entity_lists.keys()):
self.entity_lists[fl_name].dump_specific_facts()
def add_universal_fact(self, fact_name, args):
self.get_entity_list(fact_name).add_universal_fact(args)
def add_case_specific_fact(self, fact_name, args):
self.get_entity_list(fact_name).add_case_specific_fact(args)
def assert_(self, fact_name, args):
self.add_case_specific_fact(fact_name, args)
def get_stats(self):
num_fact_lists = num_universal = num_case_specific = 0
for fact_list in self.entity_lists.values():
universal, case_specific = fact_list.get_stats()
num_universal += universal
num_case_specific += case_specific
num_fact_lists += 1
return num_fact_lists, num_universal, num_case_specific
def print_stats(self, f):
num_fact_lists, num_universal, num_case_specific = self.get_stats()
f.write("%s: %d fact names, %d universal facts, "
"%d case_specific facts\n" %
(self.name, num_fact_lists, num_universal, num_case_specific))
class fact_list(knowledge_base.knowledge_entity_list):
def __init__(self, name):
super(fact_list, self).__init__(name)
self.universal_facts = [] # [(arg...)...]
self.case_specific_facts = [] # [(arg...)...]
self.hashes = {} # (len, (index...)): (other_indices,
# {(arg...): [other_args_from_factn...]})
self.fc_rule_refs = [] # (fc_rule, foreach_index)
def reset(self):
self.case_specific_facts = []
self.hashes.clear()
self.fc_rule_refs = []
def dump_universal_facts(self):
for args in self.universal_facts:
print('%s%s' % (self.name, args))
def dump_specific_facts(self):
for args in self.case_specific_facts:
print('%s%s' % (self.name, args))
def add_fc_rule_ref(self, fc_rule, foreach_index):
self.fc_rule_refs.append((fc_rule, foreach_index))
def get_affected_fc_rules(self):
return (fc_rule for fc_rule, foreach_index in self.fc_rule_refs)
def lookup(self, bindings, pat_context, patterns):
""" Returns a context manager for a generator that binds patterns to
successive facts, yielding None for each successful match.
Undoes bindings upon continuation, so that no bindings remain at
StopIteration.
"""
indices = tuple(enum for enum in enumerate(patterns)
if enum[1].is_data(pat_context))
other_indices, other_arg_lists = \
self._get_hashed(len(patterns),
tuple(index[0] for index in indices),
tuple(index[1].as_data(pat_context)
for index in indices))
def gen():
if other_arg_lists:
for args in other_arg_lists:
mark = bindings.mark(True)
end_done = False
try:
if all(map(
lambda i, arg:
patterns[i].match_data(bindings,
pat_context,
arg),
other_indices,
args)):
bindings.end_save_all_undo()
end_done = True
yield
finally:
if not end_done: bindings.end_save_all_undo()
bindings.undo_to_mark(mark)
return contextlib.closing(gen())
def _get_hashed(self, length, indices, args):
ans = self.hashes.get((length, indices))
if ans is None: ans = self._hash(length, indices)
other_indices, arg_map = ans
return other_indices, arg_map.get(args, ())
def _hash(self, length, indices):
args_hash = {}
new_entry = (tuple(i for i in range(length) if i not in indices),
args_hash)
self.hashes[length, indices] = new_entry
for args in itertools.chain(self.universal_facts,
self.case_specific_facts):
if len(args) == length:
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
args_hash.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
return new_entry
def add_universal_fact(self, args):
assert args not in self.case_specific_facts, \
"add_universal_fact: fact already present as specific fact"
if args not in self.universal_facts:
self.universal_facts.append(args)
self.add_args(args)
def add_case_specific_fact(self, args):
if args not in self.universal_facts and \
args not in self.case_specific_facts:
self.case_specific_facts.append(args)
self.add_args(args)
for fc_rule, foreach_index in self.fc_rule_refs:
fc_rule.new_fact(args, foreach_index)
def add_args(self, args):
for (length, indices), (other_indices, arg_map) \
in self.hashes.items():
if length == len(args):
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
arg_map.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
def get_stats(self):
return len(self.universal_facts), len(self.case_specific_facts)
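# Added usage sketch (not part of the original module; the fact and pattern
# names below are assumptions for illustration only). `fact_list.lookup` is
# normally driven by the inference engine, but it can be exercised directly
# through a fact_base such as the `fb` built in the doctest above:
#
#     from pyke import contexts, pattern
#     fl = fb.get_entity_list('some_fact')
#     ctx = contexts.simple_context()
#     pats = (pattern.pattern_literal('a'),
#             contexts.variable('x'),
#             contexts.variable('y'))
#     with fl.lookup(ctx, ctx, pats) as gen:
#         for _ in gen:                       # one yield per matching fact
#             print(ctx.lookup_data('x'), ctx.lookup_data('y'))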
|
the-stack_0_5340 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 30 00:06:41 2021
@author: qizhe
"""
class Solution:
def computeArea(self, ax1: int, ay1: int, ax2: int, ay2: int, bx1: int, by1: int, bx2: int, by2: int) -> int:
"""
Same as the reference solution: the key trick is to use max/min so that every
overlap case (including no overlap at all) is handled by one formula.
Took about 10 minutes.
"""
Area1 = (ax2-ax1)*(ay2-ay1)
Area2 = (bx2-bx1)*(by2-by1)
xCross = max(min(ax2,bx2) - max(ax1,bx1), 0)
yCross = max(min(ay2,by2) - max(ay1,by1), 0)
AreaCross = xCross*yCross
# print(Area1,Area2,AreaCross)
return Area1 + Area2 - AreaCross
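# Worked example (added for illustration, using the values set in __main__
# below): A = (-3, 0)-(3, 4) and B = (0, -1)-(9, 2), so Area1 = 6 * 4 = 24 and
# Area2 = 9 * 3 = 27; xCross = max(min(3, 9) - max(-3, 0), 0) = 3 and
# yCross = max(min(4, 2) - max(0, -1), 0) = 2, giving 24 + 27 - 3 * 2 = 45.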
if __name__ == '__main__':
solu = Solution()
ax1 = -3
ay1 = 0
ax2 = 3
ay2 = 4
bx1 = 0
by1 = -1
bx2 = 9
by2 = 2
# ax1 = -2
# ay1 = -2
# ax2 = 2
# ay2 = 2
# bx1 = -2
# by1 = -2
# bx2 = 2
# by2 = 2
result = solu.computeArea(ax1,ay1,ax2,ay2,bx1,by1,bx2,by2)
# while result:
# print(result.val)
# result = result.next
output_Str = 'result = ' + str(result)
print(output_Str) |
the-stack_0_5341 | load("@io_bazel_rules_go//go:def.bzl", "GoLibrary")
load("@io_bazel_rules_go//go/private:mode.bzl", "get_mode")
go_filetype = ["*.go"]
def _compute_genrule_variables(resolved_srcs, resolved_outs):
variables = {"SRCS": cmd_helper.join_paths(" ", resolved_srcs),
"OUTS": cmd_helper.join_paths(" ", resolved_outs)}
if len(resolved_srcs) == 1:
variables["<"] = list(resolved_srcs)[0].path
if len(resolved_outs) == 1:
variables["@"] = list(resolved_outs)[0].path
return variables
def _compute_genrule_command(ctx, go_stdlib):
workspace_root = '$$(pwd)'
if ctx.build_file_path.startswith('external/'):
# We want GO_WORKSPACE to point at the root directory of the Bazel
# workspace containing this go_genrule's BUILD file. If it's being
# included in a different workspace as an external dependency, the
# link target must point to the external subtree instead of the main
# workspace (which contains code we don't care about).
#
# Given a build file path like "external/foo/bar/BUILD", the following
# slash split+join sets external_dep_prefix to "external/foo" and the
# effective workspace root to "$PWD/external/foo/".
external_dep_prefix = '/'.join(ctx.build_file_path.split('/')[:2])
workspace_root = '$$(pwd)/' + external_dep_prefix
cmd = [
'set -e',
'export GOROOT=$$(pwd)/' + go_stdlib.root_file.dirname,
'export GOOS=' + go_stdlib.goos,
'export GOARCH=' + go_stdlib.goarch,
# setup main GOPATH
'GENRULE_TMPDIR=$$(mktemp -d $${TMPDIR:-/tmp}/bazel_%s_XXXXXXXX)' % ctx.attr.name,
'export GOPATH=$${GENRULE_TMPDIR}/gopath',
'export GO_WORKSPACE=$${GOPATH}/src/' + ctx.attr.go_prefix.go_prefix,
'mkdir -p $${GO_WORKSPACE%/*}',
'ln -s %s/ $${GO_WORKSPACE}' % (workspace_root,),
'if [[ ! -e $${GO_WORKSPACE}/external ]]; then ln -s $$(pwd)/external/ $${GO_WORKSPACE}/; fi',
'if [[ ! -e $${GO_WORKSPACE}/bazel-out ]]; then ln -s $$(pwd)/bazel-out/ $${GO_WORKSPACE}/; fi',
# setup genfile GOPATH
'export GENGOPATH=$${GENRULE_TMPDIR}/gengopath',
'export GENGO_WORKSPACE=$${GENGOPATH}/src/' + ctx.attr.go_prefix.go_prefix,
'mkdir -p $${GENGO_WORKSPACE%/*}',
'ln -s $$(pwd)/$(GENDIR) $${GENGO_WORKSPACE}',
# drop into WORKSPACE
'export GOPATH=$${GOPATH}:$${GENGOPATH}',
'cd $${GO_WORKSPACE}',
# execute user command
ctx.attr.cmd.strip(' \t\n\r'),
]
return '\n'.join(cmd)
def _go_genrule_impl(ctx):
go_toolchain = ctx.toolchains["@io_bazel_rules_go//go:toolchain"]
mode = get_mode(ctx, ctx.attr._go_toolchain_flags)
go_stdlib = go_toolchain.stdlib.get(ctx, go_toolchain, mode)
all_srcs = depset(go_stdlib.files)
label_dict = {}
for dep in ctx.attr.go_deps:
lib = dep[GoLibrary]
all_srcs += lib.package.srcs
for transitive_lib in lib.transitive:
all_srcs += transitive_lib.srcs
for dep in ctx.attr.srcs:
all_srcs += dep.files
label_dict[dep.label] = dep.files
cmd = _compute_genrule_command(ctx, go_stdlib)
resolved_inputs, argv, runfiles_manifests = ctx.resolve_command(
command=cmd,
attribute="cmd",
expand_locations=True,
make_variables=_compute_genrule_variables(all_srcs, depset(ctx.outputs.outs)),
tools=ctx.attr.tools,
label_dict=label_dict
)
ctx.action(
inputs = list(all_srcs) + resolved_inputs,
outputs = ctx.outputs.outs,
env = ctx.configuration.default_shell_env,
command = argv,
progress_message = "%s %s" % (ctx.attr.message, ctx),
mnemonic = "GoGenrule",
)
# We have codegen procedures that depend on the "go/*" stdlib packages
# and thus depend on executing with a valid GOROOT and GOPATH containing
# some amount transitive go src of dependencies. This go_genrule enables
# the creation of these sandboxes.
go_genrule = rule(
attrs = {
"srcs": attr.label_list(allow_files = True),
"tools": attr.label_list(
cfg = "host",
allow_files = True,
),
"outs": attr.output_list(mandatory = True),
"cmd": attr.string(mandatory = True),
"go_deps": attr.label_list(),
"message": attr.string(),
"executable": attr.bool(default = False),
"_go_toolchain_flags": attr.label(default = Label("@io_bazel_rules_go//go/private:go_toolchain_flags")),
# Next rule copied from bazelbuild/rules_go@a9df110cf04e167b33f10473c7e904d780d921e6
# and then modified a bit.
# I'm not sure if this is correct anymore.
# Also, go_prefix is deprecated, so this is probably going to break in the near future.
"go_prefix": attr.label(
providers = ["go_prefix"],
default = Label(
"//:go_prefix",
relative_to_caller_repository = True,
),
allow_files = False,
cfg = "host",
),
},
output_to_genfiles = True,
toolchains = ["@io_bazel_rules_go//go:toolchain"],
implementation = _go_genrule_impl,
)
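# Added usage sketch (hypothetical; the load path, target names and command
# below are invented for illustration). From a BUILD file, the rule is invoked
# like a regular genrule, with go_deps supplying the Go libraries whose sources
# must be visible under the synthesized GOPATH:
#
#     load("//build:go_genrule.bzl", "go_genrule")
#
#     go_genrule(
#         name = "gen_deepcopy",
#         srcs = ["types.go"],
#         outs = ["zz_generated.deepcopy.go"],
#         cmd = "go run ./cmd/mygen > $@",
#         go_deps = ["//pkg/apis/example:go_default_library"],
#     )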
|
the-stack_0_5342 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import glob
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from PIL import Image
from detectron2.structures import BoxMode
from detectron2.utils.logger import setup_logger
from detectron2.utils.comm import get_world_size
from fvcore.common.file_io import PathManager
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = []
for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
suffix = "leftImg8bit.png"
assert image_file.endswith(suffix)
prefix = image_dir
instance_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_instanceIds.png"
assert os.path.isfile(instance_file), instance_file
label_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_labelIds.png"
assert os.path.isfile(label_file), label_file
json_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_polygons.json"
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
logger = logging.getLogger(__name__)
logger.info("Preprocessing cityscapes annotations ...")
# This is still not fast: all workers will execute duplicated work and can
# take up to 10m on an 8-GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
# Map cityscape ids to contiguous ids
from cityscapesScripts.cityscapesscripts.helpers.labels import labels
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
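# Added sketch (not part of the original file; the dataset name and paths are
# assumptions). In practice this loader is registered with detectron2's
# DatasetCatalog rather than called directly, e.g.:
#
#     from detectron2.data import DatasetCatalog
#     DatasetCatalog.register(
#         "my_cityscapes_fine_instance_train",
#         lambda: load_cityscapes_instances(
#             "datasets/cityscapes/leftImg8bit/train",
#             "datasets/cityscapes/gtFine/train",
#             from_json=True, to_polygons=True))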
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
suffix = "leftImg8bit.png"
assert image_file.endswith(suffix)
prefix = image_dir
label_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_labelTrainIds.png"
assert os.path.isfile(
label_file
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
json_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_polygons.json"
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
return ret
def cityscapes_files_to_dict(files, from_json, to_polygons):
"""
Parse cityscapes annotation files to a dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
from cityscapesScripts.cityscapesscripts.helpers.labels import id2label, name2label
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
from shapely.geometry import MultiPolygon, Polygon
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draw the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
# This will resolve the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations uses integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
# opencv's can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
if __name__ == "__main__":
"""
Test the cityscapes dataset loader.
Usage:
python -m detectron2.data.datasets.cityscapes \
cityscapes/leftImg8bit/train cityscapes/gtFine/train
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("image_dir")
parser.add_argument("gt_dir")
parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
args = parser.parse_args()
from detectron2.data.catalog import Metadata
from detectron2.utils.visualizer import Visualizer
from cityscapesscripts.helpers.labels import labels
logger = setup_logger(name=__name__)
dirname = "cityscapes-data-vis"
os.makedirs(dirname, exist_ok=True)
if args.type == "instance":
dicts = load_cityscapes_instances(
args.image_dir, args.gt_dir, from_json=True, to_polygons=True
)
logger.info("Done loading {} samples.".format(len(dicts)))
thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval]
meta = Metadata().set(thing_classes=thing_classes)
else:
dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)
logger.info("Done loading {} samples.".format(len(dicts)))
stuff_names = [k.name for k in labels if k.trainId != 255]
stuff_colors = [k.color for k in labels if k.trainId != 255]
meta = Metadata().set(stuff_names=stuff_names, stuff_colors=stuff_colors)
for d in dicts:
img = np.array(Image.open(d["file_name"]))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
# cv2.imshow("a", vis.get_image()[:, :, ::-1])
# cv2.waitKey()
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
vis.save(fpath)
|
the-stack_0_5343 | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0]) # number=5
prog.h(input_qubit[0]) # number=7
prog.cz(input_qubit[1],input_qubit[0]) # number=8
prog.h(input_qubit[0]) # number=9
prog.x(input_qubit[2]) # number=10
prog.x(input_qubit[2]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_noisy187.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_5346 | # Echo server program
import socket
from time import ctime
import os
def psend(conn, prompt, data):
conn.sendall(('[%s] %s' %
(prompt, data.decode())
).encode())
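# Added client sketch (hypothetical; run it in a separate process while this
# server is listening). It exercises the date/os/ls commands handled in the
# loop below:
#
#     import socket
#     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#         c.connect(('127.0.0.1', 50007))
#         c.sendall(b'date')
#         print(c.recv(1024))      # e.g. b"[Mon Jan  1 00:00:00 2024] date"
#         c.sendall(b'ls /tmp')
#         print(c.recv(1024))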
HOST = '' # Symbolic name meaning all available interfaces
PORT = 50007 # Arbitrary non-privileged port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
with conn:
print('Connected by', addr)
while True:
data = conn.recv(1024)
if not data: break
datas = data.decode().split(' ')
if len(datas) > 0:
if datas[0] == 'date':
psend(conn, ctime(), data) # pass a str so the prompt is not rendered as b'...'
elif datas[0] == 'os':
psend(conn, os.name, data)
elif datas[0] == 'ls':
if len(datas) > 1:
psend(conn, os.listdir(datas[1]), data)
else:
psend(conn, os.listdir(os.curdir), data)
else:
conn.sendall(data)
else:
conn.sendall(data) |
the-stack_0_5347 | """DjPra1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include,re_path
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from . import views
app_name='newGroup'
#from django.conf.urls import url,include
urlpatterns = [
re_path(r'^search',views.search,name='search'),
re_path(r'^newGroup',views.newGroup,name='newGroup'),
#test
# start of the practice section
re_path(r'^base',views.testbase,name='testbase'),
re_path(r'^guide',views.testGuide,name='testGuide'),
re_path(r'^home',views.homePage,name='homePage'),
re_path(r'^showimage',views.showImage,name='showImage'),
re_path(r'^create',views.newGroupCreate,name='NewGroupCreate'),
#re_path(r'^articles/(?P<year>[0-9]{4})/$', views.year_archive),
path('choose/<int:groupsId>/',views.newGroupChoose,name='newGroupChoose'),
path('column/<int:imageGroups>/<int:groupsId>/',views.newGroupColumn,name='newGroupColumn'),
re_path(r'more',views.morefunction,name='morefunction'),
re_path(r'',views.morefunction,name='morefunction'),
]
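# Added note (illustrative; the final URL prefix depends on how this URLconf is
# include()d by the project's root urls.py). With app_name = 'newGroup', the
# named routes above are reversed through the namespace, e.g.:
#
#     from django.urls import reverse
#     reverse('newGroup:newGroupChoose', args=[3])     # -> '.../choose/3/'
#     reverse('newGroup:newGroupColumn', args=[1, 3])  # -> '.../column/1/3/'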
|
the-stack_0_5348 | #!/usr/bin/env python3
import sys
from model import ModelW2W
sys.path.extend(['..'])
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import LSTMCell
from tfx.bricks import embedding, rnn, rnn_decoder, dense_to_one_hot, brnn
class Model(ModelW2W):
def __init__(self, data, FLAGS):
super(Model, self).__init__(data, FLAGS)
encoder_embedding_size = 16 * 2
encoder_lstm_size = 16
encoder_vocabulary_length = len(data.idx2word_history)
history_length = data.train_set['histories'].shape[1]
encoder_sequence_length = data.train_set['histories'].shape[2]
decoder_lstm_size = 16
decoder_embedding_size = 16
decoder_sequence_length = data.batch_actions.shape[2]
decoder_vocabulary_length = len(data.idx2word_action)
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories', trainable=False)
batch_actions = tf.Variable(data.batch_actions, name='actions', trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
actions = tf.gather(batch_actions, self.batch_idx)
# inference model
with tf.name_scope('model'):
batch_size = tf.shape(histories)[0]
encoder_embedding = embedding(
input=histories,
length=encoder_vocabulary_length,
size=encoder_embedding_size,
name='encoder_embedding'
)
with tf.name_scope("UtterancesEncoder"):
with tf.name_scope("RNNForwardUtteranceEncoderCell_1"):
cell_fw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=encoder_embedding_size,
use_peepholes=True
)
initial_state_fw_1 = cell_fw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNBackwardUtteranceEncoderCell_1"):
cell_bw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=encoder_embedding_size,
use_peepholes=True
)
initial_state_bw_1 = cell_bw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNForwardUtteranceEncoderCell_2"):
cell_fw_2 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_1.output_size + cell_bw_1.output_size,
use_peepholes=True
)
initial_state_fw_2 = cell_fw_2.zero_state(batch_size, tf.float32)
# the input data has these dimensions
# [
# #batch,
# #utterance in a history (a dialogue),
# #word in an utterance (a sentence),
# embedding dimension
# ]
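# For example (illustrative sizes only): with a batch of 32 dialogues,
# history_length = 5 and encoder_sequence_length = 20, encoder_embedding has
# shape [32, 5, 20, 32] (embedding size 16 * 2), and each utterance fed to the
# bidirectional RNN below is a list of 20 tensors of shape [32, 32].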
# encode all utterances along the word axis
encoder_states_2d = []
for utterance in range(history_length):
encoder_outputs, _ = brnn(
cell_fw=cell_fw_1,
cell_bw=cell_bw_1,
inputs=[encoder_embedding[:, utterance, word, :] for word in
range(encoder_sequence_length)],
initial_state_fw=initial_state_fw_1,
initial_state_bw=initial_state_bw_1,
name='RNNUtteranceBidirectionalLayer',
reuse=True if utterance > 0 else None
)
_, encoder_states = rnn(
cell=cell_fw_2,
inputs=encoder_outputs,
initial_state=initial_state_fw_2,
name='RNNUtteranceForwardEncoder',
reuse=True if utterance > 0 else None
)
# print(encoder_states[-1])
encoder_states = tf.concat(1, tf.expand_dims(encoder_states[-1], 1))
# print(encoder_states)
encoder_states_2d.append(encoder_states)
encoder_states_2d = tf.concat(1, encoder_states_2d)
# print('encoder_states_2d', encoder_states_2d)
with tf.name_scope("HistoryEncoder"):
# encode all histories along the utterance axis
with tf.name_scope("RNNForwardHistoryEncoderCell_1"):
cell_fw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_2.state_size,
use_peepholes=True
)
initial_state_fw_1 = cell_fw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNBackwardHistoryEncoderCell_1"):
cell_bw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_2.state_size,
use_peepholes=True
)
initial_state_bw_1 = cell_bw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNForwardHistoryEncoderCell_2"):
cell_fw_2 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_1.output_size + cell_bw_1.output_size,
use_peepholes=True
)
initial_state_fw_2 = cell_fw_2.zero_state(batch_size, tf.float32)
encoder_outputs, _ = brnn(
cell_fw=cell_fw_1,
cell_bw=cell_bw_1,
inputs=[encoder_states_2d[:, utterance, :] for utterance in range(history_length)],
initial_state_fw=initial_state_fw_1,
initial_state_bw=initial_state_bw_1,
name='RNNHistoryBidirectionalLayer',
reuse=None
)
_, encoder_states = rnn(
cell=cell_fw_2,
inputs=encoder_outputs,
initial_state=initial_state_fw_2,
name='RNNHistoryForwardEncoder',
reuse=None
)
with tf.name_scope("Decoder"):
with tf.name_scope("RNNDecoderCell"):
cell = LSTMCell(
num_units=decoder_lstm_size,
input_size=decoder_embedding_size+cell_fw_2.state_size,
use_peepholes=True,
)
initial_state = cell.zero_state(batch_size, tf.float32)
# decode all histories along the utterance axis
final_encoder_state = encoder_states[-1]
decoder_states, decoder_outputs, decoder_outputs_softmax = rnn_decoder(
cell=cell,
inputs=[actions[:, word] for word in range(decoder_sequence_length)],
static_input=final_encoder_state,
initial_state=initial_state, #final_encoder_state,
embedding_size=decoder_embedding_size,
embedding_length=decoder_vocabulary_length,
sequence_length=decoder_sequence_length,
name='RNNDecoder',
reuse=False,
use_inputs_prob=self.use_inputs_prob
)
self.predictions = tf.concat(1, decoder_outputs_softmax)
# print(p_o_i)
if FLAGS.print_variables:
for v in tf.trainable_variables():
print(v.name)
with tf.name_scope('loss'):
one_hot_labels = dense_to_one_hot(actions, decoder_vocabulary_length)
self.loss = tf.reduce_mean(- one_hot_labels * tf.log(tf.clip_by_value(self.predictions, 1e-10, 1.0)), name='loss')
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(one_hot_labels, 2), tf.argmax(self.predictions, 2))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
tf.scalar_summary('accuracy', self.accuracy)
|
the-stack_0_5349 | from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update
from stable_baselines3.td3.policies import TD3Policy
class TD3(OffPolicyAlgorithm):
"""
Twin Delayed DDPG (TD3)
Addressing Function Approximation Error in Actor-Critic Methods.
Original implementation: https://github.com/sfujim/TD3
Paper: https://arxiv.org/abs/1802.09477
Introduction to TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.
:param gradient_steps: How many gradient steps to do after each rollout
(see ``train_freq`` and ``n_episodes_rollout``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param policy_delay: Policy and target networks will only be updated once every policy_delay steps
per training steps. The Q values will be updated policy_delay more often (update every training step).
:param target_policy_noise: Standard deviation of Gaussian noise added to target policy
(smoothing noise)
:param target_noise_clip: Limit for absolute value of target policy smoothing noise.
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[TD3Policy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-3,
buffer_size: int = int(1e6),
learning_starts: int = 100,
batch_size: int = 100,
tau: float = 0.005,
gamma: float = 0.99,
train_freq: int = -1,
gradient_steps: int = -1,
n_episodes_rollout: int = 1,
action_noise: Optional[ActionNoise] = None,
optimize_memory_usage: bool = False,
policy_delay: int = 2,
target_policy_noise: float = 0.2,
target_noise_clip: float = 0.5,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Dict[str, Any] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(TD3, self).__init__(
policy,
env,
TD3Policy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=action_noise,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
supported_action_spaces=(gym.spaces.Box),
)
self.policy_delay = policy_delay
self.target_noise_clip = target_noise_clip
self.target_policy_noise = target_policy_noise
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(TD3, self)._setup_model()
self._create_aliases()
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.actor_target = self.policy.actor_target
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Update learning rate according to lr schedule
self._update_learning_rate([self.actor.optimizer, self.critic.optimizer])
actor_losses, critic_losses = [], []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
# Select action according to policy and add clipped noise
noise = replay_data.actions.clone().data.normal_(0, self.target_policy_noise)
noise = noise.clamp(-self.target_noise_clip, self.target_noise_clip)
next_actions = (self.actor_target(replay_data.next_observations) + noise).clamp(-1, 1)
# Compute the next Q-values: min over all critics targets
next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)
next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Get current Q-values estimates for each critic network
current_q_values = self.critic(replay_data.observations, replay_data.actions)
# Compute critic loss
critic_loss = sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])
critic_losses.append(critic_loss.item())
# Optimize the critics
self.critic.optimizer.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
# Delayed policy updates
if gradient_step % self.policy_delay == 0:
# Compute actor loss
actor_loss = -self.critic.q1_forward(replay_data.observations, self.actor(replay_data.observations)).mean()
actor_losses.append(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
polyak_update(self.actor.parameters(), self.actor_target.parameters(), self.tau)
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/actor_loss", np.mean(actor_losses))
logger.record("train/critic_loss", np.mean(critic_losses))
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "TD3",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(TD3, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(TD3, self)._excluded_save_params() + ["actor", "critic", "actor_target", "critic_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
return state_dicts, []
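# Added usage sketch (not part of the original module; the environment id and
# hyperparameters are illustrative, not tuned):
#
#     import numpy as np
#     from stable_baselines3 import TD3
#     from stable_baselines3.common.noise import NormalActionNoise
#
#     n_actions = 1  # Pendulum-v0 has a 1-D action space
#     action_noise = NormalActionNoise(mean=np.zeros(n_actions),
#                                      sigma=0.1 * np.ones(n_actions))
#     model = TD3("MlpPolicy", "Pendulum-v0", action_noise=action_noise, verbose=1)
#     model.learn(total_timesteps=10000)
#     model.save("td3_pendulum")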
|
the-stack_0_5350 | #!/usr/bin/python
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""This file contains code to support the hitless image upgrade feature."""
import argparse
from builtins import object
from builtins import str
import copy
from datetime import timedelta
import re
import sys
import traceback
sys.path.append("/opt/contrail/fabric_ansible_playbooks/module_utils")
# unit test
sys.path.append("../fabric-ansible/ansible-playbooks/module_utils")
from filter_utils import _task_error_log, FilterLog
from job_manager.job_utils import JobAnnotations, JobVncApi
ordered_role_groups = [
["leaf"],
["spine"],
["default"]
]
IMAGE_UPGRADE_DURATION = 30 # minutes
class FilterModule(object):
critical_routing_bridging_roles = {
"CRB-MCAST-Gateway",
"DC-Gateway",
"DCI-Gateway",
}
@staticmethod
def _validate_job_ctx(job_ctx):
if not job_ctx.get('fabric_fqname'):
raise ValueError('Invalid job_ctx: missing fabric_fqname')
job_input = job_ctx.get('job_input')
if not job_input:
raise ValueError('Invalid job_ctx: missing job_input')
if not job_input.get('fabric_uuid'):
raise ValueError('Invalid job_ctx: missing fabric_uuid')
return job_input
# end _validate_job_ctx
def filters(self):
return {
'hitless_upgrade_plan': self.get_hitless_upgrade_plan,
'hitless_next_batch': self.get_next_batch,
'hitless_all_devices': self.get_all_devices,
'hitless_device_info': self.get_device_info,
'hitless_validate': self.validate_critical_roles
}
# end filters
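# Added note (illustrative sketch; the playbook variable names are assumptions).
# These names are exposed as Jinja2 filters to the fabric Ansible playbooks,
# which invoke them roughly like:
#
#     - set_fact:
#         upgrade_plan: "{{ job_ctx | hitless_upgrade_plan(image_upgrade_list) }}"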
# Wrapper to call main routine
def get_hitless_upgrade_plan(self, job_ctx, image_upgrade_list):
try:
FilterLog.instance("HitlessUpgradeFilter")
self.job_input = FilterModule._validate_job_ctx(job_ctx)
self.fabric_uuid = self.job_input['fabric_uuid']
self.vncapi = JobVncApi.vnc_init(job_ctx)
self.job_ctx = job_ctx
self.ja = JobAnnotations(self.vncapi)
self.advanced_parameters = self._get_advanced_params()
self._cache_job_input()
self.batch_limit = self.advanced_parameters.get(
'bulk_device_upgrade_count')
self.image_upgrade_list = image_upgrade_list
upgrade_plan = self._get_hitless_upgrade_plan()
return upgrade_plan
except Exception as ex:
errmsg = "Unexpected error: %s\n%s" % (
str(ex), traceback.format_exc()
)
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_hitless_upgrade_plan
# Get any advanced parameters from job input to override defaults
def _get_advanced_params(self):
job_template_fqname = self.job_ctx.get('job_template_fqname')
def_json = self.ja.generate_default_json(job_template_fqname)
adv_params = def_json.get("advanced_parameters")
job_input_adv_params = self.job_input.get('advanced_parameters', {})
adv_params = self.ja.dict_update(adv_params, job_input_adv_params)
return adv_params
# end _get_advanced_params
# Store the job input on the fabric object for UI to retrieve later
def _cache_job_input(self):
job_input = copy.deepcopy(self.job_input)
job_input.update({"advanced_parameters": self.advanced_parameters})
self.ja.cache_job_input(self.fabric_uuid,
self.job_ctx.get('job_template_fqname')[-1],
job_input)
# end _cache_job_input
# Read from Node Profile to determine whether the upgrade is hitless
def _is_hitless_upgrade(self, device_obj):
node_profile_refs = device_obj.get_node_profile_refs()
if node_profile_refs:
np_uuid = node_profile_refs[0].get('uuid')
node_profile_obj = self.vncapi.node_profile_read(id=np_uuid)
is_hitless = node_profile_obj.get_node_profile_hitless_upgrade()
return is_hitless
return True
# end _is_hitless_upgrade
# Main routine to generate an upgrade plan
def _get_hitless_upgrade_plan(self):
self.device_table, self.skipped_device_table = \
self._generate_device_table()
self.role_device_groups = self._generate_role_device_groups()
self.vpg_table = self._generate_vpg_table()
self._generate_buddy_lists()
self.batches = self._generate_batches()
self.report = self._generate_report()
self.results = self._generate_results()
upgrade_plan = {
'image_upgrade_list': self.image_upgrade_list,
'advanced_parameters': self.advanced_parameters,
'device_table': self.device_table,
'device_count': len(self.device_table),
'skipped_device_table': self.skipped_device_table,
'role_device_groups': self.role_device_groups,
'vpg_table': self.vpg_table,
'batches': self.batches,
'report': self.report,
'results': self.results,
'status': "success"
}
return upgrade_plan
# end _get_hitless_upgrade_plan
# generate a table of device information
def _generate_device_table(self):
device_table = {}
skipped_device_table = {}
for image_entry in self.image_upgrade_list:
image_uuid = image_entry.get('image_uuid')
image_obj = self.vncapi.device_image_read(id=image_uuid)
device_list = image_entry.get('device_list')
for device_uuid in device_list:
device_obj = self.vncapi.physical_router_read(id=device_uuid)
routing_bridging_roles = device_obj.routing_bridging_roles
if not routing_bridging_roles:
raise ValueError("Cannot find routing-bridging roles")
rb_roles = routing_bridging_roles.get_rb_roles()
is_hitless_upgrade = self._is_hitless_upgrade(device_obj)
device_info = {
"basic": {
"device_fqname": device_obj.fq_name,
"device_vendor":
device_obj.physical_router_vendor_name,
"device_family":
device_obj.physical_router_device_family,
"device_product":
device_obj.physical_router_product_name,
"device_serial_number":
device_obj.physical_router_serial_number,
"device_management_ip":
device_obj.physical_router_management_ip,
"device_username":
device_obj.physical_router_user_credentials.
username,
"device_password": self._get_password(device_obj),
"device_image_uuid": image_uuid,
"device_hitless_upgrade": is_hitless_upgrade
},
'image_family': image_obj.device_image_device_family,
'image_version': image_obj.device_image_os_version,
'current_image_version':
device_obj.physical_router_os_version,
'name': device_obj.fq_name[-1],
'uuid': device_uuid,
'physical_role': device_obj.physical_router_role,
'rb_roles': rb_roles,
'role': self._determine_role(
device_obj.physical_router_role, rb_roles),
'err_msgs': [],
'vpg_info': {"vpg_list": [], "buddies": []},
'target_multihomed_interface': []
}
skip, reason = self._check_skip_device_upgrade(device_info)
if skip:
if reason:
device_info['skip_reason'] = reason
skipped_device_table[device_uuid] = device_info
else:
device_table[device_uuid] = device_info
return device_table, skipped_device_table
# end _generate_device_table
# generate a simple table of roles with their corresponding devices
def _generate_role_device_groups(self):
# Group devices based on role. Use dict keyed by role name
role_device_groups = {}
for device_uuid, device_info in list(self.device_table.items()):
role = device_info['role']
if role not in role_device_groups:
role_device_groups[role] = []
role_device_groups[role].append(device_uuid)
# Sort lists
for role, group in list(role_device_groups.items()):
group.sort()
return role_device_groups
# end _generate_role_device_groups
# generate a table keyed by virtual port group uuid containing member
# devices and their physical interfaces
def _generate_vpg_table(self):
vpg_table = {}
vpg_refs = self.vncapi.virtual_port_groups_list(
parent_id=self.fabric_uuid). get(
'virtual-port-groups', [])
for vpg_ref in vpg_refs:
vpg_uuid = vpg_ref.get('uuid')
vpg_table[vpg_uuid] = {"device_table": {}}
vpg_dev_table = vpg_table[vpg_uuid]['device_table']
vpg_obj = self.vncapi.virtual_port_group_read(id=vpg_uuid)
vpg_table[vpg_uuid]['name'] = vpg_obj.fq_name[2]
pi_refs = vpg_obj.get_physical_interface_refs() or []
for pi_ref in pi_refs:
pi_uuid = pi_ref.get('uuid')
pi_obj = self.vncapi.physical_interface_read(id=pi_uuid)
device_uuid = pi_obj.parent_uuid
if device_uuid not in vpg_dev_table:
vpg_dev_table[device_uuid] = []
# If this is one of the devices to upgrade, append this
# vpg to the vpg_list for use later
if device_uuid in self.device_table:
device_info = self.device_table[device_uuid]
device_info['vpg_info']['vpg_list'].append(vpg_uuid)
pi_entry = {"fq_name": pi_obj.fq_name, "uuid": pi_obj.uuid}
vpg_dev_table[device_uuid].append(pi_entry)
# Add interface name to multihomed list
if device_uuid in self.device_table:
device_info = self.device_table[device_uuid]
if_name = pi_obj.fq_name[2]
if if_name not in \
device_info['target_multihomed_interface']:
device_info['target_multihomed_interface'].\
append(if_name)
return vpg_table
# end _generate_vpg_table
# For each device, generate a list of devices which cannot be upgraded at
# the same time because they are multi-homed to the same BMS
def _generate_buddy_lists(self):
for device_uuid, device_info in list(self.device_table.items()):
vpg_info = self.device_table[device_uuid]['vpg_info']
for vpg_uuid in vpg_info['vpg_list']:
vpg_entry = self.vpg_table[vpg_uuid]
vpg_dev_table = vpg_entry['device_table']
for vpg_dev_uuid, pi_list in list(vpg_dev_table.items()):
if vpg_dev_uuid not in vpg_info['buddies'] and \
vpg_dev_uuid != device_uuid:
buddy_entry = self._get_buddy_entry(vpg_dev_uuid,
pi_list)
vpg_info['buddies'].append(buddy_entry)
# end _generate_buddy_lists
# Create entry for peer, including ip_addr, username, password
def _get_buddy_entry(self, device_uuid, pi_list):
if device_uuid in self.device_table or \
device_uuid in self.skipped_device_table:
if device_uuid in self.device_table:
device_info = self.device_table[device_uuid]
else:
device_info = self.skipped_device_table[device_uuid]
fq_name = device_info['basic']['device_fqname']
mgmt_ip = device_info['basic']['device_management_ip']
username = device_info['basic']['device_username']
password = device_info['basic']['device_password']
vendor = device_info['basic']['device_vendor']
multihomed_interface_list = \
device_info['target_multihomed_interface']
else:
device_obj = self.vncapi.physical_router_read(id=device_uuid)
fq_name = device_obj.fq_name
mgmt_ip = device_obj.physical_router_management_ip
username = device_obj.physical_router_user_credentials.username
password = self._get_password(device_obj)
vendor = device_obj.physical_router_vendor_name
multihomed_interface_list = \
self._get_multihomed_interface_list(pi_list)
return {
"uuid": device_uuid,
"fq_name": fq_name,
"name": fq_name[-1],
"mgmt_ip": mgmt_ip,
"username": username,
"password": password,
"vendor": vendor,
"multihomed_interface_list": multihomed_interface_list
}
# end _get_buddy_entry
# Get list of multihomed interface names
def _get_multihomed_interface_list(self, pi_list):
if_list = []
for pi_entry in pi_list:
if_name = pi_entry['fq_name'][-1]
if if_name not in if_list:
if_list.append(if_name)
return if_list
# end _get_multihomed_interface_list
def _device_value_based_on_number_of_critical_roles(self, device_uuid):
rb_roles = self.device_table[device_uuid].get('rb_roles')
how_many_critical_roles = 0
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
how_many_critical_roles += 1
return -how_many_critical_roles
# Creates a dict: name of critical routing bridging role -> number of
# occurrences in all devices.
def _calculate_devices_with_critical_routing_bridging_roles(self):
self.critical_routing_bridging_roles_count = {}
for critical_routing_bridging_role in\
FilterModule.critical_routing_bridging_roles:
self.critical_routing_bridging_roles_count[
critical_routing_bridging_role] = 0
for device_uuid, device_info in list(self.device_table.items()):
for routing_bridging_role in device_info.get('rb_roles'):
if routing_bridging_role in\
FilterModule.critical_routing_bridging_roles:
self.critical_routing_bridging_roles_count[
routing_bridging_role] += 1
# Assumes that critical_routing_bridging_roles_count has been initialized.
def _calc_max_number_of_repr_of_critical_rb_roles_per_batch(self):
self.max_number_of_repr_of_critical_rb_roles_per_batch = {}
for role_name, number_of_occurences \
in list(self.critical_routing_bridging_roles_count.items()):
self.max_number_of_repr_of_critical_rb_roles_per_batch[role_name] \
= number_of_occurences // 2 + number_of_occurences % 2
def _calculate_max_number_of_spines_updated_in_batch(self):
number_of_spines = 0
for device_uuid, device_info in list(self.device_table.items()):
if device_info.get('physical_role') == 'spine':
number_of_spines += 1
self.max_number_of_spines_updated_in_batch = \
number_of_spines // 2 + number_of_spines % 2
def _calc_number_of_repr_of_critical_rb_roles_in_batch(self, batch):
critical_routing_bridging_roles_count = {}
for critical_routing_bridging_role in\
FilterModule.critical_routing_bridging_roles:
critical_routing_bridging_roles_count[
critical_routing_bridging_role] = 0
for device_uuid in batch['device_list']:
rb_roles = self.device_table[device_uuid].get('rb_roles')
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
critical_routing_bridging_roles_count[rb_role] += 1
return critical_routing_bridging_roles_count
# If correct batch extended with device_uuid is still correct in regards
# to vpg buddies, return True. Otherwise return False.
def _check_vpg_buddies_in_batch(self, device_uuid, batch):
# If this device shares a multi-homed vpg interface
# with another device in this batch, return False.
buddies = self._get_vpg_buddies(device_uuid)
for buddy in buddies:
if buddy['uuid'] in batch['device_list']:
return False
return True
# If correct batch extended with device_uuid is still correct in regards
# to number of spines in batch, return True. Otherwise return False.
def _check_number_of_spines_in_batch(self, device_uuid, batch):
device_info = self.device_table[device_uuid]
physical_role = device_info.get('physical_role')
if "spine" in physical_role:
spines_in_batch = 0
for device in batch['device_list']:
device_role = self.device_table[device].get('physical_role')
if "spine" in device_role:
spines_in_batch += 1
if (spines_in_batch + 1 >
self.max_number_of_spines_updated_in_batch):
return False
return True
# If correct batch extended with device_uuid is still correct in regards
# to number of critical roles, return True. Otherwise return False.
def _check_number_of_critical_rb_roles_in_batch(self, device_uuid, batch):
device_info = self.device_table[device_uuid]
rb_roles = device_info.get('rb_roles')
critical_rb_roles_in_device = list(
FilterModule.critical_routing_bridging_roles & set(rb_roles))
if critical_rb_roles_in_device:
critical_rb_roles_in_batch_count = self.\
_calc_number_of_repr_of_critical_rb_roles_in_batch(batch)
for rb_role in critical_rb_roles_in_device:
if critical_rb_roles_in_batch_count[rb_role] + 1 > self.\
max_number_of_repr_of_critical_rb_roles_per_batch[
rb_role]:
return False
return True
# It assumes that batch is correct and is not empty.
def _check_if_device_can_be_added_to_the_batch(self, device_uuid, batch):
return \
self._check_vpg_buddies_in_batch(device_uuid, batch) and \
self._check_number_of_spines_in_batch(device_uuid, batch) and \
self._check_number_of_critical_rb_roles_in_batch(
device_uuid, batch)
    def _add_batch_index_to_device_info(self, batches):
        for batch_index, batch in enumerate(batches):
            for device_uuid in batch['device_list']:
                self.device_table[device_uuid]['batch_index'] = batch_index
def _add_device_to_the_batch(self, device_uuid, batch_load_list, batches):
batch = {}
loaded = False
batch_full = False
device_name = self.device_table[device_uuid].get('name')
# Try to add device into an existing batch
for batch in batch_load_list:
safe = self._check_if_device_can_be_added_to_the_batch(
device_uuid, batch)
if safe:
batch['device_list'].append(device_uuid)
batch['device_names'].append(device_name)
loaded = True
# if the batch is full, move it to the master list
if len(batch['device_list']) >= self.batch_limit:
batch_full = True
break
# if not loaded into a batch, generate a new batch
if not loaded:
idx = len(batch_load_list) + len(batches) + 1
batch = {
'name': "Batch " + str(idx),
'device_list': [device_uuid],
'device_names': [device_name]
}
batch_load_list.append(batch)
# if the batch is full, move it to the master list
if len(batch['device_list']) >= self.batch_limit:
batch_full = True
# if batch full, move from load list to master list
if batch_full:
batch_load_list.remove(batch)
batches.append(batch)
def _assign_devices_to_batches(self):
batches = []
for role_group in ordered_role_groups:
# Batching is per-role-group (constraint 1).
# TODO: Each role group contains just one role. So why do we need
# role groups?
batch_load_list = []
for role in role_group:
device_list = self.role_device_groups.get(role, [])
for device_uuid in device_list:
self._add_device_to_the_batch(
device_uuid, batch_load_list, batches)
# move remaining batches from the load list to the master list
for batch in batch_load_list:
batches.append(batch)
return batches
# Generate batches of devices that can be updated at once.
#
# Constraints:
    # 1. Two devices with different physical_router_roles cannot be in the
    #    same batch.
    # 2. More than half (half + 0.5 for an odd number) of the spines cannot
    #    be in the same batch.
    # 3. For each routing_bridging_role in {"CRB-MCAST-Gateway",
    #    "DC-Gateway", "DCI-Gateway"} no more than half (half + 0.5 for an
    #    odd number) of the devices with that role can be in the same batch.
    # 4. Two devices that share a VPG cannot be in the same batch.
def _generate_batches(self):
self._calculate_devices_with_critical_routing_bridging_roles()
self._calc_max_number_of_repr_of_critical_rb_roles_per_batch()
self._calculate_max_number_of_spines_updated_in_batch()
batches = self._assign_devices_to_batches()
self._add_batch_index_to_device_info(batches)
return batches
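    # Hypothetical example (added for illustration, not produced from real
    # fabric data): for two leaf devices that share no VPG plus two spines,
    # the returned plan could look roughly like
    #   [{'name': 'Batch 1', 'device_list': [leaf1_uuid, leaf2_uuid],
    #     'device_names': ['leaf1', 'leaf2']},
    #    {'name': 'Batch 2', 'device_list': [spine1_uuid],
    #     'device_names': ['spine1']},
    #    {'name': 'Batch 3', 'device_list': [spine2_uuid],
    #     'device_names': ['spine2']}]
    # because constraint 2 allows at most one of the two spines per batch.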
def _spill_device_details(self, device_name, device_info):
details = ""
basic = device_info['basic']
vpg_info = device_info['vpg_info']
batch_index = device_info.get('batch_index')
batch_name = self.batches[batch_index]['name'] \
if batch_index is not None else "N/A"
details += "\n - {}\n".format(device_name)
details += \
" uuid : {}\n"\
" vendor : {}\n"\
" family : {}\n"\
" product : {}\n"\
" serial number : {}\n"\
" management ip : {}\n"\
" username : {}\n"\
" password : {}\n"\
" new image version: {}\n"\
" current image version: {}\n"\
" image family : {}\n"\
" physical role : {}\n"\
" routing bridging roles: {}\n"\
" role : {}\n"\
" vpg list : {}\n"\
" vpg peers : {}\n"\
" batch : {}\n"\
" is hitless? : {}\n"\
.format(
device_info.get('uuid'),
basic.get('device_vendor'),
basic.get('device_family'),
basic.get('device_product'),
basic.get('device_serial_number'),
basic.get('device_management_ip'),
basic.get('device_username'),
"** hidden **", # basic.get('device_password'),
device_info.get('image_version'),
device_info.get('current_image_version'),
device_info.get('image_family'),
device_info.get('physical_role'),
device_info.get('rb_roles'),
device_info.get('role'),
vpg_info.get('vpg_list'),
[buddy['uuid'] for buddy in vpg_info.get('buddies')],
batch_name,
basic.get('device_hitless_upgrade'),
)
return details
def _generate_report(self):
report = ""
# generate devices dict with key of device name
devices = {}
for device_uuid, device_info in list(self.device_table.items()):
device_name = self.device_table[device_uuid]['name']
devices[device_name] = self.device_table[device_uuid]
# generate skipped devices dict with key of device name
sdevices = {}
for device_uuid, device_info in \
list(self.skipped_device_table.items()):
device_name = self.skipped_device_table[device_uuid]['name']
sdevices[device_name] = self.skipped_device_table[device_uuid]
# First dump summary
report += "\n********** Summary *************\n"
# Dump summary of batches
total_time = str(
timedelta(minutes=IMAGE_UPGRADE_DURATION * len(self.batches)))
if len(self.batches) > 0:
report += "\nTotal estimated " \
"duration is {}.\n".format(total_time)
report += "\nNote that this time " \
"estimate may vary depending on " \
"network speeds and system capabilities.\n"
report += "The following batches " \
"of devices will be upgraded in the order listed:\n"
for batch in self.batches:
report += "\n{}:\n".format(batch.get('name'))
for device_name in batch.get('device_names', []):
device_info = devices[device_name]
current_version = \
device_info['current_image_version'] or ""
new_version = device_info['image_version']
hitless_upgrade = \
device_info['basic']['device_hitless_upgrade']
is_hitless = "" if hitless_upgrade else "(not hitless)"
workflow_info = self._check_for_downgrade(device_info)
report += " {} {} --> {} {}{}\n".format(
device_name, current_version, new_version,
is_hitless, workflow_info)
else:
report += "\n NO DEVICES TO UPGRADE!"
report += "\n"
# Dump summary of skipped devices
if len(sdevices) > 0:
report += "\nThe following devices will not be upgraded " \
"for the reasons listed:\n"
for device_name, device_info in sorted(sdevices.items()):
                report += "\n  {} ({})".format(
                    device_name,
                    device_info.get('skip_reason', "unknown reason"))
report += "\n NOTE: \n Incompatible device-image platform with " \
"the same versions could also lead to a device being " \
"skipped for image upgrade. " \
"Please recheck the platform compatibility " \
"for the above skipped devices."
# Now dump the details
report += "\n******** Details ************\n"
# Dump device info
if len(devices) > 0:
report += "\nDetailed information for the " \
"devices to be upgraded is listed below:\n"
# Spill out sorted list
for device_name, device_info in sorted(devices.items()):
details = self._spill_device_details(device_name, device_info)
report += details
# Dump skipped device info
if len(sdevices) > 0:
report += "\nDetailed information for " \
"the devices to be skipped is listed below:\n"
# Spill out sorted list
for device_name, device_info in sorted(sdevices.items()):
details = self._spill_device_details(device_name, device_info)
report += details
return report
def _generate_results(self):
return self.report
# Get the current and next batch off the batch list and return
def get_next_batch(self, job_ctx, upgrade_plan, device_uuid):
try:
return self._get_next_batch(upgrade_plan, device_uuid)
except Exception as ex:
errmsg = "Unexpected error attempting to " \
"get next batch: %s\n%s" %\
(str(ex), traceback.format_exc())
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_next_batch
# Get the current and next batch off the batch list and return
def _get_next_batch(self, upgrade_plan, device_uuid):
c_idx, n_idx = None, None
current_batch, next_batch = {}, {}
batch_info = {
'current': {
'batch_name': None, 'batch_index': None, 'batch_devices': {}
},
'next': {
'batch_name': None, 'batch_index': None, 'batch_devices': {}
},
'status': "success"
}
if device_uuid:
device_info = upgrade_plan['device_table'].get(device_uuid)
if device_info:
c_idx = device_info['batch_index']
n_idx = c_idx + 1
else:
return batch_info
else:
n_idx = 0
if c_idx is not None:
batch = upgrade_plan['batches'][c_idx]
for device_uuid in batch['device_list']:
current_batch[device_uuid] = \
upgrade_plan['device_table'][device_uuid]['basic']
batch_info['current'] = {
'batch_name': batch['name'],
'batch_index': c_idx,
'batch_devices': current_batch}
if n_idx < len(upgrade_plan['batches']):
batch = upgrade_plan['batches'][n_idx]
for device_uuid in batch['device_list']:
next_batch[device_uuid] = \
upgrade_plan['device_table'][device_uuid]['basic']
batch_info['next'] = {
'batch_name': batch['name'],
'batch_index': n_idx,
'batch_devices': next_batch}
return batch_info
# end _get_next_batch
# Get list of all devices for use in test_run
def get_all_devices(self, job_ctx, upgrade_plan):
try:
return self._get_all_devices(upgrade_plan)
except Exception as ex:
errmsg = "Unexpected error attempting " \
"to get all devices: %s\n%s" % \
(str(ex), traceback.format_exc())
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_all_devices
# Get list of all devices for use in test_run
def _get_all_devices(self, upgrade_plan):
all_devices = {}
device_table = upgrade_plan['device_table']
batch_info = {
'current': {
'batch_name': None, 'batch_index': None, 'batch_devices': {}
},
'next': {
'batch_name': 'all', 'batch_index': 0, 'batch_devices': {}
},
'status': "success"
}
for device_uuid, device_info in list(device_table.items()):
all_devices[device_uuid] = device_table[device_uuid]['basic']
batch_info['next']['batch_devices'] = all_devices
return batch_info
# end get_all_devices
# Get info for a single device
def get_device_info(self, job_ctx, device_uuid):
try:
FilterLog.instance("HitlessUpgradeFilter")
self.job_input = FilterModule._validate_job_ctx(job_ctx)
self.fabric_uuid = self.job_input['fabric_uuid']
self.vncapi = JobVncApi.vnc_init(job_ctx)
self.job_ctx = job_ctx
self.ja = JobAnnotations(self.vncapi)
self.advanced_parameters = self._get_advanced_params()
self._cache_job_input()
self.device_uuid = device_uuid
device_info = self._get_device_info()
return device_info
except Exception as ex:
errmsg = "Unexpected error getting device info: %s\n%s" % (
str(ex), traceback.format_exc()
)
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_device_info
# Get device info used for maintenance mode activate
def _get_device_info(self):
self.device_table = self._generate_device_entry()
self.skipped_device_table = {}
self.vpg_table = self._generate_vpg_table()
self._generate_buddy_lists()
device_info = {
'advanced_parameters': self.advanced_parameters,
'device_table': self.device_table,
'vpg_table': self.vpg_table,
'status': "success"
}
return device_info
# end _get_device_info
# Validate whether fabric will be hitless when the given list of
# devices go into maintenance mode
def validate_critical_roles(self, job_ctx, device_uuid_list):
try:
FilterLog.instance("HitlessUpgradeFilter")
self.job_input = FilterModule._validate_job_ctx(job_ctx)
self.fabric_uuid = self.job_input['fabric_uuid']
self.vncapi = JobVncApi.vnc_init(job_ctx)
self.job_ctx = job_ctx
self.ja = JobAnnotations(self.vncapi)
self.advanced_parameters = self._get_advanced_params()
self._cache_job_input()
self.device_uuid_list = device_uuid_list
results = self._validate_critical_roles()
return results
except Exception as ex:
errmsg = "Unexpected error validating: %s\n%s" % (
str(ex), traceback.format_exc()
)
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
    # end validate_critical_roles
# Get device info used for maintenance mode activate
def _validate_critical_roles(self):
error_msg = ''
critical_dev_list = []
mm_dev_list = []
dev_list = self.vncapi.physical_routers_list(
fields=['fabric_refs', 'physical_role_refs',
'routing_bridging_roles', 'physical_router_managed_state'
]).get('physical-routers', [])
# Search through all devices in fabric and create a critical device
# list of devices which are active and performing critical roles
for dev in dev_list:
if dev['uuid'] in self.device_uuid_list:
mm_dev_list.append(dev)
continue
fabric_refs = dev.get('fabric_refs')
if not fabric_refs:
continue
fabric_uuid = fabric_refs[0]['uuid']
if fabric_uuid != self.fabric_uuid:
continue
managed_state = dev.get('physical_router_managed_state')
if managed_state and managed_state != 'active':
continue
physical_role_refs = dev.get('physical_role_refs')
if not physical_role_refs:
continue
physical_role = physical_role_refs[0]['to'][-1]
if physical_role == 'spine':
critical_dev_list.append(dev)
continue
routing_bridging_roles = dev.get('routing_bridging_roles')
if routing_bridging_roles:
rb_roles = routing_bridging_roles['rb_roles']
else:
rb_roles = []
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
critical_dev_list.append(dev)
break
# Make sure critical roles are present in critical devices
missing_roles = set()
for mm_dev in mm_dev_list:
# check critical physical roles
physical_role_refs = mm_dev.get('physical_role_refs')
if not physical_role_refs:
continue
physical_role = physical_role_refs[0]['to'][-1]
if physical_role == 'spine':
found = self._find_critical_phy_role(
physical_role, critical_dev_list)
if not found:
missing_roles.add(physical_role)
# check critical routing-bridging roles
routing_bridging_roles = mm_dev.get('routing_bridging_roles')
if routing_bridging_roles:
rb_roles = routing_bridging_roles['rb_roles']
else:
rb_roles = []
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
found = self._find_critical_rb_role(
rb_role, critical_dev_list)
if not found:
missing_roles.add(rb_role)
if missing_roles:
error_msg = 'Fabric will not be hitless because these '\
'roles will no longer be deployed: '\
'{}'.format(list(missing_roles))
if error_msg:
results = {
'error_msg': error_msg,
'status': "failure"
}
else:
results = {
'error_msg': "Fabric is hitless",
'status': "success"
}
return results
    # end _validate_critical_roles
# Find a particular critical physical role in a list of devices
def _find_critical_phy_role(self, crit_phy_role, dev_list):
for dev in dev_list:
physical_role_refs = dev.get('physical_role_refs')
if not physical_role_refs:
continue
physical_role = physical_role_refs[0]['to'][-1]
if physical_role == crit_phy_role:
return True
return False
    # end _find_critical_phy_role
# Find a particular critical routing-bridging role in a list of devices
def _find_critical_rb_role(self, crit_rb_role, dev_list):
for dev in dev_list:
routing_bridging_roles = dev.get('routing_bridging_roles')
if routing_bridging_roles:
rb_roles = routing_bridging_roles['rb_roles']
else:
rb_roles = []
for rb_role in rb_roles:
if crit_rb_role == rb_role:
return True
return False
# end _find_critical_rb_role
# generate a single entry of device information
def _generate_device_entry(self):
device_table = {}
device_obj = self.vncapi.physical_router_read(id=self.device_uuid)
routing_bridging_roles = device_obj.routing_bridging_roles
if not routing_bridging_roles:
raise ValueError("Cannot find routing-bridging roles")
rb_roles = routing_bridging_roles.get_rb_roles()
is_hitless_upgrade = self._is_hitless_upgrade(device_obj)
device_info = {
"basic": {
"device_fqname": device_obj.fq_name,
"device_vendor":
device_obj.physical_router_vendor_name,
"device_family":
device_obj.physical_router_device_family,
"device_product":
device_obj.physical_router_product_name,
"device_serial_number":
device_obj.physical_router_serial_number,
"device_management_ip":
device_obj.physical_router_management_ip,
"device_username":
device_obj.physical_router_user_credentials.username,
"device_password":
self._get_password(device_obj),
"device_hitless_upgrade": is_hitless_upgrade
},
'name': device_obj.fq_name[-1],
'uuid': self.device_uuid,
'physical_role': device_obj.physical_router_role,
'rb_roles': rb_roles,
'role': self._determine_role(
device_obj.physical_router_role, rb_roles),
'err_msgs': [],
'vpg_info': {"vpg_list": [], "buddies": []},
'target_multihomed_interface': []
}
device_table[self.device_uuid] = device_info
return device_table
# end _generate_device_entry
# Get a list of all devices that share vpg groups with this device
def _get_vpg_buddies(self, device_uuid):
device_info = self.device_table[device_uuid]
vpg_info = device_info['vpg_info']
return vpg_info.get('buddies', [])
# end _get_vpg_buddies
# Get a single role for this device to be used in determining upgrade
# ordering
def _determine_role(self, physical_role, rb_roles):
# Use physical role for now. If not in ordered table, use default
for role_group in ordered_role_groups:
for role in role_group:
if physical_role == role:
return physical_role
return "default"
# end _determine_role
# If old and new image versions match, don't upgrade
def _check_skip_device_upgrade(self, device_info):
if device_info['image_version'] == \
device_info['current_image_version']:
return True, "Upgrade image version matches current image version"
return False, ""
# end _check_skip_device_upgrade
def _check_for_downgrade(self, device_info):
new_image_int = int(re.sub(r"\D", "", device_info['image_version']))
current_image_int = int(
re.sub(
r"\D",
"",
device_info['current_image_version']))
if new_image_int > current_image_int:
return ""
else:
return "(Image downgrade)"
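    # Added example (not from the original source): re.sub(r"\D", "", ...)
    # keeps only the digits, so "19.4R3-S1" compares as 19431 and "18.4R1"
    # as 1841; the integer comparison above is what decides whether the
    # "(Image downgrade)" marker is appended to the report line.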
# Get device password
def _get_password(self, device_obj):
return JobVncApi.decrypt_password(
encrypted_password=device_obj.physical_router_user_credentials.
get_password(),
pwd_key=device_obj.uuid)
def _parse_args():
arg_parser = argparse.ArgumentParser(description='fabric filters tests')
arg_parser.add_argument('-p', '--generate_plan',
action='store_true', help='Generate Upgrade Plan')
arg_parser.add_argument('-b', '--next_batch',
action='store_true', help='Get Next Batch')
arg_parser.add_argument('-a', '--all_devices',
action='store_true', help='Get All Devices')
arg_parser.add_argument('-d', '--device_info',
action='store_true', help='Get Device Info')
return arg_parser.parse_args()
|
the-stack_0_5352 | # qubit number=4
# total number=38
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
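# Added usage notes (illustrative only): bitwise_xor("110", "000") returns
# "011" because the per-position XOR result is reversed before joining, and
# bitwise_dot("111", "101") returns "0" since 1*1 + 1*0 + 1*1 = 2 and
# 2 % 2 == 0.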
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=30
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
prog.h(input_qubit[3]) # number=13
prog.cz(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[3],input_qubit[1]) # number=19
prog.z(input_qubit[3]) # number=25
prog.h(input_qubit[1]) # number=20
prog.rx(-3.141592653589793,input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=17
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[1],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.cx(input_qubit[2],input_qubit[1]) # number=34
prog.x(input_qubit[1]) # number=23
prog.x(input_qubit[1]) # number=24
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2727.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_5353 | import pathlib
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import web
from aiohttp.web_urldispatcher import SystemRoute
@pytest.mark.parametrize(
"show_index,status,prefix,data",
[pytest.param(False, 403, '/', None, id="index_forbidden"),
pytest.param(True, 200, '/',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/my_dir">my_dir/</a></li>\n'
b'<li><a href="/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_root"),
pytest.param(True, 200, '/static',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/static/my_dir">my_dir/</a></li>\n'
b'<li><a href="/static/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_static")])
async def test_access_root_of_static_handler(tmp_path,
aiohttp_client,
show_index,
status,
prefix,
data) -> None:
"""
    Tests the operation of the static file server.
    Try to access the root of the static file server, and make
    sure that the correct HTTP status is returned depending on whether the
    directory index should be shown or not.
"""
my_file = tmp_path / 'my_file'
my_dir = tmp_path / 'my_dir'
my_dir.mkdir()
my_file_in_dir = my_dir / 'my_file_in_dir'
with my_file.open('w') as fw:
fw.write('hello')
with my_file_in_dir.open('w') as fw:
fw.write('world')
app = web.Application()
# Register global static route:
app.router.add_static(prefix, str(tmp_path), show_index=show_index)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get(prefix)
assert r.status == status
if data:
assert r.headers['Content-Type'] == "text/html; charset=utf-8"
read_ = (await r.read())
assert read_ == data
async def test_follow_symlink(tmp_path, aiohttp_client) -> None:
"""
    Tests access to a symlink in the static folder.
"""
data = 'hello world'
my_dir_path = tmp_path / 'my_dir'
my_dir_path.mkdir()
my_file_path = my_dir_path / 'my_file_in_dir'
with my_file_path.open('w') as fw:
fw.write(data)
my_symlink_path = tmp_path / 'my_symlink'
pathlib.Path(str(my_symlink_path)).symlink_to(str(my_dir_path), True)
app = web.Application()
# Register global static route:
app.router.add_static('/', str(tmp_path), follow_symlinks=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink/my_file_in_dir')
assert r.status == 200
assert (await r.text()) == data
@pytest.mark.parametrize('dir_name,filename,data', [
('', 'test file.txt', 'test text'),
('test dir name', 'test dir file .txt', 'test text file folder')
])
async def test_access_to_the_file_with_spaces(tmp_path, aiohttp_client,
dir_name, filename, data):
"""
Checks operation of static files with spaces
"""
my_dir_path = tmp_path / dir_name
if my_dir_path != tmp_path:
my_dir_path.mkdir()
my_file_path = my_dir_path / filename
with my_file_path.open('w') as fw:
fw.write(data)
app = web.Application()
url = '/' + str(pathlib.Path(dir_name, filename))
app.router.add_static('/', str(tmp_path))
client = await aiohttp_client(app)
r = await client.get(url)
assert r.status == 200
assert (await r.text()) == data
async def test_access_non_existing_resource(tmp_path,
aiohttp_client) -> None:
"""
    Tests accessing a non-existing resource.
    Try to access a non-existing resource and make sure that a 404 HTTP
    status is returned.
"""
app = web.Application()
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/non_existing_resource')
assert r.status == 404
@pytest.mark.parametrize('registered_path,request_url', [
('/a:b', '/a:b'),
('/a@b', '/a@b'),
('/a:b', '/a%3Ab'),
])
async def test_url_escaping(aiohttp_client,
registered_path,
request_url) -> None:
"""
    Tests accessing resources whose registered paths contain characters that
    may appear either literally or percent-encoded in the request URL.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(registered_path, handler)
client = await aiohttp_client(app)
r = await client.get(request_url)
assert r.status == 200
async def test_handler_metadata_persistence() -> None:
# Tests accessing metadata of a handler after registering it on the app
# router.
app = web.Application()
async def async_handler(request):
"""Doc"""
return web.Response()
app.router.add_get('/async', async_handler)
for resource in app.router.resources():
for route in resource:
assert route.handler.__doc__ == 'Doc'
async def test_unauthorized_folder_access(tmp_path,
aiohttp_client) -> None:
"""
Tests the unauthorized access to a folder of static file server.
    Try to list the contents of a folder of the static file server when the
    server does not have permission to do so.
"""
my_dir = tmp_path / 'my_dir'
my_dir.mkdir()
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
path = MagicMock()
path.joinpath.return_value = path
path.resolve.return_value = path
path.iterdir.return_value.__iter__.side_effect = PermissionError()
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/' + my_dir.name)
assert r.status == 403
async def test_access_symlink_loop(tmp_path, aiohttp_client) -> None:
"""
    Tests access to a looped symlink, which cannot be resolved.
"""
my_dir_path = tmp_path / 'my_symlink'
pathlib.Path(str(my_dir_path)).symlink_to(str(my_dir_path), True)
app = web.Application()
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/' + my_dir_path.name)
assert r.status == 404
async def test_access_special_resource(tmp_path, aiohttp_client) -> None:
"""
Tests the access to a resource that is neither a file nor a directory.
    Checks that if a special resource is accessed (e.g. a named pipe or a
    UNIX domain socket) then a 403 HTTP status is returned.
"""
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
special = MagicMock()
special.is_dir.return_value = False
special.is_file.return_value = False
path = MagicMock()
path.joinpath.side_effect = lambda p: (special if p == 'special'
else path)
path.resolve.return_value = path
special.resolve.return_value = special
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/special')
assert r.status == 403
def test_system_route() -> None:
route = SystemRoute(web.HTTPCreated(reason='test'))
with pytest.raises(RuntimeError):
route.url_for()
assert route.name is None
assert route.resource is None
assert "<SystemRoute 201: test>" == repr(route)
assert 201 == route.status
assert 'test' == route.reason
async def test_allow_head(aiohttp_client) -> None:
"""
Test allow_head on routes.
"""
app = web.Application()
async def handler(_):
return web.Response()
app.router.add_get('/a', handler, name='a')
app.router.add_get('/b', handler, allow_head=False, name='b')
client = await aiohttp_client(app)
r = await client.get('/a')
assert r.status == 200
await r.release()
r = await client.head('/a')
assert r.status == 200
await r.release()
r = await client.get('/b')
assert r.status == 200
await r.release()
r = await client.head('/b')
assert r.status == 405
await r.release()
@pytest.mark.parametrize("path", [
'/a',
'/{a}',
])
def test_reuse_last_added_resource(path) -> None:
"""
    Test that adding a route with the same name and path as the last added
resource doesn't create a new resource.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(path, handler, name="a")
app.router.add_post(path, handler, name="a")
assert len(app.router.resources()) == 1
def test_resource_raw_match() -> None:
app = web.Application()
async def handler(request):
return web.Response()
route = app.router.add_get("/a", handler, name="a")
assert route.resource.raw_match("/a")
route = app.router.add_get("/{b}", handler, name="b")
assert route.resource.raw_match("/{b}")
resource = app.router.add_static("/static", ".")
assert not resource.raw_match("/static")
async def test_add_view(aiohttp_client) -> None:
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_view("/a", MyView)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_decorate_view(aiohttp_client) -> None:
routes = web.RouteTableDef()
@routes.view("/a")
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app = web.Application()
app.router.add_routes(routes)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_web_view(aiohttp_client) -> None:
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_routes([
web.view("/a", MyView)
])
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_static_absolute_url(aiohttp_client, tmp_path) -> None:
# requested url is an absolute name like
# /static/\\machine_name\c$ or /static/D:\path
# where the static dir is totally different
app = web.Application()
file_path = tmp_path / 'file.txt'
file_path.write_text('sample text', 'ascii')
here = pathlib.Path(__file__).parent
app.router.add_static('/static', here)
client = await aiohttp_client(app)
resp = await client.get('/static/' + str(file_path.resolve()))
assert resp.status == 403
|
the-stack_0_5355 | # -*- coding: utf-8 -*-
"""Module scanning for the ROBOT vulnerability
Refer to CVE-2017-13099, etc.
Padding oracle for RSA-based key transport, refer to https://robotattack.org
"""
# import basic stuff
import math
# import own stuff
import tlsmate.msg as msg
import tlsmate.plugin as plg
import tlsmate.tls as tls
import tlsmate.utils as utils
# import other stuff
def _rsa_encrypt(msg, e, n, mod_bytes):
return int(pow(msg, e, n)).to_bytes(mod_bytes, byteorder="big")
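# Added toy example (illustration only; tiny numbers, not a real RSA
# modulus): _rsa_encrypt(4, 3, 55, 1) computes pow(4, 3, 55) == 9 and returns
# the single byte 0x09, i.e. textbook RSA m**e mod n serialized big-endian
# into mod_bytes bytes.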
class ScanRobot(plg.Worker):
name = "robot"
descr = "scan for ROBOT vulnerability"
prio = 41
def _get_oracle_results(self, with_ccs):
def cke_pre_serialization(message):
message.rsa_encrypted_pms = self.enc_pms
results = []
for self.enc_pms in self._rsa_encrypted_pms:
with self.client.create_connection() as conn:
conn.send(msg.ClientHello)
conn.wait(msg.ServerHello)
conn.wait(msg.Certificate)
conn.wait(msg.CertificateRequest, optional=True)
conn.wait(msg.ServerHelloDone)
conn.send(
msg.ClientKeyExchange, pre_serialization=cke_pre_serialization
)
self.premaster_secret = self.rnd_pms
if with_ccs:
conn.send(msg.ChangeCipherSpec)
conn.send(msg.Finished)
try:
rec_msg, rec_bytes = conn.wait_msg_bytes(msg.Any, timeout=1000)
results.append(hash(bytes(rec_bytes)))
except Exception as exc:
results.append(hash(str(exc)))
return results
def _determine_status(self):
for send_ccs_finished in [True, False]:
results = self._get_oracle_results(send_ccs_finished)
if len(set(results)) == 1:
continue
results2 = self._get_oracle_results(send_ccs_finished)
for res1, res2 in zip(results, results2):
if res1 != res2:
return tls.RobotVulnerability.INCONSITENT_RESULTS
if results[1] == results[2] == results[3]:
return tls.RobotVulnerability.WEAK_ORACLE
return tls.RobotVulnerability.STRONG_ORACLE
return tls.RobotVulnerability.NOT_VULNERABLE
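    # Added interpretation note: index 0 of the probe list is the well-formed
    # PKCS#1 v1.5 premaster secret and indices 1-4 are malformed variants; if
    # the server answers every probe identically no oracle is assumed, if the
    # grossly malformed probes 1-3 are mutually indistinguishable the oracle
    # is classified as weak, and any finer distinction as strong.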
def run(self):
values = self.server_profile.get_profile_values(
[tls.Version.TLS10, tls.Version.TLS11, tls.Version.TLS12], full_hs=True
)
rsa_ciphers = utils.filter_cipher_suites(
values.cipher_suites, key_algo=[tls.KeyExchangeAlgorithm.RSA]
)
if rsa_ciphers:
self.client.init_profile(profile_values=values)
self.client.profile.cipher_suites = rsa_ciphers
with self.client.create_connection() as conn:
conn.handshake()
if not conn.handshake_completed:
status = tls.RobotVulnerability.UNDETERMINED
else:
cert = conn.msg.server_certificate.chain.certificates[0]
pub_nbrs = cert.parsed.public_key().public_numbers()
modulus_bits = int(math.ceil(math.log(pub_nbrs.n, 2)))
modulus_bytes = (modulus_bits + 7) // 8
pad_len = (modulus_bytes - 48 - 3) * 2
rnd_pad = ("abcd" * (pad_len // 2 + 1))[:pad_len]
self.rnd_pms = (
"aa11223344556677889911223344556677889911223344"
"5566778899112233445566778899112233445566778899"
)
pms_good_in = int("0002" + rnd_pad + "00" + "0303" + self.rnd_pms, 16)
# wrong first two bytes
pms_bad_in1 = int("4117" + rnd_pad + "00" + "0303" + self.rnd_pms, 16)
# 0x00 on a wrong position, also trigger older JSSE bug
pms_bad_in2 = int("0002" + rnd_pad + "11" + self.rnd_pms + "0011", 16)
# no 0x00 in the middle
pms_bad_in3 = int("0002" + rnd_pad + "11" + "1111" + self.rnd_pms, 16)
# wrong version number (according to Klima / Pokorny / Rosa paper)
pms_bad_in4 = int("0002" + rnd_pad + "00" + "0202" + self.rnd_pms, 16)
self._rsa_encrypted_pms = [
_rsa_encrypt(pms, pub_nbrs.e, pub_nbrs.n, modulus_bytes)
for pms in [
pms_good_in,
pms_bad_in1,
pms_bad_in2,
pms_bad_in3,
pms_bad_in4,
]
]
status = self._determine_status()
else:
status = tls.RobotVulnerability.NOT_APPLICABLE
self.server_profile.allocate_vulnerabilities()
self.server_profile.vulnerabilities.robot = status
|
the-stack_0_5356 | from discord.ext import commands
from lxml import html
import aiohttp
import asyncio
import discord
class google:
""" Google search """
def __init__(self,bot):
self.bot = bot
@commands.command()
async def g(self,ctx,*,qstr:str):
""" Perform a google search """
p = {"q":qstr,"safe":"on"}
h = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'}
cs = self.bot.session
async with cs.get('https://www.google.com/search',
params=p, headers=h) as resp:
if resp.status != 200:
err = f"🚫 Google responded with status code {resp.status}"
return await ctx.send(err)
tree = html.fromstring(await resp.text())
# Generate Base Embed
e = discord.Embed(colour=0xdb3236)
th = "http://i.imgur.com/2Ielpqo.png"
e.set_author(name="Google Search",icon_url=th,url=resp.url)
# Scrape Google Cards
card = tree.xpath(".//*[contains(@id,'topstuff')]")
if card:
card = card[0]
# Calculator
x = ".//table/tr/td/span[@class='nobr']/h2[@class='r']/text()"
calc = card.xpath(x)
if calc:
e.title = "Calculator"
e.description = calc[0]
# Unit Conversion
uc = tree.xpath(".//ol//div[@class='_Tsb']")
if uc:
uc = uc[0]
e.title = '🔄 Unit Conversion'
e.description = "".join(uc.xpath(".//text()"))
# Currency
curr = tree.xpath(".//ol/table[@class='std _tLi']/tr/td/h2")
if curr:
curr = curr[0]
e.title = '💷 Currency Conversion'
e.description = "".join(curr.xpath(".//text()"))
# Definition
x = ".//ol/div[@class='g']/div[h3[@class='r']/div]"
defin = tree.xpath(x)
if defin:
e.title = '📖 Definition'
defnode = defin[0]
texts = defnode.xpath(".//text()")
e.description = f"**{texts[0]}**\n{texts[1]}"
deftype = defnode.xpath(".//td/div/text()")[0]
deflist = defnode.xpath(".//ol/li/text()")
e.add_field(name=deftype,value="\n".join(deflist))
# Date
release = tree.xpath(".//div[@id='_vBb']")
if release:
release = release[0]
fields = release.xpath(".//text()")
e.title = f'🗓️ {"".join(fields[1:])}'
e.description = fields[0]
# Time in Card
timein = tree.xpath(".//ol//div[@class='_Tsb _HOb _Qeb']")
if timein:
timein = timein[0]
e.title = f"🕛 {timein.xpath('.//text()')[4].strip()}"
e.description = "".join(timein.xpath(".//text()")[0:4])
# Weather
weather = tree.xpath(".//ol//div[@class='e']")
if weather:
weather = weather[0]
items = weather.xpath('.//text()')
e.description = items[10]
e.title = "".join(items[0:3])
we = {
"Rain":"🌧️",
"Cloudy":"☁️️",
"Clear with periodic clouds":"🌤️",
"Clear":"🌞","Snow Showers":"🌨️",
"Mostly Cloudy":"☁️️",
"Mostly Sunny":"🌤",
"Partly Cloudy":"🌤️",
"Sunny":"🌞"
}
try:
e.description = f"{we[e.description]} {e.description}"
except KeyError:
await ctx.send(f"Emoji not found for {e.description}")
e.add_field(name="Temperature",value=items[3])
e.add_field(name="Humidity",value=items[13][9:])
e.add_field(name="Wind",value=items[12])
# Translate
x = (".//ol/div[@class='g'][1]//table[@class='ts']"
"//h3[@class='r'][1]//text()")
translate = tree.xpath(x)
if translate:
e.title = "Translation"
e.description = "".join(translate)
# Time Conversion
timecard = tree.xpath("..//div[@class='_NId']")
if timecard:
e.title = '≡ Time Conversion'
e.description = "".join(timecard.xpath(".//text()"))
# Write to file for debugging.
# with open('google.html', 'w', encoding='utf-8') as f:
# f.write(html.tostring(tree).decode('utf-8'))
# Search
resultnodes = tree.xpath(".//div[@class='g']")
res = []
for i in resultnodes:
link = i.xpath(".//h3[@class = 'r']/a/@href")
# if not a proper result node, go to next item.
if not link or "/search?q=" in link[0]:
continue
link = link[0]
                # strip redirect wrapping and tracking params from the link
                if "/url?q=" in link:
                    link = link.split("/url?q=")[1]
if "&sa" in link:
link = link.rsplit("&sa")[0]
link = link.replace(')',"%29")
title = i.xpath("string(.//h3[@class = 'r']/a)")
desc = i.xpath("string(.//span[contains(@class,'st')])")
res.append((link,title,desc))
if not res:
await ctx.send("🚫 No results found.")
return
if e.description == e.Empty:
e.title = res[0][1]
e.url = res[0][0]
e.description = res[0][2]
more = f"[{res[1][1]}]({res[1][0]})\n[{res[2][1]}]({res[2][0]})"
else:
more = (f"[{res[0][1]}]({res[0][0]})\n"
f"[{res[1][1]}]({res[1][0]})\n"
f"[{res[2][1]}]({res[2][0]})")
e.add_field(name="More Results",value=more)
await ctx.send(embed=e)
def setup(bot):
bot.add_cog(google(bot)) |
the-stack_0_5357 | import clr
import sys
sys.path.append('C:\Program Files (x86)\IronPython 2.7\Lib')
import os
import math
clr.AddReference('acmgd')
clr.AddReference('acdbmgd')
clr.AddReference('accoremgd')
# Import references from AutoCAD
from Autodesk.AutoCAD.Runtime import *
from Autodesk.AutoCAD.ApplicationServices import *
from Autodesk.AutoCAD.EditorInput import *
from Autodesk.AutoCAD.DatabaseServices import *
from Autodesk.AutoCAD.Geometry import *
doc = Application.DocumentManager.MdiActiveDocument
ed = doc.Editor
db = doc.Database
#Code Here :
objects = []
with doc.LockDocument():
with doc.Database as db:
with db.TransactionManager.StartTransaction() as t:
acblkbl = t.GetObject(db.BlockTableId,OpenMode.ForRead)
print(type(acblkbl))
acblktblrec = t.GetObject(acblkbl[BlockTableRecord.ModelSpace],OpenMode.ForWrite)
print(type(acblktblrec))
sel = doc.Editor.GetSelection()
if(sel.Status== PromptStatus.OK):
results = sel.Value
for i in range(len(results)):
if(results[i] != None) : objects.append(i)
else : pass
print("Count Object Exploded:",len(objects)) |
the-stack_0_5358 | # coding=utf-8
import numpy as np
from pyhsmm.models import _HMMGibbsSampling, _HMMEM, _HMMMeanField
from pyhsmm.internals.initial_state import UniformInitialState
from autoregressive.models import _ARMixin
from autoregressive.util import AR_striding
from pyslds.models import _SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin
from rslds.states import InputHMMStates, PGRecurrentSLDSStates, SoftmaxRecurrentSLDSStates
import rslds.transitions as transitions
### Input-driven HMMs
class _InputHMMMixin(object):
# Subclasses must specify the type of transition model
_trans_class = None
# custom init method, just so we call custom input trans class stuff
def __init__(self,
obs_distns,
D_in=0,
trans_distn=None, trans_params={},
init_state_distn=None, init_state_concentration=None, pi_0=None,
):
self.obs_distns = obs_distns
self.states_list = []
self.D_in = D_in
# our trans class
if trans_distn is None:
self.trans_distn = self._trans_class(num_states=len(obs_distns),
covariate_dim=D_in,
**trans_params)
else:
self.trans_distn = trans_distn
if init_state_distn is not None:
if init_state_distn == 'uniform':
self.init_state_distn = UniformInitialState(model=self)
else:
self.init_state_distn = init_state_distn
else:
self.init_state_distn = self._init_state_class(
model=self,
init_state_concentration=init_state_concentration,
pi_0=pi_0)
self._clear_caches()
# custom add_data - includes a covariates arg
def add_data(self, data, covariates=None, **kwargs):
# NOTE! Our convention is that covariates[t] drives the
# NOTE! transition matrix going into time t. However, for
# NOTE! implementation purposes, it is easier if these inputs
# NOTE! are lagged so that covariates[t] drives the input to
# NOTE! z_{t+1}. Then, we only have T-1 inputs for the T-1
# NOTE! transition matrices in the heterogeneous model.
# Offset the covariates by one so that
# the inputs at time {t-1} determine the transition matrix
# from z_{t-1} to z_{t}.
offset_covariates = covariates[1:]
self.states_list.append(
self._states_class(
model=self, data=data,
covariates=offset_covariates, **kwargs))
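    # Added note: with T observations and covariates u[0..T-1], the slice
    # u[1:] passed above supplies exactly the T-1 inputs that drive the
    # transitions z[0]->z[1], ..., z[T-2]->z[T-1], matching the convention
    # described in the NOTE comments.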
def generate(self, T=100, covariates=None, keep=True):
if covariates is None:
covariates = np.zeros((T, self.D_in))
else:
assert covariates.ndim == 2 and \
covariates.shape[0] == T
s = self._states_class(model=self, covariates=covariates[1:], T=T, initialize_from_prior=True)
data = self._generate_obs(s)
if keep:
self.states_list.append(s)
return (data, covariates), s.stateseq
def resample_trans_distn(self):
self.trans_distn.resample(
stateseqs=[s.stateseq for s in self.states_list],
covseqs=[s.covariates for s in self.states_list],
)
self._clear_caches()
class PGInputHMM(_InputHMMMixin, _HMMGibbsSampling):
_trans_class = transitions.InputHMMTransitions
_states_class = InputHMMStates
class PGInputOnlyHMM(PGInputHMM):
_trans_class = transitions.InputOnlyHMMTransitions
class PGStickyInputOnlyHMM(PGInputHMM):
_trans_class = transitions.StickyInputOnlyHMMTransitions
class SoftmaxInputHMM(_InputHMMMixin, _HMMGibbsSampling, _HMMEM):
_trans_class = transitions.SoftmaxInputHMMTransitions
_states_class = InputHMMStates
## EM
def _M_step_trans_distn(self):
zs = [s.expected_states.argmax(1).astype(np.int32) for s in self.states_list]
xs = [s.covariates for s in self.states_list]
xs = [np.row_stack([x, np.zeros(x.shape[1])]) for x in xs]
self.trans_distn.initialize_with_logistic_regression(zs, xs)
class SoftmaxInputOnlyHMM(SoftmaxInputHMM):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
### ARHMM's
class _InputARHMMMixin(_InputHMMMixin, _ARMixin):
def add_data(self, data, covariates=None, strided=False, **kwargs):
if covariates is None:
covariates = np.zeros((data.shape[0], 0))
strided_data = AR_striding(data,self.nlags) if not strided else data
lagged_covariates = covariates[self.nlags:]
assert strided_data.shape[0] == lagged_covariates.shape[0]
# Pass to InputHMM
super(_InputARHMMMixin, self).add_data(data=strided_data,
covariates=lagged_covariates,
**kwargs)
class PGInputARHMM(_InputARHMMMixin, _HMMGibbsSampling):
_trans_class = transitions.InputHMMTransitions
_states_class = InputHMMStates
class PGInputOnlyARHMM(PGInputARHMM):
_trans_class = transitions.InputOnlyHMMTransitions
class PGStickyInputOnlyARHMM(PGInputARHMM):
_trans_class = transitions.StickyInputOnlyHMMTransitions
class SoftmaxInputARHMM(_InputARHMMMixin, _HMMGibbsSampling, _HMMEM):
_trans_class = transitions.SoftmaxInputHMMTransitions
_states_class = InputHMMStates
class SoftmaxInputOnlyARHMM(SoftmaxInputARHMM):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
### Recurrent ARHMM's
class _RecurrentARHMMMixin(_InputARHMMMixin):
"""
In the "recurrent" version, the data also serve as covariates.
"""
def add_data(self, data, covariates=None, strided=False, **kwargs):
# Remember that the covariates[t] drives the transition probabilities p(z[t] | ...)
# under our convention for add_data.
T = data.shape[0]
if covariates is None:
covariates = np.zeros((T, 0))
else:
assert covariates.shape[0] == T
# Combine the lagged data and the given covariates
covariates = np.column_stack((
np.row_stack((np.zeros(self.D), data[:-1])),
covariates))
super(_RecurrentARHMMMixin, self).add_data(data, covariates=covariates, **kwargs)
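    # Added note: the transition covariate at time t is the previous
    # observation data[t-1] (a zero vector at t == 0) stacked with any
    # external covariates, which is what makes the ARHMM "recurrent".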
def generate(self, T=100, keep=True, init_data=None, covariates=None, with_noise=True):
from pybasicbayes.util.stats import sample_discrete
# Generate from the prior and raise exception if unstable
K, n = self.num_states, self.D
# Prepare the covariates
if covariates is None:
covariates = np.zeros((T, 0))
else:
assert covariates.shape[0] == T
# Initialize discrete state sequence
pi_0 = self.init_state_distn.pi_0
dss = np.empty(T, dtype=np.int32)
dss[0] = sample_discrete(pi_0.ravel())
data = np.empty((T, n), dtype='double')
if init_data is None:
data[0] = np.random.randn(n)
else:
data[0] = init_data
for t in range(1, T):
# Sample discrete state given previous continuous state and covariates
cov_t = np.column_stack((data[t-1:t], covariates[t]))
A = self.trans_distn.get_trans_matrices(cov_t)[0]
dss[t] = sample_discrete(A[dss[t-1], :])
# Sample continuous state given current discrete state
if with_noise:
data[t] = self.obs_distns[dss[t]].rvs(cov_t, return_xy=False)
else:
data[t] = self.obs_distns[dss[t]].predict(cov_t)
assert np.all(np.isfinite(data[t])), "RARHMM appears to be unstable!"
# TODO:
# if keep:
# ...
return data, dss
class PGRecurrentARHMM(_RecurrentARHMMMixin, _HMMGibbsSampling):
_trans_class = transitions.InputHMMTransitions
_states_class = InputHMMStates
class PGRecurrentOnlyARHMM(PGRecurrentARHMM):
_trans_class = transitions.InputOnlyHMMTransitions
class PGStickyRecurrentOnlyARHMM(PGRecurrentARHMM):
_trans_class = transitions.StickyInputOnlyHMMTransitions
class SoftmaxRecurrentARHMM(_RecurrentARHMMMixin, _HMMGibbsSampling, _HMMEM):
_trans_class = transitions.SoftmaxInputHMMTransitions
_states_class = InputHMMStates
class SoftmaxRecurrentOnlyARHMM(SoftmaxRecurrentARHMM):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
### Stick-breaking transition models with Pólya-gamma augmentation
class _RecurrentSLDSBase(object):
def __init__(self, dynamics_distns, emission_distns, init_dynamics_distns,
fixed_emission=False, **kwargs):
self.fixed_emission = fixed_emission
# This class must always be used in conjunction with an SLDS class
super(_RecurrentSLDSBase, self).__init__(
dynamics_distns, emission_distns, init_dynamics_distns,
D_in=dynamics_distns[0].D_out, **kwargs)
def add_data(self, data, **kwargs):
self.states_list.append(
self._states_class(model=self, data=data, **kwargs))
class PGRecurrentSLDS(_RecurrentSLDSBase, _SLDSGibbsMixin, PGInputHMM):
_states_class = PGRecurrentSLDSStates
_trans_class = transitions.InputHMMTransitions
def resample_trans_distn(self):
# Include the auxiliary variables used for state resampling
self.trans_distn.resample(
stateseqs=[s.stateseq for s in self.states_list],
covseqs=[s.covariates for s in self.states_list],
omegas=[s.trans_omegas for s in self.states_list]
)
self._clear_caches()
def resample_emission_distns(self):
if self.fixed_emission:
return
super(PGRecurrentSLDS, self).resample_emission_distns()
class StickyPGRecurrentSLDS(PGRecurrentSLDS):
_trans_class = transitions.StickyInputHMMTransitions
class PGRecurrentOnlySLDS(PGRecurrentSLDS):
_trans_class = transitions.InputOnlyHMMTransitions
class StickyPGRecurrentOnlySLDS(PGRecurrentSLDS):
_trans_class = transitions.StickyInputOnlyHMMTransitions
### Softmax transition models with variational inference
class SoftmaxRecurrentSLDS(_RecurrentSLDSBase, _SLDSMeanFieldMixin, _SLDSVBEMMixin, SoftmaxInputHMM):
_states_class = SoftmaxRecurrentSLDSStates
_trans_class = transitions.SoftmaxInputHMMTransitions
def _M_step_trans_distn(self):
stack_tuples = lambda lst: list(map(lambda xs: np.concatenate(xs, axis=0), zip(*lst)))
self.trans_distn.max_likelihood(
stats=stack_tuples([s.E_trans_stats for s in self.states_list]))
def meanfield_update_trans_distn(self):
# Include the auxiliary variables of the lower bound
stack_tuples = lambda lst: list(map(lambda xs: np.concatenate(xs, axis=0), zip(*lst)))
self.trans_distn.meanfieldupdate(
stats=stack_tuples([s.E_trans_stats for s in self.states_list]))
def _init_mf_from_gibbs(self):
self.trans_distn._initialize_mean_field()
super(SoftmaxRecurrentSLDS, self)._init_mf_from_gibbs()
def initialize_transitions_from_gibbs(self):
self.trans_distn.initialize_with_logistic_regression(
[s.stateseq for s in self.states_list],
[s.gaussian_states for s in self.states_list])
def meanfield_update_parameters(self):
self.meanfield_update_init_dynamics_distns()
self.meanfield_update_dynamics_distns()
self.meanfield_update_emission_distns()
super(SoftmaxRecurrentSLDS, self).meanfield_update_parameters()
class SoftmaxRecurrentOnlySLDS(SoftmaxRecurrentSLDS):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
|
the-stack_0_5359 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2021
class TestSGD(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "sgd"
self.conf()
w = np.random.random((self.h, self.w)).astype("float32")
g = np.random.random((self.h, self.w)).astype("float32")
lr = np.array([0.1]).astype("float32")
self.inputs = {'Param': w, 'Grad': g, 'LearningRate': lr}
self.outputs = {'ParamOut': w - lr * g}
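        # Added comment: the reference output is the plain SGD update
        # w_new = w - lr * grad, computed with NumPy so the NPU kernel result
        # can be checked against it.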
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def conf(self):
self.h = 12
self.w = 15
def test_check_output(self):
self.check_output_with_place(self.place)
class TestNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(32, 32)).astype('float32')
b_np = np.random.random(size=(32, 32)).astype('float32')
label_np = np.random.randint(2, size=(32, 1)).astype('int64')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
label = paddle.static.data(name="label",
shape=[32, 1],
dtype='int64')
sum = paddle.add(a, b)
z = paddle.pow(sum, 2.0)
fc_1 = fluid.layers.fc(input=z, size=128)
prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
loss = fluid.layers.reduce_mean(cost)
sgd = fluid.optimizer.SGD(learning_rate=0.01)
sgd.minimize(loss)
if run_npu:
place = paddle.NPUPlace(0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
pred_res, loss_res = exe.run(main_prog,
feed={
"a": a_np,
"b": b_np,
"label": label_np
},
fetch_list=[prediction, loss])
if epoch % 10 == 0:
print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res))
return pred_res, loss_res
def test_npu(self):
cpu_pred, cpu_loss = self._test(False)
npu_pred, npu_loss = self._test(True)
self.assertTrue(np.allclose(npu_pred, cpu_pred))
self.assertTrue(np.allclose(npu_loss, cpu_loss))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5360 | """ Unit tests cases.
Copyright (c) 2003 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
try:
import logging
except:
from simpletal import DummyLogger as logging
from pubtal import SiteUtils
import updateSite
import unittest, copy, os.path
root = logging.getLogger()
root.setLevel (logging.WARN)
TEMPLATE1 = '<html><body><h1 tal:content="page/headers/title"></h1> <div tal:content="structure page/content"></div></body></html>'
CONTENT1 = """title: Test1
<p>This is the <b>first</b> test.
With a newline
Or two</p>
<p>And a paragraph.</p>
<p>
1
2
3</p>
<p>So there, jimmy lad!</p>"""
CONFIG1 = """<Content>
content-type Raw
</Content>"""
RESULT1 = {'index.html': """<html><body><h1>Test1</h1> <div><p>This is the <b>first</b> test.
With a newline
Or two</p>
<p>And a paragraph.</p>
<p>
1
2
3</p>
<p>So there, jimmy lad!</p></div></body></html>"""}
class RawContentTestCases (unittest.TestCase):
def setUp (self):
self.site = SiteUtils.SiteBuilder()
self.site.buildDirs()
def tearDown (self):
self.site.destroySite()
pass
def _runTest_ (self, expectedResult, configFile=None):
if (configFile is None):
conf = os.path.join (self.site.getSiteDir(), "test.config")
else:
conf = configFile
update = updateSite.UpdateSite (conf, None, ui=SiteUtils.SilentUI())
update.buildSite()
comp = SiteUtils.DirCompare()
res = comp.compare (self.site.getDestDir(), expectedResult)
self.failUnless (res is None, res)
def testRawContent (self):
self.site.createTemplate ('template.html', TEMPLATE1)
self.site.createContent ('index.txt', CONTENT1)
self.site.createConfigFile ('test.config', CONFIG1)
self._runTest_ (RESULT1)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5362 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import onnx
from onnx import helper, TensorProto, mapping, numpy_helper
import torch
import torchvision
import pytest
import tvm.topi.testing
import tvm
from tvm import relay
from tvm.contrib import graph_executor
import scipy
import tvm.testing
def get_input_data_shape_dict(graph_def, input_data):
if isinstance(input_data, list):
input_names = {}
shape_dict = {}
for i, _ in enumerate(input_data):
input_names[i] = graph_def.graph.input[i].name
shape_dict[input_names[i]] = input_data[i].shape
else:
input_names = graph_def.graph.input[0].name
shape_dict = {input_names: input_data.shape}
return input_names, shape_dict
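# Note on the helper above: for a single ndarray input it returns the name of
# the model's first graph input plus a one-entry {name: shape} dict; for a list
# of inputs it returns an {index: name} mapping and a {name: shape} dict keyed
# by the graph's declared input order.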
def get_tvm_output_with_vm(
graph_def, input_data, target, device, opset=None, freeze_params=False, convert_to_static=False
):
""" Generic function to execute and get tvm output with vm executor"""
if not isinstance(input_data, list):
input_data = [input_data]
_, shape_dict = get_input_data_shape_dict(graph_def, input_data)
mod, params = relay.frontend.from_onnx(
graph_def, shape_dict, opset=opset, freeze_params=freeze_params
)
if convert_to_static:
mod = relay.transform.DynamicToStatic()(mod)
ex = relay.create_executor("vm", mod=mod, device=device, target=target)
result = ex.evaluate()(*input_data, **params)
if isinstance(result, tvm.runtime.NDArray):
return result.asnumpy()
return [r.asnumpy() for r in result]
def get_tvm_output(
graph_def, input_data, target, device, output_shape=None, output_dtype="float32", opset=None
):
""" Generic function to execute and get tvm output"""
# TODO: Resolve the issues and remove the following lines
target = "llvm"
device = tvm.cpu(0)
input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)
mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
with tvm.transform.PassContext(opt_level=1):
graph, lib, params = relay.build(mod, target, params=params)
m = graph_executor.create(graph, lib, device)
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_names):
            # It's possible for some ONNX inputs to not be needed in the TVM
            # module, so confirm it's present before setting.
try:
m.set_input(input_names[i], tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
except:
continue
else:
m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
if isinstance(output_shape, list):
tvm_output_list = []
for i, _ in enumerate(output_shape):
tvm_output = m.get_output(i)
tvm_output_list.append(tvm_output.asnumpy())
return tvm_output_list
else:
tvm_output = m.get_output(0)
return tvm_output.asnumpy()
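# A minimal sketch (not part of the original suite) of how the two executor
# helpers above are typically driven: build a one-op ONNX model, then hand it
# to get_tvm_output (graph executor, static shapes) or get_tvm_output_with_vm
# (Relay VM, optionally freezing initializers). The Relu model and the
# _example_* name below are hypothetical and unused by the tests.
def _example_get_tvm_output_usage():
    node = helper.make_node("Relu", ["x"], ["y"])
    graph = helper.make_graph(
        [node],
        "relu_example",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 4])],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 4])],
    )
    model = helper.make_model(graph, producer_name="relu_example")
    x = np.random.uniform(-1.0, 1.0, size=(1, 4)).astype("float32")
    # get_tvm_output currently pins target/device to llvm/cpu internally.
    return get_tvm_output(model, x, "llvm", tvm.cpu(0), (1, 4), "float32")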
def get_onnxruntime_output(model, inputs):
import onnxruntime.backend
rep = onnxruntime.backend.prepare(model, "CPU")
if isinstance(inputs, list) and len(inputs) == 1:
inp = inputs[0]
else:
inp = inputs
output = rep.run(inp)
# Unpack output if there's only a single value.
if len(output) == 1:
output = output[0]
return output
def verify_with_ort_with_inputs(
model,
inputs,
out_shape=None,
targets=None,
use_vm=False,
opset=None,
freeze_params=False,
convert_to_static=False,
dtype="float32",
rtol=1e-5,
atol=1e-5,
apply_softmax=False,
):
if opset is not None:
model.opset_import[0].version = opset
ort_out = get_onnxruntime_output(model, inputs)
if targets is None:
targets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]
for target in targets:
dev = tvm.device(target, 0)
if use_vm:
tvm_out = get_tvm_output_with_vm(
model,
inputs,
target,
dev,
opset=opset,
freeze_params=freeze_params,
convert_to_static=convert_to_static,
)
else:
tvm_out = get_tvm_output(model, inputs, target, dev, out_shape, dtype, opset=opset)
if not isinstance(tvm_out, list):
tvm_out = [tvm_out]
if not isinstance(ort_out, list):
ort_out = [ort_out]
for tvm_val, ort_val in zip(tvm_out, ort_out):
if apply_softmax:
ort_val = scipy.special.softmax(ort_val)
tvm_val = scipy.special.softmax(tvm_val)
tvm.testing.assert_allclose(ort_val, tvm_val, rtol=rtol, atol=atol)
assert ort_val.dtype == tvm_val.dtype
def verify_with_ort(
model,
input_shapes,
out_shape=None,
targets=None,
use_vm=False,
opset=None,
freeze_params=False,
convert_to_static=False,
dtype="float32",
rtol=1e-5,
atol=1e-5,
):
inputs = [np.random.uniform(size=ishape).astype(dtype) for ishape in input_shapes]
verify_with_ort_with_inputs(
model,
inputs,
out_shape=out_shape,
targets=targets,
use_vm=use_vm,
opset=opset,
freeze_params=freeze_params,
convert_to_static=convert_to_static,
dtype=dtype,
rtol=rtol,
atol=atol,
)
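# Note on the two verification entry points above: verify_with_ort_with_inputs
# compares TVM against onnxruntime on caller-supplied arrays, while
# verify_with_ort is a thin wrapper that generates uniform random inputs for
# the given shapes and forwards every other argument unchanged.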
def make_constant_node(name, data_type, dims, vals):
return helper.make_node(
"Constant",
inputs=[],
outputs=[name],
value=helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals),
)
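# A minimal sketch (not part of the original tests): make_constant_node embeds
# a literal tensor as an ONNX Constant op, e.g. the scalar "min"/"max" bounds
# built in test_clip_min_max_as_inputs below. The function name here is
# hypothetical and never called by the suite.
def _example_scalar_constant():
    # Rank-0 float32 constant with value 0.5, usable directly as a graph node.
    return make_constant_node("example_const", onnx.TensorProto.FLOAT, (), [0.5])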
@tvm.testing.uses_gpu
def test_reshape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
graph = helper.make_graph(
[ref_node, reshape_node],
"reshape_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="reshape_test")
for target, dev in tvm.testing.enabled_targets():
x = np.random.uniform(size=in_shape).astype("int32")
tvm_out = get_tvm_output(model, x, target, dev, ref_shape, "float32")
tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
@tvm.testing.uses_gpu
def test_double_reshape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
reshape_node1 = helper.make_node("Reshape", ["in", "ref_in"], ["out1"])
reshape_node2 = helper.make_node("Reshape", ["in", "ref_in"], ["out2"])
add_node = helper.make_node("Add", ["out1", "out2"], ["out"])
graph = helper.make_graph(
[ref_node, reshape_node1, reshape_node2, add_node],
"reshape_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="reshape_test")
for target, dev in tvm.testing.enabled_targets():
x = np.random.uniform(size=in_shape).astype("int32")
tvm_out = get_tvm_output(model, x, target, dev, ref_shape, "float32")
tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
@tvm.testing.uses_gpu
def test_expand():
def _test_expand(name, data, shape, ref_data, dtype="int32"):
shape_array = np.array(shape)
if dtype == "int32":
shape_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["shape"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=shape_array.shape,
vals=shape_array.flatten().astype("int32"),
),
)
elif dtype == "int64":
shape_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["shape"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT64,
dims=shape_array.shape,
vals=shape_array.flatten().astype("int64"),
),
)
else:
raise "Invalid dtype"
expand_node = helper.make_node("Expand", ["in", "shape"], ["out"])
graph = helper.make_graph(
[shape_node, expand_node],
"expand_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(data.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_data.shape))],
)
model = helper.make_model(graph, producer_name=name)
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(model, data, target, dev, freeze_params=True)
tvm.testing.assert_allclose(ref_data, tvm_out)
in_shape = (3, 1)
shape = (3, 4)
data = np.random.uniform(size=in_shape).astype(np.float32)
ref_data = np.tile(data, 4)
_test_expand("expand_with_dim_unchanged_test", data, shape, ref_data, "int32")
_test_expand("expand_with_dim_unchanged_test", data, shape, ref_data, "int64")
in_shape = (3, 1)
shape = (2, 1, 6)
data = np.random.uniform(size=in_shape).astype(np.float32)
ref_data = data * np.ones(shape, dtype=np.float32)
_test_expand("expand_with_dim_changed_test", data, shape, ref_data, "int32")
_test_expand("expand_with_dim_changed_test", data, shape, ref_data, "int64")
def verify_depth_to_space(inshape, outshape, mode, blockSize):
node = onnx.helper.make_node("DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blockSize)
graph = helper.make_graph(
[node],
"depth_to_space_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name="depth_to_space_test")
verify_with_ort(model, [inshape], [outshape])
@tvm.testing.uses_gpu
def test_depth_to_space():
    # The current onnx.checker uses the OpSet-1 version of DepthToSpace, which doesn't have a mode argument.
    # TODO: add a mode argument to exercise both CRD and DCR modes once we update to a newer onnx version.
verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode="CRD", blockSize=2)
def verify_space_to_depth(inshape, outshape, blockSize):
node = onnx.helper.make_node("SpaceToDepth", inputs=["x"], outputs=["y"], blocksize=blockSize)
graph = helper.make_graph(
[node],
"space_to_depth_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name="space_to_depth_test")
verify_with_ort(model, [inshape], [outshape])
@tvm.testing.uses_gpu
def test_space_to_depth():
verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)
@tvm.testing.uses_gpu
def test_shape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
shape_node = helper.make_node("Shape", ["out"], ["final_out"])
graph = helper.make_graph(
[ref_node, reshape_node, shape_node],
"shape_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("final_out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="shape_test")
for target, dev in tvm.testing.enabled_targets():
x = np.random.uniform(size=in_shape).astype("int32")
tvm_out = get_tvm_output(model, x, target, dev, ref_shape, "int32")
tvm.testing.assert_allclose(ref_shape, tvm_out)
def _test_power_iteration(x_shape, y_shape):
if isinstance(y_shape, int):
y_shape = [y_shape]
x = np.random.uniform(size=x_shape).astype(np.float32)
y = np.random.uniform(size=y_shape).astype(np.float32)
np_res = np.power(x, y).astype(np.float32)
res = helper.make_node("Pow", ["x", "y"], ["out"])
graph = helper.make_graph(
[res],
"power_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(np_res.shape))],
)
model = helper.make_model(graph, producer_name="power_test")
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output(model, [x, y], target, dev, np_res.shape)
tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_power():
_test_power_iteration((1, 3), (1))
_test_power_iteration((2, 3), (2, 3))
_test_power_iteration((2, 3), (1, 3))
def verify_range(start, limit, delta, dtype):
dtype_map = {
"float32": TensorProto.FLOAT,
"int32": TensorProto.INT32,
"int64": TensorProto.INT64,
}
dtype_onnx = dtype_map[dtype]
y = helper.make_node("Range", ["start", "limit", "delta"], ["output"])
graph = helper.make_graph(
[y],
"range_test",
inputs=[
helper.make_tensor_value_info("start", dtype_onnx, []),
helper.make_tensor_value_info("limit", dtype_onnx, []),
helper.make_tensor_value_info("delta", dtype_onnx, []),
],
outputs=[
helper.make_tensor_value_info(
"output", dtype_onnx, np.arange(start, limit, delta).shape
)
],
)
model = helper.make_model(graph, producer_name="range_test")
inputs = [np.array(x).astype(dtype) for x in [start, limit, delta]]
verify_with_ort_with_inputs(model, inputs, use_vm=True)
@tvm.testing.uses_gpu
def test_range():
for t in ["float32", "int32", "int64"]:
verify_range(0, 10, 1, t)
verify_range(2, 8, 2, t)
verify_range(-3, 6, 4, t)
verify_range(-2, -7, -1, t)
@tvm.testing.uses_gpu
def test_squeeze():
in_shape = (1, 3, 1, 3, 1, 1)
out_shape = (3, 3)
y = helper.make_node("Squeeze", ["in"], ["out"], axes=[0, 2, 4, 5])
graph = helper.make_graph(
[y],
"squeeze_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="squeeze_test")
x = np.random.uniform(size=in_shape).astype("float32")
verify_with_ort_with_inputs(model, [x], [out_shape], opset=11)
@tvm.testing.uses_gpu
def test_flatten():
in_shape = (1, 3, 4, 4)
axis = 1
ref_shape = (1, 48)
flatten_node = helper.make_node("Flatten", ["in"], ["out"], axis=axis)
graph = helper.make_graph(
[flatten_node],
"flatten_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="flatten_test")
verify_with_ort(model, [in_shape])
@tvm.testing.uses_gpu
def test_unsqueeze():
in_shape = (3, 3)
axis = (0, 3, 4)
out_shape = (1, 3, 3, 1, 1)
y = helper.make_node("Unsqueeze", ["in"], ["out"], axes=list(axis))
graph = helper.make_graph(
[y],
"squeeze_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="squeeze_test")
verify_with_ort(model, [in_shape], opset=11)
def verify_gather(in_shape, indices, axis, dtype):
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int64")
out_np = np.take(x, indices, axis=axis)
y = helper.make_node("Gather", ["in", "indices"], ["out"], axis=axis)
graph = helper.make_graph(
[y],
"gather_test",
inputs=[
helper.make_tensor_value_info(
"in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)
),
helper.make_tensor_value_info("indices", TensorProto.INT64, list(indices.shape)),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_np.shape)
)
],
)
model = helper.make_model(graph, producer_name="gather_test")
verify_with_ort_with_inputs(model, [x, indices], dtype=dtype)
@tvm.testing.uses_gpu
def test_gather():
verify_gather((4,), [1], 0, "int32")
verify_gather((1, 4), [0], 0, "int32")
verify_gather((4,), [[[1, 0], [0, 1]]], 0, "float32")
verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, "int32")
verify_gather((3, 3, 3), [[[1, 0]]], -1, "int32")
verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, "float32")
@tvm.testing.uses_gpu
def test_dynamic_gather():
dtype = "float32"
in_shape = [2, 2]
indices = 1
axis = 1
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int64")
out_np = np.take(x, indices, axis=axis)
indices = helper.make_node(
"Constant",
inputs=[],
outputs=["indices"],
value=onnx.helper.make_tensor(
name="const_indices",
data_type=onnx.TensorProto.INT64,
dims=[],
vals=[1],
),
)
y = helper.make_node("Gather", ["in", "indices"], ["out"], axis=axis)
graph = helper.make_graph(
[indices, y],
"gather_test",
inputs=[
helper.make_tensor_value_info(
"in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], ["?", "?"]
),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], ["?"] * len(out_np.shape)
)
],
)
model = helper.make_model(graph, producer_name="dynamic_gather_test")
mod, params = relay.frontend.from_onnx(model)
for target, device in tvm.testing.enabled_targets():
ex = relay.create_executor("vm", mod=mod, device=device, target=target)
result = ex.evaluate()(x, **params)
tvm.testing.assert_allclose(out_np, result.asnumpy(), rtol=1e-5, atol=1e-5)
def verify_gatherelements(in_shape, indices, axis):
x = np.random.uniform(size=in_shape).astype("float32")
indices = np.array(indices, dtype="int32")
y = helper.make_node("GatherElements", ["data", "indices"], ["output"], axis=axis)
graph = helper.make_graph(
[y],
"gather_elements_test",
inputs=[
helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
],
outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="gather_elements_test")
verify_with_ort_with_inputs(model, [x, indices])
@tvm.testing.uses_gpu
def test_gatherelements():
verify_gatherelements((4,), [3, 0, 2, 1], 0)
verify_gatherelements((2, 2), [[1, 0], [0, 1]], 0)
verify_gatherelements((2, 2), [[0, 0], [1, 0]], 1)
verify_gatherelements((2, 2), [[1, 0], [0, 1]], 1)
indices = [
[[1, 0, 0], [1, 0, 1], [0, 1, 1]],
[[1, 1, 1], [1, 2, 1], [1, 0, 1]],
[[1, 2, 1], [1, 2, 1], [1, 2, 1]],
]
verify_gatherelements((3, 3, 3), indices, 2)
def verify_scatter(in_shape, indices, axis):
x = np.random.uniform(size=in_shape).astype("float32")
indices = np.array(indices, dtype="int32")
updates = np.random.uniform(size=indices.shape).astype("float32")
y = helper.make_node("ScatterElements", ["data", "indices", "updates"], ["output"], axis=axis)
graph = helper.make_graph(
[y],
"scatter_test",
inputs=[
helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
helper.make_tensor_value_info("updates", TensorProto.FLOAT, list(indices.shape)),
],
outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="scatter_test")
verify_with_ort_with_inputs(model, [x, indices, updates])
@tvm.testing.uses_gpu
def test_scatter():
verify_scatter((4,), [1], 0)
verify_scatter((1, 4), [[0]], 0)
verify_scatter((4,), [2, 3], 0)
verify_scatter((2, 2), [[1, 0], [0, 1]], 1)
verify_scatter((3, 3, 3), [[[-1, -3]]], -1)
verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0)
def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):
if axes:
y = helper.make_node("Slice", ["in"], ["out"], axes=axes, starts=starts, ends=ends)
else:
y = helper.make_node("Slice", ["in"], ["out"], starts=starts, ends=ends)
graph = helper.make_graph(
[y],
"slice_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="slice_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], opset=1)
def _test_slice_iteration_v10(indata, outdata, **attrs):
starts = attrs["starts"]
ends = attrs["ends"]
axes = None if "axes" not in attrs else attrs["axes"]
steps = None if "steps" not in attrs else attrs["steps"]
starts = np.asarray(starts)
ends = np.asarray(ends)
inputs = [
helper.make_tensor_value_info("data", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("starts", TensorProto.INT64, list(starts.shape)),
helper.make_tensor_value_info("ends", TensorProto.INT64, list(ends.shape)),
]
initializer = [
helper.make_tensor("starts", TensorProto.INT64, list(starts.shape), starts),
helper.make_tensor("ends", TensorProto.INT64, list(ends.shape), ends),
]
nodes = []
if "add_noop_to_input_attrs" in attrs:
def add_noop_to_input_attr(attr_name, attr):
output_name = attr_name + "_output"
ref_shape = list(np.array(attr).shape)
ref_shape.insert(0, 1)
ref_shape = tuple(ref_shape)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in_" + attr_name],
value=onnx.helper.make_tensor(
name="const_tensor__1_" + attr_name,
data_type=onnx.TensorProto.INT64,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
in_shape = np.array(attr).shape
in_array = np.array(in_shape)
ref_node2 = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["input_shape_" + attr_name],
value=onnx.helper.make_tensor(
name="const_tensor__2_" + attr_name,
data_type=onnx.TensorProto.INT64,
dims=in_array.shape,
vals=in_array.flatten().astype(int),
),
)
reshape1_node = helper.make_node(
"Reshape", [attr_name, "ref_in_" + attr_name], ["reshape_" + attr_name]
)
reshape2_node = helper.make_node(
"Reshape", ["reshape_" + attr_name, "input_shape_" + attr_name], [output_name]
)
return [ref_node, ref_node2, reshape1_node, reshape2_node]
slice_inputs = []
for attr_name in ["starts", "ends", "axes", "steps"]:
if attr_name not in attrs:
continue
if "add_noop_to_input_attrs" in attrs and attr_name in attrs["add_noop_to_input_attrs"]:
nodes.extend(add_noop_to_input_attr(attr_name, attrs[attr_name]))
slice_inputs.append(attr_name + "_output")
else:
slice_inputs.append(attr_name)
if axes:
axes = np.asarray(axes)
inputs.append(helper.make_tensor_value_info("axes", TensorProto.INT64, list(axes.shape)))
initializer.append(helper.make_tensor("axes", TensorProto.INT64, list(axes.shape), axes))
if steps:
assert axes is not None and len(axes) == len(steps)
steps = np.asarray(steps)
inputs.append(helper.make_tensor_value_info("steps", TensorProto.INT64, list(axes.shape)))
initializer.append(helper.make_tensor("steps", TensorProto.INT64, list(steps.shape), steps))
y = helper.make_node("Slice", ["data", *slice_inputs], ["out"])
nodes.append(y)
graph = helper.make_graph(
nodes,
"slice_test",
inputs=inputs,
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
initializer=initializer,
)
model = helper.make_model(graph, producer_name="slice_test")
verify_with_ort_with_inputs(model, [indata], opset=10, freeze_params=True, use_vm=True)
@tvm.testing.uses_gpu
def test_slice():
x = np.random.randn(20, 10, 5).astype(np.float32)
_test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
_test_slice_iteration_v1(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
_test_slice_iteration_v1(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
_test_slice_iteration_v1(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
_test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
_test_slice_iteration_v10(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
_test_slice_iteration_v10(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
_test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
_test_slice_iteration_v10(
x,
x[0:3, 0:10],
starts=(0, 0),
ends=(3, 10),
axes=(0, 1),
add_noop_to_input_attrs=["starts"],
)
_test_slice_iteration_v10(
x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4), add_noop_to_input_attrs=["ends"]
)
_test_slice_iteration_v10(
x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,), add_noop_to_input_attrs=["axes"]
)
_test_slice_iteration_v10(
x,
x[:, 0:-1],
starts=(0,),
ends=(-1,),
axes=(1,),
add_noop_to_input_attrs=["starts", "ends"],
)
_test_slice_iteration_v10(
x,
x[0:3, 0:10],
starts=(0, 0),
ends=(3, 10),
axes=(0, 1),
add_noop_to_input_attrs=["ends", "axes"],
)
_test_slice_iteration_v10(
x,
x[:, :, 3:4],
starts=(0, 0, 3),
ends=(20, 10, 4),
add_noop_to_input_attrs=["starts", "axes"],
)
_test_slice_iteration_v10(
x,
x[:, 1:1000],
starts=(1,),
ends=(1000,),
axes=(1,),
add_noop_to_input_attrs=["starts", "ends", "axes"],
)
x = np.random.randn(1, 1, 1, 128).astype(np.float32)
_test_slice_iteration_v10(
x, x, starts=(0, 0), ends=(9223372036854775807, 9223372036854775807), axes=(0, 3)
)
x = np.random.randn(4, 4).astype(np.float32)
_test_slice_iteration_v10(
x, x[:, 1::2], starts=(1,), ends=(9223372036854775807,), axes=(1,), steps=(2,)
)
_test_slice_iteration_v10(
x,
x[0::1, 1::2],
starts=(0, 1),
ends=(4, 4),
axes=(0, 1),
steps=(1, 2),
)
def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs, opset=None):
indata = np.random.uniform(-1, 1, size=inshape).astype(dtype)
outdata = outfunc(indata, **npargs)
y = helper.make_node(opname, ["in"], ["out"], **kwargs)
graph = helper.make_graph(
[y],
opname + "_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name=opname + "_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], opset=opset, dtype=dtype)
@tvm.testing.uses_gpu
def test_floor():
_test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, "float32", "Floor", {})
@tvm.testing.uses_gpu
def test_ceil():
_test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, "float32", "Ceil", {})
@tvm.testing.uses_gpu
def test_clip():
_test_onnx_op_elementwise(
(2, 4, 5, 6),
np.clip,
{"a_min": -1.0, "a_max": 1.0},
"float32",
"Clip",
{"min": -1.0, "max": 1.0},
opset=6,
)
_test_onnx_op_elementwise(
(2, 4, 5, 6),
np.clip,
{"a_min": -np.inf, "a_max": 1.0},
"float32",
"Clip",
{"max": 1.0},
opset=6,
)
_test_onnx_op_elementwise(
(2, 4, 5, 6),
np.clip,
{"a_min": -1.0, "a_max": np.inf},
"float32",
"Clip",
{"min": -1.0},
opset=6,
)
@tvm.testing.uses_gpu
def test_clip_min_max_as_inputs():
input_shape = (2, 4, 5, 6)
nodes = [
make_constant_node("min", onnx.TensorProto.FLOAT, (), [0.0]),
make_constant_node("max", onnx.TensorProto.FLOAT, (), [6.0]),
]
input_names = ["in", "min", "max"]
nodes.append(helper.make_node("Clip", inputs=input_names, outputs=["out"]))
graph = helper.make_graph(
nodes,
"clip_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(input_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_shape))],
)
model = helper.make_model(graph, producer_name="clip_test")
verify_with_ort(model, [input_shape], out_shape=[input_shape])
@tvm.testing.uses_gpu
def test_round():
_test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, "float32", "Round", {})
def _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs):
indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype)
outdata = outfunc(indata, **npargs)
y = helper.make_node(opname, ["in"], ["out"], **kwargs)
graph = helper.make_graph(
[y],
opname + "_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name=opname + "_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], dtype=dtype)
@tvm.testing.uses_gpu
def test_isinf():
_test_finite_ops((2, 4, 5, 6), np.isinf, {}, "float32", "IsInf", {})
@tvm.testing.uses_gpu
def test_isnan():
_test_finite_ops((2, 4, 5, 6), np.isnan, {}, "float32", "IsNaN", {})
def verify_gather_nd(in_shape, indices, out_shape, dtype="float32"):
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int64")
y = helper.make_node("GatherND", ["in", "indices"], ["out"])
graph = helper.make_graph(
[y],
"gather_test",
inputs=[
helper.make_tensor_value_info(
"in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)
),
helper.make_tensor_value_info("indices", TensorProto.INT64, list(indices.shape)),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_shape)
)
],
)
model = helper.make_model(graph, producer_name="gather_test")
verify_with_ort_with_inputs(model, [x, indices], [out_shape])
@tvm.testing.uses_gpu
def test_gather_nd():
verify_gather_nd([2, 2], [[0, 0], [1, 1]], [2], "int32")
verify_gather_nd([2, 2], [[1], [0]], [2, 2])
verify_gather_nd([2, 2, 2], [[0, 1], [1, 0]], [2, 2])
verify_gather_nd([2, 2, 2], [[[0, 1]], [[1, 0]]], [2, 1, 2])
@tvm.testing.uses_gpu
def test_onehot():
indices_shape = [10]
indices_array = np.random.randint(low=0, high=9, size=indices_shape, dtype="int32")
depth = 10
values = np.asarray([0, 1]).astype("int32")
out_np = np.eye(depth)[indices_array.reshape(-1)]
onehot_node = helper.make_node("OneHot", ["indices", "depth", "values"], ["out"])
graph = helper.make_graph(
[onehot_node],
"onehot_test",
inputs=[
helper.make_tensor_value_info("indices", TensorProto.INT32, indices_shape),
helper.make_tensor_value_info("depth", TensorProto.INT32, [1]),
helper.make_tensor_value_info("values", TensorProto.INT32, values.shape),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)],
)
model = helper.make_model(graph, producer_name="onehot_test")
# TODO(jwfromm): Replace test against np with test against onnxrt once we update versions.
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(
model, [indices_array, np.array([depth]).astype("int32"), values], target, dev
)
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
def verify_gemm(a_shape, b_shape, c_shape=None, freeze_params=False):
out_shape = [a_shape[0], b_shape[1]]
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
input_names = ["a", "b"]
input_nodes = [
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
]
input_values = [a_array, b_array]
if c_shape is not None:
c_array = np.random.uniform(size=c_shape).astype("float32")
input_names.append("c")
input_nodes.append(helper.make_tensor_value_info("c", TensorProto.FLOAT, list(c_shape)))
input_values.append(c_array)
gemm_node = helper.make_node("Gemm", input_names, ["out"])
graph = helper.make_graph(
[gemm_node],
"gemm_test",
inputs=input_nodes,
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="gemm_test")
verify_with_ort_with_inputs(model, input_values, freeze_params=freeze_params)
@tvm.testing.uses_gpu
def test_gemm():
verify_gemm(a_shape=(4, 3), b_shape=(3, 4))
verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,))
verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,), freeze_params=True)
@tvm.testing.uses_gpu
def test_matmul():
a_shape = (4, 3)
b_shape = (3, 4)
out_shape = [a_shape[0], b_shape[1]]
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
graph = helper.make_graph(
[mul_node],
"matmul_test",
inputs=[
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="matmul_test")
verify_with_ort_with_inputs(model, [a_array, b_array])
def verify_batch_matmul(a_shape, b_shape, out_shape, target, dev):
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
graph = helper.make_graph(
[mul_node],
"matmul_test",
inputs=[
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, out_shape)],
)
model = helper.make_model(graph, producer_name="matmul_test")
verify_with_ort_with_inputs(model, [a_array, b_array], use_vm=True, targets=[target])
# TODO(mbrookhart, electriclilies): Add CUDA as a target once batch matmul is fixed
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul(target, dev):
verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4), (2, 3, 4, 4), target, dev)
verify_batch_matmul((2, 4, 3), (3, 4), (2, 4, 4), target, dev)
verify_batch_matmul((2, 3, 4, 3), (3, 4), (2, 3, 4, 4), target, dev)
# Test implicit broadcasting.
verify_batch_matmul((4, 3), (2, 3, 4), (2, 4, 4), target, dev)
verify_batch_matmul((2, 4, 3), (1, 3, 4), (2, 4, 4), target, dev)
verify_batch_matmul((1, 4, 3), (2, 3, 4), (2, 4, 4), target, dev)
def verify_simple_dynamic_model(a_shape, b_shape, target, dev):
def verify_model(ex, a_shape, b_shape):
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
# matmul
out_np = np.matmul(a_array, b_array)
# relu
out_np[out_np < 0] = 0
tvm_out = ex.evaluate()(a_array, b_array).asnumpy()
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
relu_node = helper.make_node("Relu", ["out"], ["relu"])
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
# matmul
out_np = np.matmul(a_array, b_array)
graph = helper.make_graph(
[mul_node, relu_node],
"matmul_test",
inputs=[
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
],
outputs=[helper.make_tensor_value_info("relu", TensorProto.FLOAT, list(out_np.shape))],
)
model = helper.make_model(graph, producer_name="matmul_test")
a_anys = [relay.Any()] * len(a_shape)
b_anys = [relay.Any()] * len(b_shape)
mod, params = relay.frontend.from_onnx(model, {"a": a_anys, "b": b_anys})
ex = relay.create_executor("vm", mod=mod, device=dev, target=target)
verify_model(ex, a_shape, b_shape)
verify_model(ex, [a * 2 for a in a_shape], [b * 2 for b in b_shape])
verify_model(ex, [a * 3 for a in a_shape], [b * 3 for b in b_shape])
# TODO(mbrookhart, electriclilies): Add CUDA as a target once batch matmul is fixed
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul_dynamic_model(target, dev):
verify_simple_dynamic_model((2, 3, 4, 3), (2, 3, 3, 4), target, dev)
verify_simple_dynamic_model((2, 4, 3), (3, 4), target, dev)
verify_simple_dynamic_model((2, 3, 4, 3), (3, 4), target, dev)
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
in_array = np.random.uniform(size=shape).astype(dtype)
    if alpha is None and beta is None and bias is None:
alpha = 0.0001
beta = 0.75
bias = 1.0
node = onnx.helper.make_node("LRN", inputs=["in"], outputs=["out"], size=nsize)
else:
node = onnx.helper.make_node(
"LRN", inputs=["in"], outputs=["out"], alpha=alpha, beta=beta, bias=bias, size=nsize
)
graph = helper.make_graph(
[node],
"lrn_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))],
)
model = helper.make_model(graph, producer_name="lrn_test")
verify_with_ort_with_inputs(model, [in_array])
@tvm.testing.uses_gpu
def test_lrn():
verify_lrn((5, 5, 5, 5), 3, "float32")
verify_lrn((5, 5, 5, 5), 3, "float32", alpha=0.0002, beta=0.5, bias=2.0)
def verify_instance_norm(shape, axis=1):
x = np.random.randn(*shape).astype(np.float32)
gamma = np.random.randn(shape[1]).astype(np.float32)
beta = np.random.randn(shape[1]).astype(np.float32)
epsilon = 1e-5
node = onnx.helper.make_node(
"InstanceNormalization",
inputs=["x", "gamma", "beta"],
outputs=["y"],
epsilon=epsilon,
)
graph = helper.make_graph(
[node],
"instance_norm_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)),
helper.make_tensor_value_info("gamma", TensorProto.FLOAT, (shape[1],)),
helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))],
)
model = helper.make_model(graph, producer_name="instance_norm_test")
verify_with_ort_with_inputs(model, [x, gamma, beta], out_shape=[shape])
@tvm.testing.uses_gpu
def test_instance_norm():
verify_instance_norm((2, 3, 4, 5))
verify_instance_norm((32, 64, 80, 64))
verify_instance_norm((8, 6, 5))
verify_instance_norm((8, 7, 6, 5, 4))
def verify_upsample_nearest():
scale = 2
in_shape = (1, 1, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale)
y = helper.make_node("Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
graph = helper.make_graph(
[y],
"upsample_nearest_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_nearest_test")
verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)
def verify_upsample3d_nearest():
scale = 2
in_shape = (1, 1, 3, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)
y = helper.make_node(
"Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0, 2.0]
)
in_array = np.random.uniform(size=in_shape).astype(np.float32)
graph = helper.make_graph(
[y],
"upsample_nearest_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_nearest_test")
# Upsample is deprecated after opset 9
verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)
def verify_upsample_bilinear():
scale = 2
in_shape = (1, 1, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale)
y = helper.make_node("Upsample", ["in"], ["out"], mode="linear", scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
graph = helper.make_graph(
[y],
"upsample_bilinear_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_bilinear_test")
verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)
def verify_upsample3d_trilinear():
scale = 2
in_shape = (1, 1, 3, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)
y = helper.make_node("Upsample", ["in", "scales"], ["out"], mode="linear")
scales = [1.0, 1.0, 2.0, 2.0, 2.0]
in_array = np.random.uniform(size=in_shape).astype(np.float32)
out_array = tvm.topi.testing.trilinear_resize3d_python(
in_array,
(3 * scale, 3 * scale, 3 * scale),
"NCDHW",
coordinate_transformation_mode="half_pixel",
)
ref_array = np.array(scales)
ref_node = helper.make_node(
"Constant",
inputs=[],
outputs=["scales"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=ref_array.shape,
vals=ref_array.flatten().astype(float),
),
)
graph = helper.make_graph(
[ref_node, y],
"upsample_trilinear_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_trilinear_test")
# TODO(jwfromm): Trilinear upsampling not supported in 1.0.0 onnxruntime.
# Replace topi comparison with verify_with_ort once we update.
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output(model, in_array, target, dev, out_shape, "float32")
tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsample():
verify_upsample_nearest()
verify_upsample_bilinear()
verify_upsample3d_nearest()
verify_upsample3d_trilinear()
def verify_softmax(inshape, axis):
opname = "Softmax"
indata = np.random.uniform(size=inshape).astype(np.float32)
outshape = inshape
y = helper.make_node(opname, ["in"], ["out"])
if axis is not None:
axis_attr = helper.make_attribute("axis", axis)
y.attribute.append(axis_attr)
graph = helper.make_graph(
[y],
opname + "_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name=opname + "_test")
verify_with_ort_with_inputs(model, [indata])
@tvm.testing.uses_gpu
def test_softmax():
verify_softmax((1, 10), None)
verify_softmax((1, 10), 1)
def verify_min(input_dim):
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
min_node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph(
[min_node],
"Min_test",
inputs=[
helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="Min_test")
verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
@tvm.testing.uses_gpu
def test_forward_min():
verify_min((1, 3, 20, 20))
verify_min((20, 20))
def verify_max(input_dim):
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
max_node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph(
[max_node],
"Max_test",
inputs=[
helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="Max_test")
verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
@tvm.testing.uses_gpu
def test_forward_max():
verify_max((1, 3, 20, 20))
verify_max((20, 20))
def verify_mean(input_dim):
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph(
[mean_node],
"Mean_test",
inputs=[
helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="Mean_test")
verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
@tvm.testing.uses_gpu
def test_forward_mean():
verify_mean((1, 3, 20, 20))
verify_mean((20, 20))
def verify_hardsigmoid(input_dim, alpha, beta):
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], ["out"], alpha=alpha, beta=beta)
graph = helper.make_graph(
[hardsigmoid_node],
"HardSigmoid_test",
inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="HardSigmoid_test")
verify_with_ort_with_inputs(model, [a_np1])
@tvm.testing.uses_gpu
def test_forward_hardsigmoid():
verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
verify_hardsigmoid((20, 20), 0.3, 0.4)
def verify_argreduce(input_dim, op_name, axis=None, keepdims=None):
a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
out_shape = list(a_np1.shape)
def_axis = axis if axis is not None else 0
    if keepdims == 1 or keepdims is None:
out_shape[def_axis] = 1
else:
out_shape.pop(def_axis)
node = onnx.helper.make_node(op_name, inputs=["a_np1"], outputs=["out"])
if keepdims is not None:
keepdims_attr = helper.make_attribute("keepdims", keepdims)
node.attribute.append(keepdims_attr)
if axis is not None:
axis_attr = helper.make_attribute("axis", axis)
node.attribute.append(axis_attr)
graph = helper.make_graph(
[node],
"argreduce_test",
inputs=[helper.make_tensor_value_info("a_np1", TensorProto.INT32, list(a_np1.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.INT64, list(out_shape))],
)
model = helper.make_model(graph, producer_name="argreduce_test")
verify_with_ort_with_inputs(model, [a_np1])
# TODO (mbrookhart, electriclilies) Fix argmin on GPU and enable this test
# @tvm.testing.uses_gpu
def test_forward_arg_min_max():
"""Verify argmin and argmax"""
verify_argreduce([3, 4, 4], "ArgMin")
verify_argreduce([3, 4, 4], "ArgMax")
verify_argreduce([3, 4, 4], "ArgMin", axis=1)
verify_argreduce([3, 4, 4], "ArgMax", axis=0)
verify_argreduce([3, 4, 4], "ArgMin", keepdims=0)
verify_argreduce([3, 4, 4], "ArgMax", keepdims=1)
for axis in [None, 0, 1, 2]:
for keepdims in [None, True, False]:
verify_argreduce([3, 4, 4], "ArgMin", axis, keepdims)
verify_argreduce([3, 4, 4], "ArgMax", axis, keepdims)
def verify_constantofshape(input_dim, value, dtype):
fill_node = helper.make_node(
"ConstantOfShape",
["input"],
["output"],
value=helper.make_tensor(
"value", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], (1,), (value,)
),
)
inputs = [helper.make_tensor_value_info("input", TensorProto.INT64, [len(input_dim)])]
graph = helper.make_graph(
[fill_node],
"fill_test",
inputs,
outputs=[
helper.make_tensor_value_info(
"output", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], input_dim
)
],
)
model = helper.make_model(graph, producer_name="fill_test")
input_np = np.array(input_dim).astype("int64")
verify_with_ort_with_inputs(model, [input_np], use_vm=True)
@tvm.testing.uses_gpu
def test_constantofshape():
verify_constantofshape((2, 3, 4, 5), 10, "float32")
verify_constantofshape((3, 3), 0, "int32")
verify_constantofshape((1, 2, 3), -1, "float32")
def verify_pad(indata, pads, mode="constant", value=0.0):
indata = np.array(indata).astype(np.float32)
    # numpy expected result
len_dim = len(pads) // 2
np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
# onnx graph
if mode in ["edge", "reflect"]:
outdata = np.pad(indata, pad_width=np_pads, mode=mode)
node = helper.make_node(
"Pad",
inputs=["input"],
outputs=["output"],
mode=mode,
pads=pads,
)
else:
outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
node = helper.make_node(
"Pad", inputs=["input"], outputs=["output"], mode="constant", pads=pads, value=value
)
graph = helper.make_graph(
[node],
"pad_test",
inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="pad_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], dtype="float32", opset=2)
def verify_pad_v11(indata, pads, mode="constant", value=0.0):
indata = np.array(indata).astype(np.float32)
    # numpy expected result
len_dim = len(pads) // 2
np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
pads = np.array(pads)
# onnx graph
if mode in ["edge", "reflect"]:
inputs = [indata]
outdata = np.pad(indata, pad_width=np_pads, mode=mode)
node = helper.make_node("Pad", inputs=["input", "pads"], outputs=["output"], mode=mode)
graph = helper.make_graph(
[node],
"pad_test",
inputs=[
helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
],
initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads)],
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
],
)
else:
inputs = [indata]
outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
node = helper.make_node(
"Pad", inputs=["input", "pads", "constant_value"], outputs=["output"], mode="constant"
)
graph = helper.make_graph(
[node],
"pad_test",
inputs=[
helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
helper.make_tensor_value_info("constant_value", TensorProto.FLOAT, (1,)),
],
initializer=[
helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads),
helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value]),
],
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
],
)
model = helper.make_model(graph, producer_name="pad_test")
verify_with_ort_with_inputs(model, inputs, opset=11, use_vm=True)
@tvm.testing.uses_gpu
def test_pad():
verify_pad(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], "constant", 0.0)
verify_pad(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], "constant", 0.0)
verify_pad(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], "constant", 5.0)
verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "edge")
verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "reflect")
verify_pad_v11(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], "constant", 0.0)
verify_pad_v11(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], "constant", 0.0)
verify_pad_v11(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], "constant", 5.0)
verify_pad_v11(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "edge")
verify_pad_v11(
np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "reflect"
)
def verify_reduce_func(func, data, axis, keepdims):
inshape = data.shape
outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape
if axis:
node = onnx.helper.make_node(
func, inputs=["x"], outputs=["y"], axes=axis, keepdims=keepdims
)
else:
node = onnx.helper.make_node(func, inputs=["x"], outputs=["y"], keepdims=keepdims)
graph = helper.make_graph(
[node],
"reduce_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name="reduce_test")
verify_with_ort_with_inputs(model, [data], [outshape], opset=11)
@tvm.testing.uses_gpu
def test_all_reduce_funcs():
funcs = [
"ReduceMax",
"ReduceMean",
"ReduceMin",
"ReduceProd",
"ReduceSum",
"ReduceSumSquare",
"ReduceLogSum",
"ReduceLogSumExp",
"ReduceL1",
"ReduceL2",
]
for func in funcs:
for keepdims in [True, False]:
verify_reduce_func(
func, np.random.randn(3, 2, 2).astype(np.float32), axis=None, keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 2, 3).astype(np.float32), axis=None, keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1, 2), keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1,), keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(1, 3, 4, 1).astype(np.float32), axis=(1,), keepdims=keepdims
)
def verify_split(indata, outdatas, split, axis=0, pass_split=True, opset=11):
indata = np.array(indata).astype(np.float32)
outdatas = [np.array(o).astype(np.float32) for o in outdatas]
inputs = [helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))]
input_names = ["input"]
initializer = []
if split:
split_index = range(len(split))
else:
split_index = range(len(outdatas))
if pass_split:
if opset >= 13:
input_names.append("split")
np_split = np.array(split).astype(np.int64)
inputs.append(
helper.make_tensor_value_info("split", TensorProto.INT64, list(np_split.shape))
)
indata = [indata, np_split]
initializer.append(
helper.make_tensor("split", TensorProto.INT64, list(np_split.shape), np_split)
)
node = helper.make_node(
"Split",
inputs=input_names,
outputs=["output_{}".format(i) for i in range(len(split_index))],
axis=axis,
)
if pass_split and opset < 13:
split_attr = helper.make_attribute("split", split)
node.attribute.append(split_attr)
graph = helper.make_graph(
[node],
"split_test",
inputs=inputs,
initializer=initializer,
outputs=[
helper.make_tensor_value_info(
"output_{}".format(i), TensorProto.FLOAT, list(outdatas[i].shape)
)
for i in range(len(split_index))
],
)
model = helper.make_model(graph, producer_name="split_test")
verify_with_ort_with_inputs(model, indata, out_shape=list(range(len(split_index))), opset=opset)
@tvm.testing.uses_gpu
def test_split():
# 1D
verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0)
verify_split(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0, False
)
verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]], [2, 1, 3], 0)
# 2D
verify_split(
[[1.0, 2.0, 3.0, 4.0], [7.0, 8.0, 9.0, 10.0]],
[[[1.0, 2.0], [7.0, 8.0]], [[3.0, 4.0], [9.0, 10.0]]],
[2, 2],
1,
)
# Split evenly (unstack)
verify_split([1, 2, 3], [[1], [2], [3]], False, 0, False)
# Split a single value to a single value
verify_split([1], [[1]], [1], pass_split=True)
@tvm.testing.uses_gpu
def test_binary_ops():
in_shape = (1, 2, 3, 3)
dtype = "float32"
out_shape = in_shape
def verify_binary_ops(op, x, y, out_type="float32"):
z = helper.make_node(op, ["in1", "in2"], ["out"])
graph = helper.make_graph(
[z],
"_test",
inputs=[
helper.make_tensor_value_info("in1", TensorProto.FLOAT, x.shape),
helper.make_tensor_value_info("in2", TensorProto.FLOAT, y.shape),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_type)], list(out_shape)
)
],
)
model = helper.make_model(graph, producer_name="_test")
verify_with_ort_with_inputs(model, [x, y])
x = np.random.uniform(size=in_shape).astype(dtype)
y = np.random.uniform(size=in_shape).astype(dtype)
z = np.random.uniform(size=(3,)).astype(dtype)
verify_binary_ops("Add", x, y)
verify_binary_ops("Add", x, z)
verify_binary_ops("Sub", x, y)
verify_binary_ops("Sub", x, z)
verify_binary_ops("Mul", x, y)
verify_binary_ops("Mul", x, z)
verify_binary_ops("Div", x, y)
verify_binary_ops("Div", x, z)
verify_binary_ops("Sum", x, y)
verify_binary_ops("Sum", x, z)
verify_binary_ops("Greater", x, y, "bool")
verify_binary_ops("Greater", x, z, "bool")
verify_binary_ops("Less", x, y, "bool")
verify_binary_ops("Less", x, z, "bool")
verify_binary_ops("Equal", x, y, "bool")
verify_binary_ops("Equal", x, z, "bool")
@tvm.testing.uses_gpu
def test_unary_ops():
in_shape = (1, 2, 3, 3)
dtype = "float32"
out_shape = in_shape
def verify_unary_ops(op, x, rtol=1e-5, atol=1e-5, dtype="float32"):
x = x.astype(dtype)
ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
z = helper.make_node(op, ["in1"], ["out"])
graph = helper.make_graph(
[z],
"_test",
inputs=[
helper.make_tensor_value_info("in1", ONNX_DTYPE, list(in_shape)),
],
outputs=[helper.make_tensor_value_info("out", ONNX_DTYPE, list(out_shape))],
)
model = helper.make_model(graph, producer_name="_test")
verify_with_ort_with_inputs(model, [x], rtol=rtol, atol=atol)
x = np.random.uniform(size=in_shape)
verify_unary_ops("Neg", x)
verify_unary_ops("Abs", x)
verify_unary_ops("Reciprocal", x)
verify_unary_ops("Reciprocal", x, dtype="float16")
verify_unary_ops("Sqrt", x)
verify_unary_ops("Relu", x)
verify_unary_ops("Exp", x)
verify_unary_ops("Log", x)
verify_unary_ops("Log", x)
verify_unary_ops("Acos", x)
verify_unary_ops("Acosh", x)
verify_unary_ops("Asin", x)
verify_unary_ops("Asinh", x)
verify_unary_ops("Atan", x)
verify_unary_ops("Atanh", x)
verify_unary_ops("Cos", x)
verify_unary_ops("Cosh", x)
verify_unary_ops("Sin", x)
verify_unary_ops("Sinh", x)
verify_unary_ops("Tan", x)
verify_unary_ops("Tanh", x)
verify_unary_ops("Sigmoid", x)
verify_unary_ops("Softsign", x)
@tvm.testing.uses_gpu
def test_leaky_relu():
def leaky_relu_x(x, alpha):
return np.where(x >= 0, x, x * alpha)
_test_onnx_op_elementwise(
(2, 4, 5, 6), leaky_relu_x, {"alpha": 0.25}, "float32", "LeakyRelu", {"alpha": 0.25}
)
@tvm.testing.uses_gpu
def test_elu():
def elu_x(x, alpha):
return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
_test_onnx_op_elementwise(
(2, 4, 5, 6), elu_x, {"alpha": 0.25}, "float32", "Elu", {"alpha": 0.25}
)
@tvm.testing.uses_gpu
def test_selu():
def selu_x(x, alpha, gamma):
return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
_test_onnx_op_elementwise(
(2, 4, 5, 6),
selu_x,
{"alpha": 0.25, "gamma": 0.3},
"float32",
"Selu",
{"alpha": 0.25, "gamma": 0.3},
)
@tvm.testing.uses_gpu
def test_prelu():
def verify_prelu(x_shape, a_shape):
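        """Build a PRelu node with data shape `x_shape` and slope shape `a_shape`
        and check the result against onnxruntime."""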
node = helper.make_node("PRelu", inputs=["X", "slope"], outputs=["Y"])
graph = helper.make_graph(
[node],
"prelu_test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("slope", TensorProto.FLOAT, list(a_shape)),
],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(x_shape))],
)
model = helper.make_model(graph, producer_name="prelu_test")
verify_with_ort(
model,
[x_shape, a_shape],
out_shape=[list(x_shape)],
use_vm=True,
convert_to_static=True,
)
verify_prelu([3, 4, 5, 6], [1, 4, 1, 1])
verify_prelu([1, 8, 5, 6], [1, 8, 1, 1])
verify_prelu([2, 12, 16, 16], [1, 12, 1, 1])
verify_prelu([2, 12, 16, 16], [1]) # Test alpha broadcasting.
verify_prelu([3, 1], [3, 1]) # Test non NCHW workload.
@tvm.testing.uses_gpu
def test_ThresholdedRelu():
def ThresholdedRelu_x(x, alpha):
out_np = np.clip(x, alpha, np.inf)
out_np[out_np == alpha] = 0
return out_np
_test_onnx_op_elementwise(
(2, 4, 5, 6),
ThresholdedRelu_x,
{"alpha": 0.25},
"float32",
"ThresholdedRelu",
{"alpha": 0.25},
)
@tvm.testing.uses_gpu
def test_LogSoftmax():
_test_onnx_op_elementwise(
(1, 4), tvm.topi.testing.log_softmax_python, {}, "float32", "LogSoftmax", {"axis": 1}
)
def check_torch_conversion(model, input_size):
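    """Export a torchvision model to ONNX on a random input of `input_size`,
    re-import the exported file, and compare TVM's result against onnxruntime."""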
dummy_input = torch.randn(*input_size)
file_name = "{}.onnx".format(model.__name__)
# Set verbose=True for more output
torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False)
onnx_model = onnx.load(file_name)
input_data = np.random.uniform(size=input_size).astype("float32")
verify_with_ort_with_inputs(onnx_model, [input_data], apply_softmax=True)
@tvm.testing.uses_gpu
def test_resnet():
check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224))
# check_torch_conversion(torchvision.models.resnet101, (1,3,224,224))
# def test_alexnet():
# Torch's ONNX export does not support the adaptive pooling used by AlexNet?
# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224))
# Torch's ONNX export does not support the adaptive pooling used by vgg16?
# def test_vgg16():
# check_torch_conversion(torchvision.models.vgg16, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_squeezenet():
# # Torch's ONNX export does not support the max pooling used by SqueezeNet
# check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224))
@tvm.testing.uses_gpu
def test_densenet():
check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224))
@tvm.testing.uses_gpu
def test_inception():
check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_googlenet():
# check_torch_conversion(torchvision.models.googlenet, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_shufflenetv2():
# check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224))
@tvm.testing.uses_gpu
def test_sign():
def Sign_x(x):
return np.sign(x)
_test_onnx_op_elementwise((3, 4, 5, 6), Sign_x, {}, "float32", "Sign", {})
def verify_not(indata, dtype):
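    """Run the Not operator on a boolean tensor and compare against onnxruntime."""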
x = indata.astype(dtype)
node = helper.make_node(
"Not",
inputs=["in"],
outputs=["out"],
)
graph = helper.make_graph(
[node],
"not_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.BOOL, list(x.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(x.shape))],
)
model = helper.make_model(graph, producer_name="not_test")
verify_with_ort_with_inputs(model, [x])
@tvm.testing.uses_gpu
def test_not():
# 2d
verify_not(indata=(np.random.randn(3, 4) > 0), dtype=bool)
# 3d
verify_not(indata=(np.random.randn(3, 4, 5) > 0), dtype=bool)
# 4d
verify_not(indata=(np.random.randn(3, 4, 5, 6) > 0), dtype=bool)
def verify_and(indata, dtype):
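    """Run the And operator, including broadcast cases, and compare against onnxruntime."""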
x = indata[0].astype(dtype)
y = indata[1].astype(dtype)
outdata = np.logical_and(x, y)
node = helper.make_node(
"And",
inputs=["in1", "in2"],
outputs=["out"],
)
graph = helper.make_graph(
[node],
"and_test",
inputs=[
helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="and_test")
verify_with_ort_with_inputs(model, [x, y], [outdata.shape])
@tvm.testing.uses_gpu
def test_and():
# 2d
x = np.random.randn(3, 4) > 0
y = np.random.randn(3, 4) > 0
verify_and(indata=[x, y], dtype=bool)
# 3d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(3, 4, 5) > 0
verify_and(indata=[x, y], dtype=bool)
# 4d
x = np.random.randn(3, 4, 5, 6) > 0
y = np.random.randn(3, 4, 5, 6) > 0
verify_and(indata=[x, y], dtype=bool)
# 3d vs 1d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(5) > 0
verify_and(indata=[x, y], dtype=bool)
# 3d vs 2d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(4, 5) > 0
verify_and(indata=[x, y], dtype=bool)
def verify_tile_v6(indata, repeats, outdata):
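    """Run Tile with `repeats` supplied as a graph input (opset 6) and compare
    against onnxruntime."""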
node = helper.make_node("Tile", inputs=["input", "repeats"], outputs=["out"])
graph = helper.make_graph(
[node],
"tile_test",
inputs=[
helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("repeats", TensorProto.INT64, list(repeats.shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="tile_test")
verify_with_ort_with_inputs(model, [indata, repeats], use_vm=True, opset=6)
@tvm.testing.uses_gpu
def test_tile():
x = np.random.rand(2, 3, 4, 5).astype(np.float32)
repeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)
z = np.tile(x, repeats)
verify_tile_v6(x, repeats, z)
def verify_erf(indata, outdata):
node = helper.make_node("Erf", inputs=["in"], outputs=["out"])
graph = helper.make_graph(
[node],
"erf_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="erf_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape])
@tvm.testing.uses_gpu
def test_erf():
x = np.random.rand(2, 3, 4, 6).astype(np.float32)
z = scipy.special.erf(x)
verify_erf(x, z)
def verify_where(condition, x, y, dtype, outdata, dynamic=False):
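    """Run Where on the given inputs; with dynamic=True, `x` is routed through
    Shape/Reshape so the importer must handle a dynamically shaped operand."""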
node_list = []
where_inputs = ["condition", "x", "y"]
if dynamic:
shape_node = helper.make_node("Shape", ["x"], ["shape"])
reshape_node = helper.make_node("Reshape", ["x", "shape"], ["X"])
where_inputs[1] = "X"
node_list += [shape_node, reshape_node]
node = helper.make_node("Where", inputs=where_inputs, outputs=["out"])
node_list.append(node)
graph = helper.make_graph(
node_list,
"where_test",
inputs=[
helper.make_tensor_value_info("condition", TensorProto.BOOL, list(condition.shape)),
helper.make_tensor_value_info("x", dtype, list(x.shape)),
helper.make_tensor_value_info("y", dtype, list(y.shape)),
],
outputs=[helper.make_tensor_value_info("out", dtype, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="where_test")
verify_with_ort_with_inputs(model, [condition, x, y], [outdata.shape], use_vm=True)
@tvm.testing.uses_gpu
def test_where():
    condition = np.array([[1, 0], [1, 1]], dtype=bool)
x = np.array([[1, 2], [3, 4]], dtype=np.int64)
y = np.array([[9, 8], [7, 6]], dtype=np.int64)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.INT64, outdata)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[9, 8], [7, 6]], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
x = np.array(1, dtype=np.float32)
y = np.array([2], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
x = np.array([2], dtype=np.float32)
y = np.array(1, dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
    condition = np.array(1, dtype=bool)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[5, 6], [7, 8]], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[1], [7]], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
verify_where(condition, x, y, TensorProto.FLOAT, outdata, dynamic=True)
def verify_or(indata, dtype):
x = indata[0].astype(dtype)
y = indata[1].astype(dtype)
outdata = np.logical_or(x, y)
node = helper.make_node(
"Or",
inputs=["in1", "in2"],
outputs=["out"],
)
graph = helper.make_graph(
[node],
"or_test",
inputs=[
helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="or_test")
verify_with_ort_with_inputs(model, [x, y], [outdata.shape])
@tvm.testing.uses_gpu
def test_or():
# 2d
x = np.random.randn(3, 4) > 0
y = np.random.randn(3, 4) > 0
verify_or(indata=[x, y], dtype=bool)
# 3d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(3, 4, 5) > 0
verify_or(indata=[x, y], dtype=bool)
# 4d
x = np.random.randn(3, 4, 5, 6) > 0
y = np.random.randn(3, 4, 5, 6) > 0
verify_or(indata=[x, y], dtype=bool)
# 3d vs 1d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(5) > 0
verify_or(indata=[x, y], dtype=bool)
# 3d vs 2d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(4, 5) > 0
verify_or(indata=[x, y], dtype=bool)
@tvm.testing.uses_gpu
def test_batch_norm():
def verify_batch_norm(in_shape):
batchnorm = onnx.helper.make_node(
"BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
)
graph = helper.make_graph(
[batchnorm],
"batchnorm_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("var", TensorProto.FLOAT, [in_shape[1]]),
],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="batchnorm_test")
# X, scale, b, mean, var
inshapes = [in_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]
verify_with_ort(model, inshapes, out_shape=[in_shape])
verify_batch_norm([1, 3, 224, 224])
verify_batch_norm([1, 3, 24, 24])
verify_batch_norm([16, 3, 24, 24])
verify_batch_norm([16, 16, 24, 24])
verify_batch_norm([16, 16, 10, 10])
@tvm.testing.uses_gpu
def test_batch_norm_dynamic_subgraph():
def verify_batch_norm_dynamic_subgraph(in_shape, o_shape):
batchnorm = onnx.helper.make_node(
"BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
)
shape_node = helper.make_node("Shape", ["Y"], ["shape"])
reshape_node = helper.make_node("Reshape", ["in", "shape"], ["out"])
graph = helper.make_graph(
[batchnorm, shape_node, reshape_node],
"batchnorm_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("in", TensorProto.FLOAT, list(o_shape)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("var", TensorProto.FLOAT, [in_shape[1]]),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="batchnorm_test")
# X, inp, scale, b, mean, var
inshapes = [in_shape, o_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]
verify_with_ort(model, inshapes, out_shape=[in_shape], use_vm=True)
verify_batch_norm_dynamic_subgraph([16, 16, 10, 10], [160, 160])
def verify_conv(
x_shape,
w_shape,
y_shape,
padding,
kernel_shape,
strides,
dilations,
auto_pad="NOTSET",
unset_pad=False,
):
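    """Build a Conv node with the requested padding/stride/dilation configuration
    (explicit pads, auto_pad, or no pad attribute at all) and verify it against
    onnxruntime."""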
if unset_pad:
node = helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=kernel_shape,
# Default values for other attributes:
strides=strides,
dilations=dilations,
# groups=1
)
elif padding is None:
## autopadding with unset default attributes
kwargs = {}
if not all([s == 1 for s in strides]):
kwargs["strides"] = strides
if not all([d == 1 for d in dilations]):
kwargs["dilations"] = dilations
node = helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
# Default values for other attributes:
auto_pad=auto_pad,
**kwargs,
)
else:
node = helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=kernel_shape,
# Default values for other attributes:
strides=strides,
dilations=dilations,
# groups=1
pads=padding,
)
graph = helper.make_graph(
[node],
"conv_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
)
model = helper.make_model(graph, producer_name="conv_test")
verify_with_ort(model, [x_shape, w_shape], [y_shape], use_vm=True, convert_to_static=True)
@tvm.testing.uses_gpu
def test_conv():
def repeat(N, D):
return tuple([N for _ in range(D)])
for D in [1, 2, 3]:
# Convolution with padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
2 * repeat(1, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
        # Convolution with asymmetric padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(4, D),
repeat(0, D) + repeat(1, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution without padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution with autopadding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with valid autopadding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="VALID",
)
# Convolution with unset padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
True,
)
        # Convolution with non-uniform stride
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
None,
repeat(3, D),
repeat(2, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with dilation
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
2 * repeat(2, D),
repeat(3, D),
repeat(1, D),
repeat(2, D),
)
def verify_convtranspose_with_padding(
x_shape,
w_shape,
y_shape,
padding,
kernel_shape,
strides,
dilations,
auto_pad="NOTSET",
unset_pad=False,
group=1,
):
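    """Build a ConvTranspose node with the requested padding/stride/dilation/group
    configuration and verify it against onnxruntime."""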
node = helper.make_node(
"ConvTranspose",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=kernel_shape,
# Default values for other attributes:
strides=strides,
dilations=dilations,
)
if not unset_pad:
if padding is None:
pad_attr = helper.make_attribute("auto_pad", auto_pad)
else:
pad_attr = helper.make_attribute("pads", padding)
node.attribute.append(pad_attr)
if group is not None:
group_attr = helper.make_attribute("group", group)
node.attribute.append(group_attr)
graph = helper.make_graph(
[node],
"convtranspose_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
)
model = helper.make_model(graph, producer_name="convtranspose_pad_test")
verify_with_ort(model, [x_shape, w_shape], [y_shape], use_vm=True, convert_to_static=True)
def verify_convtranspose(x_shape, w_shape, y_shape, p, group=1):
node = onnx.helper.make_node(
"ConvTranspose",
inputs=["x", "W"],
outputs=["y"],
strides=[3, 2],
kernel_shape=[3, 3],
pads=p,
)
if group is not None:
group_attr = helper.make_attribute("group", group)
node.attribute.append(group_attr)
graph = helper.make_graph(
[node],
"verify_convtranspose_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
)
model = helper.make_model(graph, producer_name="convtranspose_test")
verify_with_ort(model, [x_shape, w_shape], y_shape)
@tvm.testing.uses_gpu
def test_convtranspose():
# Convolution Transpose with padding
# (1, 1, 3, 3) input tensor
# (1, 2, 3, 3) tensor for convolution weights
# (1, 2, 7, 3) output tensor
# [1, 2, 1, 2] list for pads
verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2])
# Test undefined groups.
verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2], group=None)
def repeat(N, D):
return tuple([N for _ in range(D)])
# TODO(mbrookhart): onnxruntime in CI only supports 2D,
# find something else to test 1D and 3D against
for D in [2]:
# Convolution with padding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
2 * repeat(1, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution without padding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(7, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution with autopadding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with valid autopadding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(7, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="VALID",
)
# Convolution with unset padding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(7, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
True,
)
        # Convolution with non-uniform stride
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(9, D),
None,
repeat(3, D),
repeat(2, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with dilation
# TODO(mbrookhart): Relay doesn't currently support convtranspose with dilation
# verify_convtranspose_with_padding(
# (1, 1) + repeat(5, D),
# (1, 1) + repeat(3, D),
# (1, 1) + repeat(5, D),
# 2 * repeat(2, D),
# repeat(3, D),
# repeat(1, D),
# repeat(2, D),
# )
@tvm.testing.uses_gpu
def test_unsqueeze_constant():
from torch.nn import Linear, Sequential, Module
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
import tempfile
with tempfile.NamedTemporaryFile() as fp:
file_name = fp.name
input_size = (1, 16, 32, 32)
dummy_input = torch.randn(*input_size)
layer = Sequential(Flatten(), Linear(16 * 32 * 32, 64))
torch.onnx.export(layer, dummy_input, file_name, export_params=True)
onnx_model = onnx.load(file_name)
relay.frontend.from_onnx(onnx_model, {"0": input_size})
def verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_pad="NOTSET"):
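    """Build a MaxPool or AveragePool node with either explicit `pads` or `auto_pad`
    and compare against onnxruntime."""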
x_np = np.random.uniform(size=x_shape).astype("float32")
if mode == "max":
node_type = "MaxPool"
elif mode == "average":
node_type = "AveragePool"
else:
raise ValueError("Pool method {} is not supported.".format(mode))
pool_node = helper.make_node(
node_type, inputs=["x"], outputs=["y"], kernel_shape=kernel_shape, strides=strides
)
if pads is None:
pad_attr = helper.make_attribute("auto_pad", auto_pad)
else:
pad_attr = helper.make_attribute("pads", pads)
pool_node.attribute.append(pad_attr)
if mode == "max":
storage_attr = helper.make_attribute("storage_order", 0)
pool_node.attribute.append(storage_attr)
graph = helper.make_graph(
[pool_node],
"pooling_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="pooling_test")
verify_with_ort(model, [x_shape], [out_shape], use_vm=False, convert_to_static=True)
@tvm.testing.uses_gpu
def test_pooling():
for mode in ["max", "average"]:
# Pool1D
verify_pooling(
x_shape=[1, 1, 32],
kernel_shape=[3],
strides=[1],
pads=[1, 1],
out_shape=[1, 1, 32],
mode=mode,
)
# Pool2D
verify_pooling(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
strides=[1, 1],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 32, 32],
mode=mode,
)
# Pool1D with stride
verify_pooling(
x_shape=[1, 1, 32],
kernel_shape=[3],
strides=[2],
pads=[1, 1],
out_shape=[1, 1, 16],
mode=mode,
)
# Pool2D with stride
verify_pooling(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
strides=[2, 2],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 16, 16],
mode=mode,
)
# Pool1D with stride and autopadding
verify_pooling(
x_shape=[1, 1, 32],
kernel_shape=[3],
strides=[2],
pads=None,
out_shape=[1, 1, 16],
mode=mode,
auto_pad="SAME_UPPER",
)
# Pool2D with stride and autopadding
verify_pooling(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
strides=[2, 2],
pads=None,
out_shape=[1, 1, 16, 16],
mode=mode,
auto_pad="SAME_UPPER",
)
# Pool3D with stride
verify_pooling(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
strides=[2, 2, 2],
pads=[1, 1, 1, 1, 1, 1],
out_shape=[1, 1, 16, 16, 16],
mode=mode,
)
# Pool3D with stride and autopadding
verify_pooling(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
strides=[2, 2, 2],
pads=None,
out_shape=[1, 1, 16, 16, 16],
mode=mode,
auto_pad="SAME_UPPER",
)
def verify_global_pooling(x_shape, mode):
out_shape = x_shape[:2] + [1] * (len(x_shape) - 2)
if mode == "max":
node_type = "GlobalMaxPool"
elif mode == "average":
node_type = "GlobalAveragePool"
else:
raise ValueError("Pool method {} is not supported.".format(mode))
pool_node = helper.make_node(node_type, inputs=["x"], outputs=["y"])
graph = helper.make_graph(
[pool_node],
"global_pooling_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="global_pooling_test")
verify_with_ort(model, [x_shape], [out_shape], use_vm=False, convert_to_static=True)
@tvm.testing.uses_gpu
def test_global_pooling():
# Test each pooling mode across all N-D inputs.
for mode in ["average", "max"]:
# 1D Pooling (NCW)
verify_global_pooling([1, 8, 8], mode)
verify_global_pooling([4, 1, 4], mode)
# 2D Pooling (NCHW)
verify_global_pooling([1, 8, 8, 8], mode)
verify_global_pooling([4, 1, 6, 4], mode)
# 3D Pooling (NCDHW)
verify_global_pooling([1, 8, 6, 8, 8], mode)
verify_global_pooling([4, 1, 2, 6, 4], mode)
def verify_mod(x_shape, y_shape, fmod, out_shape, dtype="float32"):
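    """Run Mod with the given shapes; `fmod=1` selects C-style fmod semantics.
    Zeros in the divisor are replaced to avoid division by zero."""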
x_np = np.random.uniform(-100.0, 100.0, x_shape).astype(dtype)
y_np = np.random.uniform(-100.0, 100.0, y_shape).astype(dtype)
y_np = np.where(y_np == 0, 1, y_np) # remove 0's to avoid division by zero error
mod_node = helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=fmod)
onnx_dtype = TensorProto.FLOAT if dtype == "float32" else TensorProto.INT32
graph = helper.make_graph(
[mod_node],
"mod_test",
inputs=[
helper.make_tensor_value_info("x", onnx_dtype, list(x_shape)),
helper.make_tensor_value_info("y", onnx_dtype, list(y_shape)),
],
outputs=[helper.make_tensor_value_info("z", onnx_dtype, list(out_shape))],
)
model = helper.make_model(graph, producer_name="mod_test")
verify_with_ort_with_inputs(model, [x_np, y_np], [out_shape])
@tvm.testing.uses_gpu
def test_mod():
# Mod
verify_mod(
x_shape=[1, 32, 32], y_shape=[1, 1, 32], fmod=0, out_shape=(1, 32, 32), dtype="int32"
)
verify_mod(
x_shape=[1, 32, 32, 32],
y_shape=[1, 32, 32, 32],
fmod=0,
out_shape=(1, 32, 32, 32),
dtype="int32",
)
# fmod
verify_mod(
x_shape=[1, 32, 32], y_shape=[1, 32, 32], fmod=1, out_shape=(1, 32, 32), dtype="int32"
)
verify_mod(x_shape=[1, 1, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 1, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
verify_mod(
x_shape=[1, 32, 32, 32],
y_shape=[1, 32, 32, 32],
fmod=1,
out_shape=(1, 32, 32, 32),
dtype="int32",
)
verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
def verify_xor(x_shape, y_shape):
x_np = np.random.choice(a=[False, True], size=x_shape).astype("bool")
y_np = np.random.choice(a=[False, True], size=y_shape).astype("bool")
np_out = np.logical_xor(x_np, y_np)
out_shape = np_out.shape
xor_node = helper.make_node("Xor", inputs=["x", "y"], outputs=["z"])
onnx_dtype = TensorProto.BOOL
graph = helper.make_graph(
[xor_node],
"xor_test",
inputs=[
helper.make_tensor_value_info("x", onnx_dtype, list(x_shape)),
helper.make_tensor_value_info("y", onnx_dtype, list(y_shape)),
],
outputs=[helper.make_tensor_value_info("z", onnx_dtype, list(out_shape))],
)
model = helper.make_model(graph, producer_name="xor_test")
verify_with_ort_with_inputs(model, [x_np, y_np], [out_shape])
@tvm.testing.uses_gpu
def test_xor():
# XOR
verify_xor(x_shape=[1, 32, 32], y_shape=[1, 32, 32])
# Xor broadcast
verify_xor(x_shape=[1, 32, 32], y_shape=[1, 1, 32])
def verify_max_roi_pool(x_shape, rois_shape, pooled_shape, spatial_scale, out_shape):
if spatial_scale is None:
pool_node = helper.make_node(
"MaxRoiPool", inputs=["x", "rois"], outputs=["y"], pooled_shape=pooled_shape
)
else:
pool_node = helper.make_node(
"MaxRoiPool",
inputs=["x", "rois"],
outputs=["y"],
pooled_shape=pooled_shape,
spatial_scale=spatial_scale,
)
graph = helper.make_graph(
[pool_node],
"pool_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("rois", TensorProto.FLOAT, list(rois_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="pool_test")
verify_with_ort(model, [x_shape, rois_shape], [out_shape])
@tvm.testing.uses_gpu
def test_max_roi_pool():
verify_max_roi_pool(
x_shape=[1, 3, 6, 6],
rois_shape=[3, 5],
pooled_shape=[1, 1],
spatial_scale=None,
out_shape=[3, 3, 1, 1],
)
verify_max_roi_pool(
x_shape=[1, 3, 10, 10],
rois_shape=[4, 5],
pooled_shape=[2, 2],
spatial_scale=2.0,
out_shape=[4, 3, 2, 2],
)
def verify_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad="NOTSET"):
if pads is None:
pool_node = helper.make_node(
"LpPool",
inputs=["x"],
outputs=["y"],
kernel_shape=kernel_shape,
p=p,
auto_pad=auto_pad,
strides=strides,
)
else:
pool_node = helper.make_node(
"LpPool",
inputs=["x"],
outputs=["y"],
kernel_shape=kernel_shape,
p=p,
pads=pads,
strides=strides,
)
graph = helper.make_graph(
[pool_node],
"lppool_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="lppool_test")
verify_with_ort(model, [x_shape], [out_shape], use_vm=True, convert_to_static=True)
@tvm.testing.uses_gpu
def test_lppool():
# Pool1D
verify_lppool(
x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[1], pads=[1, 1], out_shape=[1, 1, 32]
)
# Pool2D
verify_lppool(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
p=2,
strides=[1, 1],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 32, 32],
)
# Pool1D with stride
verify_lppool(
x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[2], pads=[1, 1], out_shape=[1, 1, 16]
)
# Pool2D with stride
verify_lppool(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
p=2,
strides=[2, 2],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 16, 16],
)
# Pool1D with stride and autopadding
verify_lppool(
x_shape=[1, 1, 32],
kernel_shape=[3],
p=2,
strides=[2],
pads=None,
out_shape=[1, 1, 16],
auto_pad="SAME_UPPER",
)
# Pool2D with stride and autopadding
verify_lppool(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
p=2,
strides=[2, 2],
pads=None,
out_shape=[1, 1, 16, 16],
auto_pad="SAME_UPPER",
)
# Pool3D with stride
verify_lppool(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
p=2,
strides=[2, 2, 2],
pads=[1, 1, 1, 1, 1, 1],
out_shape=[1, 1, 16, 16, 16],
)
# Pool3D with stride and autopadding
verify_lppool(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
p=2,
strides=[2, 2, 2],
pads=None,
out_shape=[1, 1, 16, 16, 16],
auto_pad="SAME_UPPER",
)
def verify_rnn(
seq_length,
batch_size,
input_size,
hidden_size,
rnn_type="LSTM",
use_bias=False,
activations=None,
alphas=None,
betas=None,
use_initial_state=False,
use_peep=False,
linear_before_reset=False,
):
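    """Build a single-layer LSTM or GRU node, optionally with bias, initial state,
    peepholes, and custom activations, and compare TVM against onnxruntime with
    relaxed tolerances."""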
if rnn_type == "LSTM":
multiplier = 4
elif rnn_type == "GRU":
multiplier = 3
else:
raise NotImplementedError("%s RNNs not yet supported." % rnn_type)
x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype("float32")
w_np = np.random.uniform(size=(1, multiplier * hidden_size, input_size)).astype("float32")
r_np = np.random.uniform(size=(1, multiplier * hidden_size, hidden_size)).astype("float32")
input_names = ["X", "W", "R"]
input_tensors = [
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_np.shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_np.shape)),
helper.make_tensor_value_info("R", TensorProto.FLOAT, list(r_np.shape)),
]
input_values = [x_np, w_np, r_np]
if use_bias:
b_np = np.random.uniform(size=(1, multiplier * 2 * hidden_size)).astype("float32")
input_names.append("B")
input_tensors.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, [1, multiplier * 2 * hidden_size])
)
input_values.append(b_np)
if use_initial_state:
        assert use_bias, "Initial states must have bias specified."
sequence_np = np.repeat(seq_length, batch_size).astype("int32")
input_names.append("sequence_lens")
input_tensors.append(
helper.make_tensor_value_info("sequence_lens", TensorProto.INT32, [batch_size])
)
input_values.append(sequence_np)
initial_h_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype("float32")
input_names.append("initial_h")
input_tensors.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_h_np)
if rnn_type == "LSTM":
initial_c_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype("float32")
input_names.append("initial_c")
input_tensors.append(
helper.make_tensor_value_info(
"initial_c", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_c_np)
if use_peep and rnn_type == "LSTM":
        assert use_initial_state, "Peepholes require initial state to be specified."
p_np = np.random.uniform(size=(1, 3 * hidden_size)).astype("float32")
input_names.append("P")
input_tensors.append(
helper.make_tensor_value_info("P", TensorProto.FLOAT, [1, 3 * hidden_size])
)
input_values.append(p_np)
Y_shape = [seq_length, 1, batch_size, hidden_size]
Y_h_shape = [1, batch_size, hidden_size]
outputs = ["Y", "Y_h"]
graph_outputs = [
helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(Y_shape)),
helper.make_tensor_value_info("Y_h", TensorProto.FLOAT, list(Y_h_shape)),
]
output_shapes = [Y_shape, Y_h_shape]
if rnn_type == "LSTM":
Y_c_shape = [1, batch_size, hidden_size]
outputs.append("Y_c")
graph_outputs.append(
helper.make_tensor_value_info("Y_c", TensorProto.FLOAT, list(Y_c_shape))
)
output_shapes.append(Y_c_shape)
rnn_node = helper.make_node(
rnn_type, inputs=input_names, outputs=outputs, hidden_size=hidden_size
)
if activations is not None:
activations_attr = helper.make_attribute("activations", activations)
rnn_node.attribute.append(activations_attr)
if alphas is not None:
alphas_attr = helper.make_attribute("activation_alpha", alphas)
rnn_node.attribute.append(alphas_attr)
if betas is not None:
betas_attr = helper.make_attribute("activation_beta", betas)
rnn_node.attribute.append(betas_attr)
if linear_before_reset and rnn_type == "GRU":
lbr_attr = helper.make_attribute("linear_before_reset", 1)
rnn_node.attribute.append(lbr_attr)
graph = helper.make_graph([rnn_node], "rnn_test", inputs=input_tensors, outputs=graph_outputs)
model = helper.make_model(graph, producer_name="rnn_test")
verify_with_ort_with_inputs(model, input_values, output_shapes, atol=1e-2, rtol=1e-2)
@tvm.testing.uses_gpu
def test_lstm():
# No bias.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False, rnn_type="LSTM"
)
    # Large batch.
verify_rnn(
seq_length=4, batch_size=8, input_size=16, hidden_size=32, use_bias=True, rnn_type="LSTM"
)
# Non power of two.
verify_rnn(
seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True, rnn_type="LSTM"
)
# Long sequence.
verify_rnn(
seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True, rnn_type="LSTM"
)
# Large hidden.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True, rnn_type="LSTM"
)
# Large input.
verify_rnn(
seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True, rnn_type="LSTM"
)
# Different activation testing.
# Default value hardsigmoid.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "Tanh", "Tanh"],
rnn_type="LSTM",
)
# Multiple parameterized activations.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "LeakyRelu", "Tanh"],
alphas=[2.0, 0.5],
betas=[0.3],
rnn_type="LSTM",
)
# All parameterized with new Affine activation.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "LeakyRelu", "Affine"],
alphas=[2.0, 0.5, 0.8],
betas=[0.3, 0.1],
rnn_type="LSTM",
)
# Testing with initial state and peepholes
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=True,
use_initial_state=True,
rnn_type="LSTM",
)
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=True,
use_initial_state=True,
use_peep=True,
rnn_type="LSTM",
)
@tvm.testing.uses_gpu
def test_gru():
# No bias.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False, rnn_type="GRU"
)
    # Large batch.
verify_rnn(
seq_length=4,
batch_size=8,
input_size=16,
hidden_size=32,
use_bias=True,
rnn_type="GRU",
linear_before_reset=True,
)
# Non power of two.
verify_rnn(
seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True, rnn_type="GRU"
)
# Long sequence.
verify_rnn(
seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True, rnn_type="GRU"
)
# Large hidden.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True, rnn_type="GRU"
)
# Large input.
verify_rnn(
seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True, rnn_type="GRU"
)
# Different activation testing.
# Default value hardsigmoid.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "Softsign"],
rnn_type="GRU",
)
# Multiple parameterized activations.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "LeakyRelu"],
alphas=[2.0, 0.5],
betas=[0.3],
rnn_type="GRU",
)
# All parameterized with new Affine activation.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "Affine"],
alphas=[2.0, 0.8],
betas=[0.3, 0.1],
rnn_type="GRU",
)
# Testing with initial state
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=True,
use_initial_state=True,
rnn_type="GRU",
)
@tvm.testing.uses_gpu
def test_resize():
def verify(ishape, oshape, scales, mode, coord_trans):
nodes = [
make_constant_node("roi", onnx.TensorProto.FLOAT, (0,), []),
make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scales),), scales),
]
input_names = ["X", "roi", "scales"]
if oshape != []:
nodes.append(
make_constant_node("sizes", onnx.TensorProto.INT64, (len(oshape),), oshape)
)
input_names.append("sizes")
nodes.append(
helper.make_node(
"Resize",
inputs=input_names,
outputs=["Y"],
mode=mode,
coordinate_transformation_mode=coord_trans,
)
)
if oshape == []:
oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]
graph = helper.make_graph(
nodes,
"resize_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, ishape)],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, oshape)],
)
model = helper.make_model(graph, producer_name="resize_test")
verify_with_ort(model, [ishape], [oshape], use_vm=True, opset=11, freeze_params=True)
# upsampling
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "nearest", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "nearest", "align_corners")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "align_corners")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "nearest", "half_pixel")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "half_pixel")
# downsampling
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "nearest", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "nearest", "align_corners")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "align_corners")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "nearest", "half_pixel")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "half_pixel")
# scales are specified instead of sizes
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "nearest", "asymmetric")
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "linear", "asymmetric")
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "nearest", "align_corners")
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "linear", "align_corners")
verify([1, 16, 32, 32], [], [1, 1, 0.5, 0.5], "linear", "half_pixel")
verify([1, 16, 32, 32], [], [1, 1, 0.5, 0.5], "nearest", "half_pixel")
def verify_opset_10(ishape, scales, mode):
nodes = [
make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scales),), scales),
]
input_names = ["X", "scales"]
nodes.append(
helper.make_node(
"Resize",
inputs=input_names,
outputs=["Y"],
mode=mode,
)
)
oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]
graph = helper.make_graph(
nodes,
"resize_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, ishape)],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, oshape)],
)
model = helper.make_model(graph, producer_name="resize_test")
verify_with_ort(model, [ishape], [oshape], use_vm=True, freeze_params=True, opset=10)
verify_opset_10([1, 16, 32, 32], [1, 1, 2, 2], "nearest")
verify_opset_10([1, 16, 32, 32], [1, 1, 0.5, 0.5], "linear")
@tvm.testing.uses_gpu
def test_nonzero():
def verify_nonzero(indata, outdata, dtype):
node = helper.make_node(
"NonZero",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"nonzero_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.INT64, list(indata.shape))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="nonzero_test")
verify_with_ort_with_inputs(model, [indata], dtype="int64", use_vm=True, opset=9)
input_data = np.array([[1, 0], [1, 1]], dtype=np.int64)
result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 1], [0, 0, 1]]
verify_nonzero(input_data, result, dtype=np.int64)
input_data = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64)
result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 2, 2], [0, 1, 0, 1]]
verify_nonzero(input_data, result, dtype=np.int64)
@tvm.testing.uses_gpu
def test_topk():
def verify_topk(input_dims, K, axis=-1):
output_dims = list(input_dims)
output_dims[axis] = K
node = helper.make_node(
"TopK", inputs=["X", "K"], outputs=["Values", "Indicies"], axis=axis
)
graph = helper.make_graph(
[node],
"topk_test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(input_dims)),
helper.make_tensor_value_info(
"K",
TensorProto.INT64,
[
1,
],
),
],
outputs=[
helper.make_tensor_value_info("Values", TensorProto.FLOAT, output_dims),
helper.make_tensor_value_info("Indicies", TensorProto.INT64, output_dims),
],
)
model = helper.make_model(graph, producer_name="topk_test")
indata = np.random.uniform(-10, 10, input_dims).astype(np.float32)
verify_with_ort_with_inputs(model, [indata, np.array([K])], use_vm=True)
for n in [12, 32]:
for shape in [[n], [n, n], [n, n, n]]:
for k in [1, 5, 10]:
verify_topk(shape, k)
verify_topk([n, n, n], 5, 0)
verify_topk([n, n, n], 5, 1)
verify_topk([n, n, n], 5, 2)
@tvm.testing.uses_gpu
def test_roi_align():
def verify_roi_align(
input_dims,
num_roi,
output_height,
output_width,
sampling_ratio=0,
spatial_scale=1.0,
mode="avg",
):
output_dims = [num_roi, input_dims[1], output_height, output_width]
node = helper.make_node(
"RoiAlign",
inputs=["X", "rois", "batch_indicies"],
outputs=["Y"],
mode=mode,
output_height=output_height,
output_width=output_width,
sampling_ratio=sampling_ratio,
spatial_scale=spatial_scale,
)
graph = helper.make_graph(
[node],
"roialign_test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(input_dims)),
helper.make_tensor_value_info("rois", TensorProto.FLOAT, [num_roi, 4]),
helper.make_tensor_value_info(
"batch_indicies",
TensorProto.INT64,
[
num_roi,
],
),
],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, output_dims)],
)
model = helper.make_model(graph, producer_name="roialign_test")
np_data = np.random.uniform(size=input_dims).astype("float32")
np_rois = np.random.uniform(size=[num_roi, 4]).astype("float32") * input_dims[2]
np_batch_indicies = np.random.randint(low=0, high=input_dims[0], size=num_roi)
verify_with_ort_with_inputs(
model, [np_data, np_rois, np_batch_indicies], out_shape=[output_dims]
)
verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((4, 4, 16, 32), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 8, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 8, 8), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 16), 16, 5, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 12), 8, 7, 3, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=0.5)
verify_roi_align((3, 4, 12, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.5)
verify_roi_align((5, 4, 16, 14), 32, 7, 7, sampling_ratio=1, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=2, spatial_scale=1.0)
# ONNX implementation of roi_align with max mode is incorrect, so we don't compare outputs here.
@tvm.testing.uses_gpu
def test_non_max_suppression():
def verify_nms(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
):
input_names = ["boxes", "scores", "max_output_boxes_per_class", "iou_threshold"]
input_nodes = [
helper.make_tensor_value_info("boxes", TensorProto.FLOAT, boxes.shape),
helper.make_tensor_value_info("scores", TensorProto.FLOAT, scores.shape),
helper.make_tensor_value_info(
"max_output_boxes_per_class", TensorProto.INT64, max_output_boxes_per_class.shape
),
helper.make_tensor_value_info("iou_threshold", TensorProto.FLOAT, iou_threshold.shape),
]
inputs = [boxes, scores, max_output_boxes_per_class, iou_threshold]
if score_threshold is not None:
input_names.append("score_threshold")
input_nodes.append(
helper.make_tensor_value_info(
"score_threshold", TensorProto.FLOAT, score_threshold.shape
)
)
inputs.append(score_threshold)
node = helper.make_node(
"NonMaxSuppression",
inputs=input_names,
outputs=["Y"],
center_point_box=0,
)
graph = helper.make_graph(
[node],
"nms_test",
inputs=input_nodes,
outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, output_dims)],
)
model = helper.make_model(graph, producer_name="nms_test")
verify_with_ort_with_inputs(model, inputs, use_vm=True)
boxes = np.array(
[
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[0.5, 0.5, 1.0, 1.0],
],
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.5, 0.5, 0.95, 0.95],
[0.5, 0.5, 0.96, 0.96],
[0.5, 0.5, 1.0, 1.0],
],
]
).astype("float32")
scores = np.array(
[
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
]
).astype("float32")
max_output_boxes_per_class = np.array(2).astype("int64")
iou_threshold = np.array(0.8).astype("float32")
output_dims = [8, 3]
verify_nms(boxes, scores, max_output_boxes_per_class, iou_threshold, None, output_dims)
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0],
]
]
).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.4]).astype(np.float32)
output_dims = [2, 3]
verify_nms(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
)
def verify_cond_loop():
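    """Build a Loop whose body keeps adding the iteration counter to `y` and whose
    condition stops it once `y` is no longer less than 5; the trip count is set high
    so termination is condition-driven."""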
y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [1])
y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [1])
scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [1])
cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
y = np.array([-2]).astype(np.float32)
five_const_node = helper.make_node(
"Constant",
inputs=[],
outputs=["five"],
value=helper.make_tensor(
name="const_tensor_five", data_type=TensorProto.FLOAT, dims=(), vals=[5]
),
)
iter_cast_node = helper.make_node(
"Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
)
y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
less_node = helper.make_node("Less", inputs=["y_out", "five"], outputs=["cond_less"])
squeeze_node = helper.make_node("Squeeze", inputs=["cond_less"], outputs=["cond_squeeze"])
cond_cast_node = helper.make_node(
"Cast", inputs=["cond_squeeze"], outputs=["cond_out"], to=onnx.TensorProto.BOOL
)
scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
loop_body = helper.make_graph(
[
five_const_node,
iter_cast_node,
y_add_node,
less_node,
squeeze_node,
cond_cast_node,
scan_identity_node,
],
"loop_body",
[iter_count, cond_in, y_in],
[cond_out, y_out, scan_out],
)
loop_node = helper.make_node(
"Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
)
trip_count = np.array(5).astype(np.int64)
res_y = np.array([13]).astype(np.float32)
    cond = np.array(1).astype(bool)
loop_graph = onnx.helper.make_graph(
[loop_node],
"loop_outer",
inputs=[
onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1]),
],
outputs=[
onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [1]),
onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5, 1]),
],
)
loop_model = onnx.helper.make_model(loop_graph)
# Set a high trip count so that condition trips first.
trip_count = np.array(40).astype(np.int64)
    cond = np.array(1).astype(bool)
input_vals = [trip_count, cond, y]
verify_with_ort_with_inputs(loop_model, input_vals, use_vm=True, freeze_params=True)
def verify_count_loop():
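    """Build a Loop that passes its condition through unchanged and therefore runs
    for the full trip count, accumulating the iteration counter into a scalar state."""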
y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [])
y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [])
scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [])
cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
y = np.array(-2).astype(np.float32)
iter_cast_node = helper.make_node(
"Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
)
y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
identity_node = helper.make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
loop_body = helper.make_graph(
[identity_node, iter_cast_node, y_add_node, scan_identity_node],
"loop_body",
[iter_count, cond_in, y_in],
[cond_out, y_out, scan_out],
)
loop_node = helper.make_node(
"Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
)
trip_count = np.array(5).astype(np.int64)
res_y = np.array([13]).astype(np.float32)
    cond = np.array(1).astype(bool)
loop_graph = onnx.helper.make_graph(
[loop_node],
"loop_outer",
inputs=[
onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, []),
],
outputs=[
onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5]),
],
)
loop_model = onnx.helper.make_model(loop_graph)
trip_count = np.array(5).astype(np.int64)
    cond = np.array(1).astype(bool)
input_vals = [trip_count, cond, y]
verify_with_ort_with_inputs(loop_model, input_vals, use_vm=True, freeze_params=True)
def verify_tensor_loop():
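    """Same pattern as verify_count_loop, but with a [3, 3, 3, 3] tensor as the
    loop-carried state and scan output."""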
y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [3, 3, 3, 3])
y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [3, 3, 3, 3])
scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [3, 3, 3, 3])
cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
y = np.random.normal(size=[3, 3, 3, 3]).astype(np.float32)
iter_cast_node = helper.make_node(
"Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
)
y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
identity_node = helper.make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
loop_body = helper.make_graph(
[identity_node, iter_cast_node, y_add_node, scan_identity_node],
"loop_body",
[iter_count, cond_in, y_in],
[cond_out, y_out, scan_out],
)
loop_node = helper.make_node(
"Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
)
trip_count = np.array(5).astype(np.int64)
    cond = np.array(1).astype(bool)
loop_graph = onnx.helper.make_graph(
[loop_node],
"loop_outer",
inputs=[
onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),
],
outputs=[
onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),
onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5, 3, 3, 3, 3]),
],
)
loop_model = onnx.helper.make_model(loop_graph)
trip_count = np.array(5).astype(np.int64)
    cond = np.array(1).astype(bool)
input_vals = [trip_count, cond, y]
verify_with_ort_with_inputs(
loop_model, input_vals, use_vm=True, freeze_params=True, convert_to_static=True
)
def test_loop():
# Test a loop that exits once a condition is met.
verify_cond_loop()
# Test a loop that exits after a fixed number of iterations with scalar outputs.
verify_count_loop()
# Test a loop that uses an array output.
verify_tensor_loop()
def verify_if(cond_array):
# Given a bool scalar input cond.
# return constant tensor x if cond is True, otherwise return constant tensor y.
then_out = onnx.helper.make_tensor_value_info("then_out", onnx.TensorProto.FLOAT, [5])
else_out = onnx.helper.make_tensor_value_info("else_out", onnx.TensorProto.FLOAT, [5])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
y = np.array([5, 4, 3, 2, 1]).astype(np.float32)
then_const_node = onnx.helper.make_node(
"Constant", inputs=[], outputs=["then_out"], value=numpy_helper.from_array(x)
)
else_const_node = onnx.helper.make_node(
"Constant", inputs=[], outputs=["else_out"], value=numpy_helper.from_array(y)
)
then_body = onnx.helper.make_graph([then_const_node], "then_body", [], [then_out])
else_body = onnx.helper.make_graph([else_const_node], "else_body", [], [else_out])
if_node = onnx.helper.make_node(
"If", inputs=["cond"], outputs=["res"], then_branch=then_body, else_branch=else_body
)
if_graph = onnx.helper.make_graph(
[if_node],
"if_outer",
inputs=[
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
],
outputs=[
onnx.helper.make_tensor_value_info("res", onnx.TensorProto.FLOAT, [5]),
],
)
if_model = onnx.helper.make_model(if_graph)
if cond_array:
cond = np.array([1]).astype("bool")
else:
cond = np.array(1).astype("bool")
correct_out = x if cond else y
# TODO(jwfromm): Onnxruntime 1.0.0 is buggy with If statements. Replace this with
# verify_with_ort once we update versions.
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(if_model, [cond], target, dev, freeze_params=True)
for i in range(len(tvm_out)):
tvm.testing.assert_allclose(correct_out[i], tvm_out[i], rtol=1e-05, atol=1e-05)
@tvm.testing.uses_gpu
def test_if():
# Confirm that if works with cond as an array or scalar.
verify_if(cond_array=False)
verify_if(cond_array=True)
@tvm.testing.uses_gpu
def test_size():
def verify_size(indata):
node = helper.make_node(
"Size",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"size_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.INT64, list(indata.shape))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, [])],
)
model = helper.make_model(graph, producer_name="size_test")
verify_with_ort_with_inputs(model, [indata], dtype="int64", use_vm=True, opset=11)
input_data = np.array([[1, 0], [1, 1]], dtype=np.int64)
verify_size(input_data)
input_data = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64)
verify_size(input_data)
@tvm.testing.uses_gpu
def test_maxunpool():
def verify_maxunpool(data, indices, kernel_shape, strides, output_shape=None, pads=None):
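        """Run MaxUnpool either with an output shape inferred from kernel, strides,
        and pads, or with an explicit `output_shape` input."""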
input_names = ["xT", "xI"]
input_info = [
helper.make_tensor_value_info("xT", TensorProto.FLOAT, list(data.shape)),
helper.make_tensor_value_info("xI", TensorProto.INT64, list(indices.shape)),
]
input_values = [data, indices]
if output_shape is not None:
input_names.append("output_shape")
input_info.append(
helper.make_tensor_value_info(
"output_shape", TensorProto.INT64, list(output_shape.shape)
)
)
input_values.append(output_shape)
else:
# Compute expected output shape
output_shape = np.asarray(([1, 1] + list(strides))) * np.asarray(list(data.shape))
output_shape += np.asarray(([0, 0] + list(kernel_shape))) - np.asarray(
([0, 0] + list(strides))
)
if pads is not None:
output_shape -= np.asarray(
[0, 0] + list(np.sum(np.reshape(list(pads), [-1, 2]), axis=-1))
)
output_shape = [int(i) for i in output_shape]
node = helper.make_node(
"MaxUnpool", inputs=input_names, outputs=["y"], kernel_shape=kernel_shape
)
if pads is not None:
pad_attr = helper.make_attribute("pads", pads)
node.attribute.append(pad_attr)
if strides is not None:
strides_attr = helper.make_attribute("strides", strides)
node.attribute.append(strides_attr)
graph = helper.make_graph(
[node],
"maxunpool_test",
inputs=input_info,
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, output_shape)],
)
        model = helper.make_model(graph, producer_name="maxunpool_test")
verify_with_ort_with_inputs(model, input_values, use_vm=True, opset=11)
# Basic test
xT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)
xI = np.array([[[[0, 7], [13, 15]]]], dtype=np.int64)
verify_maxunpool(xT, xI, [2, 2], strides=[2, 2])
# Small stride
verify_maxunpool(xT, xI, [2, 2], strides=[1, 1])
# Big kernel
verify_maxunpool(xT, xI, [3, 3], strides=[2, 2])
# With output shape
output_shape = np.array((1, 1, 5, 5), dtype=np.int64)
verify_maxunpool(xT, xI, [2, 2], strides=[2, 2], output_shape=output_shape)
# With explicit reverse padding
pads = np.asarray([1, 1, 1, 1]).astype(np.int64)
verify_maxunpool(xT, xI, [2, 2], strides=[2, 2], pads=pads)
@tvm.testing.uses_gpu
def test_softplus():
def verify_softplus(indata):
node = helper.make_node(
"Softplus",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"softplus_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))],
)
model = helper.make_model(graph, producer_name="softplus_test")
verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11)
# Simple case with all signs.
input_data = np.array([[-1, 0, 1]], dtype=np.float32)
verify_softplus(input_data)
# More fancy case.
input_data = np.random.randn(1, 32, 32, 3).astype("float32")
verify_softplus(input_data)
def test_cumsum():
def verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"):
cumsum_node = onnx.helper.make_node(
"CumSum",
inputs=["X", "axis"],
outputs=["Y"],
)
if exclusive != 0:
exclusive_attr = helper.make_attribute("exclusive", exclusive)
cumsum_node.attribute.append(exclusive_attr)
if reverse != 0:
reverse_attr = helper.make_attribute("reverse", reverse)
cumsum_node.attribute.append(reverse_attr)
nodes = [
make_constant_node("axis", onnx.TensorProto.INT32, [1], [axis]),
cumsum_node,
]
if type == "float32":
tensor_type = TensorProto.FLOAT
else:
tensor_type = TensorProto.INT32
type = "int32"
graph = helper.make_graph(
nodes,
"cumsum_test",
inputs=[
helper.make_tensor_value_info("X", tensor_type, list(indata.shape)),
],
outputs=[helper.make_tensor_value_info("Y", tensor_type, list(indata.shape))],
)
model = helper.make_model(graph, producer_name="cumsum_test")
verify_with_ort_with_inputs(model, [indata], dtype=type, use_vm=True, opset=11)
data = (
np.array(
[
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
]
)
.astype(np.float32)
.reshape((3, 4))
)
verify_cumsum(data, 0)
verify_cumsum(data, 1)
verify_cumsum(data, 0, 1, 0)
verify_cumsum(data, 1, 1, 0)
verify_cumsum(data, 0, 0, 1)
verify_cumsum(data, 1, 0, 1)
verify_cumsum(data, 1, 1, 1)
data = np.random.randn(1, 32, 32, 3).astype("float32")
verify_cumsum(data, 1)
data = np.random.randn(1, 32, 32, 3).astype("int32")
verify_cumsum(data, 0, type="int32")
verify_cumsum(data, 1, type="int32")
verify_cumsum(data, 0, 1, 0, type="int32")
verify_cumsum(data, 1, 1, 0, type="int32")
verify_cumsum(data, 0, 0, 1, type="int32")
verify_cumsum(data, 1, 0, 1, type="int32")
verify_cumsum(data, 1, 1, 1, type="int32")
"""
The following parameterized tests loads the tests that ONNX ships as
serialized ONNX files, inputs, and outputs. The goal of this test
is to ensure the ONNX importer is in line with the ONNX specification.
To allow these tests to run in CI before all pass, a number of tests that
are not yet supported are skipped.
"""
from onnx import numpy_helper
f = onnx.__file__
import glob
onnx_test_folders = sorted(glob.glob("/".join(f.split("/")[0:-1]) + "/backend/test/data/node/*/"))
unsupported_onnx_tests = [
"test_basic_convinteger/",
"test_cast_DOUBLE_to_FLOAT16/",
"test_cast_FLOAT_to_STRING/",
"test_cast_STRING_to_FLOAT/",
"test_compress_0/",
"test_compress_1/",
"test_compress_default_axis/",
"test_compress_negative_axis/",
"test_convinteger_with_padding/",
"test_convtranspose_dilations/",
"test_convtranspose_output_shape/",
"test_cumsum_1d/",
"test_cumsum_1d_exclusive/",
"test_cumsum_1d_reverse/",
"test_cumsum_1d_reverse_exclusive/",
"test_cumsum_2d_axis_0/",
"test_cumsum_2d_axis_1/",
"test_cumsum_2d_negative_axis/",
"test_det_2d/",
"test_det_nd/",
"test_eyelike_populate_off_main_diagonal/",
"test_eyelike_with_dtype/",
"test_eyelike_without_dtype/",
"test_isinf_negative/",
"test_isinf_positive/",
"test_matmulinteger/",
"test_maxpool_2d_dilations/",
"test_maxpool_2d_same_lower/",
"test_maxpool_2d_same_upper/",
"test_maxpool_with_argmax_2d_precomputed_pads/",
"test_maxpool_with_argmax_2d_precomputed_strides/",
"test_maxunpool_export_with_output_shape/",
"test_mvn/",
"test_qlinearconv/",
"test_qlinearmatmul_2D/",
"test_qlinearmatmul_3D/",
"test_range_float_type_positive_delta_expanded/",
"test_range_int32_type_negative_delta_expanded/",
"test_resize_tf_crop_and_resize/",
## For these three tests, ONNX 1.6.0 has incorrect graphs, they pass with ONNX 1.7.0
"test_resize_upsample_sizes_nearest_ceil_half_pixel/",
"test_resize_upsample_sizes_nearest_floor_align_corners/",
"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric/",
# ----
"test_reversesequence_batch/",
"test_reversesequence_time/",
"test_rnn_seq_length/",
"test_roialign/",
"test_round/",
"test_scan9_sum/",
"test_scan_sum/",
"test_scatternd/",
"test_simple_rnn_defaults/",
"test_simple_rnn_with_initial_bias/",
"test_strnormalizer_export_monday_casesensintive_lower/",
"test_strnormalizer_export_monday_casesensintive_nochangecase/",
"test_strnormalizer_export_monday_casesensintive_upper/",
"test_strnormalizer_export_monday_empty_output/",
"test_strnormalizer_export_monday_insensintive_upper_twodim/",
"test_strnormalizer_nostopwords_nochangecase/",
"test_tfidfvectorizer_tf_batch_onlybigrams_skip0/",
"test_tfidfvectorizer_tf_batch_onlybigrams_skip5/",
"test_tfidfvectorizer_tf_batch_uniandbigrams_skip5/",
"test_tfidfvectorizer_tf_only_bigrams_skip0/",
"test_tfidfvectorizer_tf_onlybigrams_levelempty/",
"test_tfidfvectorizer_tf_onlybigrams_skip5/",
"test_tfidfvectorizer_tf_uniandbigrams_skip5/",
"test_top_k_smallest/",
"test_unique_not_sorted_without_axis/",
"test_unique_sorted_with_axis/",
"test_unique_sorted_with_axis_3d/",
"test_unique_sorted_with_negative_axis/",
"test_unique_sorted_without_axis/",
"test_upsample_nearest/",
]
@pytest.mark.parametrize("test", onnx_test_folders)
def test_onnx_nodes(test):
for failure in unsupported_onnx_tests:
if failure in test:
pytest.skip()
break
onnx_model = onnx.load(test + "/model.onnx")
inputs = []
outputs = []
for dataset in glob.glob(test + "/*/"):
tensors = sorted(glob.glob(dataset + "/*.pb"))
for tensor in tensors:
new_tensor = onnx.TensorProto()
with open(tensor, "rb") as f:
new_tensor.ParseFromString(f.read())
if "input" in tensor.split("/")[-1]:
inputs.append(numpy_helper.to_array(new_tensor))
elif "output" in tensor.split("/")[-1]:
outputs.append(numpy_helper.to_array(new_tensor))
else:
                raise ImportError(str(tensor) + " not labeled as an input or an output")
tvm_val = get_tvm_output_with_vm(onnx_model, inputs, "llvm", tvm.cpu(0))
if len(outputs) == 1:
tvm.testing.assert_allclose(outputs[0], tvm_val, rtol=1e-5, atol=1e-5)
else:
for output, val in zip(outputs, tvm_val):
tvm.testing.assert_allclose(output, val, rtol=1e-5, atol=1e-5)
def test_wrong_input():
node = helper.make_node(
"Softplus",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"softplus_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list([5]))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list([5]))],
)
model = helper.make_model(graph, producer_name="softplus_test")
# Check that the graph can import correctly with proper shape definitions.
correct_shape_dict = {"X": [5]}
relay.frontend.from_onnx(model, shape=correct_shape_dict)
# Check that an assertion is triggered when an input not in the graph is provided.
wrong_shape_dict = {"Z": [5]}
with pytest.raises(AssertionError):
relay.frontend.from_onnx(model, shape=wrong_shape_dict)
def test_aten():
torch.set_grad_enabled(False)
def _convert_to_onnx(model, inputs):
file_name = "{}.onnx".format("aten_model")
torch.onnx.export(
model,
inputs,
file_name,
export_params=True,
verbose=False,
opset_version=10,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
)
onnx_model = onnx.load(file_name)
assert 's: "embedding_bag"' in str(onnx_model)
return onnx_model
def verify_embedding_bag(num_embedding, embedding_dim, data_shape, num_bags=None):
dummy_data = torch.randint(0, num_embedding - 1, data_shape)
tvm_inputs = [dummy_data.numpy()]
model = torch.nn.EmbeddingBag(num_embedding, embedding_dim)
onnx_model = _convert_to_onnx(model, dummy_data)
torch_out = model(dummy_data)
for target, ctx in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(
onnx_model, tvm_inputs, target, ctx, freeze_params=True, convert_to_static=True
)
tvm.testing.assert_allclose(torch_out.numpy(), tvm_out)
verify_embedding_bag(10, 3, [2, 10])
verify_embedding_bag(32, 2, [3, 3])
if __name__ == "__main__":
test_flatten()
test_reshape()
test_shape()
test_expand()
test_power()
test_squeeze()
test_unsqueeze()
test_slice()
test_floor()
test_ceil()
test_round()
test_isinf()
test_isnan()
test_clip()
test_clip_min_max_as_inputs()
test_onehot()
test_gemm()
test_matmul()
test_gather()
test_gatherelements()
test_gather_nd()
test_scatter()
test_lrn()
test_instance_norm()
test_upsample()
test_forward_min()
test_forward_max()
test_forward_mean()
test_forward_hardsigmoid()
test_forward_arg_min_max()
test_softmax()
test_constantofshape()
test_all_reduce_funcs()
test_pad()
test_split()
test_binary_ops()
test_unary_ops()
test_leaky_relu()
test_elu()
test_selu()
test_prelu()
test_ThresholdedRelu()
test_LogSoftmax()
test_resnet()
test_inception()
test_densenet()
test_sign()
test_not()
test_and()
test_tile()
test_erf()
test_where()
test_or()
test_depth_to_space()
test_space_to_depth()
test_batch_norm()
test_batch_norm_dynamic_subgraph()
test_conv()
test_convtranspose()
test_unsqueeze_constant()
test_pooling()
test_lppool()
test_lstm()
test_gru()
test_resize()
test_nonzero()
test_topk()
test_mod()
test_xor()
test_max_roi_pool()
test_roi_align()
test_range()
    test_loop()
    test_if()
    test_size()
test_maxunpool()
test_softplus()
test_cumsum()
test_wrong_input()
test_aten()
|
the-stack_0_5364 | import argparse
from typing import Tuple
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import torchvision
from torch.utils.data import Dataset
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from deepext_with_lightning.callbacks import GenerateAttentionMap, CSVClassificationResult
from deepext_with_lightning.models.layers.backbone_key import BackBoneKey
from deepext_with_lightning.models.base import ClassificationModel
from deepext_with_lightning.models.classification import *
from deepext_with_lightning.transforms import AlbumentationsOnlyImageWrapperTransform
from deepext_with_lightning.dataset.functions import label_names_to_dict
from common import CLASSIFICATION_DATASET_INFO, build_data_loader, get_logger
VALID_MODEL_KEYS = ["efficientnet", "mobilenet", "abn", "customnet"]
# NOTE: add new models and datasets here.
def build_model(args, n_classes) -> ClassificationModel:
if args.model == "efficientnet":
return EfficientNet(num_classes=n_classes, lr=args.lr, network=f"efficientnet-b{args.efficientnet_scale}")
if args.model == "mobilenet":
return MobileNetV3(num_classes=n_classes, lr=args.lr, pretrained=False)
if args.model == "abn":
return AttentionBranchNetwork(n_classes=n_classes, lr=args.lr, backbone=BackBoneKey.from_val(args.submodel))
if args.model == "customnet":
return CustomClassificationNetwork(n_classes=n_classes, lr=args.lr,
backbone=BackBoneKey.from_val(args.submodel))
raise RuntimeError(f"Invalid model name: {args.model}")
def build_transforms(args) -> Tuple[any, any]:
train_transforms = A.Compose([
A.HorizontalFlip(p=0.3),
A.RandomResizedCrop(width=args.image_size, height=args.image_size, scale=(0.7, 1.2)),
A.Rotate((-30, 30), p=0.3),
A.CoarseDropout(max_width=int(args.image_size / 8), max_height=int(args.image_size / 8), max_holes=3, p=0.3),
ToTensorV2(),
])
train_transforms = AlbumentationsOnlyImageWrapperTransform(train_transforms)
test_transforms = A.Compose([
A.Resize(width=args.image_size, height=args.image_size),
ToTensorV2(),
])
test_transforms = AlbumentationsOnlyImageWrapperTransform(test_transforms)
return train_transforms, test_transforms
def build_dataset(args, train_transforms, test_transforms) -> Tuple[Dataset, Dataset]:
if args.dataset == "stl10":
train_dataset = torchvision.datasets.STL10(root=args.dataset_root, download=True, split="train",
transform=train_transforms)
test_dataset = torchvision.datasets.STL10(root=args.dataset_root, download=True, split="test",
transform=test_transforms)
return train_dataset, test_dataset
if args.dataset == "cifar10":
train_dataset = torchvision.datasets.CIFAR10(root=args.dataset_root, download=True, train=True,
transform=train_transforms)
test_dataset = torchvision.datasets.CIFAR10(root=args.dataset_root, download=True, train=False,
transform=test_transforms)
return train_dataset, test_dataset
raise RuntimeError(f"Invalid dataset name: {args.dataset_root}")
parser = argparse.ArgumentParser(description='Pytorch Image classification training.')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
parser.add_argument('--dataset', type=str, default="stl10",
help=f'Dataset type in {list(CLASSIFICATION_DATASET_INFO.keys())}')
parser.add_argument('--epoch', type=int, default=100, help='Number of epochs')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size')
parser.add_argument('--dataset_root', type=str, required=True, help='Dataset folder path')
parser.add_argument('--progress_dir', type=str, default=None, help='Directory for saving progress')
parser.add_argument('--model', type=str, default="mobilenet", help=f"Model type in {VALID_MODEL_KEYS}")
parser.add_argument('--load_checkpoint_path', type=str, default=None, help="Saved checkpoint path")
parser.add_argument('--save_checkpoint_path', type=str, default="checkpoints", help="Saving checkpoint directory")
parser.add_argument('--efficientnet_scale', type=int, default=0, help="Number of scale of EfficientNet.")
parser.add_argument('--image_size', type=int, default=96, help="Image size.")
parser.add_argument('--submodel', type=str, default="resnet18", help='Type of submodel (resnet18, resnet34, ...).')
parser.add_argument('--val_every_n_epoch', type=int, default=5, help="Validate every n epoch.")
parser.add_argument('--log_type', type=str, default="mlflow", help="Logger type (e.g. mlflow).")
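# Example invocation (a sketch; the script name and dataset_root path are assumptions):
#   python train_classification.py --dataset stl10 --dataset_root ./dataset \
#       --model mobilenet --epoch 100 --batch_size 8 --progress_dir progress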
if __name__ == "__main__":
args = parser.parse_args()
# Fetch dataset.
dataset_info = CLASSIFICATION_DATASET_INFO.get(args.dataset)
if dataset_info is None:
raise ValueError(
f"Invalid dataset name - {args.dataset}. Required [{list(CLASSIFICATION_DATASET_INFO.keys())}]")
label_names = dataset_info["label_names"]
class_index_dict = label_names_to_dict(label_names)
# Fetch dataset.
train_transforms, test_transforms = build_transforms(args)
train_dataset, test_dataset = build_dataset(args, train_transforms, test_transforms)
train_data_loader, test_data_loader = build_data_loader(args, train_dataset, test_dataset)
# Fetch model and load weight.
model = build_model(args, dataset_info["n_classes"])
if args.load_checkpoint_path:
model = model.load_from_checkpoint(args.load_checkpoint_path)
# Training setting.
logger = get_logger("classification_demo", args, model)
callbacks = [ModelCheckpoint(period=args.val_every_n_epoch, filename=f"{model.generate_model_name()}",
dirpath=args.save_checkpoint_path, monitor='val_acc', verbose=True, mode="max"),
CSVClassificationResult(period=args.epoch, model=model, dataset=test_dataset,
label_names=label_names, out_filepath=f"{args.progress_dir}/result.csv"), ]
if args.progress_dir:
if isinstance(model, AttentionBranchNetwork):
callbacks.append(GenerateAttentionMap(model=model, output_dir=args.progress_dir, period=5,
dataset=test_dataset, label_names=label_names))
# Training.
Trainer(max_epochs=args.epoch, callbacks=callbacks, gpus=-1,
check_val_every_n_epoch=args.val_every_n_epoch, logger=logger) \
.fit(model, train_dataloader=train_data_loader, val_dataloaders=test_data_loader)
|
the-stack_0_5366 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_sssd_ldap_user(host):
user = host.user('test')
assert user.uid == 5000
assert user.gid == 1
def test_sssd_ldap_user_filtered(host):
user = host.user('filtered-test')
assert not user.exists
def test_sssd_service_state(host):
assert host.service('sssd').is_enabled
assert host.service('sssd').is_running
def test_sshd_service_state(host):
ssh_service_name = {
'centos': 'sshd',
'ubuntu': 'ssh'
}
assert host.service(
ssh_service_name[host.system_info.distribution]).is_enabled
assert host.service(
ssh_service_name[host.system_info.distribution]).is_running
def test_ssh_access(host):
host.run_test(
'/usr/bin/ssh '
'-o StrictHostKeyChecking=no '
'-o BatchMode=yes '
'-T '
'-i /root/.ssh/id_rsa '
'-l test '
'localhost '
'exit'
)
def test_homedir_created(host):
assert host.file('/home/test').is_directory
|
the-stack_0_5367 | from ..sdoc import (
SLine,
SAnnotationPush,
SAnnotationPop,
)
from ..syntax import Token
from ..render import as_lines
from ..utils import rfind_idx
_COLOR_DEPS_INSTALLED = True
try:
from pygments import token
from pygments import styles
except ImportError:
_COLOR_DEPS_INSTALLED = False
else:
_SYNTAX_TOKEN_TO_PYGMENTS_TOKEN = {
Token.KEYWORD_CONSTANT: token.Keyword.Constant,
Token.NAME_BUILTIN: token.Name.Builtin,
Token.NAME_ENTITY: token.Name.Entity,
Token.NAME_FUNCTION: token.Name.Function,
Token.LITERAL_STRING: token.String,
Token.STRING_AFFIX: token.String.Affix,
Token.STRING_ESCAPE: token.String.Escape,
        Token.NUMBER_BINARY: token.Number.Bin,
        Token.NUMBER_INT: token.Number.Integer,
Token.NUMBER_FLOAT: token.Number.Float,
Token.OPERATOR: token.Operator,
Token.PUNCTUATION: token.Punctuation,
Token.COMMENT_SINGLE: token.Comment.Single,
}
default_style = styles.get_style_by_name('monokai')
try:
import colorful
except ImportError:
_COLOR_DEPS_INSTALLED = False
def styleattrs_to_colorful(attrs):
c = colorful.reset
if attrs['color'] or attrs['bgcolor']:
# Colorful doesn't have a way to directly set Hex/RGB
# colors- until I find a better way, we do it like this :)
accessor = ''
if attrs['color']:
colorful.update_palette({'peprintCurrFg': attrs['color']})
accessor = 'peprintCurrFg'
if attrs['bgcolor']:
colorful.update_palette({'peprintCurrBg': attrs['bgcolor']})
accessor += '_on_peprintCurrBg'
c &= getattr(colorful, accessor)
if attrs['bold']:
c &= colorful.bold
if attrs['italic']:
c &= colorful.italic
if attrs['underline']:
c &= colorful.underline
return c
def colored_render_to_stream(stream, sdocs, style, newline='\n', separator=' '):
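    """Write ``sdocs`` to ``stream``, translating annotation push/pop markers into
    ANSI colors looked up from the given Pygments ``style`` (``None`` selects the
    default style)."""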
if not _COLOR_DEPS_INSTALLED:
raise Exception(
"'pygments' and 'colorful' packages must be "
"installed to use colored output."
)
if style is None:
style = default_style
evald = list(sdocs)
if not evald:
return
colorstack = []
sdoc_lines = as_lines(evald)
for sdoc_line in sdoc_lines:
last_text_sdoc_idx = rfind_idx(
lambda sdoc: isinstance(sdoc, str),
sdoc_line
)
# Edge case: trailing whitespace on a line.
# Currently happens on multiline str value in a dict:
# there's a trailing whitespace after the colon that's
# hard to eliminate at the doc level.
if last_text_sdoc_idx != -1:
last_text_sdoc = sdoc_line[last_text_sdoc_idx]
sdoc_line[last_text_sdoc_idx] = last_text_sdoc.rstrip()
for sdoc in sdoc_line:
if isinstance(sdoc, str):
stream.write(sdoc)
elif isinstance(sdoc, SLine):
stream.write(newline + separator * sdoc.indent)
elif isinstance(sdoc, SAnnotationPush):
if isinstance(sdoc.value, Token):
pygments_token = _SYNTAX_TOKEN_TO_PYGMENTS_TOKEN[sdoc.value]
tokenattrs = style.style_for_token(pygments_token)
color = styleattrs_to_colorful(tokenattrs)
colorstack.append(color)
stream.write(str(color))
elif isinstance(sdoc, SAnnotationPop):
try:
colorstack.pop()
except IndexError:
continue
if colorstack:
stream.write(str(colorstack[-1]))
else:
stream.write(str(colorful.reset))
if colorstack:
stream.write(str(colorful.reset))
|
the-stack_0_5368 | # [SublimeLinter @python:3]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import threading
import win32api
import win32con
import win32gui
class drag_accept_files(object):
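    """Hook the window procedure of a Tk toplevel ``wnd`` so that files dragged onto
    the window are delivered to ``callback`` as a list of paths on a worker thread."""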
def __init__(self, wnd, callback):
super(drag_accept_files, self).__init__()
self.callback = callback
self.hwnd = int(wnd.wm_frame(), 16)
self._old_wnd_proc = win32gui.SetWindowLong(
self.hwnd, win32con.GWL_WNDPROC, self._new_wnd_proc)
self.accept_files = True
@property
def accept_files(self):
raise NotImplementedError()
@accept_files.setter
def accept_files(self, value):
win32gui.DragAcceptFiles(self.hwnd, bool(value))
def _new_wnd_proc(self, hwnd, msg, wparam, lparam):
assert self.hwnd == hwnd
if msg == win32con.WM_DROPFILES:
files = []
for i in range(win32api.DragQueryFile(wparam)):
files.append(win32api.DragQueryFile(wparam, i))
if files:
threading.Thread(target=self.callback, args=(files, )).start()
if msg == win32con.WM_DESTROY:
win32api.SetWindowLong(hwnd, win32con.GWL_WNDPROC, self._old_wnd_proc)
return win32gui.CallWindowProc(self._old_wnd_proc, hwnd, msg, wparam, lparam)
|
the-stack_0_5369 | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r""" TileLink-Uncached Lightweight Xbar generator
"""
import argparse
import logging as log
import sys
from pathlib import Path
import hjson
import tlgen
def main():
parser = argparse.ArgumentParser(prog="tlgen")
parser.add_argument('--topcfg',
'-t',
metavar='file',
type=argparse.FileType('r'),
help="`top_cfg.hjson` file.")
parser.add_argument('--doc',
'-d',
action='store_true',
help='Generate self HTML document in stdout')
parser.add_argument(
'--outdir',
'-o',
help=
"Target directory. tlgen needs 'rtl/' and 'dv/' directory under the target dir"
)
parser.add_argument('--ip-path',
default="",
help='''
Additional path to generated rtl/ or dv/ folders: outdir/ip_path/rtl
Only needed when there are multiple xbar in outdir''')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
args = parser.parse_args()
if args.verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
else:
log.basicConfig(format="%(levelname)s: %(message)s")
if args.doc:
# Generate Doc and return
sys.stdout.write(tlgen.selfdoc(heading=3, cmd='tlgen.py --doc'))
return
    # Check if topcfg defined
    if not args.topcfg or not args.outdir:
        log.error("--topcfg option is mandatory to generate codes.")
        return
    # Check if outdir exists. If not, show error and exit
    if not Path(args.outdir).is_dir():
        log.error("'--outdir' should point to writable directory")
        return
# Load contents of top_cfg
# Skip this part and use internal structure at this time
try:
obj = hjson.load(args.topcfg, use_decimal=True)
except ValueError:
raise SystemExit(sys.exc_info()[1])
log.info(obj)
xbar = tlgen.validate(obj)
xbar.ip_path = args.ip_path
if not tlgen.elaborate(xbar):
log.error("Elaboration failed." + repr(xbar))
# Generate
out_rtl, out_pkg, out_core = tlgen.generate(xbar)
rtl_path = Path(args.outdir) / args.ip_path / 'rtl/autogen'
rtl_path.mkdir(parents=True, exist_ok=True)
dv_path = Path(args.outdir) / args.ip_path / 'dv/autogen'
dv_path.mkdir(parents=True, exist_ok=True)
rtl_filename = "xbar_%s.sv" % (xbar.name)
rtl_filepath = rtl_path / rtl_filename
with rtl_filepath.open(mode='w', encoding='UTF-8') as fout:
fout.write(out_rtl)
pkg_filename = "tl_%s_pkg.sv" % (xbar.name)
pkg_filepath = rtl_path / pkg_filename
with pkg_filepath.open(mode='w', encoding='UTF-8') as fout:
fout.write(out_pkg)
core_filename = "xbar_%s.core" % (xbar.name)
core_filepath = rtl_path / core_filename
with core_filepath.open(mode='w', encoding='UTF-8') as fout:
fout.write(out_core)
# generate TB
tlgen.generate_tb(xbar, dv_path)
if __name__ == "__main__":
main()
|
the-stack_0_5370 | import os
from urllib.request import urlretrieve
import pandas as pd
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename='Fremont.csv', url=FREMONT_URL, force_download=False):
"""Download and cache the fremont data
Parameters
==========
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : bool (optional)
if True, force redownload of data
Returns
=======
data : pandas.DataFrame
The fremont bridge data
"""
if force_download or not os.path.exists(filename):
urlretrieve(url, filename)
    data = pd.read_csv(filename, index_col='Date')
try:
data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')
except TypeError:
data.index = pd.to_datetime(data.index)
data.columns = ['West', 'East']
data['Total'] = data['West'] + data['East']
return data
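# Example usage (a minimal sketch; assumes network access to the Seattle open data portal):
#     data = get_fremont_data()
#     print(data.columns.tolist())   # ['West', 'East', 'Total']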
|
the-stack_0_5371 | """Class for Braava devices."""
import logging
from homeassistant.components.vacuum import SUPPORT_FAN_SPEED
from .irobot_base import SUPPORT_IROBOT, IRobotVacuum
_LOGGER = logging.getLogger(__name__)
ATTR_DETECTED_PAD = "detected_pad"
ATTR_LID_CLOSED = "lid_closed"
ATTR_TANK_PRESENT = "tank_present"
ATTR_TANK_LEVEL = "tank_level"
ATTR_PAD_WETNESS = "spray_amount"
OVERLAP_STANDARD = 67
OVERLAP_DEEP = 85
OVERLAP_EXTENDED = 25
MOP_STANDARD = "Standard"
MOP_DEEP = "Deep"
MOP_EXTENDED = "Extended"
BRAAVA_MOP_BEHAVIORS = [MOP_STANDARD, MOP_DEEP, MOP_EXTENDED]
BRAAVA_SPRAY_AMOUNT = [1, 2, 3]
# Braava Jets can set mopping behavior through fanspeed
SUPPORT_BRAAVA = SUPPORT_IROBOT | SUPPORT_FAN_SPEED
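# For example, a fan speed of "Deep-2" (format "<behavior>-<spray amount>", see
# fan_speed/async_set_fan_speed below) maps to rankOverlap=OVERLAP_DEEP (85) and
# padWetness={"disposable": 2, "reusable": 2}.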
class BraavaJet(IRobotVacuum):
"""Braava Jet."""
def __init__(self, roomba, blid):
"""Initialize the Roomba handler."""
super().__init__(roomba, blid)
# Initialize fan speed list
speed_list = []
for behavior in BRAAVA_MOP_BEHAVIORS:
for spray in BRAAVA_SPRAY_AMOUNT:
speed_list.append(f"{behavior}-{spray}")
self._speed_list = speed_list
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_BRAAVA
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
# Mopping behavior and spray amount as fan speed
rank_overlap = self.vacuum_state.get("rankOverlap", {})
behavior = None
if rank_overlap == OVERLAP_STANDARD:
behavior = MOP_STANDARD
elif rank_overlap == OVERLAP_DEEP:
behavior = MOP_DEEP
elif rank_overlap == OVERLAP_EXTENDED:
behavior = MOP_EXTENDED
pad_wetness = self.vacuum_state.get("padWetness", {})
# "disposable" and "reusable" values are always the same
pad_wetness_value = pad_wetness.get("disposable")
return f"{behavior}-{pad_wetness_value}"
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return self._speed_list
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
try:
split = fan_speed.split("-", 1)
behavior = split[0]
spray = int(split[1])
if behavior.capitalize() in BRAAVA_MOP_BEHAVIORS:
behavior = behavior.capitalize()
except IndexError:
_LOGGER.error(
"Fan speed error: expected {behavior}-{spray_amount}, got '%s'",
fan_speed,
)
return
except ValueError:
_LOGGER.error("Spray amount error: expected integer, got '%s'", split[1])
return
if behavior not in BRAAVA_MOP_BEHAVIORS:
_LOGGER.error(
"Mop behavior error: expected one of %s, got '%s'",
str(BRAAVA_MOP_BEHAVIORS),
behavior,
)
return
if spray not in BRAAVA_SPRAY_AMOUNT:
_LOGGER.error(
"Spray amount error: expected one of %s, got '%d'",
str(BRAAVA_SPRAY_AMOUNT),
spray,
)
return
overlap = 0
if behavior == MOP_STANDARD:
overlap = OVERLAP_STANDARD
elif behavior == MOP_DEEP:
overlap = OVERLAP_DEEP
else:
overlap = OVERLAP_EXTENDED
await self.hass.async_add_executor_job(
self.vacuum.set_preference, "rankOverlap", overlap
)
await self.hass.async_add_executor_job(
self.vacuum.set_preference,
"padWetness",
{"disposable": spray, "reusable": spray},
)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
state_attrs = super().device_state_attributes
# Get Braava state
state = self.vacuum_state
detected_pad = state.get("detectedPad")
mop_ready = state.get("mopReady", {})
lid_closed = mop_ready.get("lidClosed")
tank_present = mop_ready.get("tankPresent")
tank_level = state.get("tankLvl")
state_attrs[ATTR_DETECTED_PAD] = detected_pad
state_attrs[ATTR_LID_CLOSED] = lid_closed
state_attrs[ATTR_TANK_PRESENT] = tank_present
state_attrs[ATTR_TANK_LEVEL] = tank_level
return state_attrs
|
the-stack_0_5373 | """
Author: Daisuke Oyama
Tests for normal_form_game.py
"""
from __future__ import division
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import eq_, ok_, raises
from quantecon.game_theory import (
Player, NormalFormGame, pure2mixed, best_response_2p
)
# Player #
class TestPlayer_1opponent:
"""Test the methods of Player with one opponent player"""
def setUp(self):
"""Setup a Player instance"""
coordination_game_matrix = [[4, 0], [3, 2]]
self.player = Player(coordination_game_matrix)
def test_best_response_against_pure(self):
eq_(self.player.best_response(1), 1)
def test_best_response_against_mixed(self):
eq_(self.player.best_response([1/2, 1/2]), 1)
def test_best_response_list_when_tie(self):
"""best_response with tie_breaking=False"""
assert_array_equal(
sorted(self.player.best_response([2/3, 1/3], tie_breaking=False)),
sorted([0, 1])
)
def test_best_response_with_random_tie_breaking(self):
"""best_response with tie_breaking='random'"""
ok_(self.player.best_response([2/3, 1/3], tie_breaking='random')
in [0, 1])
seed = 1234
br0 = self.player.best_response([2/3, 1/3], tie_breaking='random',
random_state=seed)
br1 = self.player.best_response([2/3, 1/3], tie_breaking='random',
random_state=seed)
eq_(br0, br1)
def test_best_response_with_smallest_tie_breaking(self):
"""best_response with tie_breaking='smallest' (default)"""
eq_(self.player.best_response([2/3, 1/3]), 0)
def test_best_response_with_payoff_perturbation(self):
"""best_response with payoff_perturbation"""
eq_(self.player.best_response([2/3, 1/3],
payoff_perturbation=[0, 0.1]),
1)
eq_(self.player.best_response([2, 1], # int
payoff_perturbation=[0, 0.1]),
1)
def test_is_best_response_against_pure(self):
ok_(self.player.is_best_response(0, 0))
def test_is_best_response_against_mixed(self):
ok_(self.player.is_best_response([1/2, 1/2], [2/3, 1/3]))
class TestPlayer_2opponents:
"""Test the methods of Player with two opponent players"""
def setUp(self):
"""Setup a Player instance"""
payoffs_2opponents = [[[3, 6],
[4, 2]],
[[1, 0],
[5, 7]]]
self.player = Player(payoffs_2opponents)
def test_payoff_vector_against_pure(self):
assert_array_equal(self.player.payoff_vector((0, 1)), [6, 0])
def test_is_best_response_against_pure(self):
ok_(not self.player.is_best_response(0, (1, 0)))
def test_best_response_against_pure(self):
eq_(self.player.best_response((1, 1)), 1)
def test_best_response_list_when_tie(self):
"""
best_response against a mixed action profile with
tie_breaking=False
"""
assert_array_equal(
sorted(self.player.best_response(([3/7, 4/7], [1/2, 1/2]),
tie_breaking=False)),
sorted([0, 1])
)
def test_random_choice():
n, m = 5, 4
payoff_matrix = np.zeros((n, m))
player = Player(payoff_matrix)
eq_(player.random_choice([0]), 0)
actions = list(range(player.num_actions))
ok_(player.random_choice() in actions)
# NormalFormGame #
class TestNormalFormGame_Sym2p:
"""Test the methods of NormalFormGame with symmetric two players"""
def setUp(self):
"""Setup a NormalFormGame instance"""
coordination_game_matrix = [[4, 0], [3, 2]]
self.g = NormalFormGame(coordination_game_matrix)
def test_getitem(self):
assert_array_equal(self.g[0, 1], [0, 3])
def test_is_nash_pure(self):
ok_(self.g.is_nash((0, 0)))
def test_is_nash_mixed(self):
ok_(self.g.is_nash(([2/3, 1/3], [2/3, 1/3])))
class TestNormalFormGame_Asym2p:
"""Test the methods of NormalFormGame with asymmetric two players"""
def setUp(self):
"""Setup a NormalFormGame instance"""
matching_pennies_bimatrix = [[(1, -1), (-1, 1)],
[(-1, 1), (1, -1)]]
self.g = NormalFormGame(matching_pennies_bimatrix)
def test_getitem(self):
assert_array_equal(self.g[1, 0], [-1, 1])
def test_is_nash_against_pure(self):
ok_(not self.g.is_nash((0, 0)))
def test_is_nash_against_mixed(self):
ok_(self.g.is_nash(([1/2, 1/2], [1/2, 1/2])))
class TestNormalFormGame_3p:
"""Test the methods of NormalFormGame with three players"""
def setUp(self):
"""Setup a NormalFormGame instance"""
payoffs_2opponents = [[[3, 6],
[4, 2]],
[[1, 0],
[5, 7]]]
player = Player(payoffs_2opponents)
self.g = NormalFormGame([player for i in range(3)])
def test_getitem(self):
assert_array_equal(self.g[0, 0, 1], [6, 4, 1])
def test_is_nash_pure(self):
ok_(self.g.is_nash((0, 0, 0)))
ok_(not self.g.is_nash((0, 0, 1)))
def test_is_nash_mixed(self):
p = (1 + np.sqrt(65)) / 16
ok_(self.g.is_nash(([1 - p, p], [1 - p, p], [1 - p, p])))
def test_normalformgame_input_action_sizes():
g = NormalFormGame((2, 3, 4))
eq_(g.N, 3) # Number of players
assert_array_equal(
g.players[0].payoff_array,
np.zeros((2, 3, 4))
)
assert_array_equal(
g.players[1].payoff_array,
np.zeros((3, 4, 2))
)
assert_array_equal(
g.players[2].payoff_array,
np.zeros((4, 2, 3))
)
def test_normalformgame_setitem():
g = NormalFormGame((2, 2))
g[0, 0] = (0, 10)
g[0, 1] = (0, 10)
g[1, 0] = (3, 5)
g[1, 1] = (-2, 0)
assert_array_equal(
g.players[0].payoff_array,
[[0, 0], [3, -2]]
)
assert_array_equal(
g.players[1].payoff_array,
[[10, 5], [10, 0]]
)
def test_normalformgame_constant_payoffs():
g = NormalFormGame((2, 2))
ok_(g.is_nash((0, 0)))
ok_(g.is_nash((0, 1)))
ok_(g.is_nash((1, 0)))
ok_(g.is_nash((1, 1)))
def test_normalformgame_payoff_profile_array():
nums_actions = (2, 3, 4)
for N in range(1, len(nums_actions)+1):
payoff_arrays = [
np.arange(np.prod(nums_actions[0:N])).reshape(nums_actions[i:N] +
nums_actions[0:i])
for i in range(N)
]
players = [Player(payoff_array) for payoff_array in payoff_arrays]
g = NormalFormGame(players)
g_new = NormalFormGame(g.payoff_profile_array)
for player_new, payoff_array in zip(g_new.players, payoff_arrays):
assert_array_equal(player_new.payoff_array, payoff_array)
# Trivial cases with one player #
class TestPlayer_0opponents:
"""Test for trivial Player with no opponent player"""
def setUp(self):
"""Setup a Player instance"""
payoffs = [0, 1]
self.player = Player(payoffs)
def test_payoff_vector(self):
"""Trivial player: payoff_vector"""
assert_array_equal(self.player.payoff_vector(None), [0, 1])
def test_is_best_response(self):
"""Trivial player: is_best_response"""
ok_(self.player.is_best_response(1, None))
def test_best_response(self):
"""Trivial player: best_response"""
eq_(self.player.best_response(None), 1)
class TestNormalFormGame_1p:
"""Test for trivial NormalFormGame with a single player"""
def setUp(self):
"""Setup a NormalFormGame instance"""
data = [[0], [1], [1]]
self.g = NormalFormGame(data)
def test_construction(self):
"""Trivial game: construction"""
ok_(self.g.N == 1)
assert_array_equal(self.g.players[0].payoff_array, [0, 1, 1])
def test_getitem(self):
"""Trivial game: __getitem__"""
eq_(self.g[0], 0)
def test_is_nash_pure(self):
"""Trivial game: is_nash with pure action"""
ok_(self.g.is_nash((1,)))
ok_(not self.g.is_nash((0,)))
def test_is_nash_mixed(self):
"""Trivial game: is_nash with mixed action"""
ok_(self.g.is_nash(([0, 1/2, 1/2],)))
def test_normalformgame_input_action_sizes_1p():
g = NormalFormGame(2)
eq_(g.N, 1) # Number of players
assert_array_equal(
g.players[0].payoff_array,
np.zeros(2)
)
def test_normalformgame_setitem_1p():
g = NormalFormGame(2)
eq_(g.N, 1) # Number of players
g[0] = 10 # Set payoff 10 for action 0
eq_(g.players[0].payoff_array[0], 10)
# Test __repre__ #
def test_player_repr():
nums_actions = (2, 3, 4)
payoff_arrays = [
np.arange(np.prod(nums_actions[0:i])).reshape(nums_actions[0:i])
for i in range(1, len(nums_actions)+1)
]
players = [Player(payoff_array) for payoff_array in payoff_arrays]
for player in players:
player_new = eval(repr(player))
assert_array_equal(player_new.payoff_array, player.payoff_array)
# Invalid inputs #
@raises(ValueError)
def test_normalformgame_invalid_input_players_shape_inconsistent():
p0 = Player(np.zeros((2, 3)))
p1 = Player(np.zeros((2, 3)))
g = NormalFormGame([p0, p1])
@raises(ValueError)
def test_normalformgame_invalid_input_players_num_inconsistent():
p0 = Player(np.zeros((2, 2, 2)))
p1 = Player(np.zeros((2, 2, 2)))
g = NormalFormGame([p0, p1])
@raises(ValueError)
def test_normalformgame_invalid_input_players_dtype_inconsistent():
p0 = Player(np.zeros((2, 2), dtype=int))
p1 = Player(np.zeros((2, 2), dtype=float))
g = NormalFormGame([p0, p1])
@raises(ValueError)
def test_normalformgame_invalid_input_nosquare_matrix():
g = NormalFormGame(np.zeros((2, 3)))
@raises(ValueError)
def test_normalformgame_invalid_input_payoff_profiles():
g = NormalFormGame(np.zeros((2, 2, 1)))
# Utility functions #
def test_pure2mixed():
num_actions = 3
pure_action = 0
mixed_action = [1., 0., 0.]
assert_array_equal(pure2mixed(num_actions, pure_action), mixed_action)
# Numba jitted functions #
def test_best_response_2p():
test_case0 = {
'payoff_array': np.array([[4, 0], [3, 2], [0, 3]]),
'mixed_actions':
[np.array([1, 0]), np.array([0.5, 0.5]), np.array([0, 1])],
'brs_expected': [0, 1, 2]
}
test_case1 = {
'payoff_array': np.zeros((2, 3)),
'mixed_actions': [np.array([1, 0, 0]), np.array([1/3, 1/3, 1/3])],
'brs_expected': [0, 0]
}
for test_case in [test_case0, test_case1]:
for mixed_action, br_expected in zip(test_case['mixed_actions'],
test_case['brs_expected']):
br_computed = \
best_response_2p(test_case['payoff_array'], mixed_action)
eq_(br_computed, br_expected)
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|