ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | 1a348db4a558c94f18c674da870692e791474c69 | #oiracis
import re
out = open("out.txt", "w+")
c = 0
with open("file.txt", "r+", errors="ignore") as f: #ignore all errors so it reads the file not matter what
for line in f:
c = c + 1
try:
mail = (re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", line))
print("Nº: " + str(c) + "\t" + mail[0])
out.write(mail[0] + "\n")
except IndexError: # no e-mail found on this line
print("oopsie")
out.close()
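# Illustrative check of the regex used above (the sample address is made up):
# re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", "contact: jane.doe@example.com")
# returns ['jane.doe@example.com']; lines without a match leave mail empty, so mail[0] raises IndexError.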
|
py | 1a348ea8236a3c984d9f546057b0eb98e8370908 |
n, k = map(int, input().split()) # e.g. "17 4"
res = 0
while True:
tar = (n // k) * k # subtract 1s until n becomes a multiple of k
res += (n - tar)
n = tar
if n < k: # exit the loop once n is smaller than k (it cannot be divided any further)
break
res += 1
n //= k
res += (n - 1) # finally subtract 1 at a time from the remaining n until it reaches 1
print(res)
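# Worked example (assuming the sample input "17 4" noted above):
# 17 -> 16 (one subtraction), 16 -> 4 (one division), 4 -> 1 (one division),
# so the greedy loop above prints 3.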
|
py | 1a348eaa2ac92bdd84ae65f416affea138b43e5a | # -*- coding: utf-8 -*-
"""通用工具类"""
|
py | 1a348ede9bc4631a930d0ad14c089bc0df44e713 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class PyWrapOptimizeGraphTest(test.TestCase):
def testBasic(self):
"""Make sure arguments can be passed correctly."""
a = constant_op.constant(10, name='a')
b = constant_op.constant(20, name='b')
c = math_ops.add_n([a, b], name='c')
d = math_ops.add_n([b, c], name='d')
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
# Being a train_op will make 'd' be added as a fetch node.
train_op.append(d)
mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
rewriter_config = rewriter_config_pb2.RewriterConfig()
rewriter_config.optimizers.append('constfold')
graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)
self.assertEqual(len(graph.node), 3)
self.assertItemsEqual([node.name for node in graph.node], ['b', 'c', 'd'])
if __name__ == '__main__':
test.main()
|
py | 1a348f19523356c5e1b447da50439e3b3ef87226 | # -*- coding: utf-8 -*-
"""
py_vollib.black.implied_volatility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A library for option pricing, implied volatility, and
greek calculation. py_vollib is based on lets_be_rational,
a Python wrapper for LetsBeRational by Peter Jaeckel as
described below.
:copyright: © 2017 Gammon Capital LLC
:license: MIT, see LICENSE for more details.
About LetsBeRational:
~~~~~~~~~~~~~~~~~~~~~
The source code of LetsBeRational resides at www.jaeckel.org/LetsBeRational.7z .
::
========================================================================================
Copyright © 2013-2014 Peter Jäckel.
Permission to use, copy, modify, and distribute this software is freely granted,
provided that this notice is preserved.
WARRANTY DISCLAIMER
The Software is provided "as is" without warranty of any kind, either express or implied,
including without limitation any implied warranties of condition, uninterrupted use,
merchantability, fitness for a particular purpose, or non-infringement.
========================================================================================
"""
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
from __future__ import division
# Related third party imports
import py_lets_be_rational as lets_be_rational
import numpy
# Local application/library specific imports
from py_vollib.black import black
from py_vollib.black import undiscounted_black
from py_vollib.black import normalised_black
from py_vollib.helpers import binary_flag
from py_vollib.helpers.exceptions import PriceIsAboveMaximum, PriceIsBelowIntrinsic
from py_vollib.helpers.constants import MINUS_FLOAT_MAX, FLOAT_MAX
# -----------------------------------------------------------------------------
# FUNCTIONS - IMPLIED VOLATILITY
def implied_volatility_of_discounted_option_price(discounted_option_price, F, K, r, t, flag):
"""Calculate the implied volatility of the Black option price
:param discounted_option_price: discounted Black price of a futures option
:type discounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param r: the risk-free interest rate
:type r: float
:param t: time to expiration in years
:type t: float
:param flag: 'p' or 'c' for put or call
:type flag: str
>>> F = 100
>>> K = 100
>>> sigma = .2
>>> flag = 'c'
>>> t = .5
>>> r = .02
>>> discounted_call_price = black(flag, F, K, t, r, sigma)
>>> iv = implied_volatility_of_discounted_option_price(
... discounted_call_price, F, K, r, t, flag)
>>> expected_price = 5.5811067246
>>> expected_iv = 0.2
>>> abs(expected_price - discounted_call_price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
deflater = numpy.exp(-r * t)
undiscounted_option_price = discounted_option_price / deflater
sigma_calc = lets_be_rational.implied_volatility_from_a_transformed_rational_guess(
undiscounted_option_price,
F,
K,
t,
binary_flag[flag]
)
if sigma_calc == FLOAT_MAX:
raise PriceIsAboveMaximum()
elif sigma_calc == MINUS_FLOAT_MAX:
raise PriceIsBelowIntrinsic()
return sigma_calc
def implied_volatility(discounted_option_price, F, K, r, t, flag):
"""Calculate the implied volatility of the Black option price
:param discounted_option_price: discounted Black price of a futures option
:type discounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param r: the risk-free interest rate
:type r: float
:param t: time to expiration in years
:type t: float
:param flag: 'p' or 'c' for put or call
:type flag: str
>>> F = 100
>>> K = 100
>>> sigma = .2
>>> flag = 'c'
>>> t = .5
>>> r = .02
>>> discounted_call_price = black(flag, F, K, t, r, sigma)
>>> iv = implied_volatility(
... discounted_call_price, F, K, r, t, flag)
>>> expected_price = 5.5811067246
>>> expected_iv = 0.2
>>> abs(expected_price - discounted_call_price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
return implied_volatility_of_discounted_option_price(discounted_option_price, F, K, r, t, flag)
# -----------------------------------------------------------------------------
# FUNCTIONS - IMPLIED VOLATILITY, FOR TEST & REFERENCE
def normalised_implied_volatility(beta, x, flag):
"""Calculate the normalised Black implied volatility,
a time invariant transformation
of Black implied volatility.
Keyword arguments:
:param x: ln(F/K) where K is the strike price, and F is the futures price
:type x: float
:param beta: the normalized Black price
:type beta: float
:param flag: 'p' or 'c' for put or call
:type flag: str
>>> beta_call = normalised_black(0.0, 0.2, 'c')
>>> beta_put = normalised_black(0.1,0.23232323888,'p')
>>> normalized_b76_iv_call = normalised_implied_volatility(beta_call, 0.0, 'c')
>>> normalized_b76_iv_put = normalised_implied_volatility(beta_put, 0.1, 'p')
>>> expected_price = 0.0796556745541
>>> expected_iv = 0.2
>>> abs(expected_price - beta_call) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_call) < 0.00001
True
>>> expected_price = 0.0509710222785
>>> expected_iv = 0.23232323888
>>> abs(expected_price - beta_put) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_put) < 0.00001
True
"""
q = binary_flag[flag]
return lets_be_rational.normalised_implied_volatility_from_a_transformed_rational_guess(
beta, x, q)
def normalised_implied_volatility_limited_iterations(beta, x, flag, N):
"""Calculate the normalised Black implied volatility,
with limited iterations.
:param x: ln(F/K) where K is the strike price, and F is the futures price
:type x: float
:param beta: the normalized Black price
:type beta: float
:param flag: 'p' or 'c' for put or call
:type flag: str
>>> beta_call = normalised_black(0.0, 0.2, 'c')
>>> beta_put = normalised_black(0.1,0.23232323888,'p')
>>> normalized_b76_iv_call = normalised_implied_volatility_limited_iterations(beta_call, 0.0, 'c',1)
>>> normalized_b76_iv_put = normalised_implied_volatility_limited_iterations(beta_put, 0.1, 'p',1)
>>> expected_price = 0.0796556745541
>>> expected_iv = 0.2
>>> abs(expected_price - beta_call) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_call) < 0.00001
True
>>> expected_price = 0.0509710222785
>>> expected_iv = 0.23232323888
>>> abs(expected_price - beta_put) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_put) < 0.00001
True
"""
q = binary_flag[flag]
return lets_be_rational.normalised_implied_volatility_from_a_transformed_rational_guess_with_limited_iterations(
beta, x, q, N)
def implied_volatility_of_undiscounted_option_price(
undiscounted_option_price,
F,
K,
t,
flag
):
"""Calculate the implied volatility of the undiscounted Black option price
:param undiscounted_option_price: undiscounted Black price of a futures option
:type undiscounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
>>> F = 100
>>> K = 100
>>> sigma = .2
>>> flag = 'c'
>>> t = .5
>>> undiscounted_call_price = undiscounted_black(F, K, sigma, t, flag)
>>> iv = implied_volatility_of_undiscounted_option_price(
... undiscounted_call_price, F, K, t, flag)
>>> expected_price = 5.6371977797
>>> expected_iv = 0.2
>>> abs(expected_price - undiscounted_call_price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
return lets_be_rational.implied_volatility_from_a_transformed_rational_guess(
undiscounted_option_price,
F,
K,
t,
binary_flag[flag]
)
def implied_volatility_of_undiscounted_option_price_limited_iterations(
undiscounted_option_price, F, K, t, flag, N):
"""Calculate implied volatility of the undiscounted Black
option price with limited iterations.
:param undiscounted_option_price: undiscounted Black price of a futures option
:type undiscounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param t: time to expiration in years
:type t: float
>>> F = 100
>>> K = 100
>>> sigma = .232323232
>>> flag = 'c'
>>> t = .5
>>> price = undiscounted_black(F, K, sigma, t, flag)
>>> iv = implied_volatility_of_undiscounted_option_price_limited_iterations(
... price, F, K, t, flag, 1)
>>> expected_price = 6.54635543387
>>> expected_iv = 0.232323232
>>> abs(expected_price - price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
return lets_be_rational.implied_volatility_from_a_transformed_rational_guess_with_limited_iterations(
undiscounted_option_price,
F,
K,
t,
binary_flag[flag],
N
)
if __name__ == "__main__":
from py_vollib.helpers.doctest_helper import run_doctest
run_doctest()
|
py | 1a348fabf50389cdf1298cda25c958e6163b782e | from exceptionite.errors import Handler, StackOverflowIntegration, SolutionsIntegration
from .JsonHandler import JsonHandler
class ExceptionHandler:
def __init__(self, application, driver_config=None):
self.application = application
self.drivers = {}
self.driver_config = driver_config or {}
self.options = {}
def set_options(self, options):
self.options = options
return self
def add_driver(self, name, driver):
self.drivers.update({name: driver})
def set_configuration(self, config):
self.driver_config = config
return self
def get_driver(self, name=None):
if name is None:
return self.drivers[self.driver_config.get("default")]
return self.drivers[name]
def get_config_options(self, driver=None):
if driver is None:
return self.driver_config[self.driver_config.get("default")]
return self.driver_config.get(driver, {})
def handle(self, exception):
response = self.application.make("response")
request = self.application.make("request")
self.application.make("event").fire(
f"masonite.exception.{exception.__class__.__name__}", exception
)
if self.application.has(f"{exception.__class__.__name__}Handler"):
return self.application.make(
f"{exception.__class__.__name__}Handler"
).handle(exception)
if hasattr(exception, "get_response"):
return response.view(exception.get_response(), exception.get_status())
handler = Handler(exception)
if "application/json" in str(request.header("Accept")):
return response.view(JsonHandler(exception).render(), status=500)
if self.options.get("handlers.stack_overflow"):
handler.integrate(StackOverflowIntegration())
if self.options.get("handlers.solutions"):
handler.integrate(SolutionsIntegration())
handler.context(
{
"WSGI": {
"Path": request.get_path(),
"Input": request.input_bag.all_as_values() or None,
# 'Parameters': request.url_params,
"Request Method": request.get_request_method(),
},
"Headers": request.header_bag.to_dict(),
}
)
return response.view(handler.render(), status=500)
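# A minimal wiring sketch (illustrative only; the option keys, driver name and container binding
# below are assumptions, not the framework's canonical configuration):
#
# handler = ExceptionHandler(application, driver_config={"default": "json"})
# handler.set_options({"handlers.stack_overflow": False, "handlers.solutions": True})
# application.bind("exception_handler", handler)  # hypothetical binding name
# ...later, while serving a request that raised `exception`:
# return handler.handle(exception)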
|
py | 1a34903046d07387e409d825e03cb235c4211b6d | # openioc_10_to_11.py
#
# Copyright 2013 Mandiant Corporation.
# Licensed under the Apache 2.0 license. Developed for Mandiant by William
# Gibb.
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Allows for the upgrade of OpenIOC 1.0 IOCs to OpenIOC 1.1 format
#
from ioc_writer.scripts import openioc_10_to_11
openioc_10_to_11._main()
|
py | 1a3490d692cde2a76c5f4dbeed5a2c3d99f686e1 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
from jax.scipy.linalg import cho_factor, cho_solve, inv, solve_triangular
import numpyro
import numpyro.distributions as dist
from numpyro.handlers import plate
from numpyro.infer import HMC, HMCECS, MCMC, NUTS, DiscreteHMCGibbs, HMCGibbs
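# Note on the helper below: with a N(0, I) prior on beta and Gaussian noise, beta has a
# conjugate Gaussian conditional posterior with covariance Sigma = (X'X / sigma^2 + I)^{-1}
# and mean Sigma @ XY / sigma^2; the Gibbs step samples a fresh beta from that conditional.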
def _linear_regression_gibbs_fn(X, XX, XY, Y, rng_key, gibbs_sites, hmc_sites):
N, P = X.shape
sigma = jnp.exp(hmc_sites['log_sigma']) if 'log_sigma' in hmc_sites else hmc_sites['sigma']
sigma_sq = jnp.square(sigma)
covar_inv = XX / sigma_sq + jnp.eye(P)
L = cho_factor(covar_inv, lower=True)[0]
L_inv = solve_triangular(L, jnp.eye(P), lower=True)
loc = cho_solve((L, True), XY) / sigma_sq
beta_proposal = dist.MultivariateNormal(loc=loc, scale_tril=L_inv).sample(rng_key)
return {'beta': beta_proposal}
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_linear_model_log_sigma(kernel_cls, N=100, P=50, sigma=0.11, warmup_steps=500, num_samples=500):
np.random.seed(0)
X = np.random.randn(N * P).reshape((N, P))
XX = np.matmul(np.transpose(X), X)
Y = X[:, 0] + sigma * np.random.randn(N)
XY = np.sum(X * Y[:, None], axis=0)
def model(X, Y):
N, P = X.shape
log_sigma = numpyro.sample("log_sigma", dist.Normal(1.0))
sigma = jnp.exp(log_sigma)
beta = numpyro.sample("beta", dist.Normal(jnp.zeros(P), jnp.ones(P)))
mean = jnp.sum(beta * X, axis=-1)
numpyro.deterministic("mean", mean)
numpyro.sample("obs", dist.Normal(mean, sigma), obs=Y)
gibbs_fn = partial(_linear_regression_gibbs_fn, X, XX, XY, Y)
hmc_kernel = kernel_cls(model)
kernel = HMCGibbs(hmc_kernel, gibbs_fn=gibbs_fn, gibbs_sites=['beta'])
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(0), X, Y)
beta_mean = np.mean(mcmc.get_samples()['beta'], axis=0)
assert_allclose(beta_mean, np.array([1.0] + [0.0] * (P - 1)), atol=0.05)
sigma_mean = np.exp(np.mean(mcmc.get_samples()['log_sigma'], axis=0))
assert_allclose(sigma_mean, sigma, atol=0.25)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_linear_model_sigma(kernel_cls, N=90, P=40, sigma=0.07, warmup_steps=500, num_samples=500):
np.random.seed(1)
X = np.random.randn(N * P).reshape((N, P))
XX = np.matmul(np.transpose(X), X)
Y = X[:, 0] + sigma * np.random.randn(N)
XY = np.sum(X * Y[:, None], axis=0)
def model(X, Y):
N, P = X.shape
sigma = numpyro.sample("sigma", dist.HalfCauchy(1.0))
beta = numpyro.sample("beta", dist.Normal(jnp.zeros(P), jnp.ones(P)))
mean = jnp.sum(beta * X, axis=-1)
numpyro.sample("obs", dist.Normal(mean, sigma), obs=Y)
gibbs_fn = partial(_linear_regression_gibbs_fn, X, XX, XY, Y)
hmc_kernel = kernel_cls(model)
kernel = HMCGibbs(hmc_kernel, gibbs_fn=gibbs_fn, gibbs_sites=['beta'])
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(0), X, Y)
beta_mean = np.mean(mcmc.get_samples()['beta'], axis=0)
assert_allclose(beta_mean, np.array([1.0] + [0.0] * (P - 1)), atol=0.05)
sigma_mean = np.mean(mcmc.get_samples()['sigma'], axis=0)
assert_allclose(sigma_mean, sigma, atol=0.25)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('num_blocks', [1, 2, 50, 100])
def test_subsample_gibbs_partitioning(kernel_cls, num_blocks):
def model(obs):
with plate('N', obs.shape[0], subsample_size=100) as idx:
numpyro.sample('x', dist.Normal(0, 1), obs=obs[idx])
obs = random.normal(random.PRNGKey(0), (10000,)) / 100
kernel = HMCECS(kernel_cls(model), num_blocks=num_blocks)
hmc_state = kernel.init(random.PRNGKey(1), 10, None, model_args=(obs,), model_kwargs=None)
gibbs_sites = {'N': jnp.arange(100)}
gibbs_fn = kernel._gibbs_fn
new_gibbs_sites = gibbs_fn(random.PRNGKey(2), gibbs_sites, hmc_state.z) # accept_prob > .999
block_size = 100 // num_blocks
for name in gibbs_sites:
assert block_size == jnp.not_equal(gibbs_sites[name], new_gibbs_sites[name]).sum()
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_gaussian_model(kernel_cls, D=2, warmup_steps=3000, num_samples=5000):
np.random.seed(0)
cov = np.random.randn(4 * D * D).reshape((2 * D, 2 * D))
cov = jnp.matmul(jnp.transpose(cov), cov) + 0.25 * jnp.eye(2 * D)
cov00 = cov[:D, :D]
cov01 = cov[:D, D:]
cov10 = cov[D:, :D]
cov11 = cov[D:, D:]
cov_01_cov11_inv = jnp.matmul(cov01, inv(cov11))
cov_10_cov00_inv = jnp.matmul(cov10, inv(cov00))
posterior_cov0 = cov00 - jnp.matmul(cov_01_cov11_inv, cov10)
posterior_cov1 = cov11 - jnp.matmul(cov_10_cov00_inv, cov01)
# we consider a model in which (x0, x1) ~ MVN(0, cov)
def gaussian_gibbs_fn(rng_key, hmc_sites, gibbs_sites):
x1 = hmc_sites['x1']
posterior_loc0 = jnp.matmul(cov_01_cov11_inv, x1)
x0_proposal = dist.MultivariateNormal(loc=posterior_loc0, covariance_matrix=posterior_cov0).sample(rng_key)
return {'x0': x0_proposal}
def model():
x0 = numpyro.sample("x0", dist.MultivariateNormal(loc=jnp.zeros(D), covariance_matrix=cov00))
posterior_loc1 = jnp.matmul(cov_10_cov00_inv, x0)
numpyro.sample("x1", dist.MultivariateNormal(loc=posterior_loc1, covariance_matrix=posterior_cov1))
hmc_kernel = kernel_cls(model, dense_mass=True)
kernel = HMCGibbs(hmc_kernel, gibbs_fn=gaussian_gibbs_fn, gibbs_sites=['x0'])
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(0))
x0_mean = np.mean(mcmc.get_samples()['x0'], axis=0)
x1_mean = np.mean(mcmc.get_samples()['x1'], axis=0)
x0_std = np.std(mcmc.get_samples()['x0'], axis=0)
x1_std = np.std(mcmc.get_samples()['x1'], axis=0)
assert_allclose(x0_mean, np.zeros(D), atol=0.2)
assert_allclose(x1_mean, np.zeros(D), atol=0.2)
assert_allclose(x0_std, np.sqrt(np.diagonal(cov00)), rtol=0.05)
assert_allclose(x1_std, np.sqrt(np.diagonal(cov11)), rtol=0.1)
def test_discrete_gibbs_multiple_sites():
def model():
numpyro.sample("x", dist.Bernoulli(0.7).expand([3]))
numpyro.sample("y", dist.Binomial(10, 0.3))
kernel = DiscreteHMCGibbs(NUTS(model))
mcmc = MCMC(kernel, 1000, 10000, progress_bar=False)
mcmc.run(random.PRNGKey(0))
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["x"], 0), 0.7 * jnp.ones(3), atol=0.01)
assert_allclose(jnp.mean(samples["y"], 0), 0.3 * 10, atol=0.1)
def test_discrete_gibbs_enum():
def model():
numpyro.sample("x", dist.Bernoulli(0.7), infer={"enumerate": "parallel"})
y = numpyro.sample("y", dist.Binomial(10, 0.3))
numpyro.deterministic("y2", y ** 2)
kernel = DiscreteHMCGibbs(NUTS(model))
mcmc = MCMC(kernel, 1000, 10000, progress_bar=False)
mcmc.run(random.PRNGKey(0))
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["y"], 0), 0.3 * 10, atol=0.1)
@pytest.mark.parametrize("random_walk", [False, True])
@pytest.mark.parametrize("modified", [False, True])
def test_discrete_gibbs_bernoulli(random_walk, modified):
def model():
numpyro.sample("c", dist.Bernoulli(0.8))
kernel = DiscreteHMCGibbs(NUTS(model), random_walk=random_walk, modified=modified)
mcmc = MCMC(kernel, 1000, 200000, progress_bar=False)
mcmc.run(random.PRNGKey(0))
samples = mcmc.get_samples()["c"]
assert_allclose(jnp.mean(samples), 0.8, atol=0.05)
@pytest.mark.parametrize("modified", [False, True])
def test_discrete_gibbs_gmm_1d(modified):
def model(probs, locs):
c = numpyro.sample("c", dist.Categorical(probs))
numpyro.sample("x", dist.Normal(locs[c], 0.5))
probs = jnp.array([0.15, 0.3, 0.3, 0.25])
locs = jnp.array([-2, 0, 2, 4])
kernel = DiscreteHMCGibbs(NUTS(model), modified=modified)
mcmc = MCMC(kernel, 1000, 200000, progress_bar=False)
mcmc.run(random.PRNGKey(0), probs, locs)
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["x"]), 1.3, atol=0.1)
assert_allclose(jnp.var(samples["x"]), 4.36, atol=0.1)
assert_allclose(jnp.mean(samples["c"]), 1.65, atol=0.1)
assert_allclose(jnp.var(samples["c"]), 1.03, atol=0.1)
|
py | 1a34916386a0a73e9bc5e9893016829c6f1ada2b | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..electra.tokenizer import ElectraTokenizer
__all__ = ['ConvBertTokenizer', ]
class ConvBertTokenizer(ElectraTokenizer):
"""
Construct a ConvBERT tokenizer. `ConvBertTokenizer` is identical to `ElectraTokenizer`.
For more information regarding those methods, please refer to this superclass.
"""
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"convbert-base":
"https://bj.bcebos.com/paddlenlp/models/transformers/convbert/convbert-base/vocab.txt",
"convbert-medium-small":
"https://bj.bcebos.com/paddlenlp/models/transformers/convbert/convbert-medium-small/vocab.txt",
"convbert-small":
"https://bj.bcebos.com/paddlenlp/models/transformers/convbert/convbert-small/vocab.txt",
}
}
pretrained_init_configuration = {
"convbert-base": {
"do_lower_case": True
},
"convbert-medium-small": {
"do_lower_case": True
},
"convbert-small": {
"do_lower_case": True
},
}
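# A minimal usage sketch (commented out so importing this module has no side effects; it assumes
# the "convbert-base" vocabulary above is reachable and that the inherited ElectraTokenizer API applies):
#
# tokenizer = ConvBertTokenizer.from_pretrained("convbert-base")
# tokens = tokenizer.tokenize("Welcome to use PaddlePaddle and PaddleNLP!")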
|
py | 1a34916c6b0aa6294c19bf81694983601567371f | from datetime import datetime
from utils.api import fetch_events
class Events:
def __init__(self):
self.items = []
def fetch(self, params):
self.items = []
params['pageToken'] = None
while True:
events = fetch_events(params)
if events and events.get('items'):
self.items.extend(events.get('items'))
page_token = events.get('nextPageToken')
if not page_token:
break
params['pageToken'] = page_token
return self.items
def to_csv(self, filename):
for obj in self.items:
item = EventItem(obj)
if item.is_cancelled():
continue
csv_line = '"{}","{}","{}","{}","{}"'.format(
item.get_summary(),
'1' if item.is_all_day() else '0',
item.get_start(),
item.get_end(),
item.get_total_minitues()
)
with open(filename, 'a') as f:
f.write(csv_line + '\n')
class EventItem:
def __init__(self, item):
self.item = item
def is_cancelled(self) -> bool:
return self.item.get('status') == 'cancelled'
def get_summary(self):
return self.item.get('summary')
def has_start(self):
return self.item.get('start') is not None
def get_start(self):
d = self.get_start_date()
if d != '':
return d
return self.get_start_datetime()
def get_start_date(self):
start = self.item.get('start')
if not start:
return ''
d = start.get('date')
if d:
return d
return ''
def get_start_datetime(self):
start = self.item.get('start')
if not start:
return ''
dt = start.get('dateTime')
if dt:
return dt
return ''
def is_all_day(self):
return self.get_start_date() != ''
def get_end(self):
d = self.get_end_date()
if d != '':
return d
return self.get_end_datetime()
def get_end_date(self):
end = self.item.get('end')
if not end:
return ''
d = end.get('date')
if d:
return d
return ''
def get_end_datetime(self):
end = self.item.get('end')
if not end:
return ''
dt = end.get('dateTime')
if dt:
return dt
return ''
def get_total_minitues(self):
if not self.has_start() or self.is_all_day():
return 0
start = datetime.fromisoformat(self.get_start_datetime())
end = datetime.fromisoformat(self.get_end_datetime())
return (end - start).total_seconds() / 60
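# A minimal usage sketch (commented out; the query parameters are assumptions -- whatever
# utils.api.fetch_events accepts is what belongs in the dict):
#
# events = Events()
# events.fetch({"calendarId": "primary", "singleEvents": True})
# events.to_csv("events.csv")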
|
py | 1a34922fed05907869b485eb8aa6f64e58631da0 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @name : UsernameFind
# @url : http://github.com/MasterBurnt
# @author : MasterBurnt
try:
from concurrent.futures import ThreadPoolExecutor
import requests
import threading
import time
import os
except ImportError as e:
print(e)
exit(0)
os.system('clear')
#Colors
c1 = '\033[0m' #white
c2 = '\033[92m' #green
c3 = '\033[96m' #cyan
c4 = '\033[91m' #red
c5 = '\033[93m' #yellow
c6 = '\033[94m' #blue
c7 = '\033[90m'
def banner():
print(f"""
{c2}_ _ ____ ____ ____ ___ _ _ ____ _ _ ___
{c1}| | [__ |___ |__/ |__] | | |__/ |\ | |
{c4}|__| ___] |___ | \ |__] |__| | \ | \| |
""")
banner()
#Internet connection
try:
requests.get('https://www.github.com/MasterBurnt', timeout = 5)
except (requests.ConnectionError, requests.Timeout):
print(c7+f'Please check your {c4}internet{c7} connection!')
exit(0)
def banner1():
print(f"""
{c2} ___
{c3} / / _ _ _ )_ o _ _ )
{c1}(_/ ( )_) ) ( ( ) ) (_( {c7}MᵃˢᵗᵉʳBᵘʳⁿᵗ
{c4} _) (_
""")
while 1:
user = input(c7+f'[?] {c2}Please Enter The Target{c5} Username {c2}:{c1}').strip()
if user == "":
print('Please Enter A Valid Username! ')
time.sleep(2)
os.system('clear')
banner()
else:
break
save = []
thread_local = threading.local()
def get_session():
if not hasattr(thread_local, "session"):
thread_local.session = requests.Session()
return thread_local.session
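# Each worker thread lazily creates and reuses its own requests.Session via threading.local(),
# so HTTP connections are pooled per thread without sharing one session across threads.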
def go_site(url):
session = get_session()
with session.get(url = url+user, timeout = 10) as response:
if response.status_code == 200:
print(c7+f"[+]{c2}Find : {url}{user}")
save.append(url+user)
else:
print(c7+f'[-]{c4} Not Found! ')
def all_sites(sites):
with ThreadPoolExecutor(max_workers=100) as executor:
executor.map(go_site, sites)
if __name__ == "__main__":
os.system('clear')
banner1()
sites ={
'https://www.championat.com/',
'https://angel.co/u/',
'https://www.7cups.com/',
'https://www.chess.com/',
'https://www.flickr.com/',
'https://www.npmjs.com/',
'https://www.bazar.cz/',
'https://pokemonshowdown.com/',
'https://freesound.org/',
'https://www.memrise.com/',
'https://tellonym.me/',
'https://www.opennet.ru/',
'https://chatujme.cz/',
'https://www.munzee.com/',
'https://ello.co/',
'https://ask.fm/',
'https://gunsandammo.com/',
'https://hubski.com/',
'https://pastebin.com/',
'https://quizlet.com/',
'https://community.signalusers.org/',
'https://lolchess.gg/',
'http://www.jeuxvideo.com/',
'https://launchpad.net/',
'https://letterboxd.com/',
'https://www.academia.edu/',
'https://imgup.cz/',
'https://www.baby.ru/',
'https://www.patreon.com/',
'https://unsplash.com/',
'https://www.goodreads.com/',
'https://rubygems.org/',
'https://www.aparat.com/',
'https://www.smule.com/',
'https://eintracht.de/',
'https://houzz.com/',
'https://raidforums.com/',
'https://www.wattpad.com/',
'https://www.sbazar.cz/',
'https://www.kongregate.com/',
'https://packagist.org/',
'https://www.periscope.tv/',
'https://tetr.io/',
'https://youpic.com/',
'https://lobste.rs/',
'https://www.reverbnation.com/',
'https://chaos.social/',
'https://deviantart.com/',
'https://www.freelancer.com/',
'https://play.google.com/store/',
'https://imgsrc.ru/',
'https://opensource.com/',
'https://vero.co/',
'https://uid.me/',
'https://www.pinterest.com/',
'https://discussions.apple.com/',
'https://ourdjtalk.com/',
'https://slideshare.net/',
'https://gfycat.com/',
'https://www.sports.ru/',
'https://forums.whonix.org/',
'https://audiojungle.net/',
'https://www.fixya.com/',
'https://www.drive2.ru/',
'https://www.behance.net/',
'https://gab.com/',
'https://www.pinkbike.com/',
'https://www.shitpostbot.com/',
'https://community.cloudflare.com/',
'https://www.quora.com/profile/',
'https://pcpartpicker.com/',
'https://xboxgamertag.com/',
'https://habr.com/',
'https://www.nairaland.com/',
'https://flipboard.com/',
'https://www.kaggle.com/',
'https://icq.com/',
'https://www.redbubble.com/',
'https://www.forumhouse.ru/',
'https://newgrounds.com/',
'https://contently.com/',
'https://mastodon.cloud/',
'https://www.mercadolivre.com.br/',
'https://trashbox.ru/',
'https://hubpages.com/',
'https://soylentnews.org/',
'http://www.wikidot.com/',
'https://pcgamer.com/',
'https://forum.guns.ru/',
'https://irecommend.ru/',
'https://prog.hu/',
'https://codepen.io/',
'https://www.native-instruments.com/forum/',
'https://pypi.org/',
'https://scratch.mit.edu/',
'https://ok.ru/',
'https://speedrun.com/',
'https://virgool.io/',
'https://lichess.org/',
'https://www.dailykos.com/',
'https://www.toster.ru/',
'https://forum.hackthebox.eu/',
'https://naver.com/',
'https://www.codechef.com/',
'https://wordpress.com/',
'https://www.virustotal.com/',
'https://www.countable.us/',
'https://typeracer.com/',
'https://fortnitetracker.com/challenges/',
'https://www.colourlovers.com/',
'https://sourceforge.net/',
'https://forum.leasehackr.com/',
'https://www.openstreetmap.org/',
'https://soundcloud.com/',
'https://www.zhihu.com/',
'https://www.webnode.cz/',
'https://otzovik.com/',
'https://www.hackster.io/',
'https://aminoapps.com/',
'https://freelance.habr.com/',
'https://social.tchncs.de/',
'https://github.community/',
'https://www.9gag.com/',
'https://www.cnet.com/',
'https://www.etsy.com/',
'https://slack.com/',
'https://splits.io/',
'https://2Dimensions.com/',
'https://linux.org.ru/',
'https://php.ru/forum/',
'https://www.cracked.com/',
'https://ask.fedoraproject.org/',
'https://crevado.com/',
'https://www.metacritic.com/',
'https://note.com/',
'https://devrant.com/',
'https://www.fotolog.com/author/',
'https://mastodon.xyz/',
'https://www.dailymotion.com/',
'https://vk.com/',
'https://www.hunting.ru/forum/',
'https://www.fandom.com/',
'https://vsco.co/',
'https://buzzfeed.com/',
'https://open.spotify.com/',
'https://www.clozemaster.com/',
'https://forum.sublimetext.com/',
'https://hackerone.com/',
'https://www.myminifactory.com/',
'https://www.gumroad.com/',
'https://www.pornhub.com/users/',
'https://moikrug.ru/',
'https://about.me/',
'https://coroflot.com/',
'https://giphy.com/',
'https://gitee.com/',
'https://www.rajce.idnes.cz/',
'https://notabug.org/',
'https://d3.ru/',
'https://www.blogger.com/',
'https://venmo.com/',
'https://vimeo.com/',
'https://www.buymeacoffee.com/',
'https://idpay.ir/',
'https://mstdn.io/',
'https://tryhackme.com/',
'https://myspace.com/',
'https://blip.fm/',
'https://weheartit.com/',
'https://www.ifttt.com/',
'https://keybase.io/',
'https://www.flickr.com/photos/',
'https://www.gamespot.com/',
'https://disqus.com/',
'https://akniga.org/profile/blue/',
'https://www.producthunt.com/@',
'https://members.fotki.com/',
'https://www.svidbook.ru/',
'https://www.github.com/',
'https://www.babyblog.ru/',
'https://news.ycombinator.com/',
'https://dev.to/',
'https://last.fm/',
'https://www.designspiration.net/',
'https://www.girlsaskguys.com/user/',
'https://gitlab.com/',
'https://spletnik.ru/',
'https://career.habr.com/',
'http://forum.3dnews.ru/',
'https://www.strava.com/',
'https://hackaday.io/',
'https://repl.it/',
'https://www.sporcle.com/',
'https://www.researchgate.net/',
'https://www.discogs.com/',
'https://osu.ppy.sh/',
'https://story.snapchat.com/@',
'https://www.wikipedia.org/',
'https://www.warriorforum.com/',
'https://www.tradingview.com/',
'https://f3.cool/',
'https://getmyuni.com/',
'https://leetcode.com/',
'https://echo.msk.ru/',
'https://www.livejournal.com/',
'https://asciinema.org/',
'https://robertsspaceindustries.com/',
'https://egpu.io/',
'https://www.trakt.tv/',
'https://hackerrank.com/',
'https://dribbble.com/',
'https://wordpress.org/',
'https://www.flightradar24.com/',
'https://ultimate-guitar.com/',
'http://en.gravatar.com/',
'https://www.bandcamp.com/',
'https://gurushots.com/',
'https://imgur.com/',
'https://issuu.com/',
'http://forum.igromania.ru/',
'https://www.instructables.com/',
'https://rateyourmusic.com/',
'https://www.codewars.com/',
'https://www.roblox.com/',
'https://www.fl.ru/',
'https://archive.org/',
'http://promodj.com/',
'https://www.couchsurfing.com/',
'https://ko-fi.com/',
'https://www.kwork.ru/',
'https://wix.com/',
'https://www.geocaching.com/',
'https://booth.pm/',
'https://itch.io/',
'https://www.livelib.ru/',
'https://medium.com/',
'https://www.alik.cz/',
'https://www.polygon.com/',
'https://www.producthunt.com/',
'https://bitbucket.org/',
'https://www.capfriendly.com/',
'https://www.youtube.com/',
'https://www.codecademy.com/',
'https://www.scribd.com/',
'http://www.authorstream.com/',
'http://dating.ru/',
'https://jbzd.com.pl/',
'https://www.bookcrossing.com/',
'https://discuss.elastic.co/',
'https://virgool.io/@',
}
start_time = time.time()
all_sites(sites)
if save == []:
print(c6+f'\nNothing was{c4} found!')
else:
duration = time.time() - start_time
print(c6+f"\nUsername Found On {c1}{len(save)}{c6} Sites!\nThe file was saved in a history called {c1}{user}.txt{c5}\n\nSeconds {c1}{int(duration)}")
#SAVE
if save == []:
exit(0)
else:
try:
os.mkdir('history')
except:
pass
os.chdir('history')
file = open(user+".txt", "a")
for i in save:
file.write(f'{i} \n')
file.close()
|
py | 1a349287c34835c16f22c694a4a54ef4dcc561e9 | # globalprogramlib GUI example by PWRScript
# Import necessary libs
import tkinter
from globalprogramlib.v1 import Language, Translator
class App(tkinter.Tk):
def __init__(self, translator: Translator, *args, **kwargs) -> None:
"""
This class will take care of creating our application
This isn't the best sample, but will demonstrate the
principles of globalprogramlib using a sync GUI lib
"""
super().__init__(*args, **kwargs)
# Master canvas (for easely clear all screen)
self.master_canvas = tkinter.Canvas(self)
self.master_canvas.pack()
# Make translator instance available for all class
self.translator: Translator = translator
# Render app
self.render_choose_language_window()
def clear_screen(self):
"""
Deletes all widgets rendered in the Tkinter application
by destroying the canvas and replacing it
"""
self.master_canvas.destroy()
self.master_canvas = tkinter.Canvas(self)
self.master_canvas.pack()
def render_choose_language_window(self):
"""
This function is the render for our application
"""
# Ensure that screen is cleared every reload to avoid duplicate widgets
self.clear_screen()
# Creates a new label
# Displays the language pick message in current selected language in Translator
# using translator.get_translation("pwrscript.guiapp.language_picker")
tkinter.Label(
self.master_canvas,
text=self.translator.get_translation("pwrscript.guiapp.language_picker"),
).pack()
# This will store the current selected language in translator
# to be show in the "OptionMenu" widget
language = tkinter.StringVar(self.master_canvas)
language.set(self.translator.selected_language)
tkinter.Label(
self.master_canvas,
text=self.translator.get_translation("pwrscript.guiapp.important_message"),
).pack()
tkinter.OptionMenu(
self.master_canvas,
language,
# Here we pass all Languages in the translator to the OptionMenu as separated arguments
*self.translator.languages_loaded.values(),
command=(
# I know this isn't beginner friendly, but I will explain everything
#
# I need to execute an assignment (translator.SelectedLanguageCode = «selected language code»)
# and to re-render this «window» using self.render_choose_language_window() when user changes the language (event)
#
# Unfortunately tkinter "command" only accepts a unique function with one argument (the value selected)
#
# This led to render issues and self not being available (no access to translator/application) when I tried
# to implement a «beginner friendly» approach for "command"
#
# To accomplish these tasks, I needed to create a lambda (a single-line function) which accepts the argument needed
# by the OptionMenu "command" [lang] and [self] (for getting the translator and application), which is automatically
# passed when this «event» is «executed»
#
# To solve the assignment issue I defined the SelectedLanguageCode attribute in the translator using the built-in object method
# __set_attr__ since you cannot assign values in a lambda (the best approach to use in other environments is
# translator.SelectedLanguageCode = «selected_language_code»)
#
# The other issue, «re-rendering» was solved by the content in the 4th paragraph
#
lambda lang, self=self: [
self.translator.__setattr__("SelectedLanguageCode", lang.code),
self.render_choose_language_window(),
]
)
).pack()
def BuildTranslator():
"""
This function creates the translations dynamically at runtime, without needing
dependent files (which is ideal for examples), and returns a Translator object
ready for use
"""
# Another way to do this: persist the files in a folder and generate them at runtime if needed (example folder: langs)
"""
# load_translations.py
from os.path import isfile, isdir, join
from os import mkdir
TRANSLATIONS_FOLDER = "langs"
if not isdir(TRANSLATIONS_FOLDER):
mkdir(TRANSLATIONS_FOLDER)
if not is_file(join(TRANSLATIONS_FOLDER,"pt.json"))
with Language() as pt:
pt.name = "Português"
pt.code = "pt"
pt.version = 1 # The version needs to be always 1 in this case
pt.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
pt.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
pt.contributors = []
# Creating translations for our app
pt.set_translation("pwrscript.guiapp.language_picker", "Escolha o seu idioma:")
# Saving the translation to a file
pt.to_file(join(TRANSLATIONS_FOLDER,"pt.json"))
# When the context ends, the language file is always cleaned to ensure that it doesn't overflow system resources, so it will look like a new
# instanced Language() and can be clean at any moment by the garbage collector
# This object can be clean since it won't be used again
del pt
if not is_file(join(TRANSLATIONS_FOLDER,"en.json"))
with Language() as en:
en.name = "English"
en.code = "en"
en.version = 1 # The version needs to be always 1 in this case
en.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
en.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
en.contributors = []
# Creating translations for our app
en.set_translation("pwrscript.guiapp.language_picker", "Pick your language:")
# Saving the translation to a file
en.to_file(join(TRANSLATIONS_FOLDER,"en.json"))
del en
translator = Translator()
translator.load_directory(TRANSLATIONS_FOLDER)
translator.DefaultLanguageCode = "en"
translator.SelectedLanguageCode = "en"
"""
# PT Language instantiation
pt = Language()
# Add language information
pt.name = "Português"
pt.code = "pt"
pt.version = 1 # The version needs to be always 1 in this case
pt.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
pt.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
pt.contributors = []
# Creating translations for our app
pt.set_translation("pwrscript.guiapp.language_picker", "Escolha o seu idioma:")
pt.set_translation("pwrscript.guiapp.important_message", "Funcionando em Português")
# EN Language instantiation
en = Language()
# Add language information
en.name = "English"
en.code = "en"
en.version = 1 # The version needs to be always 1 in this case
en.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
en.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
en.contributors = []
# Creating translations for our app
en.set_translation("pwrscript.guiapp.language_picker", "Pick your language:")
en.set_translation("pwrscript.guiapp.important_message", "Working in English")
# Translator creation
translator = Translator()
# Loading languages from the created Language() objects
translator.load_languages(pt, en)
# Sets the default (fallback language used when translation can't be found in the selected_language)
# and the selected (first language)
# This is obligatory since the get_translation() method needs to know which languages to use, and the codes
# must be valid language codes in the translator (loaded languages); otherwise it won't translate anything and
# will always return None
translator.DefaultLanguageCode = "en"
translator.SelectedLanguageCode = "en"
return translator
if __name__ == "__main__":
# Creates the translator for use with the Tkinter class app
translator = BuildTranslator()
# Instances the application class and runs the application
application = App(translator)
application.mainloop()
|
py | 1a3494ad19912eea7b7a0583ac1f0afcce6e55dd | """
Module: 'ure' on micropython-v1.17-pyboard
"""
# MCU: {'ver': 'v1.17', 'port': 'pyboard', 'arch': 'armv7emsp', 'sysname': 'pyboard', 'release': '1.17.0', 'name': 'micropython', 'mpy': 7685, 'version': '1.17.0', 'machine': 'PYBv1.1 with STM32F405RG', 'build': '', 'nodename': 'pyboard', 'platform': 'pyboard', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
def compile(*args, **kwargs) -> Any:
...
def match(*args, **kwargs) -> Any:
...
def search(*args, **kwargs) -> Any:
...
def sub(*args, **kwargs) -> Any:
...
|
py | 1a3495d81d7d75b52ee9f2c510677de15eaf0e27 | import matplotlib.pyplot as plt #import the library, any procedures with plt.* come form this lib
import numpy as np #imports numpy for standard deviation
trials = []
for i in range(1,31):
trials.append(i) #sets up the X axis
#Y axis
data = [2.5105, 2.5100, 2.5103, 2.5091, 2.5101, 2.5101, 2.5103, 2.5098, 2.5098, 2.5100, 2.5090, 2.5099, 2.5101, 2.5091, 2.5100, 2.5099, 2.5089, 2.5097, 2.5099, 2.5099, 2.5099, 2.5096, 2.5099, 2.5121, 2.5094, 2.5102, 2.5090, 2.5101, 2.5089, 2.5100]
#plots the scatter with errorbars
plt.errorbar(trials, data, yerr = 0.0005, marker = '+', linestyle = '', label = "Data")
#axis labels/title
plt.xlabel("Trial Number")
plt.ylabel("Diameter of the Sphere(cm)")
plt.title("Fig. 5: Diameter of a Steel Sphere with Mean and Standard Deviation")
#mean
plt.plot([0]+trials, [2.5099]*31, c = 'red', marker = '', label = 'Mean')
#std dev
print(np.std(data))
plt.plot([0]+trials, [2.5099+np.std(data)]*31, c = 'green', marker = '', label = 'Standard Deviation')
plt.plot([0]+trials, [2.5099-np.std(data)]*31, c = 'green', marker = '')
plt.legend()#generates the legend
plt.show()#displays the plot
|
py | 1a34960e1bb20c6b294fd6cea5a1df591e44da2c | from astropy.time import Time
__all__ = [
"_checkTime"
]
def _checkTime(time, arg_name):
"""
Check that 'time' is an astropy Time object; if not, raise an error.
Parameters
----------
time : `~astropy.time.core.Time`
arg_name : str
Name of argument in function.
Returns
-------
None
Raises
------
TypeError : If time is not an astropy time object.
"""
err = (
"Time ({}) has to be an `~astropy.time.core.Time` object.\n"
"Convert using:\n\n"
"from astropy.time import Time\n"
"times = Time(t_array, scale='...', format='...')"
)
if type(time) != Time:
raise TypeError(err.format(arg_name))
return
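# Illustrative behaviour: _checkTime(Time("2021-01-01"), "times") returns None silently,
# while _checkTime("2021-01-01", "times") raises TypeError with the hint above.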
|
py | 1a34961d392906465bff930a8c3953357b0efa9f | import datetime
from rest_framework import permissions, status
from rest_framework.decorators import (api_view,
authentication_classes,
permission_classes,
throttle_classes,)
from django.db.models.expressions import RawSQL
from django.db.models import FloatField
from django.utils import timezone
from rest_framework_expiring_authtoken.authentication import (
ExpiringTokenAuthentication,)
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle, AnonRateThrottle
from accounts.permissions import HasVerifiedEmail
from base.utils import paginated_queryset, StandardResultSetPagination
from challenges.models import (
ChallengePhase,
Challenge,
ChallengePhaseSplit,
LeaderboardData,)
from challenges.utils import get_challenge_model, get_challenge_phase_model
from participants.models import (ParticipantTeam,)
from participants.utils import (
get_participant_team_id_of_user_for_a_challenge,)
from .models import Submission
from .sender import publish_submission_message
from .serializers import SubmissionSerializer
@throttle_classes([UserRateThrottle])
@api_view(['GET', 'POST'])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def challenge_submission(request, challenge_id, challenge_phase_id):
"""API Endpoint for making a submission to a challenge"""
# check if the challenge exists or not
try:
challenge = Challenge.objects.get(pk=challenge_id)
except Challenge.DoesNotExist:
response_data = {'error': 'Challenge does not exist'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# check if the challenge phase exists or not
try:
challenge_phase = ChallengePhase.objects.get(
pk=challenge_phase_id, challenge=challenge)
except ChallengePhase.DoesNotExist:
response_data = {'error': 'Challenge Phase does not exist'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
if request.method == 'GET':
# getting participant team object for the user for a particular challenge.
participant_team_id = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_id)
# check if participant team exists or not.
try:
ParticipantTeam.objects.get(pk=participant_team_id)
except ParticipantTeam.DoesNotExist:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
submission = Submission.objects.filter(participant_team=participant_team_id,
challenge_phase=challenge_phase).order_by('-submitted_at')
paginator, result_page = paginated_queryset(submission, request)
try:
serializer = SubmissionSerializer(result_page, many=True, context={'request': request})
response_data = serializer.data
return paginator.get_paginated_response(response_data)
except:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'POST':
# check if the challenge is active or not
if not challenge.is_active:
response_data = {'error': 'Challenge is not active'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if challenge phase is active
if not challenge_phase.is_active:
response_data = {
'error': 'Sorry, cannot accept submissions since challenge phase is not active'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if challenge phase is public and accepting solutions
if not challenge_phase.is_public:
response_data = {
'error': 'Sorry, cannot accept submissions since challenge phase is not public'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
participant_team_id = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_id)
try:
participant_team = ParticipantTeam.objects.get(pk=participant_team_id)
except ParticipantTeam.DoesNotExist:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
serializer = SubmissionSerializer(data=request.data,
context={'participant_team': participant_team,
'challenge_phase': challenge_phase,
'request': request
})
if serializer.is_valid():
serializer.save()
response_data = serializer.data
submission = serializer.instance
# publish message in the queue
publish_submission_message(challenge_id, challenge_phase_id, submission.id)
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@throttle_classes([UserRateThrottle])
@api_view(['PATCH'])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def change_submission_data_and_visibility(request, challenge_pk, challenge_phase_pk, submission_pk):
"""
API Endpoint for updating the submission meta data
and changing submission visibility.
"""
# check if the challenge exists or not
challenge = get_challenge_model(challenge_pk)
# check if the challenge phase exists or not
challenge_phase = get_challenge_phase_model(challenge_phase_pk)
if not challenge.is_active:
response_data = {'error': 'Challenge is not active'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if challenge phase is public and accepting solutions
if not challenge_phase.is_public:
response_data = {
'error': 'Sorry, cannot accept submissions since challenge phase is not public'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
participant_team_pk = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_pk)
try:
participant_team = ParticipantTeam.objects.get(pk=participant_team_pk)
except ParticipantTeam.DoesNotExist:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
try:
submission = Submission.objects.get(participant_team=participant_team,
challenge_phase=challenge_phase,
id=submission_pk)
except Submission.DoesNotExist:
response_data = {'error': 'Submission does not exist'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
try:
is_public = request.data['is_public']
if is_public is True:
when_made_public = datetime.datetime.now()
request.data['when_made_public'] = when_made_public
except KeyError:
pass
serializer = SubmissionSerializer(submission,
data=request.data,
context={
'participant_team': participant_team,
'challenge_phase': challenge_phase,
'request': request
},
partial=True)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@throttle_classes([AnonRateThrottle])
@api_view(['GET'])
def leaderboard(request, challenge_phase_split_id):
"""Returns leaderboard for a corresponding Challenge Phase Split"""
# check if the challenge exists or not
try:
challenge_phase_split = ChallengePhaseSplit.objects.get(
pk=challenge_phase_split_id)
except ChallengePhaseSplit.DoesNotExist:
response_data = {'error': 'Challenge Phase Split does not exist'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# Check if the Challenge Phase Split is publicly visible or not
if challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC:
response_data = {'error': 'Sorry, leaderboard is not public yet for this Challenge Phase Split!'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# Get the leaderboard associated with the Challenge Phase Split
leaderboard = challenge_phase_split.leaderboard
# Get the default order by key to rank the entries on the leaderboard
try:
default_order_by = leaderboard.schema['default_order_by']
except:
response_data = {'error': 'Sorry, Default filtering key not found in leaderboard schema!'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# Get all the successful submissions related to the challenge phase split
leaderboard_data = LeaderboardData.objects.filter(
challenge_phase_split=challenge_phase_split,
submission__is_public=True,
submission__is_flagged=False).order_by('created_at')
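# `result->>%s` is PostgreSQL's JSON text-extraction operator: it pulls the default_order_by
# key out of the JSON `result` column so the leaderboard can be sorted by that metric.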
leaderboard_data = leaderboard_data.annotate(
filtering_score=RawSQL('result->>%s', (default_order_by, ), output_field=FloatField())).values(
'id', 'submission__participant_team__team_name',
'challenge_phase_split', 'result', 'filtering_score', 'leaderboard__schema', 'submission__submitted_at')
sorted_leaderboard_data = sorted(leaderboard_data, key=lambda k: float(k['filtering_score']), reverse=True)
distinct_sorted_leaderboard_data = []
team_list = []
for data in sorted_leaderboard_data:
if data['submission__participant_team__team_name'] in team_list:
continue
else:
distinct_sorted_leaderboard_data.append(data)
team_list.append(data['submission__participant_team__team_name'])
leaderboard_labels = challenge_phase_split.leaderboard.schema['labels']
for item in distinct_sorted_leaderboard_data:
item['result'] = [item['result'][index] for index in leaderboard_labels]
paginator, result_page = paginated_queryset(
distinct_sorted_leaderboard_data,
request,
pagination_class=StandardResultSetPagination())
response_data = result_page
return paginator.get_paginated_response(response_data)
@throttle_classes([UserRateThrottle])
@api_view(['GET'])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def get_remaining_submissions(request, challenge_phase_pk, challenge_pk):
get_challenge_model(challenge_pk)
challenge_phase = get_challenge_phase_model(challenge_phase_pk)
participant_team_pk = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_pk)
# Conditional check for the existence of participant team of the user.
if not participant_team_pk:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
max_submission_per_day = challenge_phase.max_submissions_per_day
max_submission = challenge_phase.max_submissions
submissions_done_today_count = Submission.objects.filter(
challenge_phase__challenge=challenge_pk,
challenge_phase=challenge_phase_pk,
participant_team=participant_team_pk,
submitted_at__gte=timezone.now().date()).count()
failed_submissions_count = Submission.objects.filter(
challenge_phase__challenge=challenge_pk,
challenge_phase=challenge_phase_pk,
participant_team=participant_team_pk,
status=Submission.FAILED,
submitted_at__gte=timezone.now().date()).count()
# Checks if today's successful submission count is greater than or equal to the max submissions per day.
if ((submissions_done_today_count - failed_submissions_count) >= max_submission_per_day
or (max_submission_per_day == 0)):
# Get the UTC time of the instant when the above condition is true.
date_time_now = timezone.now()
# Calculate the next day's date.
date_time_tomorrow = date_time_now.date() + datetime.timedelta(1)
utc = timezone.utc
# Get the midnight time of the day i.e. 12:00 AM of next day.
midnight = utc.localize(datetime.datetime.combine(
date_time_tomorrow, datetime.time()))
# Subtract the current time from the midnight time to get the remaining time for the next day's submissions.
remaining_time = midnight - date_time_now
# Return the remaining time with a message.
response_data = {'message': 'You have exhausted today\'s submission limit',
'remaining_time': remaining_time
}
return Response(response_data, status=status.HTTP_200_OK)
else:
# Calculate the remaining submissions for today.
remaining_submissions_today_count = (max_submission_per_day -
(submissions_done_today_count -
failed_submissions_count)
)
# calculate the remaining submissions from total submissions.
remaining_submission_count = max_submission - \
(submissions_done_today_count - failed_submissions_count)
# Return the above calculated data.
response_data = {'remaining_submissions_today_count': remaining_submissions_today_count,
'remaining_submissions': remaining_submission_count
}
return Response(response_data, status=status.HTTP_200_OK)
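# A sketch of the two response shapes produced above (field names match the code; values are illustrative only):
#   {"message": "You have exhausted today's submission limit", "remaining_time": "21:14:05.123456"}
#   {"remaining_submissions_today_count": 3, "remaining_submissions": 42}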
|
py | 1a349644aa84e4a38b06fdbec801a13e1fcd8b7a | from concurrent.futures import ThreadPoolExecutor, as_completed
from time import time
import boto3
from botocore import UNSIGNED
from botocore.config import Config
from botocore.exceptions import ClientError
from .start_lambda_api_integ_base import StartLambdaIntegBaseClass
class TestParallelRequests(StartLambdaIntegBaseClass):
template_path = "/testdata/invoke/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_same_endpoint(self):
"""
        Send several concurrent requests (ten in this test) to the same function at the same time.
        This ensures we can handle multiple requests at once and do not block or queue up requests.
"""
number_of_requests = 10
start_time = time()
thread_pool = ThreadPoolExecutor(number_of_requests)
futures = [thread_pool.submit(self.lambda_client.invoke, FunctionName="HelloWorldSleepFunction")
for _ in range(0, number_of_requests)]
results = [r.result() for r in as_completed(futures)]
end_time = time()
self.assertEquals(len(results), 10)
self.assertGreater(end_time - start_time, 10)
self.assertLess(end_time - start_time, 20)
for result in results:
self.assertEquals(result.get("Payload").read().decode('utf-8'), '"Slept for 10s"')
class TestLambdaToLambdaInvoke(StartLambdaIntegBaseClass):
template_path = "/testdata/start_lambda/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_local_lambda_calling_local_lambda(self):
        # Placeholder: invoking one local Lambda from another is not exercised here yet.
        pass
class TestLambdaServiceErrorCases(StartLambdaIntegBaseClass):
template_path = "/testdata/invoke/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_invoke_with_non_json_data(self):
expected_error_message = "An error occurred (InvalidRequestContent) when calling the Invoke operation: " \
"Could not parse request body into json: No JSON object could be decoded"
with self.assertRaises(ClientError) as error:
self.lambda_client.invoke(FunctionName="EchoEventFunction", Payload='notat:asdfasdf')
self.assertEquals(str(error.exception), expected_error_message)
def test_invoke_with_log_type_not_None(self):
expected_error_message = "An error occurred (NotImplemented) when calling the Invoke operation: " \
"log-type: Tail is not supported. None is only supported."
with self.assertRaises(ClientError) as error:
self.lambda_client.invoke(FunctionName="EchoEventFunction", LogType="Tail")
self.assertEquals(str(error.exception), expected_error_message)
def test_invoke_with_invocation_type_not_RequestResponse(self):
expected_error_message = "An error occurred (NotImplemented) when calling the Invoke operation: " \
"invocation-type: DryRun is not supported. RequestResponse is only supported."
with self.assertRaises(ClientError) as error:
self.lambda_client.invoke(FunctionName="EchoEventFunction", InvocationType="DryRun")
self.assertEquals(str(error.exception), expected_error_message)
class TestLambdaService(StartLambdaIntegBaseClass):
template_path = "/testdata/invoke/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_invoke_with_data(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction", Payload='"This is json data"')
self.assertEquals(response.get("Payload").read().decode('utf-8'), '"This is json data"')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_no_data(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction")
self.assertEquals(response.get("Payload").read().decode('utf-8'), '{}')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_log_type_None(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction", LogType='None')
self.assertEquals(response.get("Payload").read().decode('utf-8'), '{}')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_invocation_type_RequestResponse(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction", InvocationType='RequestResponse')
self.assertEquals(response.get("Payload").read().decode('utf-8'), '{}')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_lambda_function_raised_error(self):
response = self.lambda_client.invoke(FunctionName="RaiseExceptionFunction", InvocationType='RequestResponse')
self.assertEquals(response.get("Payload").read().decode('utf-8'),
'{"errorMessage": "Lambda is raising an exception", '
'"errorType": "Exception", '
'"stackTrace": [["/var/task/main.py", 43, "raise_exception", '
'"raise Exception(\\"Lambda is raising an exception\\")"]]}')
self.assertEquals(response.get("FunctionError"), 'Unhandled')
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_function_timeout(self):
"""
        This behavior does not match the actual Lambda Service. For functions that time out, data is returned like
        the following:
        {"errorMessage":"<timestamp> <request_id> Task timed out after 5.00 seconds"}
        For local Lambdas, however, a timeout is an interrupt on the thread that invokes the function. Since the
        invoke is on a different thread, we do not (currently) have a way to communicate this back to the caller. So
        when a timeout happens locally, we do not add FunctionError: Unhandled to the response and return an empty
        string as the data (because no data was found in stdout from the container).
"""
response = self.lambda_client.invoke(FunctionName="TimeoutFunction")
self.assertEquals(response.get("Payload").read().decode('utf-8'), '')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
|
py | 1a349699acd81ed7093b545ac8c15f0aad29de89 | import asyncio
import time
def timed(fn, *args, **kwargs):
    """Call fn(*args, **kwargs) repeatedly for at least one second, awaiting it
    via asyncio.run() when fn is a coroutine function. Prints the average of the
    three fastest runs in an appropriate unit and returns (run count, total duration).
    """
name = fn.__name__
times = []
last = before = time.time()
duration = 0
while duration < 1.0:
if asyncio.iscoroutinefunction(fn):
asyncio.run(fn(*args, **kwargs))
else:
fn(*args, **kwargs)
now = time.time()
times.append(now - last)
last = now
duration = now - before
count = len(times)
times = list(sorted(times))
best = times[:3]
avg = sum(best) / len(best)
if avg < 0.001:
avg *= 1000000
unit = "usec"
elif avg < 0.1:
avg *= 1000
unit = "msec"
else:
unit = "sec"
print(f"{count} runs of {name} in {duration:.1f}s: {avg:.3f} {unit} per run")
return count, duration
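# A minimal usage sketch (the benchmarked callables below are hypothetical examples, not part of this module):
#
#     def parse_ints():
#         [int(x) for x in "1 2 3 4 5".split()]
#
#     async def short_sleep():
#         await asyncio.sleep(0.001)
#
#     timed(parse_ints)     # plain callables are called directly
#     timed(short_sleep)    # coroutine functions are driven with asyncio.run()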
|
py | 1a3497173b10da5306db7821d0ad3fc7892d2719 | """
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import (
Typed, ForwardTyped, observe
)
from enaml.core.declarative import d_
from .view_animator import ViewAnimator, ProxyViewAnimator
class ProxyViewSwitcher(ProxyViewAnimator):
""" The abstract definition of a proxy ViewSwitcher object.
"""
#: A reference to the declaration.
declaration = ForwardTyped(lambda: ViewSwitcher)
class ViewSwitcher(ViewAnimator):
""" A simple control for a ViewSwitcher.
"""
#: A reference to the ProxyViewSwitcher object.
proxy = Typed(ProxyViewSwitcher)
|
py | 1a349867c0091aed3702600ea7600193a8b6d3ba | from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.views import (
PasswordChangeDoneView,
PasswordChangeView,
PasswordResetCompleteView,
PasswordResetDoneView,
PasswordResetView,
)
from django.utils.translation import ugettext as _
from django.views.generic import RedirectView
from corehq.apps.callcenter.views import CallCenterOwnerOptionsView
from corehq.apps.domain.forms import (
ConfidentialPasswordResetForm,
HQSetPasswordForm,
)
from corehq.apps.domain.views.accounting import (
BillingStatementPdfView,
BulkStripePaymentView,
CardsView,
CardView,
ConfirmBillingAccountInfoView,
ConfirmSelectedPlanView,
ConfirmSubscriptionRenewalView,
CreditsStripePaymentView,
CreditsWireInvoiceView,
DomainBillingStatementsView,
DomainSubscriptionView,
EditExistingBillingAccountView,
EmailOnDowngradeView,
InternalSubscriptionManagementView,
InvoiceStripePaymentView,
SelectedAnnualPlanView,
SelectedEnterprisePlanView,
SelectPlanView,
SubscriptionRenewalView,
WireInvoiceView,
pause_subscription,
)
from corehq.apps.domain.views.base import select, accept_all_invitations
from corehq.apps.domain.views.fixtures import LocationFixtureConfigView
from corehq.apps.domain.views.internal import (
ActivateTransferDomainView,
DeactivateTransferDomainView,
EditInternalCalculationsView,
EditInternalDomainInfoView,
FlagsAndPrivilegesView,
ProjectLimitsView,
TransferDomainView,
calculated_properties,
toggle_diff,
)
from corehq.apps.domain.views.pro_bono import ProBonoView
from corehq.apps.domain.views.releases import (
ManageReleasesByAppProfile,
ManageReleasesByLocation,
activate_release_restriction,
deactivate_release_restriction,
toggle_release_restriction_by_app_profile,
)
from corehq.apps.domain.views.repeaters import generate_repeater_payloads
from corehq.apps.domain.views.settings import (
CaseSearchConfigView,
DefaultProjectSettingsView,
EditBasicProjectInfoView,
EditMyProjectSettingsView,
EditOpenClinicaSettingsView,
EditPrivacySecurityView,
FeaturePreviewsView,
CustomPasswordResetView,
RecoveryMeasuresHistory,
)
from corehq.apps.domain.views.sms import SMSRatesView
from corehq.apps.linked_domain.views import DomainLinkView
from corehq.apps.reports.dispatcher import DomainReportDispatcher
from corehq.motech.repeaters.views import (
RepeatRecordView,
cancel_repeat_record,
requeue_repeat_record,
)
PASSWORD_RESET_KWARGS = {
'template_name': 'login_and_password/password_reset_form.html',
'form_class': ConfidentialPasswordResetForm,
'from_email': settings.DEFAULT_FROM_EMAIL,
'extra_context': {'current_page': {'page_name': _('Password Reset')}}
}
PASSWORD_RESET_DONE_KWARGS = {
'template_name': 'login_and_password/password_reset_done.html',
'extra_context': {'current_page': {'page_name': _('Reset My Password')}}
}
urlpatterns = [
url(r'^domain/select/$', select, name='domain_select'),
url(r'^domain/select_redirect/$', select, {'do_not_redirect': True}, name='domain_select_redirect'),
url('^accept_all_invitations/$', accept_all_invitations, name='accept_all_invitations'),
url(r'^domain/transfer/(?P<guid>\w+)/activate$',
ActivateTransferDomainView.as_view(), name='activate_transfer_domain'),
url(r'^domain/transfer/(?P<guid>\w+)/deactivate$',
DeactivateTransferDomainView.as_view(), name='deactivate_transfer_domain'),
url(r'^accounts/password_change/$',
PasswordChangeView.as_view(
template_name='login_and_password/password_change_form.html'),
name='password_change'),
url(r'^accounts/password_change_done/$',
PasswordChangeDoneView.as_view(
template_name='login_and_password/password_change_done.html',
extra_context={'current_page': {'page_name': _('Password Change Complete')}}),
name='password_change_done'),
url(r'^accounts/password_reset_email/$',
PasswordResetView.as_view(**PASSWORD_RESET_KWARGS), name='password_reset_email'),
url(r'^accounts/password_reset_email/done/$',
PasswordResetDoneView.as_view(**PASSWORD_RESET_DONE_KWARGS),
name='password_reset_done'),
url(r'^accounts/password_reset_confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
CustomPasswordResetView.as_view(
template_name='login_and_password/password_reset_confirm.html',
form_class=HQSetPasswordForm,
extra_context={'current_page': {'page_name': _('Password Reset Confirmation')}},
),
name=CustomPasswordResetView.urlname),
url(r'^accounts/password_reset_confirm/done/$', PasswordResetCompleteView.as_view(
template_name='login_and_password/password_reset_complete.html',
extra_context={'current_page': {'page_name': _('Password Reset Complete')}}),
name='password_reset_complete'),
]
domain_settings = [
url(r'^$', DefaultProjectSettingsView.as_view(), name=DefaultProjectSettingsView.urlname),
url(r'^my_settings/$', EditMyProjectSettingsView.as_view(), name=EditMyProjectSettingsView.urlname),
url(r'^basic/$', EditBasicProjectInfoView.as_view(), name=EditBasicProjectInfoView.urlname),
url(r'^call_center_owner_options/', CallCenterOwnerOptionsView.as_view(),
name=CallCenterOwnerOptionsView.url_name),
url(r'^privacy/$', EditPrivacySecurityView.as_view(), name=EditPrivacySecurityView.urlname),
url(r'^openclinica/$', EditOpenClinicaSettingsView.as_view(), name=EditOpenClinicaSettingsView.urlname),
url(r'^subscription/change/$', SelectPlanView.as_view(), name=SelectPlanView.urlname),
url(r'^subscription/change/confirm/$', ConfirmSelectedPlanView.as_view(),
name=ConfirmSelectedPlanView.urlname),
url(r'^subscription/change/request/$', SelectedEnterprisePlanView.as_view(),
name=SelectedEnterprisePlanView.urlname),
url(r'^subscription/change/request_annual/$', SelectedAnnualPlanView.as_view(),
name=SelectedAnnualPlanView.urlname),
url(r'^subscription/change/account/$', ConfirmBillingAccountInfoView.as_view(),
name=ConfirmBillingAccountInfoView.urlname),
url(r'^subscription/change/pause/$', pause_subscription, name='pause_subscription'),
url(r'^subscription/change/email/$', EmailOnDowngradeView.as_view(), name=EmailOnDowngradeView.urlname),
url(r'^subscription/pro_bono/$', ProBonoView.as_view(), name=ProBonoView.urlname),
url(r'^subscription/credits/make_payment/$', CreditsStripePaymentView.as_view(),
name=CreditsStripePaymentView.urlname),
url(r'^subscription/credis/make_wire_payment/$', CreditsWireInvoiceView.as_view(),
name=CreditsWireInvoiceView.urlname),
url(r'^billing/statements/download/(?P<statement_id>[\w-]+).pdf$',
BillingStatementPdfView.as_view(),
name=BillingStatementPdfView.urlname),
url(r'^billing/statements/$', DomainBillingStatementsView.as_view(),
name=DomainBillingStatementsView.urlname),
url(r'^billing/make_payment/$', InvoiceStripePaymentView.as_view(),
name=InvoiceStripePaymentView.urlname),
url(r'^billing/make_bulk_payment/$', BulkStripePaymentView.as_view(),
name=BulkStripePaymentView.urlname),
url(r'^billing/make_wire_invoice/$', WireInvoiceView.as_view(),
name=WireInvoiceView.urlname),
url(r'^billing/cards/$', CardsView.as_view(), name=CardsView.url_name),
url(r'^billing/cards/(?P<card_token>card_[\w]+)/$', CardView.as_view(), name=CardView.url_name),
url(r'^subscription/$', DomainSubscriptionView.as_view(), name=DomainSubscriptionView.urlname),
url(r'^subscription/renew/$', SubscriptionRenewalView.as_view(),
name=SubscriptionRenewalView.urlname),
url(r'^subscription/renew/confirm/$', ConfirmSubscriptionRenewalView.as_view(),
name=ConfirmSubscriptionRenewalView.urlname),
url(r'^internal_subscription_management/$', InternalSubscriptionManagementView.as_view(),
name=InternalSubscriptionManagementView.urlname),
url(r'^billing_information/$', EditExistingBillingAccountView.as_view(),
name=EditExistingBillingAccountView.urlname),
url(r'^repeat_record/', RepeatRecordView.as_view(), name=RepeatRecordView.urlname),
url(r'^repeat_record_report/cancel/', cancel_repeat_record, name='cancel_repeat_record'),
url(r'^repeat_record_report/requeue/', requeue_repeat_record, name='requeue_repeat_record'),
url(r'^repeat_record_report/generate_repeater_payloads/', generate_repeater_payloads,
name='generate_repeater_payloads'),
url(r'^integration/', include('corehq.apps.integration.urls')),
url(r'^transfer/$', TransferDomainView.as_view(), name=TransferDomainView.urlname),
url(r'^case_search/$', CaseSearchConfigView.as_view(), name=CaseSearchConfigView.urlname),
url(r'^domain_links/$', DomainLinkView.as_view(), name=DomainLinkView.urlname),
url(r'^location_settings/$', LocationFixtureConfigView.as_view(), name=LocationFixtureConfigView.urlname),
url(r'^commtrack/settings/$', RedirectView.as_view(url='commtrack_settings', permanent=True)),
url(r'^internal/info/$', EditInternalDomainInfoView.as_view(), name=EditInternalDomainInfoView.urlname),
url(r'^internal/calculations/$', EditInternalCalculationsView.as_view(), name=EditInternalCalculationsView.urlname),
url(r'^internal/calculated_properties/$', calculated_properties, name='calculated_properties'),
url(r'^previews/$', FeaturePreviewsView.as_view(), name=FeaturePreviewsView.urlname),
url(r'^flags/$', FlagsAndPrivilegesView.as_view(), name=FlagsAndPrivilegesView.urlname),
url(r'^project_limits/$', ProjectLimitsView.as_view(), name=ProjectLimitsView.urlname),
url(r'^toggle_diff/$', toggle_diff, name='toggle_diff'),
url(r'^sms_rates/$', SMSRatesView.as_view(), name=SMSRatesView.urlname),
url(r'^recovery_measures_history/$',
RecoveryMeasuresHistory.as_view(),
name=RecoveryMeasuresHistory.urlname),
url(r'^manage_releases_by_location/$', ManageReleasesByLocation.as_view(),
name=ManageReleasesByLocation.urlname),
url(r'^manage_releases_by_app_profile/$', ManageReleasesByAppProfile.as_view(),
name=ManageReleasesByAppProfile.urlname),
url(r'^deactivate_release_restriction/(?P<restriction_id>[\w-]+)/$', deactivate_release_restriction,
name='deactivate_release_restriction'),
url(r'^activate_release_restriction/(?P<restriction_id>[\w-]+)/$', activate_release_restriction,
name='activate_release_restriction'),
url(r'^toggle_release_restriction_by_app_profile/(?P<restriction_id>[\w-]+)/$',
toggle_release_restriction_by_app_profile, name='toggle_release_restriction_by_app_profile'),
DomainReportDispatcher.url_pattern()
]
|
py | 1a3498b13bd05e991c25ef4efc7b34082f40e439 | import pytest
import math
import numpy as np
from pandas import read_table, DataFrame, Series
from catboost import Pool, CatBoost, CatBoostClassifier, CatBoostRegressor, CatboostError, cv
from catboost_pytest_lib import data_file, local_canonical_file, remove_time_from_json
import yatest.common
EPS = 1e-5
TRAIN_FILE = data_file('adult', 'train_small')
TEST_FILE = data_file('adult', 'test_small')
CD_FILE = data_file('adult', 'train.cd')
NAN_TRAIN_FILE = data_file('adult_nan', 'train_small')
NAN_TEST_FILE = data_file('adult_nan', 'test_small')
NAN_CD_FILE = data_file('adult_nan', 'train.cd')
CLOUDNESS_TRAIN_FILE = data_file('cloudness_small', 'train_small')
CLOUDNESS_TEST_FILE = data_file('cloudness_small', 'test_small')
CLOUDNESS_CD_FILE = data_file('cloudness_small', 'train.cd')
QUERY_TRAIN_FILE = data_file('querywise_pool', 'train_full3')
QUERY_TEST_FILE = data_file('querywise_pool', 'test3')
QUERY_CD_FILE = data_file('querywise_pool', 'train_full3.cd')
OUTPUT_MODEL_PATH = 'model.bin'
PREDS_PATH = 'predictions.npy'
FIMP_PATH = 'feature_importance.npy'
JSON_LOG_PATH = 'catboost_training.json'
TARGET_IDX = 1
CAT_FEATURES = [0, 1, 2, 4, 6, 8, 9, 10, 11, 12, 16]
model_diff_tool = yatest.common.binary_path("catboost/tools/model_comparator/model_comparator")
def compare_canonical_models(*args, **kwargs):
return local_canonical_file(*args, diff_tool=model_diff_tool, **kwargs)
def map_cat_features(data, cat_features):
for i in range(len(data)):
for j in cat_features:
data[i][j] = str(data[i][j])
return data
def _check_shape(pool):
return np.shape(pool.get_features()) == (101, 17)
def _check_data(data1, data2):
return np.all(np.isclose(data1, data2, rtol=0.001, equal_nan=True))
def test_load_file():
assert _check_shape(Pool(TRAIN_FILE, column_description=CD_FILE))
def test_load_list():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = map_cat_features(pool.get_features(), cat_features)
label = pool.get_label()
assert _check_shape(Pool(data, label, cat_features))
def test_load_ndarray():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = np.array(map_cat_features(pool.get_features(), cat_features))
label = np.array(pool.get_label())
assert _check_shape(Pool(data, label, cat_features))
def test_load_df():
pool = Pool(NAN_TRAIN_FILE, column_description=NAN_CD_FILE)
data = read_table(NAN_TRAIN_FILE, header=None)
label = DataFrame(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, label, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert _check_data(pool.get_label(), pool2.get_label())
def test_load_series():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
data = read_table(TRAIN_FILE, header=None)
label = Series(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
data = Series(list(data.values))
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, label, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert _check_data(pool.get_label(), pool2.get_label())
def test_pool_cat_features():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
assert np.all(pool.get_cat_feature_indices() == CAT_FEATURES)
def test_load_generated():
pool_size = (100, 10)
data = np.round(np.random.normal(size=pool_size), decimals=3)
label = np.random.randint(2, size=pool_size[0])
pool = Pool(data, label)
assert _check_data(pool.get_features(), data)
assert _check_data(pool.get_label(), label)
def test_load_dumps():
pool_size = (100, 10)
data = np.random.randint(10, size=pool_size)
label = np.random.randint(2, size=pool_size[0])
pool1 = Pool(data, label)
lines = []
for i in range(len(data)):
line = [str(label[i])] + [str(x) for x in data[i]]
lines.append('\t'.join(line))
text = '\n'.join(lines)
with open('test_data_dumps', 'w') as f:
f.write(text)
pool2 = Pool('test_data_dumps')
assert _check_data(pool1.get_features(), pool2.get_features())
assert _check_data(pool1.get_label(), pool2.get_label())
def test_predict_regress():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'random_seed': 0, 'loss_function': 'RMSE'})
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_predict_sklearn_regress():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=2, random_seed=0)
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_predict_sklearn_class():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0, loss_function='Logloss:border=0.5')
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_predict_class_raw():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred = model.predict(test_pool)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_predict_class():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred = model.predict(test_pool, prediction_type="Class")
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_predict_class_proba():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred = model.predict_proba(test_pool)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_no_cat_in_predict():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred1 = model.predict(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()))
pred2 = model.predict(Pool(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()), cat_features=train_pool.get_cat_feature_indices()))
assert _check_data(pred1, pred2)
def test_save_model():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoost()
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
model2 = CatBoost(model_file=OUTPUT_MODEL_PATH)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
def test_multiclass():
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
classifier = CatBoostClassifier(iterations=2, random_seed=0, loss_function='MultiClass', thread_count=8)
classifier.fit(pool)
classifier.save_model(OUTPUT_MODEL_PATH)
new_classifier = CatBoostClassifier()
new_classifier.load_model(OUTPUT_MODEL_PATH)
pred = new_classifier.predict_proba(pool)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_querywise():
train_pool = Pool(QUERY_TRAIN_FILE, column_description=QUERY_CD_FILE)
test_pool = Pool(QUERY_TEST_FILE, column_description=QUERY_CD_FILE)
model = CatBoost(params={'loss_function': 'QueryRMSE', 'random_seed': 0, 'iterations': 2, 'thread_count': 8})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERY_TRAIN_FILE, delimiter='\t', header=None)
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 0]
train_data = df.drop([0, 1], axis=1).astype(str)
df = read_table(QUERY_TEST_FILE, delimiter='\t', header=None)
test_data = df.drop([0, 1], axis=1).astype(str)
model.fit(train_data, train_target, query_id=train_query_id)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_zero_baseline():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
baseline = np.zeros(pool.num_row())
pool.set_baseline(baseline)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_ones_weight():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.ones(pool.num_row())
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_non_ones_weight():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.arange(1, pool.num_row()+1)
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_fit_data():
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
eval_pool = Pool(CLOUDNESS_TEST_FILE, column_description=CLOUDNESS_CD_FILE)
base_model = CatBoostClassifier(iterations=2, random_seed=0, loss_function="MultiClass")
base_model.fit(pool)
baseline = np.array(base_model.predict(pool, prediction_type='RawFormulaVal'))
eval_baseline = np.array(base_model.predict(eval_pool, prediction_type='RawFormulaVal'))
eval_pool.set_baseline(eval_baseline)
model = CatBoostClassifier(iterations=2, random_seed=0, loss_function="MultiClass")
data = map_cat_features(pool.get_features(), pool.get_cat_feature_indices())
model.fit(data, pool.get_label(), pool.get_cat_feature_indices(), sample_weight=np.arange(1, pool.num_row()+1), baseline=baseline, use_best_model=True, eval_set=eval_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_ntree_limit():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=100, random_seed=0)
model.fit(train_pool)
pred = model.predict_proba(test_pool, ntree_end=10)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_staged_predict():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, random_seed=0)
model.fit(train_pool)
preds = []
for pred in model.staged_predict(test_pool):
preds.append(pred)
np.save(PREDS_PATH, np.array(preds))
return local_canonical_file(PREDS_PATH)
def test_invalid_loss_base():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({"loss_function": "abcdef"})
model.fit(pool)
def test_invalid_loss_classifier():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(loss_function="abcdef")
model.fit(pool)
def test_invalid_loss_regressor():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(loss_function="fee")
model.fit(pool)
def test_no_eval_set():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier()
model.fit(pool, use_best_model=True)
def test_fit_no_label():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier()
model.fit(pool.get_features())
def test_predict_without_fit():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier()
model.predict(pool)
def test_real_numbers_cat_features():
with pytest.raises(CatboostError):
data = np.random.rand(100, 10)
label = np.random.randint(2, size=100)
Pool(data, label, [1, 2])
def test_wrong_ctr_for_classification():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(ctr_description=['Borders:TargetBorderCount=5:TargetBorderType=Uniform'])
model.fit(pool)
def test_wrong_feature_count():
with pytest.raises(CatboostError):
data = np.random.rand(100, 10)
label = np.random.randint(2, size=100)
model = CatBoostClassifier()
model.fit(data, label)
model.predict(data[:, :-1])
def test_feature_importance_off():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, calc_feature_importance=False)
model.fit(pool)
model.feature_importances_
def test_wrong_params_classifier():
with pytest.raises(CatboostError):
CatBoostClassifier(wrong_param=1)
def test_wrong_params_base():
with pytest.raises(CatboostError):
CatBoost({'wrong_param': 1})
def test_wrong_params_regressor():
with pytest.raises(CatboostError):
CatBoostRegressor(wrong_param=1)
def test_wrong_kwargs_base():
with pytest.raises(CatboostError):
CatBoost({'kwargs': {'wrong_param': 1}})
def test_custom_eval():
class LoglossMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
assert len(approxes) == 1
assert len(target) == len(approxes[0])
approx = approxes[0]
error_sum = 0.0
weight_sum = 0.0
            for i in range(len(approx)):
w = 1.0 if weight is None else weight[i]
weight_sum += w
error_sum += w * (target[i] * approx[i] - math.log(1 + math.exp(approx[i])))
return error_sum, weight_sum
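    # The three methods above form the custom eval-metric protocol used by this test:
    # evaluate() returns an (error_sum, weight_sum) pair accumulated over objects,
    # get_final_error() combines them into the reported value, and is_max_optimal()
    # tells CatBoost that larger values are better. The accumulated quantity is the
    # weighted log-likelihood t*a - log(1 + e^a), i.e. the negative of the built-in
    # Logloss, so maximizing it selects the same best model below.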
train_pool = Pool(data=TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(data=TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True, eval_metric=LoglossMetric())
model.fit(train_pool, eval_set=test_pool)
pred1 = model.predict(test_pool)
model2 = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True, eval_metric="Logloss")
model2.fit(train_pool, eval_set=test_pool)
pred2 = model2.predict(test_pool)
for p1, p2 in zip(pred1, pred2):
assert abs(p1 - p2) < EPS
def test_custom_objective():
class LoglossObjective(object):
def calc_ders_range(self, approxes, targets, weights):
assert len(approxes) == len(targets)
if weights is not None:
assert len(weights) == len(approxes)
exponents = []
            for index in range(len(approxes)):
exponents.append(math.exp(approxes[index]))
result = []
            for index in range(len(targets)):
p = exponents[index] / (1 + exponents[index])
der1 = (1 - p) if targets[index] > 0.0 else -p
der2 = -p * (1 - p)
if weights is not None:
der1 *= weights[index]
der2 *= weights[index]
result.append((der1, der2))
return result
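    # Note on the values returned above: with p = e^a / (1 + e^a), der1 is the first
    # derivative of the per-object log-likelihood t*a - log(1 + e^a) with respect to
    # the approx a (1 - p for positive targets, -p otherwise) and der2 is the second
    # derivative, -p * (1 - p). This is why Newton leaf estimation below reproduces
    # the built-in Logloss predictions.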
train_pool = Pool(data=TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(data=TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True,
loss_function=LoglossObjective(), eval_metric="Logloss",
# Leaf estimation method and gradient iteration are set to match
# defaults for Logloss.
leaf_estimation_method="Newton", leaf_estimation_iterations=10)
model.fit(train_pool, eval_set=test_pool)
pred1 = model.predict(test_pool, prediction_type='RawFormulaVal')
model2 = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True, loss_function="Logloss")
model2.fit(train_pool, eval_set=test_pool)
pred2 = model2.predict(test_pool, prediction_type='RawFormulaVal')
for p1, p2 in zip(pred1, pred2):
assert abs(p1 - p2) < EPS
def test_pool_after_fit():
pool1 = Pool(TRAIN_FILE, column_description=CD_FILE)
pool2 = Pool(TRAIN_FILE, column_description=CD_FILE)
assert _check_data(pool1.get_features(), pool2.get_features())
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool2)
assert _check_data(pool1.get_features(), pool2.get_features())
def test_priors():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, has_time=True, ctr_description=["Borders:Prior=0:Prior=0.6:Prior=1:Prior=5", "Counter:Prior=0:Prior=0.6:Prior=1:Prior=5"])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_ignored_features():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model1 = CatBoostClassifier(iterations=5, random_seed=0, ignored_features=[1, 2, 3])
model2 = CatBoostClassifier(iterations=5, random_seed=0)
model1.fit(train_pool)
model2.fit(train_pool)
predictions1 = model1.predict(test_pool)
predictions2 = model2.predict(test_pool)
assert not _check_data(predictions1, predictions2)
model1.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_class_weights():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, class_weights=[1, 2])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_classification_ctr():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, ctr_description=['Borders', 'Counter'])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_regression_ctr():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=5, random_seed=0, ctr_description=['Borders:TargetBorderCount=5:TargetBorderType=Uniform', 'Counter'])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_copy_model():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model1 = CatBoostRegressor(iterations=5, random_seed=0)
model1.fit(pool)
model2 = model1.copy()
predictions1 = model1.predict(pool)
predictions2 = model2.predict(pool)
assert _check_data(predictions1, predictions2)
model2.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_cv():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss"})
assert isinstance(results, dict)
assert "Logloss_train_avg" in results
prev_value = results["Logloss_train_avg"][0]
for value in results["Logloss_train_avg"][1:]:
assert value < prev_value
prev_value = value
def test_cv_query():
pool = Pool(QUERY_TRAIN_FILE, column_description=QUERY_CD_FILE)
results = cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "QueryRMSE"})
assert isinstance(results, dict)
assert "QueryRMSE_train_avg" in results
prev_value = results["QueryRMSE_train_avg"][0]
for value in results["QueryRMSE_train_avg"][1:]:
assert value < prev_value
prev_value = value
def test_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.feature_importances_))
return local_canonical_file(FIMP_PATH)
def test_interaction_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.get_feature_importance(pool, fstr_type='Interaction')))
return local_canonical_file(FIMP_PATH)
def test_doc_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.get_feature_importance(pool, fstr_type='Doc')))
return local_canonical_file(FIMP_PATH)
def test_one_doc_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.get_feature_importance(np.ones(pool.num_col(), dtype=int), 0, cat_features=pool.get_cat_feature_indices(), fstr_type='Doc')))
return local_canonical_file(FIMP_PATH)
def test_od():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(od_type='Iter', od_wait=20, random_seed=42)
model.fit(train_pool, eval_set=test_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_clone():
estimator = CatBoostClassifier(
custom_metric="Accuracy",
loss_function="MultiClass",
iterations=400)
# This is important for sklearn.base.clone since
    # it uses get_params for cloning the estimator.
params = estimator.get_params()
new_estimator = CatBoostClassifier(**params)
new_params = new_estimator.get_params()
for param in params:
assert param in new_params
assert new_params[param] == params[param]
def test_different_cat_features_order():
dataset = np.array([[2, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
labels = [1.2, 3.4, 9.5, 24.5]
pool1 = Pool(dataset, labels, cat_features=[0, 1])
pool2 = Pool(dataset, labels, cat_features=[1, 0])
model = CatBoost({'learning_rate': 1, 'loss_function': 'RMSE', 'iterations': 2, 'random_seed': 42})
model.fit(pool1)
assert (model.predict(pool1) == model.predict(pool2)).all()
def test_full_history():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(od_type='Iter', od_wait=20, random_seed=42, approx_on_full_history=True)
model.fit(train_pool, eval_set=test_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_bad_params_in_cv():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
with pytest.warns(UserWarning):
cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "use_best_model": True})
def test_cv_logging():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "json_log": JSON_LOG_PATH})
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_with_not_binarized_target():
train_file = data_file('adult_not_binarized', 'train_small')
cd = data_file('adult_not_binarized', 'train.cd')
pool = Pool(train_file, column_description=cd)
cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "json_log": JSON_LOG_PATH})
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
|
py | 1a3499003c0b2968171f3d1959e41ca25d2212c9 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""ErexCoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
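# A minimal usage sketch (assumes a test node from this framework is listening on the
# given address/port; the subclass name and handler body are illustrative only):
#
#     class CountingPeer(P2PInterface):
#         def on_inv(self, message):
#             super().on_inv(message)   # still request the announced objects
#             self.inv_count = getattr(self, 'inv_count', 0) + len(message.inv)
#
#     network_thread = NetworkThread()
#     network_thread.start()
#     peer = CountingPeer()
#     peer.peer_connect('127.0.0.1', 18444)()   # peer_connect returns a callable that opens the connection
#     peer.wait_for_verack()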
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import CBlockHeader, MIN_VERSION_SUPPORTED, msg_addr, msg_block, MSG_BLOCK, msg_blocktxn, msg_cmpctblock, msg_feefilter, msg_getaddr, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_ping, msg_pong, msg_reject, msg_sendcmpct, msg_sendheaders, msg_tx, MSG_TX, MSG_TYPE_MASK, msg_verack, msg_version, NODE_NETWORK, NODE_WITNESS, sha256
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handing the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect(self, dstaddr, dstport, net="regtest"):
assert not self.is_connected
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.network = net
logger.debug('Connecting to ErexCoin Node: %s:%d' % (self.dstaddr, self.dstport))
loop = NetworkThread.network_event_loop
conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
return conn_gen
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if not self.is_connected:
raise IOError('Not connected')
self._log_message("send", message)
tmsg = self._build_message(message)
def maybe_write():
if not self._transport:
return
# Python <3.4.4 does not have is_closing, so we have to check for
# its existence explicitly as long as ErexCoin Core supports all
# Python 3.4 versions.
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
self._transport.write(tmsg)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def _build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
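    # The serialized message built above uses the standard P2P framing (sizes in bytes):
    # magic (4) | command (12, NUL-padded) | payload length (4, little-endian) |
    # checksum (4, first four bytes of double-SHA256 of the payload) | payload.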
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a ErexCoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
"""Waits for a getdata message.
        Receiving any getdata message will satisfy the predicate. The last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested."""
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
        Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
self.reject_code_received = None
self.reject_reason_received = None
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def on_reject(self, message):
"""Store reject reason and code for testing."""
self.reject_code_received = message.code
self.reject_reason_received = message.reason
def send_blocks_and_test(self, blocks, node, *, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_code and reject_reason are set: assert that the correct reject message is received"""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
if request_block:
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_code and reject_reason are set: assert that the correct reject message is received."""
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for tx in txs:
self.tx_store[tx.sha256] = tx
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
if reject_code is not None:
wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
if reject_reason is not None:
wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
|
py | 1a3499c824ac8fd13025a156dfdf059afa91a298 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class GetJobGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2017-07-05', 'GetJobGroup','CCC')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_JobGroupId(self):
return self.get_query_params().get('JobGroupId')
def set_JobGroupId(self,JobGroupId):
self.add_query_param('JobGroupId',JobGroupId) |
py | 1a349aa8fc9a3e9864f690be735b6e2cee7a2db2 | import turtle, random
rat = turtle.Turtle()
screen = turtle.Screen()
dot_distance = 75
#width = 5
height = 5
rat.penup()
screen.register_shape("NickCage.gif")
rat.shape("NickCage.gif")
def draw_a_star():
for i in range(5):
rat.pendown()
rat.forward(50)
rat.right(144)
rat.penup()
for y in range(height):
# rat.dot()
rat.right(random.randrange(0,360,1))
rat.forward(dot_distance-random.randrange(-100,100,1))
draw_a_star()
turtle.done()
|
py | 1a349b981c5a389f145a1b92f05a448ec008fe7f | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MedicationKnowledge
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import sys
from . import backboneelement, domainresource
class MedicationKnowledge(domainresource.DomainResource):
""" Definition of Medication Knowledge.
Information about a medication that is used to support knowledge.
"""
resource_type = "MedicationKnowledge"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.administrationGuidelines = None
""" Guidelines for administration of the medication.
List of `MedicationKnowledgeAdministrationGuidelines` items (represented as `dict` in JSON). """
self.amount = None
""" Amount of drug in package.
Type `Quantity` (represented as `dict` in JSON). """
self.associatedMedication = None
""" A medication resource that is associated with this medication.
List of `FHIRReference` items referencing `['Medication']` (represented as `dict` in JSON). """
self.code = None
""" Code that identifies this medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.contraindication = None
""" Potential clinical issue with or between medication(s).
List of `FHIRReference` items referencing `['DetectedIssue']` (represented as `dict` in JSON). """
self.cost = None
""" The pricing of the medication.
List of `MedicationKnowledgeCost` items (represented as `dict` in JSON). """
self.doseForm = None
""" powder | tablets | capsule +.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.drugCharacteristic = None
""" Specifies descriptive properties of the medicine.
List of `MedicationKnowledgeDrugCharacteristic` items (represented as `dict` in JSON). """
self.ingredient = None
""" Active or inactive ingredient.
List of `MedicationKnowledgeIngredient` items (represented as `dict` in JSON). """
self.intendedRoute = None
""" The intended or approved route of administration.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.kinetics = None
""" The time course of drug absorption, distribution, metabolism and
excretion of a medication from the body.
List of `MedicationKnowledgeKinetics` items (represented as `dict` in JSON). """
self.manufacturer = None
""" Manufacturer of the item.
Type `FHIRReference` referencing `['Organization']` (represented as `dict` in JSON). """
self.medicineClassification = None
""" Categorization of the medication within a formulary or
classification system.
List of `MedicationKnowledgeMedicineClassification` items (represented as `dict` in JSON). """
self.monitoringProgram = None
""" Program under which a medication is reviewed.
List of `MedicationKnowledgeMonitoringProgram` items (represented as `dict` in JSON). """
self.monograph = None
""" Associated documentation about the medication.
List of `MedicationKnowledgeMonograph` items (represented as `dict` in JSON). """
self.packaging = None
""" Details about packaged medications.
Type `MedicationKnowledgePackaging` (represented as `dict` in JSON). """
self.preparationInstruction = None
""" The instructions for preparing the medication.
Type `str`. """
self.productType = None
""" Category of the medication or product.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.regulatory = None
""" Regulatory information about a medication.
List of `MedicationKnowledgeRegulatory` items (represented as `dict` in JSON). """
self.relatedMedicationKnowledge = None
""" Associated or related medication information.
List of `MedicationKnowledgeRelatedMedicationKnowledge` items (represented as `dict` in JSON). """
self.status = None
""" active | inactive | entered-in-error.
Type `str`. """
self.synonym = None
""" Additional names for a medication.
List of `str` items. """
super(MedicationKnowledge, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledge, self).elementProperties()
js.extend(
[
(
"administrationGuidelines",
"administrationGuidelines",
MedicationKnowledgeAdministrationGuidelines,
"MedicationKnowledgeAdministrationGuidelines",
True,
None,
False,
),
("amount", "amount", quantity.Quantity, "Quantity", False, None, False),
(
"associatedMedication",
"associatedMedication",
fhirreference.FHIRReference,
"Reference",
True,
None,
False,
),
(
"code",
"code",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"contraindication",
"contraindication",
fhirreference.FHIRReference,
"Reference",
True,
None,
False,
),
(
"cost",
"cost",
MedicationKnowledgeCost,
"MedicationKnowledgeCost",
True,
None,
False,
),
(
"doseForm",
"doseForm",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"drugCharacteristic",
"drugCharacteristic",
MedicationKnowledgeDrugCharacteristic,
"MedicationKnowledgeDrugCharacteristic",
True,
None,
False,
),
(
"ingredient",
"ingredient",
MedicationKnowledgeIngredient,
"MedicationKnowledgeIngredient",
True,
None,
False,
),
(
"intendedRoute",
"intendedRoute",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"kinetics",
"kinetics",
MedicationKnowledgeKinetics,
"MedicationKnowledgeKinetics",
True,
None,
False,
),
(
"manufacturer",
"manufacturer",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"medicineClassification",
"medicineClassification",
MedicationKnowledgeMedicineClassification,
"MedicationKnowledgeMedicineClassification",
True,
None,
False,
),
(
"monitoringProgram",
"monitoringProgram",
MedicationKnowledgeMonitoringProgram,
"MedicationKnowledgeMonitoringProgram",
True,
None,
False,
),
(
"monograph",
"monograph",
MedicationKnowledgeMonograph,
"MedicationKnowledgeMonograph",
True,
None,
False,
),
(
"packaging",
"packaging",
MedicationKnowledgePackaging,
"MedicationKnowledgePackaging",
False,
None,
False,
),
(
"preparationInstruction",
"preparationInstruction",
str,
"markdown",
False,
None,
False,
),
(
"productType",
"productType",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"regulatory",
"regulatory",
MedicationKnowledgeRegulatory,
"MedicationKnowledgeRegulatory",
True,
None,
False,
),
(
"relatedMedicationKnowledge",
"relatedMedicationKnowledge",
MedicationKnowledgeRelatedMedicationKnowledge,
"MedicationKnowledgeRelatedMedicationKnowledge",
True,
None,
False,
),
("status", "status", str, "code", False, None, False),
("synonym", "synonym", str, "string", True, None, False),
]
)
return js
class MedicationKnowledgeAdministrationGuidelines(backboneelement.BackboneElement):
""" Guidelines for administration of the medication.
Guidelines for the administration of the medication.
"""
resource_type = "MedicationKnowledgeAdministrationGuidelines"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dosage = None
""" Dosage for the medication for the specific guidelines.
List of `MedicationKnowledgeAdministrationGuidelinesDosage` items (represented as `dict` in JSON). """
self.indicationCodeableConcept = None
""" Indication for use that apply to the specific administration
guidelines.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.indicationReference = None
""" Indication for use that apply to the specific administration
guidelines.
Type `FHIRReference` referencing `['ObservationDefinition']` (represented as `dict` in JSON). """
self.patientCharacteristics = None
""" Characteristics of the patient that are relevant to the
administration guidelines.
List of `MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics` items (represented as `dict` in JSON). """
super(MedicationKnowledgeAdministrationGuidelines, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(
MedicationKnowledgeAdministrationGuidelines, self
).elementProperties()
js.extend(
[
(
"dosage",
"dosage",
MedicationKnowledgeAdministrationGuidelinesDosage,
"MedicationKnowledgeAdministrationGuidelinesDosage",
True,
None,
False,
),
(
"indicationCodeableConcept",
"indicationCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"indication",
False,
),
(
"indicationReference",
"indicationReference",
fhirreference.FHIRReference,
"Reference",
False,
"indication",
False,
),
(
"patientCharacteristics",
"patientCharacteristics",
MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics,
"MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics",
True,
None,
False,
),
]
)
return js
class MedicationKnowledgeAdministrationGuidelinesDosage(
backboneelement.BackboneElement
):
""" Dosage for the medication for the specific guidelines.
"""
resource_type = "MedicationKnowledgeAdministrationGuidelinesDosage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dosage = None
""" Dosage for the medication for the specific guidelines.
List of `Dosage` items (represented as `dict` in JSON). """
self.type = None
""" Type of dosage.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeAdministrationGuidelinesDosage, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(
MedicationKnowledgeAdministrationGuidelinesDosage, self
).elementProperties()
js.extend(
[
("dosage", "dosage", dosage.Dosage, "Dosage", True, None, True),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics(
backboneelement.BackboneElement
):
""" Characteristics of the patient that are relevant to the administration
guidelines.
Characteristics of the patient that are relevant to the administration
guidelines (for example, height, weight, gender, etc.).
"""
resource_type = "MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.characteristicCodeableConcept = None
""" Specific characteristic that is relevant to the administration
guideline.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.characteristicQuantity = None
""" Specific characteristic that is relevant to the administration
guideline.
Type `Quantity` (represented as `dict` in JSON). """
self.value = None
""" The specific characteristic.
List of `str` items. """
super(
MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics, self
).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(
MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics, self
).elementProperties()
js.extend(
[
(
"characteristicCodeableConcept",
"characteristicCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"characteristic",
True,
),
(
"characteristicQuantity",
"characteristicQuantity",
quantity.Quantity,
"Quantity",
False,
"characteristic",
True,
),
("value", "value", str, "string", True, None, False),
]
)
return js
class MedicationKnowledgeCost(backboneelement.BackboneElement):
""" The pricing of the medication.
The price of the medication.
"""
resource_type = "MedicationKnowledgeCost"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.cost = None
""" The price of the medication.
Type `Money` (represented as `dict` in JSON). """
self.source = None
""" The source or owner for the price information.
Type `str`. """
self.type = None
""" The category of the cost information.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeCost, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeCost, self).elementProperties()
js.extend(
[
("cost", "cost", money.Money, "Money", False, None, True),
("source", "source", str, "string", False, None, False),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeDrugCharacteristic(backboneelement.BackboneElement):
""" Specifies descriptive properties of the medicine.
Specifies descriptive properties of the medicine, such as color, shape,
imprints, etc.
"""
resource_type = "MedicationKnowledgeDrugCharacteristic"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.type = None
""" Code specifying the type of characteristic of medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.valueBase64Binary = None
""" Description of the characteristic.
Type `str`. """
self.valueCodeableConcept = None
""" Description of the characteristic.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.valueQuantity = None
""" Description of the characteristic.
Type `Quantity` (represented as `dict` in JSON). """
self.valueString = None
""" Description of the characteristic.
Type `str`. """
super(MedicationKnowledgeDrugCharacteristic, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeDrugCharacteristic, self).elementProperties()
js.extend(
[
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"valueBase64Binary",
"valueBase64Binary",
str,
"base64Binary",
False,
"value",
False,
),
(
"valueCodeableConcept",
"valueCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"value",
False,
),
(
"valueQuantity",
"valueQuantity",
quantity.Quantity,
"Quantity",
False,
"value",
False,
),
("valueString", "valueString", str, "string", False, "value", False),
]
)
return js
class MedicationKnowledgeIngredient(backboneelement.BackboneElement):
""" Active or inactive ingredient.
Identifies a particular constituent of interest in the product.
"""
resource_type = "MedicationKnowledgeIngredient"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.isActive = None
""" Active ingredient indicator.
Type `bool`. """
self.itemCodeableConcept = None
""" Medication(s) or substance(s) contained in the medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.itemReference = None
""" Medication(s) or substance(s) contained in the medication.
Type `FHIRReference` referencing `['Substance']` (represented as `dict` in JSON). """
self.strength = None
""" Quantity of ingredient present.
Type `Ratio` (represented as `dict` in JSON). """
super(MedicationKnowledgeIngredient, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeIngredient, self).elementProperties()
js.extend(
[
("isActive", "isActive", bool, "boolean", False, None, False),
(
"itemCodeableConcept",
"itemCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"item",
True,
),
(
"itemReference",
"itemReference",
fhirreference.FHIRReference,
"Reference",
False,
"item",
True,
),
("strength", "strength", ratio.Ratio, "Ratio", False, None, False),
]
)
return js
class MedicationKnowledgeKinetics(backboneelement.BackboneElement):
""" The time course of drug absorption, distribution, metabolism and excretion
of a medication from the body.
"""
resource_type = "MedicationKnowledgeKinetics"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.areaUnderCurve = None
""" The drug concentration measured at certain discrete points in time.
List of `Quantity` items (represented as `dict` in JSON). """
self.halfLifePeriod = None
""" Time required for concentration in the body to decrease by half.
Type `Duration` (represented as `dict` in JSON). """
self.lethalDose50 = None
""" The median lethal dose of a drug.
List of `Quantity` items (represented as `dict` in JSON). """
super(MedicationKnowledgeKinetics, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeKinetics, self).elementProperties()
js.extend(
[
(
"areaUnderCurve",
"areaUnderCurve",
quantity.Quantity,
"Quantity",
True,
None,
False,
),
(
"halfLifePeriod",
"halfLifePeriod",
duration.Duration,
"Duration",
False,
None,
False,
),
(
"lethalDose50",
"lethalDose50",
quantity.Quantity,
"Quantity",
True,
None,
False,
),
]
)
return js
class MedicationKnowledgeMedicineClassification(backboneelement.BackboneElement):
""" Categorization of the medication within a formulary or classification
system.
"""
resource_type = "MedicationKnowledgeMedicineClassification"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.classification = None
""" Specific category assigned to the medication.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" The type of category for the medication (for example, therapeutic
classification, therapeutic sub-classification).
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMedicineClassification, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeMedicineClassification, self).elementProperties()
js.extend(
[
(
"classification",
"classification",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeMonitoringProgram(backboneelement.BackboneElement):
""" Program under which a medication is reviewed.
The program under which the medication is reviewed.
"""
resource_type = "MedicationKnowledgeMonitoringProgram"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of the reviewing program.
Type `str`. """
self.type = None
""" Type of program under which the medication is monitored.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMonitoringProgram, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeMonitoringProgram, self).elementProperties()
js.extend(
[
("name", "name", str, "string", False, None, False),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class MedicationKnowledgeMonograph(backboneelement.BackboneElement):
""" Associated documentation about the medication.
"""
resource_type = "MedicationKnowledgeMonograph"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.source = None
""" Associated documentation about the medication.
Type `FHIRReference` referencing `['DocumentReference', 'Media']` (represented as `dict` in JSON). """
self.type = None
""" The category of medication document.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMonograph, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeMonograph, self).elementProperties()
js.extend(
[
(
"source",
"source",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class MedicationKnowledgePackaging(backboneelement.BackboneElement):
""" Details about packaged medications.
Information that only applies to packages (not products).
"""
resource_type = "MedicationKnowledgePackaging"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.quantity = None
""" The number of product units the package would contain if fully
loaded.
Type `Quantity` (represented as `dict` in JSON). """
self.type = None
""" A code that defines the specific type of packaging that the
medication can be found in.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgePackaging, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgePackaging, self).elementProperties()
js.extend(
[
(
"quantity",
"quantity",
quantity.Quantity,
"Quantity",
False,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class MedicationKnowledgeRegulatory(backboneelement.BackboneElement):
""" Regulatory information about a medication.
"""
resource_type = "MedicationKnowledgeRegulatory"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.maxDispense = None
""" The maximum number of units of the medication that can be dispensed
in a period.
Type `MedicationKnowledgeRegulatoryMaxDispense` (represented as `dict` in JSON). """
self.regulatoryAuthority = None
""" Specifies the authority of the regulation.
Type `FHIRReference` referencing `['Organization']` (represented as `dict` in JSON). """
self.schedule = None
""" Specifies the schedule of a medication in jurisdiction.
List of `MedicationKnowledgeRegulatorySchedule` items (represented as `dict` in JSON). """
self.substitution = None
""" Specifies if changes are allowed when dispensing a medication from
a regulatory perspective.
List of `MedicationKnowledgeRegulatorySubstitution` items (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatory, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatory, self).elementProperties()
js.extend(
[
(
"maxDispense",
"maxDispense",
MedicationKnowledgeRegulatoryMaxDispense,
"MedicationKnowledgeRegulatoryMaxDispense",
False,
None,
False,
),
(
"regulatoryAuthority",
"regulatoryAuthority",
fhirreference.FHIRReference,
"Reference",
False,
None,
True,
),
(
"schedule",
"schedule",
MedicationKnowledgeRegulatorySchedule,
"MedicationKnowledgeRegulatorySchedule",
True,
None,
False,
),
(
"substitution",
"substitution",
MedicationKnowledgeRegulatorySubstitution,
"MedicationKnowledgeRegulatorySubstitution",
True,
None,
False,
),
]
)
return js
class MedicationKnowledgeRegulatoryMaxDispense(backboneelement.BackboneElement):
""" The maximum number of units of the medication that can be dispensed in a
period.
"""
resource_type = "MedicationKnowledgeRegulatoryMaxDispense"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.period = None
""" The period that applies to the maximum number of units.
Type `Duration` (represented as `dict` in JSON). """
self.quantity = None
""" The maximum number of units of the medication that can be dispensed.
Type `Quantity` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatoryMaxDispense, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatoryMaxDispense, self).elementProperties()
js.extend(
[
("period", "period", duration.Duration, "Duration", False, None, False),
(
"quantity",
"quantity",
quantity.Quantity,
"Quantity",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeRegulatorySchedule(backboneelement.BackboneElement):
""" Specifies the schedule of a medication in jurisdiction.
"""
resource_type = "MedicationKnowledgeRegulatorySchedule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.schedule = None
""" Specifies the specific drug schedule.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatorySchedule, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatorySchedule, self).elementProperties()
js.extend(
[
(
"schedule",
"schedule",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeRegulatorySubstitution(backboneelement.BackboneElement):
""" Specifies if changes are allowed when dispensing a medication from a
regulatory perspective.
"""
resource_type = "MedicationKnowledgeRegulatorySubstitution"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allowed = None
""" Specifies if regulation allows for changes in the medication when
dispensing.
Type `bool`. """
self.type = None
""" Specifies the type of substitution allowed.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatorySubstitution, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatorySubstitution, self).elementProperties()
js.extend(
[
("allowed", "allowed", bool, "boolean", False, None, True),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeRelatedMedicationKnowledge(backboneelement.BackboneElement):
""" Associated or related medication information.
Associated or related knowledge about a medication.
"""
resource_type = "MedicationKnowledgeRelatedMedicationKnowledge"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.reference = None
""" Associated documentation about the associated medication knowledge.
List of `FHIRReference` items referencing `['MedicationKnowledge']` (represented as `dict` in JSON). """
self.type = None
""" Category of medicationKnowledge.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRelatedMedicationKnowledge, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(
MedicationKnowledgeRelatedMedicationKnowledge, self
).elementProperties()
js.extend(
[
(
"reference",
"reference",
fhirreference.FHIRReference,
"Reference",
True,
None,
True,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + ".dosage"]
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + ".duration"]
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + ".fhirreference"]
try:
from . import money
except ImportError:
money = sys.modules[__package__ + ".money"]
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + ".quantity"]
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + ".ratio"]
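# --- Hedged usage sketch (illustrative values, not part of the generated FHIR model above) ---
# Building a resource from a plain dict and reading a couple of fields back; assumes the
# module is imported as part of its package so the relative imports above resolve.
def _example_medication_knowledge():
    data = {
        "resourceType": "MedicationKnowledge",
        "status": "active",
        "synonym": ["Example brand name"],
    }
    mk = MedicationKnowledge(jsondict=data, strict=True)
    return mk.status, mk.synonym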
|
py | 1a349bd1fc9d43ba58bf7b0601f7848e12808786 | from django.shortcuts import render
from sermar.models import Locality, Collection, Occurrence
from rest_framework import viewsets
from rest_framework import permissions
from sermar.serializers import LocalitySerializer, CollectionSerializer, SpecimenSerializer
from djgeojson.views import GeoJSONLayerView
from djgeojson.serializers import Serializer as GeoJSONSerializer
# API Views
class LocalityGeoJSONView(GeoJSONLayerView):
crs = False
def render_to_response(self, context, **response_kwargs):
serializer = GeoJSONSerializer()
response = self.response_class(**response_kwargs)
queryset = self.get_queryset()
options = dict(properties=self.properties,
precision=self.precision,
simplify=self.simplify,
srid=self.srid,
geometry_field=self.geometry_field,
force2d=self.force2d,
bbox=self.bbox,
bbox_auto=self.bbox_auto,
use_natural_keys=self.use_natural_keys)
serializer.serialize(queryset, stream=response, ensure_ascii=False,
                             crs=self.crs,  # crs is deprecated in GeoJSON; including it raises error 36 in ol.source
**options)
return response
class LocalityViewSet(viewsets.ReadOnlyModelViewSet):
"""
    API endpoint that allows Localities to be viewed
"""
queryset = Locality.objects.all()
serializer_class = LocalitySerializer
permission_classes = [permissions.IsAuthenticated]
class CollectionViewSet(viewsets.ReadOnlyModelViewSet):
"""
    API endpoint that allows Collections to be viewed
"""
queryset = Collection.objects.all()
serializer_class = CollectionSerializer
permission_classes = [permissions.IsAuthenticated]
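# --- Hedged wiring sketch (the project's real urls.py is not shown; route names below are assumptions) ---
# The viewsets above would typically be exposed through a DRF router and the GeoJSON
# layer view through an ordinary URL pattern.
def build_urlpatterns():
    from django.urls import include, path
    from rest_framework import routers
    router = routers.DefaultRouter()
    router.register(r'localities', LocalityViewSet)
    router.register(r'collections', CollectionViewSet)
    return [
        path('api/', include(router.urls)),
        path('localities.geojson', LocalityGeoJSONView.as_view(model=Locality), name='locality_geojson'),
    ]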
|
py | 1a349cefd6117d17075a2499e8fbde0c911a7044 | #!/usr/bin/python3
import unittest
from base import TestBase
class LoginTest(TestBase):
def test_anonymous_login(self):
info = self.call('/user')
self.assertIsNone(info['user'])
def test_logged_in(self):
with self.client:
email = '[email protected]'
self.login(email)
info = self.call('/user')
self.assertEqual(email, info['user']['email'])
def test_new_user_has_zero_credit(self):
with self.client:
self.login('[email protected]')
info = self.call('/user')
self.assertEqual(0, info['user']['credit'])
def test_user_cannot_change_his_credit(self):
with self.client:
user = self.login('[email protected]')
self.assertEqual(0, user['credit'])
userUrl = '/api/user/{}'.format(user['id'])
self.call(userUrl, credit=20, expectedStatus=405)
self.assertEqual(0, self.call('/user')['user']['credit'])
if __name__ == "__main__":
unittest.main(verbosity=2)
|
py | 1a349dfcf34718caf2cb883ac98148dbe9632570 | """Utilities for recursive decorator."""
from types import CodeType, FunctionType, MethodType
import sys
DECORATOR_LIST_FIELD_NAME = "__wraped_with_"
def mount_to_module(module_to_mount, object_to_mount, name_in_module):
"""Mount Given function to given module.
Args:
module_to_mount(module): module to mount function.
object_to_mount(object): object to mount.
name_in_module(str): name of object after mount to module.
"""
setattr(module_to_mount, name_in_module, object_to_mount)
def set_func_args_and_kwargs_count(function, args_count, kwargs_count):
"""Set to given code args and kwargs count.
Args:
function(function): function to change.
args_count(int): arg count to apply.
kwargs_count(int): kwarg count to apply.
"""
code = function.__code__
function.__code__ = CodeType(args_count,
kwargs_count,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars)
def set_function_kwargs_default_values(func, kwargs_default_values):
"""Set kwargs default values of function.
Args:
func(function): function to set is kwargs defaults.
kwargs_default_values(dict): kwargs default values.
"""
func.__kwdefaults__ = kwargs_default_values
def get_func_module(func):
"""Return function module.
Args:
func(function): function to return is module.
"""
return sys.modules[func.__module__]
def is_function(obj):
"""Return if object is function.
Args:
obj(object): the tested object.
Return:
bool. true if is function else false.
"""
return type(obj) is FunctionType
def is_method(obj):
"""Return if object is function.
Args:
obj(object): the tested object.
Return:
bool. true if is method else false.
"""
return type(obj) is MethodType
def is_wrapped(func, decorator):
"""Return if function is already wrapped with the given decorator.
Args:
func(function): function to check if is wrapped.
decorator(function): the tested decorator.
Return:
bool. true if is function is already wrapped else false.
"""
if hasattr(func, DECORATOR_LIST_FIELD_NAME):
return decorator.__name__ in getattr(func, DECORATOR_LIST_FIELD_NAME)
return False
def get_function_wrapped_value(func):
"""Return list of decorators applied on func by recursive_decorator.
Args:
func(function): function to get is decorator's list.
Return:
list. decorators applied on func by recursive_decorator
"""
if hasattr(func, DECORATOR_LIST_FIELD_NAME):
return getattr(func, DECORATOR_LIST_FIELD_NAME)
return []
def set_function_wrapped_value(func, decorator_list):
"""Set list of decorators applied by recursive_decorator.
Args:
func(function): function to set is decorator list.
decorator_list(list): list of decorators applied by recursive_decorator.
"""
setattr(func, DECORATOR_LIST_FIELD_NAME, decorator_list)
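# --- Hedged usage sketch ---
# How a recursive decorator could record itself on a function via the helpers above;
# `my_decorator` is a stand-in and not part of this module.
def _example_mark_as_wrapped():
    def my_decorator(func):
        return func
    def target():
        return 42
    if not is_wrapped(target, my_decorator):
        applied = get_function_wrapped_value(target) + [my_decorator.__name__]
        set_function_wrapped_value(target, applied)
    assert is_wrapped(target, my_decorator)
    return target()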
|
py | 1a349f33d881beb25cc114efe0cf62c410e7e930 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
"""Contains SKU in an ExpressRouteCircuit.
:param name: The name of the SKU.
:type name: str
:param tier: The tier of the SKU. Possible values are 'Standard',
'Premium' or 'Basic'. Possible values include: 'Standard', 'Premium',
'Basic'
:type tier: str or
~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitSkuTier
:param family: The family of the SKU. Possible values are: 'UnlimitedData'
and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
:type family: str or
~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitSkuFamily
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(self, *, name: str=None, tier=None, family=None, **kwargs) -> None:
super(ExpressRouteCircuitSku, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.family = family
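# --- Hedged usage sketch (values are illustrative) ---
# The SKU model is normally embedded in an ExpressRouteCircuit payload by the generated
# SDK client; it can also be constructed directly with keyword arguments.
def _example_sku():
    sku = ExpressRouteCircuitSku(name='Standard_MeteredData', tier='Standard', family='MeteredData')
    return sku.tier, sku.family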
|
py | 1a34a0dca1221e10d12d2762f23195b64135739a | """
Canny edge detection adapted from https://github.com/DCurro/CannyEdgePytorch
"""
import torch
import torch.nn as nn
import numpy as np
from scipy.signal.windows import gaussian
class CannyEdgeDetector(nn.Module):
def __init__(self,
non_max_suppression=True,
gaussian_filter_std=1.0,
gaussian_filter_size=5,
threshold=0.2):
super(CannyEdgeDetector, self).__init__()
self.threshold = threshold
self.non_max_suppression = non_max_suppression
# Gaussian filter for smoothing
gaussian_filter = gaussian(gaussian_filter_size, std=gaussian_filter_std).reshape([1, gaussian_filter_size])
gaussian_filter = gaussian_filter / gaussian_filter.sum()
self.gaussian_filter_horizontal = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=(1, gaussian_filter_size),
padding=(0, gaussian_filter_size // 2),
bias=False)
# self.gaussian_filter_horizontal.weight[:] = torch.from_numpy(gaussian_filter).float()
self.gaussian_filter_horizontal.weight.data = torch.from_numpy(gaussian_filter).float()[None, None, :, :]
self.gaussian_filter_vertical = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=(gaussian_filter_size, 1),
padding=(gaussian_filter_size // 2, 0),
bias=False)
# self.gaussian_filter_vertical.weight[:] = torch.from_numpy(gaussian_filter.T)
self.gaussian_filter_vertical.weight.data = torch.from_numpy(gaussian_filter.T).float()[None, None, :, :]
# Sobel filter for gradient
sobel_filter = np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
self.sobel_filter_horizontal = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=sobel_filter.shape,
padding=sobel_filter.shape[0] // 2,
bias=False)
# self.sobel_filter_horizontal.weight[:] = torch.from_numpy(sobel_filter).float()
self.sobel_filter_horizontal.weight.data = torch.from_numpy(sobel_filter).float()[None, None, :, :]
self.sobel_filter_vertical = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=sobel_filter.shape,
padding=sobel_filter.shape[0] // 2,
bias=False)
# self.sobel_filter_vertical.weight[:] = torch.from_numpy(sobel_filter.T).float()
self.sobel_filter_vertical.weight.data = torch.from_numpy(sobel_filter.T).float()[None, None, :, :]
# Directional filters for non-max suppression (edge thinning) using gradient orientations.
# filters were flipped manually
if self.non_max_suppression:
filter_0 = np.array([[0, 0, 0],
[0, 1, -1],
[0, 0, 0]])
filter_45 = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, -1]])
filter_90 = np.array([[0, 0, 0],
[0, 1, 0],
[0, -1, 0]])
filter_135 = np.array([[0, 0, 0],
[0, 1, 0],
[-1, 0, 0]])
filter_180 = np.array([[0, 0, 0],
[-1, 1, 0],
[0, 0, 0]])
filter_225 = np.array([[-1, 0, 0],
[0, 1, 0],
[0, 0, 0]])
filter_270 = np.array([[0, -1, 0],
[0, 1, 0],
[0, 0, 0]])
filter_315 = np.array([[0, 0, -1],
[0, 1, 0],
[0, 0, 0]])
all_filters = np.stack([filter_0, filter_45, filter_90, filter_135, filter_180, filter_225, filter_270, filter_315])
self.directional_filter = nn.Conv2d(in_channels=1,
out_channels=8,
kernel_size=filter_0.shape,
padding=filter_0.shape[-1] // 2,
bias=False)
# self.directional_filter.weight[:] = torch.from_numpy(all_filters[:, None, ...])
self.directional_filter.weight.data = torch.from_numpy(all_filters[:, None, :, :]).float()
def forward(self, img):
"""
:param img: (batch_size, num_channels, img_wh, img_wh)
:return:
"""
batch_size = img.shape[0]
num_channels = img.shape[1]
blurred_img = torch.zeros_like(img) # (batch_size, num_channels, img_wh, img_wh)
grad_x = torch.zeros((batch_size, 1, *img.shape[2:]), device=img.device) # (batch_size, 1, img_wh, img_wh)
grad_y = torch.zeros((batch_size, 1, *img.shape[2:]), device=img.device) # (batch_size, 1, img_wh, img_wh)
for c in range(num_channels):
# Gaussian smoothing
blurred = self.gaussian_filter_vertical(self.gaussian_filter_horizontal(img[:, [c], :, :])) # (batch_size, 1, img_wh, img_wh)
blurred_img[:, [c]] = blurred
# Gradient
grad_x += self.sobel_filter_horizontal(blurred) # (batch_size, 1, img_wh, img_wh)
grad_y += self.sobel_filter_vertical(blurred) # (batch_size, 1, img_wh, img_wh)
# Gradient magnitude and orientation
grad_x, grad_y = grad_x / num_channels, grad_y / num_channels # Average per-pixel gradients over channels
grad_magnitude = (grad_x ** 2 + grad_y ** 2) ** 0.5 # Per-pixel gradient magnitude
grad_orientation = torch.atan2(grad_y, grad_x) * (180.0/np.pi) + 180.0 # Per-pixel gradient orientation in degrees with range (0°, 360°)
grad_orientation = torch.round(grad_orientation / 45.0) * 45.0 # Bin gradient orientations
# Thresholding
thresholded_grad_magnitude = grad_magnitude.clone()
thresholded_grad_magnitude[grad_magnitude < self.threshold] = 0.0
output = {'blurred_img': blurred_img, # (batch_size, num_channels, img_wh, img_wh)
'grad_magnitude': grad_magnitude, # (batch_size, 1, img_wh, img_wh)
'grad_orientation': grad_orientation, # (batch_size, 1, img_wh, img_wh)
'thresholded_grad_magnitude': thresholded_grad_magnitude} # (batch_size, 1, img_wh, img_wh)
assert grad_magnitude.size() == grad_orientation.size() == thresholded_grad_magnitude.size()
# Non-max suppression (edge thinning)
if self.non_max_suppression:
all_direction_filtered = self.directional_filter(grad_magnitude) # (batch_size, 8, img_wh, img_wh)
positive_idx = (grad_orientation / 45) % 8 # (batch_size, 1, img_wh, img_wh) Index of positive gradient direction (0: 0°, ..., 7: 315°) at each pixel
thin_edges = grad_magnitude.clone() # (batch_size, 1, img_wh, img_wh)
for pos_i in range(4):
neg_i = pos_i + 4
is_oriented_i = (positive_idx == pos_i) * 1
is_oriented_i = is_oriented_i + (positive_idx == neg_i) * 1 # > 0 if pixel is oriented in pos_i or neg_i direction
pos_directional = all_direction_filtered[:, pos_i]
neg_directional = all_direction_filtered[:, neg_i]
selected_direction = torch.stack([pos_directional, neg_directional])
# get the local maximum pixels for the angle
is_max = selected_direction.min(dim=0)[0] > 0.0 # Check if pixel greater than neighbours in pos_i and neg_i directions.
is_max = torch.unsqueeze(is_max, dim=1)
# apply non maximum suppression
to_remove = (is_max == 0) * 1 * (is_oriented_i) > 0
thin_edges[to_remove] = 0.0
thresholded_thin_edges = thin_edges.clone()
thresholded_thin_edges[thin_edges < self.threshold] = 0.0
output['thin_edges'] = thin_edges
output['thresholded_thin_edges'] = thresholded_thin_edges
return output
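# --- Hedged usage sketch ---
# Running the detector on a random batch; shapes and threshold are illustrative.
def _example_canny():
    detector = CannyEdgeDetector(non_max_suppression=True, threshold=0.2)
    img = torch.rand(2, 3, 64, 64)  # (batch_size, num_channels, img_wh, img_wh)
    with torch.no_grad():
        out = detector(img)
    return out['thresholded_thin_edges'].shape  # expected: (2, 1, 64, 64)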
|
py | 1a34a165c083bbc2d64ad06fb9e94c28e6679e0e | # button.py - write to screen if button is down or up
# (c) BotBook.com - Karvinen, Karvinen, Valtokari
import time
import botbook_gpio as gpio # <1>
def main():
buttonpin = 3 # has internal pull-up # <2>
gpio.mode(buttonpin, "in") # <3>
while (True): # <4>
buttonUp = gpio.read(buttonpin) # <5>
if(buttonUp == gpio.HIGH):
print "Button is up"
else:
print "Button is pressed"
time.sleep(0.3) # seconds # <6>
if __name__ == "__main__":
main()
|
py | 1a34a1d9f8f327090cfd8e9932bd43053b7a832c | import time
import orjson
import asyncio
import websockets
from typing import Optional
from enum import IntEnum
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from websockets.exceptions import ConnectionClosedError, ConnectionClosed, ConnectionClosedOK
from athanor.app import Service
UNKNOWN = "UNKNOWN"
class MudProtocol(IntEnum):
TELNET = 0
WEBSOCKET = 1
def __str__(self):
if self == 0:
return "Telnet"
elif self == 1:
return "WebSocket"
else:
return "Unknown"
#Shamelessly yoinked this IntEnum from Rich for K.I.S.S. purposes.
class ColorSystem(IntEnum):
"""One of the 3 color system supported by terminals."""
STANDARD = 1
EIGHT_BIT = 2
TRUECOLOR = 3
WINDOWS = 4
COLOR_MAP = {
"ansi": ColorSystem.STANDARD,
"xterm256": ColorSystem.EIGHT_BIT,
"truecolor": ColorSystem.TRUECOLOR
}
@dataclass_json
@dataclass
class ConnectionDetails:
protocol: MudProtocol = 0
client_id: str = UNKNOWN
client_name: str = UNKNOWN
client_version: str = UNKNOWN
host_address: str = UNKNOWN
host_name: str = UNKNOWN
host_port: int = 0
connected: float = time.time()
utf8: bool = False
color: Optional[ColorSystem] = None
screen_reader: bool = False
proxy: bool = False
osc_color_palette: bool = False
vt100: bool = False
mouse_tracking: bool = False
naws: bool = False
width: int = 78
height: int = 24
mccp2: bool = False
mccp2_active: bool = False
mccp3: bool = False
mccp3_active: bool = False
mtts: bool = False
ttype: bool = False
mnes: bool = False
suppress_ga: bool = False
force_endline: bool = False
linemode: bool = False
mssp: bool = False
mxp: bool = False
mxp_active: bool = False
oob: bool = False
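# --- Hedged usage sketch (illustrative values) ---
# ConnectionDetails round-trips through JSON via the dataclasses_json decorator above.
def _example_details_roundtrip():
    details = ConnectionDetails(protocol=MudProtocol.TELNET, client_id="conn-1", width=120, height=40)
    payload = details.to_json()  # provided by @dataclass_json
    return ConnectionDetails.from_json(payload).client_id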
class ConnectionInMessageType(IntEnum):
GAMEDATA = 0
CONNECT = 1
READY = 2
MSSP = 4
DISCONNECT = 5
UPDATE = 6
@dataclass_json
@dataclass
class ConnectionInMessage:
msg_type: ConnectionInMessageType
client_id: str
data: Optional[object]
class ConnectionOutMessageType(IntEnum):
GAMEDATA = 0
MSSP = 1
DISCONNECT = 2
@dataclass_json
@dataclass
class ConnectionOutMessage:
msg_type: ConnectionOutMessageType
client_id: str
data: Optional[object]
class PortalOutMessageType(IntEnum):
EVENTS = 0
HELLO = 1
SYSTEM = 2
@dataclass_json
@dataclass
class PortalOutMessage:
msg_type: PortalOutMessageType
process_id: int
data: Optional[object]
class ServerInMessageType(IntEnum):
EVENTS = 0
HELLO = 1
SYSTEM = 2
@dataclass_json
@dataclass
class ServerInMessage:
msg_type: ServerInMessageType
process_id: int
data: Optional[object]
class LinkProtocol:
def __init__(self, service, ws, path):
self.service = service
self.connection = ws
self.path = path
self.outbox = asyncio.Queue()
self.task = None
self.running = False
async def run(self):
self.running = True
self.task = asyncio.create_task(self.run_tasks())
await self.task
self.running = False
async def run_tasks(self):
await asyncio.gather(self.read(), self.write())
async def read(self):
try:
async for message in self.connection:
await self.process_message(message)
except ConnectionClosedError:
self.running = False
self.task.cancel()
except ConnectionClosedOK:
self.running = False
self.task.cancel()
except ConnectionClosed:
self.running = False
self.task.cancel()
async def write(self):
while self.running:
msg = await self.outbox.get()
#print(f"{self.service.app.config.name.upper()} SENDING MESSAGE: {msg}")
if isinstance(msg, str):
await self.connection.send(msg)
else:
await self.connection.send(orjson.dumps(msg))
async def process_message(self, message):
#print(f"{self.service.app.config.name.upper()} RECEIVED MESSAGE: {message}")
if isinstance(message, bytes):
data = orjson.loads(message.decode())
await self.service.message_from_link(data)
else:
print(f"{self.service.app.config.name} got unknown websocket message: {message}")
class LinkService(Service):
def __init__(self, app):
super().__init__(app)
self.app.link = self
self.link: Optional[LinkProtocol] = None
self.interface: Optional[str] = None
self.port: int = 0
self.in_events: Optional[asyncio.Queue] = None
self.out_events: Optional[asyncio.Queue] = None
def setup(self):
link_conf = self.app.config.link
interface = self.app.config.interfaces.get(link_conf["interface"], None)
if interface is None:
raise ValueError("Portal must have a link interface!")
self.interface = interface
port = int(link_conf["port"])
if port < 0 or port > 65535:
raise ValueError(f"Invalid port: {port}. Port must be 16-bit unsigned integer")
self.port = port
async def async_setup(self):
self.in_events = asyncio.Queue()
self.out_events = asyncio.Queue()
async def async_run(self):
pass
async def handle_in_events(self):
pass
async def handle_out_events(self):
pass
def new_link(self, ws, path):
link = LinkProtocol(self, ws, path)
if self.link:
self.close_link()
self.link = link
self.on_new_link()
return link.run()
def on_new_link(self):
pass
def close_link(self):
pass
async def message_from_link(self, message):
pass
class LinkServiceServer(LinkService):
def __init__(self, app):
super().__init__(app)
self.listener = None
async def async_run(self):
await asyncio.gather(self.listener, self.handle_in_events(), self.handle_out_events())
async def async_setup(self):
await super().async_setup()
self.listener = websockets.serve(self.new_link, self.interface, self.port)
class LinkServiceClient(LinkService):
async def async_run(self):
await asyncio.gather(self.async_link(), self.handle_in_events(), self.handle_out_events())
async def async_link(self):
url = f"ws://{self.interface}:{self.port}"
while True:
async with websockets.connect(url) as ws:
self.link = LinkProtocol(self, ws, "/")
self.on_new_link()
await self.link.run()
await asyncio.sleep(0.1)
async def handle_in_events(self):
while True:
msg = await self.in_events.get()
await self.app.conn.in_events.put(msg)
async def handle_out_events(self):
while True:
if self.link:
msg = await self.out_events.get()
await self.link.outbox.put(msg)
else:
await asyncio.sleep(1) |
py | 1a34a24802cffdaf7896537b7d1dba47b880877f | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
import os
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hdfs(name=None):
import params
if params.create_lib_snappy_symlinks:
install_snappy()
  # On some OSes this folder may not exist, so create it before pushing files there
Directory(params.limits_conf_dir,
recursive=True,
owner='root',
group='root'
)
File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
owner='root',
group='root',
mode=0644,
content=Template("hdfs.conf.j2")
)
if params.security_enabled:
tc_mode = 0644
tc_owner = "root"
else:
tc_mode = None
tc_owner = params.hdfs_user
if "hadoop-policy" in params.config['configurations']:
XmlConfig("hadoop-policy.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hadoop-policy'],
configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
owner=params.hdfs_user,
group=params.user_group
)
if "ssl-client" in params.config['configurations']:
XmlConfig("ssl-client.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['ssl-client'],
configuration_attributes=params.config['configuration_attributes']['ssl-client'],
owner=params.hdfs_user,
group=params.user_group
)
Directory(params.hadoop_conf_secure_dir,
recursive=True,
owner='root',
group=params.user_group,
cd_access='a',
)
XmlConfig("ssl-client.xml",
conf_dir=params.hadoop_conf_secure_dir,
configurations=params.config['configurations']['ssl-client'],
configuration_attributes=params.config['configuration_attributes']['ssl-client'],
owner=params.hdfs_user,
group=params.user_group
)
if "ssl-server" in params.config['configurations']:
XmlConfig("ssl-server.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['ssl-server'],
configuration_attributes=params.config['configuration_attributes']['ssl-server'],
owner=params.hdfs_user,
group=params.user_group
)
XmlConfig("hdfs-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
owner=params.hdfs_user,
group=params.user_group
)
XmlConfig("core-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
group=params.user_group,
mode=0644
)
File(os.path.join(params.hadoop_conf_dir, 'slaves'),
owner=tc_owner,
content=Template("slaves.j2")
)
if params.lzo_enabled and len(params.lzo_packages) > 0:
Package(params.lzo_packages)
def install_snappy():
import params
Directory([params.so_target_dir_x86, params.so_target_dir_x64],
recursive=True,
)
Link(params.so_target_x86,
to=params.so_src_x86,
)
Link(params.so_target_x64,
to=params.so_src_x64,
)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hdfs(component=None):
import params
if component == "namenode":
directories = params.dfs_name_dir.split(",")
Directory(directories,
owner=params.hdfs_user,
mode="(OI)(CI)F",
recursive=True
)
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=params.hdfs_user,
mode="f",
)
if params.service_map.has_key(component):
service_name = params.service_map[component]
ServiceConfig(service_name,
action="change_user",
username=params.hdfs_user,
password=Script.get_password(params.hdfs_user))
if "hadoop-policy" in params.config['configurations']:
XmlConfig("hadoop-policy.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hadoop-policy'],
owner=params.hdfs_user,
mode="f",
configuration_attributes=params.config['configuration_attributes']['hadoop-policy']
)
XmlConfig("hdfs-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
owner=params.hdfs_user,
mode="f",
configuration_attributes=params.config['configuration_attributes']['hdfs-site']
)
|
py | 1a34a38f3a2e5509b1d02a93f648f8763c654919 | import inspect
from typing import Callable, Type
from open_mafia_engine.util.repr import ReprMixin
class MafiaError(Exception, ReprMixin):
"""Base class for Mafia exceptions."""
class MafiaAmbiguousTypeName(MafiaError):
    """The type name conflicts with an existing name."""
def __init__(self, existing_type: Type[object], new_type: Type[object]) -> None:
self.existing_type = existing_type
self.new_type = new_type
self.type_name = type_name = existing_type.__qualname__
        super().__init__(
            f"""Type {type_name!r} conflicts with existing type.
Existing type defined in: {inspect.getmodule(existing_type)}
New type defined in: {inspect.getmodule(new_type)}
"""
)
class MafiaTypeNotFound(MafiaError):
"""The type was not found."""
def __init__(self, type_name: str) -> None:
self.type_name = type_name
super().__init__(f"Couldn't find GameObject subtype {type_name!r}")
class MafiaConverterError(MafiaError, TypeError):
"""Could not convert object to the requested type."""
def __init__(self, obj: str, type_: Type):
self.obj = obj
self.type_ = type_
super().__init__(f"Couldn't convert {obj!r} to {type_!r}")
class MafiaBadHandler(MafiaError, TypeError):
"""Function can't be used as an event handler."""
def __init__(self, func: Callable):
self.func = func
super().__init__(f"Function isn't a legal event handler: {func!r}")
class MafiaBadBuilder(MafiaError, TypeError):
"""Function can't be used as a game builder."""
def __init__(self, func: Callable):
self.func = func
super().__init__(f"Function isn't a legal game builder: {func!r}")
|
py | 1a34a44888506ac33837bb4fb8eaeb73653b182a | from setuptools import setup, find_packages
setup(
name='driving_gridworld_experiments',
version='0.0.1',
license='',
packages=find_packages(),
install_requires=['setuptools >= 20.2.2'],
tests_require=['pytest', 'pytest-cov'],
setup_requires=['pytest-runner'],
)
|
py | 1a34a4c35b5c6b876bfe1ae295195687f454006f | '''
Manage the pipeline : reading the logs, parsing them and generating stats.
'''
import os
import time
from monilog.parser import Parser
from monilog.statistics import Statistics
from monilog.utils import init_logger
HIGH_TRAFFIC_DUR = 2*60
STAT_DUR = 10
MAX_IDLE_TIME = 5*60
class MonilogPipeline:
'''
    Read logs and generate statistics.
Args:
file (str): The file with the logs to monitor.
        threshold (int): Maximum average traffic (log entries per second over the past 2 minutes) before an alert is raised.
stop (bool): Whether to stop the monitoring.
'''
def __init__(self,
file='/tmp/access.log',
threshold=10):
self.file = file
self.threshold = threshold
self.stop = False
def stop_monitoring(self):
'''
To call when the monitoring app should be stopped.
'''
self.stop = True
def run(self):
'''
Run the monitoring pipeline.
'''
parser = Parser()
get_stats = Statistics(STAT_DUR)
alert = False
high_traffic_nb = 0
traffic_buffer = []
        # Wait for the log file to appear before opening it. Note: os.O_NONBLOCK is
        # not a valid buffering argument for open(), so it is dropped here.
        while not os.path.exists(self.file):
            time.sleep(1)
        file = open(self.file, 'r')
stat_time = time.time()
high_traffic_time = time.time()
start_idle_time = None
idle_duration = 0
logger = init_logger()
while not self.stop:
line = file.readline()
if not line:
if not start_idle_time:
start_idle_time = time.time()
else:
idle_duration = time.time() - start_idle_time
if idle_duration > MAX_IDLE_TIME:
logger.info(
'Stopping monitoring : Logging app not used for %d s.\n'
% (int(idle_duration))
)
self.stop = True
else:
start_idle_time = None
idle_duration = 0
try:
parsed_line = parser(line)
except:
#logger.warning(f"There was an error parsing : {line}")
continue
traffic_buffer.append(
parsed_line
)
high_traffic_nb += 1
if time.time() - stat_time >= STAT_DUR:
logger.info('\n'+get_stats(traffic_buffer))
stat_time = time.time()
traffic_buffer = []
if time.time() - high_traffic_time >= HIGH_TRAFFIC_DUR:
if high_traffic_nb/HIGH_TRAFFIC_DUR > self.threshold and not alert:
alert = True
logger.warning(
"High traffic generated an alert - hits = %f, triggered at %s.\n"
% (
high_traffic_nb/HIGH_TRAFFIC_DUR,
time.strftime('%d/%b/%Y %H:%M:%S')
)
)
elif high_traffic_nb/HIGH_TRAFFIC_DUR <= self.threshold and alert:
logger.info(
"The high traffic alert is recovered at %s.\n"
% (time.strftime('%d/%b/%Y %H:%M:%S'))
)
high_traffic_time = time.time()
high_traffic_nb = 0
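# A minimal usage sketch (illustrative only; the log path and threshold below are
# assumptions that simply mirror the constructor defaults).
if __name__ == "__main__":
    pipeline = MonilogPipeline(file="/tmp/access.log", threshold=10)
    try:
        pipeline.run()
    except KeyboardInterrupt:
        pipeline.stop_monitoring()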
|
py | 1a34a51526160cdcdf55013ef8cf343eb7697e27 | import sys
sys.setrecursionlimit(500000)
class Solution:
# @param A : list of integers
# @return an integer
def solve(self, parents):
if not parents:
return 0
assert len(parents) >= 1
tree = make_tree(parents)
depth, max_dist = find_max_dist(tree)
return max_dist
class TreeNode:
__slots__ = ['childs']
def __init__(self):
self.childs = []
def append(self, child):
self.childs.append(child)
def make_tree(parents):
n = len(parents)
assert -1 in parents
root = parents.index(-1)
nodes = [TreeNode() for _id in range(n)]
for i,p in enumerate(parents):
if p==-1: continue
nodes[p].append(nodes[i])
assert parents[root] == -1
return nodes[root]
def find_max_dist(tree):
''' @return (depth, max_dist) '''
assert tree
if len(tree.childs) == 0:
return (0, 0)
dms = [find_max_dist(child) for child in tree.childs]
ds, ms = zip(*dms)
max_depth_so_far = 1+max(ds)
if len(tree.childs) == 1:
assert len(ds) == 1
max_dist_so_far = max(ds[0]+1, max(ms))
else:
max_dist_so_far = max(sum(top2_among(ds))+2, max(ms))
return (max_depth_so_far, max_dist_so_far)
def top2_among(ds):
top2 = Top2()
for d in ds:
top2.push(d)
return top2.fst, top2.snd
class Top2:
def __init__(self):
self.fst, self.snd = 0, 0
def push(self, n):
if self.fst <= n:
self.fst, self.snd = n, self.fst
elif self.snd < n:
self.snd = n
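# Illustrative usage (a minimal sketch, not part of the original submission): the
# parent array below encodes the path 0-1-2-3 plus a leaf 4 attached to node 1,
# so the longest path in the tree has 3 edges (e.g. 3 -> 2 -> 1 -> 4).
if __name__ == "__main__":
    assert Solution().solve([-1, 0, 1, 2, 1]) == 3
    assert Solution().solve([-1]) == 0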
|
py | 1a34a7dcc8c99dd83d9d487a323c797e009dacf7 | """Documenter module docstring."""
import ast
import importlib
import inspect
import os
import re
import textwrap
from collections import namedtuple
from functools import lru_cache
from types import ModuleType
from typing import Any, Callable, Dict, GenericMeta, List, Optional, Pattern, Tuple, Type, Union
RECURSIVE_NODES = (ast.If, ast.IfExp, ast.Try, ast.With, ast.ExceptHandler)
# exactly two leading underscores, exactly two trailing underscores
# since we enforce one non-underscore after the two leading underscores,
# we put the rest in an optional group
RE_SPECIAL: Pattern = re.compile(r"^__[^_]([\w_]*[^_])?__$")
# at least two leading underscores, at most one trailing underscore
# since we enforce one non-underscore before the last,
# we make the previous characters optional with an asterisk
RE_CLASS_PRIVATE: Pattern = re.compile(r"^__[\w_]*[^_]_?$")
# at most one leading underscore, then whatever
RE_PRIVATE: Pattern = re.compile(r"^_[^_][\w_]*$")
CATEGORY_ATTRIBUTE = "attribute"
CATEGORY_METHOD = "method"
CATEGORY_FUNCTION = "function"
CATEGORY_MODULE = "module"
CATEGORY_CLASS = "class"
NAME_SPECIAL = ("special", lambda n: bool(RE_SPECIAL.match(n)))
NAME_CLASS_PRIVATE = ("class-private", lambda n: bool(RE_CLASS_PRIVATE.match(n)))
NAME_PRIVATE = ("private", lambda n: bool(RE_PRIVATE.match(n)))
NAME_PROPERTIES = {
CATEGORY_ATTRIBUTE: [NAME_SPECIAL, NAME_CLASS_PRIVATE, NAME_PRIVATE],
CATEGORY_METHOD: [NAME_SPECIAL, NAME_PRIVATE],
CATEGORY_FUNCTION: [NAME_PRIVATE],
CATEGORY_CLASS: [NAME_PRIVATE],
CATEGORY_MODULE: [NAME_SPECIAL, NAME_PRIVATE],
}
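# Illustrative examples of how the regexes above classify names (assumed inputs):
# RE_SPECIAL matches "__init__", RE_CLASS_PRIVATE matches "__cache" or "__cache_",
# RE_PRIVATE matches "_helper", and a plain "public_name" matches none of them.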
def node_is_docstring(node: ast.AST) -> bool:
return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)
def node_to_docstring(node: Union[ast.Expr, ast.Str]) -> str:
return node.value.s
def node_is_assignment(node: ast.AST) -> bool:
return isinstance(node, ast.Assign)
def node_to_names(node: ast.Assign) -> List[str]:
names = []
for target in node.targets:
if isinstance(target, ast.Attribute):
names.append(target.attr)
elif isinstance(target, ast.Name):
names.append(target.id)
return names
def get_name_properties(name: str, category: str) -> List[str]:
properties = []
for prop in NAME_PROPERTIES[category]:
if prop[1](name):
properties.append(prop[0])
return properties
def get_attribute_names_and_docstring(node1, node2):
if node_is_docstring(node2) and node_is_assignment(node1):
return node_to_names(node1), node_to_docstring(node2)
raise ValueError
@lru_cache(maxsize=None)
def get_attributes(module: ModuleType) -> List["Object"]:
with open(module.__file__) as stream:
code = stream.read()
initial_ast_body = ast.parse(code).body
return _get_attributes(initial_ast_body, name_prefix=module.__name__)
def _get_attributes(ast_body: list, name_prefix: str, properties: Optional[List[str]] = None) -> List["Object"]:
if not properties:
properties = []
documented_attributes = []
previous_node = None
for node in ast_body:
try:
names, docstring = get_attribute_names_and_docstring(previous_node, node)
except ValueError:
if isinstance(node, RECURSIVE_NODES):
documented_attributes.extend(_get_attributes(node.body, name_prefix, properties))
if isinstance(node, ast.Try):
documented_attributes.extend(_get_attributes(node.finalbody, name_prefix, properties))
elif isinstance(node, ast.FunctionDef) and node.name == "__init__":
documented_attributes.extend(_get_attributes(node.body, name_prefix))
elif isinstance(node, ast.ClassDef):
documented_attributes.extend(
_get_attributes(node.body, f"{name_prefix}.{node.name}", properties=["class"])
)
else:
for name in names:
documented_attributes.append(
Object(
category=CATEGORY_ATTRIBUTE,
path=f"{name_prefix}.{name}",
name=name,
docstring=docstring,
properties=properties + get_name_properties(name, CATEGORY_ATTRIBUTE),
)
)
previous_node = node
return documented_attributes
def import_object(path: str) -> Tuple[ModuleType, Any]:
"""
Transform a path into an actual Python object.
    The path can be arbitrarily long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path: the dot-separated path of the object.
Returns:
tuple: the imported module and obtained object.
"""
if not path:
raise ValueError(f"path must be a valid Python path, not {path}")
obj_parent_modules = path.split(".")
objects = []
while True:
try:
parent_module_path = ".".join(obj_parent_modules)
parent_module = importlib.import_module(parent_module_path)
break
except ImportError:
if len(obj_parent_modules) == 1:
raise ImportError("No module named '%s'" % obj_parent_modules[0])
objects.insert(0, obj_parent_modules.pop(-1))
current_object = parent_module
for obj in objects:
current_object = getattr(current_object, obj)
module = inspect.getmodule(current_object)
return module, current_object
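def _import_object_example():
    # A minimal sketch of the lookup described above (illustrative only, not part
    # of the documenter API); "collections.OrderedDict" is just an assumed target.
    module, obj = import_object("collections.OrderedDict")
    assert module.__name__ == "collections" and obj is module.OrderedDict
    return obj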
ADMONITIONS = {
"note:": "note",
"see also:": "seealso",
"abstract:": "abstract",
"summary:": "summary",
"tldr:": "tldr",
"info:": "info",
"information:": "info",
"todo:": "todo",
"tip:": "tip",
"hint:": "hint",
"important:": "important",
"success:": "success",
"check:": "check",
"done:": "done",
"question:": "question",
"help:": "help",
"faq:": "faq",
"warning:": "warning",
"caution:": "caution",
"attention:": "attention",
"failure:": "failure",
"fail:": "fail",
"missing:": "missing",
"danger:": "danger",
"error:": "error",
"bug:": "bug",
"example:": "example",
"snippet:": "snippet",
"quote:": "quote",
"cite:": "cite",
}
def render_signature(signature):
# credits to https://github.com/tomchristie/mkautodoc
params = []
render_pos_only_separator = True
render_kw_only_separator = True
for parameter in signature.parameters.values():
value = parameter.name
if parameter.default is not parameter.empty:
value = f"{value}={parameter.default!r}"
if parameter.kind is parameter.VAR_POSITIONAL:
render_kw_only_separator = False
value = f"*{value}"
elif parameter.kind is parameter.VAR_KEYWORD:
value = f"**{value}"
elif parameter.kind is parameter.POSITIONAL_ONLY:
if render_pos_only_separator:
render_pos_only_separator = False
params.append("/")
elif parameter.kind is parameter.KEYWORD_ONLY:
if render_kw_only_separator:
render_kw_only_separator = False
params.append("*")
params.append(value)
return ", ".join(params)
def get_param_info(signature, param_name):
parameter = signature.parameters[param_name]
param_default = param_type = ""
if parameter.annotation is not parameter.empty:
if inspect.isclass(parameter.annotation) and not isinstance(parameter.annotation, GenericMeta):
param_type = parameter.annotation.__name__
else:
param_type = str(parameter.annotation).replace("typing.", "")
if parameter.kind is parameter.VAR_KEYWORD:
param_name = f"**{param_name}"
if parameter.default is not parameter.empty:
param_default = str(parameter.default)
return namedtuple("Param", "name default type")(param_name, param_default, param_type)
def get_return_type(signature):
ret = signature.return_annotation
if ret is not signature.empty:
if inspect.isclass(ret) and not isinstance(ret, GenericMeta):
ret_type = ret.__name__
else:
ret_type = str(ret).replace("typing.", "")
else:
ret_type = ""
return ret_type
def parse_docstring(docstring: str, signature) -> str:
"""
Parse a docstring!
Note:
to try notes.
Args:
docstring: this is the docstring to parse.
Raises:
OSError: no it doesn't lol.
Returns:
markdown: the docstring converted to a nice markdown text.
"""
params = {}
exceptions = {}
returns = ""
lines = docstring.split("\n")
new_lines = []
i = 0
while i < len(lines):
if lines[i].lower() in ("args:", "arguments:", "params:", "parameters:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and params[name]:
params[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
params[name] = description.lstrip(" ")
j += 1
new_lines.append("**Parameters**\n")
new_lines.append("| Name | Type | Description |")
new_lines.append("| ---- | ---- | ----------- |")
for param_name, param_description in params.items():
param_name, param_default, param_type = get_param_info(signature, param_name)
# if param_default:
# param_default = f"`{param_default}`"
new_lines.append(f"| `{param_name}` | `{param_type}` | {param_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("raise:", "raises:", "except:", "exceptions:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and exceptions[name]:
exceptions[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
exceptions[name] = description.lstrip(" ")
j += 1
new_lines.append("**Exceptions**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
for exception_name, exception_description in exceptions.items():
new_lines.append(f"| `{exception_name}` | {exception_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("return:", "returns:"):
j = i + 1
while j < len(lines) and lines[j].startswith(" "):
description = lines[j].lstrip(" ")
returns += " " + description
j += 1
new_lines.append("**Returns**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
new_lines.append(f"| `{get_return_type(signature)}` | {returns} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ADMONITIONS.keys():
j = i + 1
admonition = []
while j < len(lines) and lines[j].startswith(" ") or lines[j] == "":
admonition.append(lines[j])
j += 1
new_lines.append(f"!!! {ADMONITIONS[lines[i].lower()]}")
new_lines.append("\n".join(admonition))
new_lines.append("")
i = j - 1
else:
new_lines.append(lines[i])
i += 1
return "\n".join(new_lines)
class Object:
"""
Class to store information about a Python object.
- the object category (ex: module, function, class, method or attribute)
- the object path (ex: `package.submodule.class.inner_class.method`)
- the object name (ex: `__init__`)
- the object docstring
- the object properties, depending on its category (ex: special, classmethod, etc.)
- the object signature (soon)
Each instance additionally stores references to its children, grouped by category (see Attributes).
"""
def __init__(
self,
category: str,
name: str,
path: str,
docstring: str,
properties: List[str],
signature: Optional[str] = None,
source: Optional[str] = None,
file: Optional[str] = None,
) -> None:
self.category = category
self.name = name
self.signature = signature or ""
self.path = path
self.docstring = docstring or ""
self.properties = properties
self.parent = None
self.source = source or ""
self.file = file or ""
self._path_map = {}
self.attributes = []
"""List of all the object's attributes."""
self.methods = []
"""List of all the object's methods."""
self.functions = []
"""List of all the object's functions."""
self.modules = []
"""List of all the object's submodules."""
self.classes = []
"""List of all the object's classes."""
self.children = []
"""List of all the object's children."""
def __str__(self):
return self.path
@property
def parent_path(self) -> str:
"""The parent's path, computed from the current path."""
return self.path.rsplit(".", 1)[0]
def add_child(self, obj: "Object") -> None:
"""Add an object as a child of this object."""
if obj.parent_path != self.path:
return
self.children.append(obj)
{
CATEGORY_ATTRIBUTE: self.attributes,
CATEGORY_METHOD: self.methods,
CATEGORY_FUNCTION: self.functions,
CATEGORY_MODULE: self.modules,
CATEGORY_CLASS: self.classes,
}.get(obj.category).append(obj)
obj.parent = self
self._path_map[obj.path] = obj
def add_children(self, children: List["Object"]) -> None:
"""Add a list of objects as children of this object."""
for child in children:
self.add_child(child)
def dispatch_attributes(self, attributes: List["Object"]) -> None:
for attribute in attributes:
try:
attach_to = self._path_map[attribute.parent_path]
except KeyError:
pass
else:
attach_to.attributes.append(attribute)
attach_to.children.append(attribute)
attribute.parent = attach_to
def render_references(self, base_url: str):
lines = [f"[{self.path}]: {base_url}#{self.path}"]
for child in self.children:
lines.append(child.render_references(base_url))
return "\n".join(lines)
def render(self, heading: int = 1, **config: Dict[str, Any]) -> str:
"""
Render this object as Markdown.
This is dirty and will be refactored as a Markdown extension soon.
Parameters:
heading: The initial level of heading to use.
config: The rendering configuration dictionary.
Returns:
The rendered Markdown.
"""
lines = []
show_top_object_heading = config.pop("show_top_object_heading", True)
show_top_object_full_path = config.pop("show_top_object_full_path", False)
if show_top_object_heading:
if self.docstring or not config["hide_no_doc"] or not self.parent:
signature = ""
toc_signature = ""
if self.category in (CATEGORY_FUNCTION, CATEGORY_METHOD):
if self.signature:
signature = f"({render_signature(self.signature)})"
toc_signature = "()"
object_heading = f"`:::python {self.path if show_top_object_full_path else self.name}{signature}`"
object_permalink = self.path.replace("__", r"\_\_")
object_toc = self.name.replace("__", r"\_\_") + toc_signature
properties = ", ".join(self.properties)
if properties:
object_heading += f"*({properties})*"
lines.append(
f"{'#' * heading} {object_heading} {{: #{object_permalink} data-toc-label='{object_toc}' }}"
)
if config["add_source_details"] and self.source:
lines.append("")
lines.append(f'??? note "Show source code"')
lines.append(f' ```python linenums="{self.source[1]}"')
lines.append(textwrap.indent("".join(self.source[0]), " "))
lines.append(" ```")
lines.append("")
if self.docstring:
lines.append(parse_docstring(self.docstring, self.signature))
lines.append("")
if config["group_by_categories"]:
lines.append(self.render_categories(heading + 1, **config,))
else:
for child in sorted(self.children, key=lambda o: o.name.lower()):
lines.append(child.render(heading + 1, **config,))
lines.append("")
return "\n".join(lines)
def render_categories(self, heading: int, **config):
extra_level = 1 if config["show_groups_headings"] else 0
lines = []
if self.attributes:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Attributes")
lines.append("")
for attribute in sorted(self.attributes, key=lambda o: o.name.lower()):
lines.append(attribute.render(heading + extra_level, **config))
lines.append("")
if self.classes:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Classes")
lines.append("")
for class_ in sorted(self.classes, key=lambda o: o.name.lower()):
lines.append(class_.render(heading + extra_level, **config))
lines.append("")
if self.methods:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Methods")
lines.append("")
for method in sorted(self.methods, key=lambda o: o.name.lower()):
lines.append(method.render(heading + extra_level, **config))
lines.append("")
if self.functions:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Functions")
lines.append("")
for function in sorted(self.functions, key=lambda o: o.name.lower()):
lines.append(function.render(heading + extra_level, **config))
lines.append("")
return "\n".join(lines)
class Documenter:
"""Class that contains the object documentation loading mechanisms."""
def __init__(self, global_filters):
self.global_filters = [(f, re.compile(f.lstrip("!"))) for f in global_filters]
def get_object_documentation(self, import_string: str) -> Object:
"""
        Build the documented object tree for the given import path.
Return:
The object with all its children populated.
"""
module, obj = import_object(import_string)
path = module.__name__
if inspect.ismodule(obj):
root_object = self.get_module_documentation(obj, path)
elif inspect.isclass(obj):
path = f"{path}.{obj.__name__}"
root_object = self.get_class_documentation(obj, path)
elif inspect.isfunction(obj):
path = f"{path}.{obj.__name__}"
root_object = self.get_function_documentation(obj, path)
else:
raise ValueError(f"{obj}:{type(obj)} not yet supported")
attributes = get_attributes(module)
root_object.dispatch_attributes([a for a in attributes if not self.filter_name_out(a.name)])
return root_object
def get_module_documentation(self, module: ModuleType, path: str) -> Object:
module_name = path.split(".")[-1]
module_file_basename = os.path.splitext(os.path.basename(module.__file__))[0]
properties = get_name_properties(module_file_basename, CATEGORY_MODULE)
root_object = Object(
category=CATEGORY_MODULE,
name=module_name,
path=path,
docstring=inspect.getdoc(module),
properties=properties,
)
for member_name, member in inspect.getmembers(module):
if self.filter_name_out(member_name):
continue
member_path = f"{path}.{member_name}"
if inspect.isclass(member) and inspect.getmodule(member) == module:
root_object.add_child(self.get_class_documentation(member, member_path))
elif inspect.isfunction(member) and inspect.getmodule(member) == module:
root_object.add_child(self.get_function_documentation(member, member_path))
return root_object
def get_class_documentation(self, class_: Type[Any], path: str) -> Object:
class_name = class_.__name__
root_object = Object(
category=CATEGORY_CLASS,
name=class_name,
path=path,
docstring=inspect.getdoc(class_),
properties=get_name_properties(class_name, CATEGORY_CLASS),
signature=inspect.signature(class_),
)
for member_name, member in sorted(class_.__dict__.items()):
if self.filter_name_out(member_name):
continue
member_path = f"{path}.{member_name}"
if inspect.isclass(member):
root_object.add_child(self.get_class_documentation(member, member_path))
continue
actual_member = getattr(class_, member_name)
docstring = inspect.getdoc(actual_member)
try:
source = inspect.getsourcelines(actual_member)
except TypeError:
source = ""
if isinstance(member, classmethod):
root_object.add_child(
Object(
category=CATEGORY_METHOD,
name=member_name,
path=member_path,
docstring=docstring,
properties=get_name_properties(member_name, CATEGORY_METHOD) + ["classmethod"],
source=source,
signature=inspect.signature(actual_member),
)
)
elif isinstance(member, staticmethod):
root_object.add_child(
Object(
category=CATEGORY_METHOD,
name=member_name,
path=member_path,
docstring=docstring,
properties=get_name_properties(member_name, CATEGORY_METHOD) + ["staticmethod"],
source=source,
signature=inspect.signature(actual_member),
)
)
elif isinstance(member, type(lambda: 0)): # regular method
root_object.add_child(
Object(
category=CATEGORY_METHOD,
name=member_name,
path=member_path,
docstring=docstring,
properties=get_name_properties(member_name, CATEGORY_METHOD),
source=source,
signature=inspect.signature(actual_member),
)
)
elif isinstance(member, property):
properties = ["property"]
if member.fset is None:
properties.append("readonly")
root_object.add_child(
Object(
category=CATEGORY_ATTRIBUTE,
name=member_name,
path=member_path,
docstring=docstring,
properties=properties + get_name_properties(member_name, CATEGORY_ATTRIBUTE),
source=source,
signature=inspect.signature(actual_member.fget),
)
)
return root_object
def get_function_documentation(self, function: Callable, path: str) -> Object:
function_name = function.__name__
return Object(
category=CATEGORY_FUNCTION,
name=function_name,
path=path,
docstring=inspect.getdoc(function),
properties=get_name_properties(function_name, CATEGORY_FUNCTION),
source=inspect.getsourcelines(function),
signature=inspect.signature(function),
)
@lru_cache(maxsize=None)
def filter_name_out(self, name: str) -> bool:
keep = True
for f, regex in self.global_filters:
is_matching = bool(regex.match(name))
if is_matching:
if str(f).startswith("!"):
is_matching = not is_matching
keep = is_matching
return not keep
|
py | 1a34a81fa6d4e8a64b750ce9724fc6cc348574eb | import os
import asyncio
import pygame
import random
from functools import partial
import json
import asyncio
import websockets
import logging
import argparse
import time
from mapa import Map, Tiles
logging.basicConfig(level=logging.DEBUG)
logger_websockets = logging.getLogger("websockets")
logger_websockets.setLevel(logging.WARN)
logger = logging.getLogger("Map")
logger.setLevel(logging.DEBUG)
BOMBERMAN = {
"up": (3 * 16, 1 * 16),
"left": (0, 0),
"down": (3 * 16, 0),
"right": (0, 1 * 16),
}
BALLOOM = {
"up": (0, 15 * 16),
"left": (16, 15 * 16),
"down": (2 * 16, 15 * 16),
"right": (3 * 16, 15 * 16),
}
ONEAL = {
"up": (0, 16 * 16),
"left": (16, 16 * 16),
"down": (2 * 16, 16 * 16),
"right": (3 * 16, 16 * 16),
}
DOLL = {
"up": (0, 17 * 16),
"left": (16, 17 * 16),
"down": (2 * 16, 17 * 16),
"right": (3 * 16, 17 * 16),
}
MINVO = {
"up": (0, 18 * 16),
"left": (16, 18 * 16),
"down": (2 * 16, 18 * 16),
"right": (3 * 16, 18 * 16),
}
ENEMIES = {"Balloom": BALLOOM, "Oneal": ONEAL, "Doll": DOLL, "Minvo": MINVO}
POWERUPS = {"Bombs": (0, 14 * 16), "Flames": (1 * 16, 14 * 16), "Detonator": (4 * 16, 14 * 16)}
STONE = (48, 48)
WALL = (64, 48)
PASSAGE = (0, 64)
EXIT = (11 * 16, 3 * 16)
BOMB = [(32, 48), (16, 48), (0, 48)]
EXPLOSION = {
"c": (112, 96),
"l": (96, 96),
"r": (128, 96),
"u": (112, 80),
"d": (112, 112),
"xl": (80, 96),
"xr": (144, 96),
"xu": (112, 64),
"xd": (112, 128),
}
FALLOUT = {"c": (32, 96)}
CHAR_LENGTH = 16
CHAR_SIZE = CHAR_LENGTH, CHAR_LENGTH
SCALE = 1
COLORS = {
"white": (255, 255, 255),
"red": (255, 0, 0),
"pink": (255, 105, 180),
"blue": (135, 206, 235),
"orange": (255, 165, 0),
"yellow": (255, 255, 0),
"grey": (120, 120, 120),
}
BACKGROUND = (0, 0, 0)
RANKS = {
1: "1ST",
2: "2ND",
3: "3RD",
4: "4TH",
5: "5TH",
6: "6TH",
7: "7TH",
8: "8TH",
9: "9TH",
10: "10TH",
}
SPRITES = None
async def messages_handler(ws_path, queue):
async with websockets.connect(ws_path) as websocket:
await websocket.send(json.dumps({"cmd": "join"}))
while True:
r = await websocket.recv()
queue.put_nowait(r)
class GameOver(BaseException):
pass
class Artifact(pygame.sprite.Sprite):
def __init__(self, *args, **kw):
self.x, self.y = None, None # postpone to update_sprite()
x, y = kw.pop("pos", ((kw.pop("x", 0), kw.pop("y", 0))))
new_pos = scale((x, y))
self.image = pygame.Surface(CHAR_SIZE)
self.rect = pygame.Rect(new_pos + CHAR_SIZE)
self.update_sprite((x, y))
super().__init__()
def update_sprite(self, pos=None):
if not pos:
pos = self.x, self.y
else:
pos = scale(pos)
self.rect = pygame.Rect(pos + CHAR_SIZE)
self.image.fill((0, 0, 230))
self.image.blit(*self.sprite)
# self.image = pygame.transform.scale(self.image, scale((1, 1)))
self.x, self.y = pos
def update(self, *args):
self.update_sprite()
class BomberMan(Artifact):
def __init__(self, *args, **kw):
self.direction = "left"
self.sprite = (SPRITES, (0, 0), (*BOMBERMAN[self.direction], *scale((1, 1))))
super().__init__(*args, **kw)
def update(self, new_pos):
x, y = scale(new_pos)
if x > self.x:
self.direction = "right"
if x < self.x:
self.direction = "left"
if y > self.y:
self.direction = "down"
if y < self.y:
self.direction = "up"
self.sprite = (SPRITES, (0, 0), (*BOMBERMAN[self.direction], *scale((1, 1))))
self.update_sprite(tuple(new_pos))
class Enemy(Artifact):
def __init__(self, *args, **kw):
self.direction = "left"
self.name = kw.pop("name")
self.sprite = (
SPRITES,
(0, 0),
(*ENEMIES[self.name][self.direction], *scale((1, 1))),
)
super().__init__(*args, **kw)
def update(self, new_pos):
x, y = scale(new_pos)
if x > self.x:
self.direction = "right"
if x < self.x:
self.direction = "left"
if y > self.y:
self.direction = "down"
if y < self.y:
self.direction = "up"
self.sprite = (
SPRITES,
(0, 0),
(*ENEMIES[self.name][self.direction], *scale((1, 1))),
)
self.update_sprite(new_pos)
class Bomb(Artifact):
def __init__(self, *args, **kw):
self.index = 0
self.sprite = (SPRITES, (0, 0), (*BOMB[self.index], *scale((1, 1))))
self.exploded = False
self.timeout = kw.pop("timeout", -1)
self.radius = kw.pop("radius", 0)
super().__init__(*args, **kw)
def update(self, bombs_state):
for pos, timeout, radius in bombs_state:
if scale(pos) == (self.x, self.y):
# It's me!
self.timeout = int(timeout)
self.radius = radius
self.index = (self.index + 1) % len(BOMB)
self.sprite = (SPRITES, (0, 0), (*BOMB[self.index], *scale((1, 1))))
self.update_sprite()
if self.timeout == 0:
self.exploded = True
self.sprite = ()
self.rect.inflate_ip(
self.radius * 2 * CHAR_LENGTH, self.radius * 2 * CHAR_LENGTH
)
self.image = pygame.Surface(
(
self.radius * 2 * CHAR_LENGTH + CHAR_LENGTH,
self.radius * 2 * CHAR_LENGTH + CHAR_LENGTH,
)
)
self.image.blit(
SPRITES,
scale((self.radius, self.radius)),
(*EXPLOSION["c"], *scale((1, 1))),
)
for r in range(1, self.radius):
self.image.blit(
SPRITES,
scale((self.radius - r, self.radius)),
(*EXPLOSION["l"], *scale((1, 1))),
)
self.image.blit(
SPRITES,
scale((self.radius + r, self.radius)),
(*EXPLOSION["r"], *scale((1, 1))),
)
self.image.blit(
SPRITES,
scale((self.radius, self.radius - r)),
(*EXPLOSION["u"], *scale((1, 1))),
)
self.image.blit(
SPRITES,
scale((self.radius, self.radius + r)),
(*EXPLOSION["d"], *scale((1, 1))),
)
self.image.blit(
SPRITES, scale((0, self.radius)), (*EXPLOSION["xl"], *scale((1, 1)))
)
self.image.blit(
SPRITES,
scale((2 * self.radius, self.radius)),
(*EXPLOSION["xr"], *scale((1, 1))),
)
self.image.blit(
SPRITES, scale((self.radius, 0)), (*EXPLOSION["xu"], *scale((1, 1)))
)
self.image.blit(
SPRITES,
scale((self.radius, 2 * self.radius)),
(*EXPLOSION["xd"], *scale((1, 1))),
)
class Wall(Artifact):
def __init__(self, *args, **kw):
self.sprite = (SPRITES, (0, 0), (*WALL, *scale((1, 1))))
super().__init__(*args, **kw)
class Exit(Artifact):
def __init__(self, *args, **kw):
self.sprite = (SPRITES, (0, 0), (*EXIT, *scale((1, 1))))
super().__init__(*args, **kw)
class Powerups(Artifact):
def __init__(self, *args, **kw):
self.type = kw.pop("name")
self.sprite = (SPRITES, (0, 0), (*POWERUPS[self.type], *scale((1, 1))))
super().__init__(*args, **kw)
def clear_callback(surf, rect):
"""beneath everything there is a passage."""
surf.blit(SPRITES, (rect.x, rect.y), (*PASSAGE, rect.width, rect.height))
def scale(pos):
x, y = pos
return int(x * CHAR_LENGTH / SCALE), int(y * CHAR_LENGTH / SCALE)
def draw_background(mapa):
background = pygame.Surface(scale((int(mapa.size[0]), int(mapa.size[1]))))
for x in range(int(mapa.size[0])):
for y in range(int(mapa.size[1])):
wx, wy = scale((x, y))
if mapa.map[x][y] == Tiles.STONE:
background.blit(SPRITES, (wx, wy), (*STONE, *scale((1, 1))))
else:
background.blit(SPRITES, (wx, wy), (*PASSAGE, *scale((1, 1))))
return background
def draw_info(SCREEN, text, pos, color=(0, 0, 0), background=None):
myfont = pygame.font.Font(None, int(22 / SCALE))
textsurface = myfont.render(text, True, color, background)
x, y = pos
if x > SCREEN.get_width():
pos = SCREEN.get_width() - textsurface.get_width(), y
if y > SCREEN.get_height():
pos = x, SCREEN.get_height() - textsurface.get_height()
if background:
SCREEN.blit(background, pos)
else:
erase = pygame.Surface(textsurface.get_size())
erase.fill(COLORS["grey"])
# SCREEN.blit(erase, pos)
SCREEN.blit(textsurface, pos)
async def main_loop(q):
while True:
await main_game()
async def main_game():
global SPRITES, SCREEN
main_group = pygame.sprite.LayeredUpdates()
bombs_group = pygame.sprite.OrderedUpdates()
enemies_group = pygame.sprite.OrderedUpdates()
walls_group = pygame.sprite.OrderedUpdates()
logging.info("Waiting for map information from server")
state = await q.get() # first state message includes map information
logging.debug("Initial game status: %s", state)
newgame_json = json.loads(state)
GAME_SPEED = newgame_json["fps"]
mapa = Map(size=newgame_json["size"], mapa=newgame_json["map"])
TIMEOUT = newgame_json["timeout"]
SCREEN = pygame.display.set_mode(scale(mapa.size))
SPRITES = pygame.image.load("data/nes.png").convert_alpha()
BACKGROUND = draw_background(mapa)
SCREEN.blit(BACKGROUND, (0, 0))
main_group.add(BomberMan(pos=mapa.bomberman_spawn))
state = {"score": 0, "player": "player1", "bomberman": (1, 1)}
while True:
pygame.event.pump()
if pygame.key.get_pressed()[pygame.K_ESCAPE]:
asyncio.get_event_loop().stop()
main_group.clear(SCREEN, clear_callback)
bombs_group.clear(SCREEN, BACKGROUND)
enemies_group.clear(SCREEN, clear_callback)
if "score" in state and "player" in state:
text = str(state["score"])
draw_info(SCREEN, text.zfill(6), (0, 0))
text = str(state["player"]).rjust(32)
draw_info(SCREEN, text, (4000, 0))
if "bombs" in state:
for bomb in bombs_group:
if bomb.exploded:
bombs_group.remove(bomb)
if len(bombs_group.sprites()) < len(state["bombs"]):
pos, timeout, radius = state["bombs"][-1]
bombs_group.add(Bomb(pos=pos, timeout=timeout, radius=radius))
bombs_group.update(state["bombs"])
if "enemies" in state:
enemies_group.empty()
for enemy in state["enemies"]:
enemies_group.add(Enemy(name=enemy["name"], pos=enemy["pos"]))
if "walls" in state:
walls_group.empty()
for wall in state["walls"]:
walls_group.add(Wall(pos=wall))
if "exit" in state and len(state["exit"]):
if not [p for p in main_group if isinstance(p, Exit)]:
logger.debug("Add Exit")
ex = Exit(pos=state["exit"])
main_group.add(ex)
main_group.move_to_back(ex)
if "powerups" in state:
for pos, name in state["powerups"]:
if name not in [p.type for p in main_group if isinstance(p, Powerups)]:
logger.debug(f"Add {name}")
p = Powerups(pos=pos, name=name)
main_group.add(p)
main_group.move_to_back(p)
for powerup in main_group:
if isinstance(powerup, Powerups):
name = powerup.type
if name not in [p[1] for p in state["powerups"]]:
logger.debug(f"Remove {name}")
main_group.remove(powerup)
walls_group.draw(SCREEN)
main_group.draw(SCREEN)
enemies_group.draw(SCREEN)
bombs_group.draw(SCREEN)
# Highscores Board
if (
("lives" in state and state["lives"] == 0)
or ("step" in state and state["step"] >= TIMEOUT)
or (
"bomberman" in state
and "exit" in state
and state["bomberman"] == state["exit"]
and "enemies" in state
and state["enemies"] == []
)
):
highscores = newgame_json["highscores"]
HIGHSCORES = pygame.Surface(scale((20, 16)))
HIGHSCORES.fill(COLORS["grey"])
draw_info(HIGHSCORES, "THE 10 BEST PLAYERS", scale((5, 1)), COLORS["white"])
draw_info(HIGHSCORES, "RANK", scale((2, 3)), COLORS["orange"])
draw_info(HIGHSCORES, "SCORE", scale((6, 3)), COLORS["orange"])
draw_info(HIGHSCORES, "NAME", scale((11, 3)), COLORS["orange"])
for i, highscore in enumerate(highscores):
c = (i % 5) + 1
draw_info(
HIGHSCORES,
RANKS[i + 1],
scale((2, i + 5)),
list(COLORS.values())[c],
)
draw_info(
HIGHSCORES,
str(highscore[1]),
scale((6, i + 5)),
list(COLORS.values())[c],
)
draw_info(
HIGHSCORES,
highscore[0],
scale((11, i + 5)),
list(COLORS.values())[c],
)
SCREEN.blit(
HIGHSCORES,
(
(SCREEN.get_width() - HIGHSCORES.get_width()) / 2,
(SCREEN.get_height() - HIGHSCORES.get_height()) / 2,
),
)
if "bomberman" in state:
main_group.update(state["bomberman"])
pygame.display.flip()
try:
state = json.loads(q.get_nowait())
if (
"step" in state
and state["step"] == 1
or "level" in state
and state["level"] != mapa.level
):
# New level! lets clean everything up!
SCREEN.blit(BACKGROUND, (0, 0))
walls_group.empty()
main_group.empty()
enemies_group.empty()
bombs_group.empty()
main_group.add(BomberMan(pos=mapa.bomberman_spawn))
mapa.level = state["level"]
except asyncio.queues.QueueEmpty:
await asyncio.sleep(1.0 / GAME_SPEED)
continue
if __name__ == "__main__":
SERVER = os.environ.get("SERVER", "localhost")
PORT = os.environ.get("PORT", "8000")
parser = argparse.ArgumentParser()
parser.add_argument("--server", help="IP address of the server", default=SERVER)
parser.add_argument(
"--scale", help="reduce size of window by x times", type=int, default=1
)
parser.add_argument("--port", help="TCP port", type=int, default=PORT)
args = parser.parse_args()
SCALE = args.scale
LOOP = asyncio.get_event_loop()
pygame.font.init()
q = asyncio.Queue()
ws_path = f"ws://{args.server}:{args.port}/viewer"
try:
LOOP.run_until_complete(
asyncio.gather(messages_handler(ws_path, q), main_loop(q))
)
finally:
LOOP.stop()
|
py | 1a34a8a7431caff3b1d32094f04a85c9f2679b21 | from multipledispatch import dispatch
from accountifie.gl.models import Company
@dispatch(str, dict)
def company(company_id, qstring):
company = Company.objects.get(id=company_id)
flds = ['cmpy_type', 'color_code', 'name', 'id']
data = dict((str(k),str(v)) for k,v in company.__dict__.items() if k in flds)
if company.cmpy_type == 'CON':
data['subs'] = [sub.id for sub in company.subs.all()]
return data
@dispatch(dict)
def company(qstring):
company_list = list(Company.objects.all())
flds = ['cmpy_type', 'color_code', 'name', 'id']
data = []
for company in company_list:
company_data = dict((str(k),str(v)) for k,v in company.__dict__.items() if k in flds)
if company.cmpy_type == 'CON':
company_data['subs'] = [sub.id for sub in company.subs.all()]
data.append(company_data)
return data
"""
def companies(qstring={}):
company_list = list(Company.objects.all())
flds = ['cmpy_type', 'color_code', 'name', 'id']
data = []
for company in company_list:
company_data = dict((str(k),str(v)) for k,v in company.__dict__.iteritems() if k in flds)
if company.cmpy_type == 'CON':
company_data['subs'] = [sub.id for sub in company.subs.all()]
data.append(company_data)
return data
"""
def company_list(company_id, qstring={}):
company = Company.objects.get(id=company_id)
if company.cmpy_type == 'CON':
return [sub.id for sub in company.subs.all()]
else:
return [company_id] |
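# Illustrative dispatch behaviour (the company id "GRP" is an assumption):
# company("GRP", {}) resolves to the (str, dict) implementation and returns one
# company's data, while company({}) resolves to the (dict,) implementation and
# returns data for every company.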
py | 1a34a8e456e66a8e7997ce66234afaf61f114fe4 | from in_data import in_data
from random import choice
import operator
from GA import *
class main:
DeliveryPoints = in_data()
nth_population = 0
BestSolution = None
population= Inicial_Population(DeliveryPoints)
while nth_population < EndPoint:
nth_population+=1
Population_Fitness = PopulationFitness(population, DeliveryPoints)
if BestSolution == None:
BestSolution = max(Population_Fitness.items(), key=operator.itemgetter(1))
else:
b = max(Population_Fitness.items(), key=operator.itemgetter(1))
if b[1] > BestSolution[1]:
BestSolution = max(Population_Fitness.items(), key=operator.itemgetter(1))
selection = Selection(Population_Fitness)
i = 0
#crossing over
NextGeneration = []
while i < PopulationSize:
while True:
p, j = choice(list(selection.values())), choice(list(selection.values()))
p1 = [i for i in p]
p2 = [i for i in j]
while p2 == p1:
j = choice(list(selection.values()))
p2 = [i for i in j]
if p2 != p1: break
f1, f2 = CrossingOver(p1, p2)
if f1 not in NextGeneration:
NextGeneration.append(f1)
i+=1
if f2 not in NextGeneration:
NextGeneration.append(f2)
i+=1
#mutation
population = Mutation(NextGeneration)
print(BestSolution[0])
|
py | 1a34a8f5e63474e8b49642bf4af64b2be400e473 | # coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN.
These methods come from https://arxiv.org/abs/1606.03498 and
https://arxiv.org/abs/1706.08500.
NOTE: This implementation uses the same weights as in
https://github.com/openai/improved-gan/blob/master/inception_score/model.py,
but is more numerically stable and is an unbiased estimator of the true
Inception score even when splitting the inputs into batches.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import tarfile
# Dependency imports
from six.moves import urllib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
__all__ = [
'get_graph_def_from_disk',
'get_graph_def_from_resource',
'get_graph_def_from_url_tarball',
'preprocess_image',
'run_image_classifier',
'run_inception',
'inception_score',
'classifier_score',
'classifier_score_from_logits',
'frechet_inception_distance',
'frechet_classifier_distance',
'frechet_classifier_distance_from_activations',
'INCEPTION_DEFAULT_IMAGE_SIZE',
]
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'
INCEPTION_INPUT = 'Mul:0'
INCEPTION_OUTPUT = 'logits:0'
INCEPTION_FINAL_POOL = 'pool_3:0'
INCEPTION_DEFAULT_IMAGE_SIZE = 299
def _validate_images(images, image_size):
images = ops.convert_to_tensor(images)
images.shape.with_rank(4)
images.shape.assert_is_compatible_with(
[None, image_size, image_size, None])
return images
def _symmetric_matrix_square_root(mat, eps=1e-10):
"""Compute square root of a symmetric matrix.
Note that this is different from an elementwise square root. We want to
compute M' where M' = sqrt(mat) such that M' * M' = mat.
Also note that this method **only** works for symmetric matrices.
Args:
mat: Matrix to take the square root of.
eps: Small epsilon such that any element less than eps will not be square
rooted to guard against numerical instability.
Returns:
Matrix square root of mat.
"""
# Unlike numpy, tensorflow's return order is (s, u, v)
s, u, v = linalg_ops.svd(mat)
# sqrt is unstable around 0, just use 0 in such case
si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
# Note that the v returned by Tensorflow is v = V
# (when referencing the equation A = U S V^T)
# This is unlike Numpy which returns v = V^T
return math_ops.matmul(
math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
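def _numpy_matrix_square_root_check():
  """Minimal NumPy sanity check of M' * M' == mat (illustrative sketch, unused here)."""
  import numpy as np
  mat = np.array([[2.0, 1.0], [1.0, 2.0]])  # symmetric positive semi-definite
  s, u = np.linalg.eigh(mat)
  sqrt_mat = np.matmul(u * np.sqrt(s), u.T)  # eigendecomposition-based square root
  return np.allclose(np.matmul(sqrt_mat, sqrt_mat), mat)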
def preprocess_image(
images, height=INCEPTION_DEFAULT_IMAGE_SIZE,
width=INCEPTION_DEFAULT_IMAGE_SIZE, scope=None):
"""Prepare a batch of images for evaluation.
This is the preprocessing portion of the graph from
http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz.
Note that it expects Tensors in [0, 255]. This function maps pixel values to
[-1, 1] and resizes to match the InceptionV1 network.
Args:
images: 3-D or 4-D Tensor of images. Values are in [0, 255].
height: Integer. Height of resized output image.
width: Integer. Width of resized output image.
scope: Optional scope for name_scope.
Returns:
3-D or 4-D float Tensor of prepared image(s). Values are in [-1, 1].
"""
is_single = images.shape.ndims == 3
with ops.name_scope(scope, 'preprocess', [images, height, width]):
if not images.dtype.is_floating:
images = math_ops.to_float(images)
if is_single:
images = array_ops.expand_dims(images, axis=0)
resized = image_ops.resize_bilinear(images, [height, width])
resized = (resized - 128.0) / 128.0
if is_single:
resized = array_ops.squeeze(resized, axis=0)
return resized
def _kl_divergence(p, p_logits, q):
"""Computes the Kullback-Liebler divergence between p and q.
This function uses p's logits in some places to improve numerical stability.
Specifically:
KL(p || q) = sum[ p * log(p / q) ]
= sum[ p * ( log(p) - log(q) ) ]
= sum[ p * ( log_softmax(p_logits) - log(q) ) ]
Args:
p: A 2-D floating-point Tensor p_ij, where `i` corresponds to the minibatch
example and `j` corresponds to the probability of being in class `j`.
p_logits: A 2-D floating-point Tensor corresponding to logits for `p`.
q: A 1-D floating-point Tensor, where q_j corresponds to the probability
of class `j`.
Returns:
KL divergence between two distributions. Output dimension is 1D, one entry
per distribution in `p`.
Raises:
ValueError: If any of the inputs aren't floating-point.
ValueError: If p or p_logits aren't 2D.
ValueError: If q isn't 1D.
"""
for tensor in [p, p_logits, q]:
if not tensor.dtype.is_floating:
raise ValueError('Input %s must be floating type.', tensor.name)
p.shape.assert_has_rank(2)
p_logits.shape.assert_has_rank(2)
q.shape.assert_has_rank(1)
return math_ops.reduce_sum(
p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)
def get_graph_def_from_disk(filename):
"""Get a GraphDef proto from a disk location."""
with gfile.FastGFile(filename, 'rb') as f:
return graph_pb2.GraphDef.FromString(f.read())
def get_graph_def_from_resource(filename):
"""Get a GraphDef proto from within a .par file."""
return graph_pb2.GraphDef.FromString(resource_loader.load_resource(filename))
def get_graph_def_from_url_tarball(url, filename):
"""Get a GraphDef proto from a tarball on the web."""
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
url, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
tar_filename, _ = urllib.request.urlretrieve(url, reporthook=_progress)
with tarfile.open(tar_filename, 'r:gz') as tar:
proto_str = tar.extractfile(filename).read()
return graph_pb2.GraphDef.FromString(proto_str)
def _default_graph_def_fn():
return get_graph_def_from_url_tarball(INCEPTION_URL, INCEPTION_FROZEN_GRAPH)
def run_inception(images,
graph_def=None,
default_graph_def_fn=_default_graph_def_fn,
image_size=INCEPTION_DEFAULT_IMAGE_SIZE,
input_tensor=INCEPTION_INPUT,
output_tensor=INCEPTION_OUTPUT):
"""Run images through a pretrained Inception classifier.
Args:
images: Input tensors. Must be [batch, height, width, channels]. Input shape
and values must be in [-1, 1], which can be achieved using
`preprocess_image`.
graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
call `default_graph_def_fn` to get GraphDef.
default_graph_def_fn: A function that returns a GraphDef. Used if
      `graph_def` is `None`. By default, returns a pretrained InceptionV1 graph.
image_size: Required image width and height. See unit tests for the default
values.
input_tensor: Name of input Tensor.
output_tensor: Name or list of output Tensors. This function will compute
    activations at the specified layer. Examples include INCEPTION_OUTPUT
    and INCEPTION_FINAL_POOL which would result in this function computing
the final logits or the penultimate pooling layer.
Returns:
Tensor or Tensors corresponding to computed `output_tensor`.
Raises:
ValueError: If images are not the correct size.
ValueError: If neither `graph_def` nor `default_graph_def_fn` are provided.
"""
images = _validate_images(images, image_size)
if graph_def is None:
if default_graph_def_fn is None:
raise ValueError('If `graph_def` is `None`, must provide '
'`default_graph_def_fn`.')
graph_def = default_graph_def_fn()
activations = run_image_classifier(images, graph_def, input_tensor,
output_tensor)
if isinstance(activations, list):
for i, activation in enumerate(activations):
if array_ops.rank(activation) != 2:
activations[i] = layers.flatten(activation)
else:
if array_ops.rank(activations) != 2:
activations = layers.flatten(activations)
return activations
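# A typical call chain, per the docstrings above (illustrative; `images_uint8` is
# an assumed placeholder for a [batch, height, width, 3] tensor with values in [0, 255]):
#   logits = run_inception(preprocess_image(images_uint8))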
def run_image_classifier(tensor, graph_def, input_tensor,
output_tensor, scope='RunClassifier'):
"""Runs a network from a frozen graph.
Args:
tensor: An Input tensor.
graph_def: A GraphDef proto.
input_tensor: Name of input tensor in graph def.
output_tensor: A tensor name or list of tensor names in graph def.
scope: Name scope for classifier.
Returns:
Classifier output if `output_tensor` is a string, or a list of outputs if
`output_tensor` is a list.
Raises:
ValueError: If `input_tensor` or `output_tensor` aren't in the graph_def.
"""
input_map = {input_tensor: tensor}
is_singleton = isinstance(output_tensor, str)
if is_singleton:
output_tensor = [output_tensor]
classifier_outputs = importer.import_graph_def(
graph_def, input_map, output_tensor, name=scope)
if is_singleton:
classifier_outputs = classifier_outputs[0]
return classifier_outputs
def classifier_score(images, classifier_fn, num_batches=1):
"""Classifier score for evaluating a conditional generative model.
This is based on the Inception Score, but for an arbitrary classifier.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
NOTE: This function consumes images, computes their logits, and then
computes the classifier score. If you would like to precompute many logits for
large batches, use clasifier_score_from_logits(), which this method also
uses.
Args:
images: Images to calculate the classifier score for.
classifier_fn: A function that takes images and produces logits based on a
classifier.
    num_batches: Number of batches to split `images` into in order to
efficiently run them through the classifier network.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `classifier_fn`.
"""
generated_images_list = array_ops.split(
images, num_or_size_splits=num_batches)
# Compute the classifier splits using the memory-efficient `map_fn`.
logits = functional_ops.map_fn(
fn=classifier_fn,
elems=array_ops.stack(generated_images_list),
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
logits = array_ops.concat(array_ops.unstack(logits), 0)
return classifier_score_from_logits(logits)
def classifier_score_from_logits(logits):
"""Classifier score for evaluating a generative model from logits.
This method computes the classifier score for a set of logits. This can be
used independently of the classifier_score() method, especially in the case
of using large batches during evaluation where we would like precompute all
of the logits before computing the classifier score.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates:
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
Args:
logits: Precomputed 2D tensor of logits that will be used to
compute the classifier score.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `logits`.
"""
logits.shape.assert_has_rank(2)
# Use maximum precision for best results.
logits_dtype = logits.dtype
if logits_dtype != dtypes.float64:
logits = math_ops.to_double(logits)
p = nn_ops.softmax(logits)
q = math_ops.reduce_mean(p, axis=0)
kl = _kl_divergence(p, logits, q)
kl.shape.assert_has_rank(1)
log_score = math_ops.reduce_mean(kl)
final_score = math_ops.exp(log_score)
if logits_dtype != dtypes.float64:
final_score = math_ops.cast(final_score, logits_dtype)
return final_score
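def _classifier_score_sanity_check():
  """Illustrative check (not part of the API): if every sample's softmax equals
  the marginal distribution, the KL term is zero and the score is exp(0) == 1."""
  import numpy as np
  logits = ops.convert_to_tensor(np.zeros((4, 10)))
  return classifier_score_from_logits(logits)  # evaluates to 1.0 in a session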
inception_score = functools.partial(
classifier_score,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_OUTPUT))
def trace_sqrt_product(sigma, sigma_v):
"""Find the trace of the positive sqrt of product of covariance matrices.
'_symmetric_matrix_square_root' only works for symmetric matrices, so we
cannot just take _symmetric_matrix_square_root(sigma * sigma_v).
('sigma' and 'sigma_v' are symmetric, but their product is not necessarily).
Let sigma = A A so A = sqrt(sigma), and sigma_v = B B.
We want to find trace(sqrt(sigma sigma_v)) = trace(sqrt(A A B B))
Note the following properties:
(i) forall M1, M2: eigenvalues(M1 M2) = eigenvalues(M2 M1)
=> eigenvalues(A A B B) = eigenvalues (A B B A)
(ii) if M1 = sqrt(M2), then eigenvalues(M1) = sqrt(eigenvalues(M2))
=> eigenvalues(sqrt(sigma sigma_v)) = sqrt(eigenvalues(A B B A))
(iii) forall M: trace(M) = sum(eigenvalues(M))
=> trace(sqrt(sigma sigma_v)) = sum(eigenvalues(sqrt(sigma sigma_v)))
= sum(sqrt(eigenvalues(A B B A)))
= sum(eigenvalues(sqrt(A B B A)))
= trace(sqrt(A B B A))
= trace(sqrt(A sigma_v A))
A = sqrt(sigma). Both sigma and A sigma_v A are symmetric, so we **can**
use the _symmetric_matrix_square_root function to find the roots of these
matrices.
Args:
sigma: a square, symmetric, real, positive semi-definite covariance matrix
sigma_v: same as sigma
Returns:
The trace of the positive square root of sigma*sigma_v
"""
# Note sqrt_sigma is called "A" in the proof above
sqrt_sigma = _symmetric_matrix_square_root(sigma)
# This is sqrt(A sigma_v A) above
sqrt_a_sigmav_a = math_ops.matmul(
sqrt_sigma, math_ops.matmul(sigma_v, sqrt_sigma))
return math_ops.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
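# A small NumPy/SciPy sketch of the identity derived above, useful for checking
# the math on random symmetric positive semi-definite matrices. The helper is
# hypothetical and not part of the TF-GAN API.
def _trace_sqrt_product_numpy_sketch(sigma, sigma_v):
  import numpy as np
  import scipy.linalg
  sqrt_sigma = scipy.linalg.sqrtm(sigma)  # A = sqrt(sigma)
  # trace(sqrt(sigma sigma_v)) == trace(sqrt(A sigma_v A))
  return float(np.trace(scipy.linalg.sqrtm(sqrt_sigma.dot(sigma_v).dot(sqrt_sigma))).real)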
def frechet_classifier_distance(real_images,
generated_images,
classifier_fn,
num_batches=1):
"""Classifier distance for evaluating a generative model.
This is based on the Frechet Inception distance, but for an arbitrary
classifier.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
  Given two Gaussian distributions with means m and m_w and covariance matrices
  C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
  NOTE: This function consumes images, computes their activations, and then
  computes the Frechet classifier distance. If you would like to precompute many
  activations for real and generated images for large batches, please use
  frechet_classifier_distance_from_activations(), which this method also uses.
Args:
real_images: Real images to use to compute Frechet Inception distance.
generated_images: Generated images to use to compute Frechet Inception
distance.
classifier_fn: A function that takes images and produces activations
based on a classifier.
    num_batches: Number of batches to split images into in order to
efficiently run them through the classifier network.
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of `classifier_fn`.
"""
real_images_list = array_ops.split(
real_images, num_or_size_splits=num_batches)
generated_images_list = array_ops.split(
generated_images, num_or_size_splits=num_batches)
imgs = array_ops.stack(real_images_list + generated_images_list)
# Compute the activations using the memory-efficient `map_fn`.
activations = functional_ops.map_fn(
fn=classifier_fn,
elems=imgs,
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
# Split the activations by the real and generated images.
real_a, gen_a = array_ops.split(activations, [num_batches, num_batches], 0)
# Ensure the activations have the right shapes.
real_a = array_ops.concat(array_ops.unstack(real_a), 0)
gen_a = array_ops.concat(array_ops.unstack(gen_a), 0)
return frechet_classifier_distance_from_activations(real_a, gen_a)
def frechet_classifier_distance_from_activations(
real_activations, generated_activations):
"""Classifier distance for evaluating a generative model from activations.
  This method computes the Frechet classifier distance from activations of
real images and generated images. This can be used independently of the
frechet_classifier_distance() method, especially in the case of using large
  batches during evaluation where we would like to precompute all of the
activations before computing the classifier distance.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
  Given two Gaussian distributions with means m and m_w and covariance matrices
  C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Args:
real_activations: 2D Tensor containing activations of real data. Shape is
[batch_size, activation_size].
generated_activations: 2D Tensor containing activations of generated data.
Shape is [batch_size, activation_size].
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of the activations.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.to_double(real_activations)
generated_activations = math_ops.to_double(generated_activations)
# Compute mean and covariance matrices of activations.
m = math_ops.reduce_mean(real_activations, 0)
m_v = math_ops.reduce_mean(generated_activations, 0)
num_examples = math_ops.to_double(array_ops.shape(real_activations)[0])
# sigma = (1 / (n - 1)) * (X - mu) (X - mu)^T
real_centered = real_activations - m
sigma = math_ops.matmul(
real_centered, real_centered, transpose_a=True) / (num_examples - 1)
gen_centered = generated_activations - m_v
sigma_v = math_ops.matmul(
gen_centered, gen_centered, transpose_a=True) / (num_examples - 1)
# Find the Tr(sqrt(sigma sigma_v)) component of FID
sqrt_trace_component = trace_sqrt_product(sigma, sigma_v)
# Compute the two components of FID.
# First the covariance component.
# Here, note that trace(A + B) = trace(A) + trace(B)
trace = math_ops.trace(sigma + sigma_v) - 2.0 * sqrt_trace_component
# Next the distance between means.
mean = math_ops.square(linalg_ops.norm(m - m_v)) # This uses the L2 norm.
fid = trace + mean
if activations_dtype != dtypes.float64:
fid = math_ops.cast(fid, activations_dtype)
return fid
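# A compact NumPy/SciPy sketch of the same distance for precomputed activation
# arrays, following |m - m_w|^2 + Tr(C + C_w - 2(C C_w)^(1/2)). The helper is
# hypothetical and not part of the TF-GAN API.
def _frechet_distance_numpy_sketch(real_activations, generated_activations):
  import numpy as np
  import scipy.linalg
  m = real_activations.mean(axis=0)
  m_v = generated_activations.mean(axis=0)
  sigma = np.cov(real_activations, rowvar=False)
  sigma_v = np.cov(generated_activations, rowvar=False)
  covmean = scipy.linalg.sqrtm(sigma.dot(sigma_v)).real
  return float(((m - m_v) ** 2).sum() + np.trace(sigma + sigma_v - 2.0 * covmean))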
frechet_inception_distance = functools.partial(
frechet_classifier_distance,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_FINAL_POOL))
|
py | 1a34aa2efac04bf2b87b861015dca6a03f7481d1 | from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from pylmnn import LargeMarginNearestNeighbor as LMNN
# Load a data set
X, y = load_iris(return_X_y=True)
# Split in training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.7, stratify=y, random_state=42)
# Set up the hyperparameters
k_train, k_test, n_components, max_iter = 3, 3, X.shape[1], 180
# Instantiate the metric learner
lmnn = LMNN(n_neighbors=k_train, max_iter=max_iter, n_components=n_components)
# Train the metric learner
lmnn.fit(X_train, y_train)
# Fit the nearest neighbors classifier
knn = KNeighborsClassifier(n_neighbors=k_test)
knn.fit(lmnn.transform(X_train), y_train)
# Compute the k-nearest neighbor test accuracy after applying the learned transformation
lmnn_acc = knn.score(lmnn.transform(X_test), y_test)
print('LMNN accuracy on test set of {} points: {:.4f}'.format(X_test.shape[0], lmnn_acc))
|
py | 1a34aac60eb571f62eb0abe1aa5d78b24e77716a | # Generated by Django 2.0.7 on 2018-07-31 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_auto_20180731_2025'),
]
operations = [
migrations.AddField(
model_name='notification',
name='comment',
field=models.TextField(blank=True, null=True),
),
]
|
py | 1a34aacecc1de1124b54fb4affa65fdc5b7b687a | from Roteiro8.Roteiro8__funcoes import Grafo
grafo = Grafo()
for v in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:
grafo.adicionaVertice(v)
for a, p in {'a-b': 9, 'a-g': 4,
'b-c': 6, 'b-g': 10, 'b-h': 7,
'c-d': 8, 'c-e': 12, 'c-f': 8,
'd-e': 14,
'e-f': 2,
'f-h': 2, 'f-g': 1}.items():
grafo.adicionaArestaComPeso(a, p)
print('Grafo:')
print(grafo)
print('Pesos:', grafo.pesos())
print()
print('Minimum Spanning Tree com Prim Modificado:')
print(grafo.prim_mod())
|
py | 1a34aca93f5d21c23a8dff247f2b76d8dd52c120 | from random import randint
from kivy.animation import Animation
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import Clock, NumericProperty, ListProperty
from kivy.uix.floatlayout import FloatLayout
Builder.load_file("uix/components/kv/confetti_rain.kv")
class ConfettiItem(FloatLayout):
color = ListProperty()
def __init__(self, color, **kwargs):
super().__init__(**kwargs)
self.color = color
Clock.schedule_once(self._update)
def _update(self, *args):
(Animation(opacity=1, d=randint(*self.parent.time_before_fade))).start(
self)
class ConfettiRain(FloatLayout):
size_range = ListProperty([dp(2), dp(7)])
time_range = ListProperty([3, 5])
speed = ListProperty([3, 6])
time_before_fade = ListProperty([1, 3])
number = NumericProperty(150)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.running = False
def start(self):
if not self.running:
self.running = True
Clock.schedule_once(self._update)
def stop(self):
self.running = False
def add_item(self, count):
for x in range(count):
color = [randint(0, 255)/255, randint(0, 255)/255, randint(0, 255)/255, 1]
item = ConfettiItem(
size=[
randint(int(self.size_range[0]), int(self.size_range[1])),
randint(int(self.size_range[0]), int(self.size_range[1]))
],
pos=[
randint(0, int(self.width)),
randint(int(self.height * 0.9), int(self.height))
],
color=color
)
self.add_widget(item)
self.start_anim(item)
return None
def _update(self, *args):
self.add_item(self.number)
def start_anim(self, item):
        target_pos = [randint(0, int(self.width)), 0]
final_time = randint(*self.time_range)
        speed = randint(*self.speed)  # animation duration drawn from the configured 'speed' range
anim = Animation(pos=target_pos, d=speed)
anim.start(item)
# remove
Clock.schedule_once(lambda x: self.remove_widget(item), final_time)
# add new
if self.running:
Clock.schedule_once(lambda x: self.add_item(1), final_time)
fade_time = final_time - randint(*self.time_before_fade)
Clock.schedule_once(lambda x: self._fade_out(item, fade_time),
final_time - fade_time)
def _fade_out(self, item, time):
anim = Animation(opacity=0, d=time)
anim.start(item) |
py | 1a34ae255497883913e9bff6e4ae9546d10c3dc7 | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
"""Default user for Building Contractor."""
#: First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
|
py | 1a34ae2de342d55c61930962ffdb6a1859db6bc0 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.views.generic.edit import FormView
from django.views.generic.base import View
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout
class RegisterFormView(FormView):
form_class = UserCreationForm
success_url = "/accounts/login/"
template_name = "register.html"
def form_valid(self, form):
form.save()
return super(RegisterFormView, self).form_valid(form)
class LoginFormView(FormView):
form_class = AuthenticationForm
template_name = "login.html"
success_url = "/"
def form_valid(self, form):
self.user = form.get_user()
login(self.request, self.user)
return super(LoginFormView, self).form_valid(form)
class LogoutView(View):
def get(self, request):
logout(request)
return HttpResponseRedirect("/") |
py | 1a34aec63b2ab2fca23c8c3354aef13932627de0 | from __future__ import absolute_import
import argparse
import docker
import os
import random
import sys
import shutil
import traceback
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.constants import INDEX_DIR
from ann_benchmarks.algorithms.definitions import get_definitions, list_algorithms, algorithm_status, InstantiationStatus
from ann_benchmarks.results import get_result_filename
from ann_benchmarks.runner import run, run_docker
def positive_int(s):
i = None
try:
i = int(s)
except ValueError:
pass
if not i or i < 1:
raise argparse.ArgumentTypeError("%r is not a positive integer" % s)
return i
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset',
metavar='NAME',
help='the dataset to load training points from',
default='glove-100-angular',
choices=DATASETS.keys())
parser.add_argument(
"-k", "--count",
default=10,
type=positive_int,
help="the number of near neighbours to search for")
parser.add_argument(
'--definitions',
metavar='FILE',
help='load algorithm definitions from FILE',
default='algos.yaml')
parser.add_argument(
'--algorithm',
metavar='NAME',
help='run only the named algorithm',
default=None)
parser.add_argument(
'--docker-tag',
metavar='NAME',
help='run only algorithms in a particular docker image',
default=None)
parser.add_argument(
'--list-algorithms',
help='print the names of all known algorithms and exit',
action='store_true')
parser.add_argument(
'--force',
help='''re-run algorithms even if their results already exist''',
action='store_true')
parser.add_argument(
'--runs',
metavar='COUNT',
type=positive_int,
help='run each algorithm instance %(metavar)s times and use only the best result',
default=2)
parser.add_argument(
'--timeout',
type=int,
help='Timeout (in seconds) for each individual algorithm run, or -1 if no timeout should be set',
default=5*3600)
parser.add_argument(
'--local',
action='store_true',
help='If set, then will run everything locally (inside the same process) rather than using Docker')
parser.add_argument(
'--batch',
action='store_true',
help='If set, algorithms get all queries at once')
parser.add_argument(
'--max-n-algorithms',
type=int,
help='Max number of algorithms to run (just used for testing)',
default=-1)
parser.add_argument(
'--run-disabled',
help='run algorithms that are disabled in algos.yml',
action='store_true')
args = parser.parse_args()
if args.timeout == -1:
args.timeout = None
if args.list_algorithms:
list_algorithms(args.definitions)
sys.exit(0)
# Nmslib specific code
# Remove old indices stored on disk
if os.path.exists(INDEX_DIR):
shutil.rmtree(INDEX_DIR)
dataset = get_dataset(args.dataset)
dimension = len(dataset['train'][0]) # TODO(erikbern): ugly
point_type = dataset.attrs.get('point_type', 'float')
distance = dataset.attrs['distance']
definitions = get_definitions(args.definitions, dimension, point_type, distance, args.count)
# Filter out, from the loaded definitions, all those query argument groups
# that correspond to experiments that have already been run. (This might
# mean removing a definition altogether, so we can't just use a list
# comprehension.)
filtered_definitions = []
for definition in definitions:
query_argument_groups = definition.query_argument_groups
if not query_argument_groups:
query_argument_groups = [[]]
not_yet_run = []
for query_arguments in query_argument_groups:
fn = get_result_filename(args.dataset,
args.count, definition, query_arguments, args.batch)
if args.force or not os.path.exists(fn):
not_yet_run.append(query_arguments)
if not_yet_run:
if definition.query_argument_groups:
definition = definition._replace(
query_argument_groups = not_yet_run)
filtered_definitions.append(definition)
definitions = filtered_definitions
random.shuffle(definitions)
if args.algorithm:
print('running only', args.algorithm)
definitions = [d for d in definitions if d.algorithm == args.algorithm]
if not args.local:
# See which Docker images we have available
docker_client = docker.from_env()
docker_tags = set()
for image in docker_client.images.list():
for tag in image.tags:
tag = tag.split(':')[0]
docker_tags.add(tag)
if args.docker_tag:
print('running only', args.docker_tag)
definitions = [d for d in definitions if d.docker_tag == args.docker_tag]
if set(d.docker_tag for d in definitions).difference(docker_tags):
print('not all docker images available, only:', set(docker_tags))
print('missing docker images:', set(d.docker_tag for d in definitions).difference(docker_tags))
definitions = [d for d in definitions if d.docker_tag in docker_tags]
else:
def _test(df):
status = algorithm_status(df)
# If the module was loaded but doesn't actually have a constructor of
# the right name, then the definition is broken
assert status != InstantiationStatus.NO_CONSTRUCTOR, """\
%s.%s(%s): error: the module '%s' does not expose the named constructor""" % (df.module, df.constructor, df.arguments, df.module)
if status == InstantiationStatus.NO_MODULE:
# If the module couldn't be loaded (presumably because of a missing
# dependency), print a warning and remove this definition from the
# list of things to be run
print("""\
%s.%s(%s): warning: the module '%s' could not be loaded; skipping""" % (df.module, df.constructor, df.arguments, df.module))
return False
else:
return True
definitions = [d for d in definitions if _test(d)]
if not args.run_disabled:
if len([d for d in definitions if d.disabled]):
print('Not running disabled algorithms:', [d for d in definitions if d.disabled])
definitions = [d for d in definitions if not d.disabled]
if args.max_n_algorithms >= 0:
definitions = definitions[:args.max_n_algorithms]
if len(definitions) == 0:
raise Exception('Nothing to run')
else:
print('Order:', definitions)
for definition in definitions:
print(definition, '...')
try:
if args.local:
run(definition, args.dataset, args.count, args.runs, args.batch)
else:
run_docker(definition, args.dataset, args.count, args.runs, args.timeout, args.batch)
except KeyboardInterrupt:
break
except:
traceback.print_exc()
|
py | 1a34af47a47ef5431afd7f23dce9b43eef86979e | import unittest
from tests._compat import patch, call
import requests_mock
from proxy_db.utils import download_file, get_domain
class TestDownloadFile(unittest.TestCase):
url = 'https://domain.com/'
def setUp(self):
super(TestDownloadFile, self).setUp()
self.session_mock = requests_mock.Mocker()
self.session_mock.start()
def tearDown(self):
super(TestDownloadFile, self).tearDown()
self.session_mock.stop()
@patch('proxy_db.utils.open')
def test_request(self, m):
text = 'foo' * 1000
self.session_mock.get(self.url, text=text)
download_file(self.url)
self.assertEqual(self.session_mock.call_count, 1)
calls = [call.write(text[i:i+1024].encode('utf-8')) for i in range(0, len(text), 1024)]
self.assertEqual(m.return_value.__enter__.return_value.mock_calls, calls)
class TestGetDomain(unittest.TestCase):
def test_get_domain(self):
self.assertEqual(get_domain('https://user:[email protected]:8888/'), 'domain.com')
|
py | 1a34af8755c1a697505f44fdc19a726ad88629f9 | import asyncio
import aiopg
import aiosqlite
from motor import motor_asyncio
import discordSuperUtils
async def database_test():
mongo_database = discordSuperUtils.DatabaseManager.connect(
motor_asyncio.AsyncIOMotorClient("con-string")["name"]
)
    # Replace 'con-string' with the MongoDB connection string and 'name' with the database name you want to use.
postgre_database = discordSuperUtils.DatabaseManager.connect(
await aiopg.create_pool("con-string")
)
# Replace 'con-string' with the PostrgeSQL connection string.
# PostgreSQL connection string example:
# "dbname=name user=postgres password=xxx host=host" host is not required.
mysql_database = await discordSuperUtils.create_mysql(
host=..., port=..., user=..., password=..., dbname=...
)
# Replace '...' with the arguments.
# create_mysql supports mysql AND mariaDB
sqlite_database = discordSuperUtils.DatabaseManager.connect(
await aiosqlite.connect("path")
)
# Replace 'path' with the SQLite database path. (must be on your computer)
await sqlite_database.insert(
"economy", {"guild": ..., "member": ..., "currency": ..., "bank": ...}
)
await sqlite_database.close() # not required.
loop = asyncio.get_event_loop()
loop.run_until_complete(database_test())
|
py | 1a34afeddbb7fb0ff9a434fa71c633021495b238 | from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
from KratosMultiphysics import kratos_utilities
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.CoSimulationApplication.solver_wrappers.sdof.sdof_static_solver import SDoFStaticSolver
import os
import numpy as np
class TestSdofStaticSolver(KratosUnittest.TestCase):
def setUp(self):
self.system_settings = {
"system_parameters":{
"stiffness" : 50000.0,
},
"output_parameters":{
"write_output_file": True,
"file_name" : "result.dat"
}
}
#result.dat
self.end_time = 1.0
self.time = 0.0
@classmethod
def tearDownClass(self):
kratos_utilities.DeleteFileIfExisting("result.dat")
kratos_utilities.DeleteFileIfExisting('fsi_sdof_static/results_final_sdof.dat')
def __CompareResults(self, reference, result):
ref = np.loadtxt(reference, skiprows=1)
res = np.loadtxt(result, skiprows=1)
self.assertEqual(ref.all(), res.all())
def __ExecuteTest(self, settings, ref_file_name):
settings.update(self.system_settings)
system = SDoFStaticSolver(settings)
system.Initialize()
system.SolveSolutionStep()
system.OutputSolutionStep()
self.__CompareResults(os.path.join("reference_files", ref_file_name), "result.dat")
def test_initial_displacement(self):
settings = {
"initial_values":{
"displacement" : 1.0,
}
}
self.__ExecuteTest(settings, "ref_sdof_static_initial_displacement.dat")
def test_final_displacement(self):
import json
parameter_file_name = "fsi_sdof_static/ProjectParametersSDoF.json"
with open(parameter_file_name, 'r') as parameter_file:
settings = json.load(parameter_file)
settings["output_parameters"]["write_output_file"] = True
system = SDoFStaticSolver(settings)
system.Initialize()
system.SolveSolutionStep()
system.OutputSolutionStep()
results_obtained = np.loadtxt('fsi_sdof_static/results_final_sdof.dat', skiprows=1)
results_reference = np.loadtxt('reference_files/ref_sdof_static_final_displacement.dat', skiprows=1)
self.assertEqual(results_reference.all(), results_obtained.all())
if __name__ == '__main__':
KratosUnittest.main()
|
py | 1a34b032c0148944294bf34ca0cfbf442b1887e5 | from nltk import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.corpus import stopwords
import re
import nltk
import collections
from sklearn.metrics import silhouette_samples, silhouette_score
import pandas as pd
import json
FILE = open("/tmp/resultat_clustering.txt", "w")
# Read Token
with open('token.json') as json_file:
TOKENIZED_WORDS = json.load(json_file)
def body_to_words(raw_body):
"""
Args:
raw_body:
Returns:
"""
letters_only = re.sub("[^a-zA-Z]", " ", raw_body)
text = re.sub("<[^<]+?>", "", letters_only)
text_clean = " ".join([w for w in text.split() if ((len(w) > 3) and (len(w) < 23))])
words = text_clean.lower().split()
stop_words = set(
stopwords.words("french") + stopwords.words("english") + TOKENIZED_WORDS
)
meaningful_words = [w for w in words if w not in stop_words]
# clean_words = [w for w in meaningful_words if w not in TOKENIZED_WORDS]
return " ".join(meaningful_words)
def word_tokenizer(text):
"""
Args:
text:
Returns:
"""
tokens = word_tokenize(text, language="french")
stemmer = PorterStemmer()
tokens = [
stemmer.stem(t)
for t in tokens
if t not in (stopwords.words("french") + stopwords.words("english"))
]
return tokens
def tokenize_and_stem(text):
"""
Args:
text:
Returns:
"""
tokens = [
word
for sent in nltk.sent_tokenize(text, language="french")
for word in nltk.word_tokenize(sent, language="french")
]
filtered_tokens = []
stemmer = SnowballStemmer(language="french")
for token in tokens:
if re.search("[a-zA-Z]", token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
def tokenize_only(text):
"""
    first tokenize by sentence, then by word to ensure that punctuation is caught as its own token
filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
Args:
text:
Returns:
"""
tokens = [
word.lower()
for sent in nltk.sent_tokenize(text, language="french")
for word in nltk.word_tokenize(sent, language="french")
]
filtered_tokens = []
for token in tokens:
if re.search("[a-zA-Z]", token):
filtered_tokens.append(token)
return filtered_tokens
def pre_processing_dataset(dataset_train):
"""Prepare dataset to list[dict] to do clustering in body
Args:
dataset_train:
Returns:
"""
num_reviews = dataset_train["body"].size
clean_train_reviews = []
for i in range(0, num_reviews):
if (i + 1) % 1000 == 0:
print("body %d of %d\n" % (i + 1, num_reviews))
clean_train_reviews.append(
{
"body": body_to_words(str(dataset_train["body"][i])),
"idMail": dataset_train["idMail"][i],
}
)
print("Creating the bag of words...\n")
return clean_train_reviews
def predict_clustering_group(k_means_model, tfidf_matrix):
"""
Args:
k_means_model:
tfidf_matrix:
Returns:
"""
cluster_labels = k_means_model.fit_predict(tfidf_matrix)
print("cluster_labels", cluster_labels)
silhouette_avg = silhouette_score(tfidf_matrix, cluster_labels)
print("silhouette_avg", silhouette_avg)
sample_silhouette_values = silhouette_samples(tfidf_matrix, cluster_labels)
print("sample_silhouette_values", sample_silhouette_values)
centers = k_means_model.cluster_centers_
print("centers", centers)
n_clusters = centers.shape[0]
print("n_clusters", n_clusters)
return cluster_labels, silhouette_avg, sample_silhouette_values
def build_label_mails(
vocab_frame,
k_means_model,
tfidf_vectorizer,
clusters,
clean_train_reviews,
n_clusters,
):
"""
Args:
vocab_frame:
k_means_model:
tfidf_vectorizer:
clusters:
clean_train_reviews:
n_clusters:
Returns:
"""
order_centroids = k_means_model.cluster_centers_.argsort()[:, ::-1]
terms = tfidf_vectorizer.get_feature_names()
label = []
for cluster in range(n_clusters):
cluster_label = []
for ind in order_centroids[cluster, :n_clusters]:
label_name = (
vocab_frame.loc[terms[ind].split(" ")]
.values.tolist()[0][0]
.encode("utf-8", "ignore")
)
cluster_label.insert(cluster, label_name.decode("utf-8"))
label.append(cluster_label)
for cluster in range(n_clusters):
FILE.write("cluster " + str(cluster) + ":" + "\n")
FILE.write("centroid" + str(cluster) + "\n")
for i, sentence in enumerate(clusters[cluster]):
clean_train_reviews[sentence]["cluster_group"] = str(cluster)
clean_train_reviews[sentence]["label"] = label[cluster]
FILE.write(
"mail :" + str(i) + ": " + str(clean_train_reviews[sentence]) + "\n"
)
centers = k_means_model.cluster_centers_
return label
def build_cluster_from_model(n_clusters, tfidf_matrix):
"""
Args:
n_clusters:
tfidf_matrix:
Returns:
dict(clusters):
k_means_model:
"""
k_means_model = KMeans(
n_clusters=n_clusters, init="k-means++", max_iter=300, n_init=1
)
k_means_model.fit(tfidf_matrix)
clusters = collections.defaultdict(list)
for i, label in enumerate(k_means_model.labels_):
clusters[label].append(i)
return dict(clusters), k_means_model
def build_tfidf_matrix_vector(dataset):
"""
Args:
dataset:
Returns:
tfidf_matrix:
tfidf_vectorizer:
"""
train_body = []
for i in range(0, len(dataset)):
train_body.append(dataset[i]["body"])
tfidf_vectorizer = TfidfVectorizer(
tokenizer=tokenize_and_stem,
analyzer="word",
stop_words=stopwords.words("french")
+ TOKENIZED_WORDS
+ stopwords.words("english"),
max_df=0.8,
min_df=0.1,
lowercase=False,
use_idf=True,
max_features=200000,
ngram_range=(1, 3),
)
tfidf_matrix = tfidf_vectorizer.fit_transform(train_body)
print(tfidf_matrix.shape)
return tfidf_matrix, tfidf_vectorizer
def build_vocab_frame(clean_train_reviews):
""" Build frame of vocabulary
Args:
clean_train_reviews(list): list of mails
Returns:
vocab_frame:
"""
body = [mail["body"] for mail in clean_train_reviews]
total_vocab_stemmed = []
total_vocab_tokenized = []
for i in body:
allwords_stemmed = tokenize_and_stem(
i
) # for each item in 'synopses', tokenize/stem
total_vocab_stemmed.extend(
allwords_stemmed
) # extend the 'totalvocab_stemmed' list
allwords_tokenized = tokenize_only(i)
total_vocab_tokenized.extend(allwords_tokenized)
vocab_frame = pd.DataFrame(
{"words": total_vocab_tokenized}, index=total_vocab_stemmed
)
return vocab_frame
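# A hedged end-to-end sketch chaining the helpers above; the DataFrame columns
# ('body', 'idMail') and the cluster count are assumptions for the example.
def _clustering_pipeline_example(dataset_train, n_clusters=5):
    clean_train_reviews = pre_processing_dataset(dataset_train)
    tfidf_matrix, tfidf_vectorizer = build_tfidf_matrix_vector(clean_train_reviews)
    clusters, k_means_model = build_cluster_from_model(n_clusters, tfidf_matrix)
    vocab_frame = build_vocab_frame(clean_train_reviews)
    return build_label_mails(vocab_frame, k_means_model, tfidf_vectorizer,
                             clusters, clean_train_reviews, n_clusters)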
|
py | 1a34b0c66f451b3063c14518b06b4a84fb1a2d6b | from flask import request, jsonify, Blueprint
from app.models import Nonprofit, NonprofitSchema
api_blueprint = Blueprint('api', __name__,)
npschema = NonprofitSchema()
npschemas = NonprofitSchema(many=True)
def jsonsift(obj, attrlist):
    ''' Use a custom attribute list to pick attributes from the model object and build a filtered JSON response. '''
resp = {}
for attr in attrlist:
resp[attr] = getattr(obj,attr)
return resp
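# For instance (illustrative field names and values only):
#   jsonsift(org, ["id", "name"])  ->  {"id": 1, "name": "Some Nonprofit"}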
@api_blueprint.route("/api/orgs/all", methods=["GET"])
def get_orgs():
all_orgs = Nonprofit.query.all()
    # placeholder pagination: return at most the first 11 records
records = []
for x, org in enumerate(all_orgs):
records.append(org)
if x == 10:
break
return npschemas.jsonify(records)
@api_blueprint.route("/api/orgs/id/<int:id>", methods=["GET"])
def get_org_by_id(id):
org = Nonprofit.query.get(id)
return npschema.jsonify(org)
@api_blueprint.route("/api/orgs/id/<int:id>/address", methods=["GET"])
def get_org_address_by_id(id):
org = Nonprofit.query.get(id)
only_these_fields = ["id", "ein", "name", "street", "city", "state", "zipcode"]
return jsonify(jsonsift(org, only_these_fields))
@api_blueprint.route("/api/orgs/id/<int:id>/geocode", methods=["GET"])
def get_org_geocode_by_id(id):
org = Nonprofit.query.get(id)
only_these_fields = ["id", "ein", "name", "longitude", "latitude"]
return jsonify(jsonsift(org, only_these_fields))
@api_blueprint.route("/api/orgs/ein/<int:ein>", methods=["GET"])
def get_org_by_ein(ein):
org = Nonprofit.query.filter(Nonprofit.ein == ein).first()
return npschema.jsonify(org)
@api_blueprint.route("/api/orgs/ein/<int:ein>/address", methods=["GET"])
def get_org_address_by_ein(ein):
org = Nonprofit.query.filter(Nonprofit.ein == ein).first()
only_these_fields = ["id", "ein", "name", "street", "city", "state", "zipcode"]
return jsonify(jsonsift(org, only_these_fields))
@api_blueprint.route("/api/orgs/ein/<int:ein>/geocode", methods=["GET"])
def get_org_geocode_by_ein(ein):
org = Nonprofit.query.filter(Nonprofit.ein == ein).first()
only_these_fields = ["id", "ein", "name", "longitude", "latitude"]
return jsonify(jsonsift(org, only_these_fields))
|
py | 1a34b0f4c65733791e5ac727bc3a1ba31316629b | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import math
import mxnet as mx
from mxnet import np, npx
from mxnet.gluon.nn.activations import Activation
from . import constants as C
logger = logging.getLogger(__name__)
# Modified from the source to mxnet.gluon.nn.basic_layers.Dense which is:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class QuantizableDense(mx.gluon.HybridBlock):
r"""Optionally Quantized fully-connected NN layer.
`QuantDense` implements the operation:
`output = activation(dot(input, weight) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `weight` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: the input must be a tensor with rank 2. Use `flatten` to convert it
to rank 2 manually if necessary.
Parameters
----------
units : int
Dimensionality of the output space.
activation : str
Activation function to use. See help on `Activation` layer.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool, default True
Whether the layer uses a bias vector.
flatten: bool, default True
Whether the input tensor should be flattened.
If true, all but the first axis of input data are collapsed together.
If false, all but the last axis of input data are kept the same, and the transformation
applies on the last axis.
dtype : str or np.dtype, default C.DTYPE_FP32
Data type of output embeddings.
weight_initializer : str or `Initializer`
Initializer for the `kernel` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
in_units : int, optional
Size of the input data. If not specified, initialization will be
deferred to the first time `forward` is called and `in_units`
will be inferred from the shape of input data.
prefix : str or None
See document of `Block`.
params : ParameterDict or None
See document of `Block`.
Inputs:
- **data**: if `flatten` is True, `data` should be a tensor with shape
`(batch_size, x1, x2, ..., xn)`, where x1 * x2 * ... * xn is equal to
`in_units`. If `flatten` is False, `data` should have shape
`(x1, x2, ..., xn, in_units)`.
Outputs:
- **out**: if `flatten` is True, `out` will be a tensor with shape
`(batch_size, units)`. If `flatten` is False, `out` will have shape
`(x1, x2, ..., xn, units)`.
"""
def __init__(self, units, dtype: str, activation=None, use_bias=True, flatten=True,
weight_initializer=None, bias_initializer='zeros',
in_units=0):
super(QuantizableDense, self).__init__()
self._flatten = flatten
self._dtype = dtype
self._units = units
self._in_units = in_units
self.scaling = None
if dtype == C.DTYPE_INT8:
self.scaling = mx.gluon.Parameter('scaling', shape=(1,),
# Initialize to an obviously wrong value so we can detect later
init=mx.initializer.Constant(-1.0), dtype=C.DTYPE_FP32,
allow_deferred_init=True)
weight_initializer = 'zeros' # Most initializers don't work for int8, but this is for inference anyway.
self.weight = mx.gluon.Parameter('weight', shape=(units, in_units),
init=weight_initializer, dtype=dtype,
allow_deferred_init=True)
self.bias = mx.gluon.Parameter('bias', shape=(units,),
init=bias_initializer, dtype=C.DTYPE_FP32,
allow_deferred_init=True) if use_bias else None
        self.act = Activation(activation) if activation is not None else None
def cast(self, dtype):
if self._dtype != C.DTYPE_INT8:
self._dtype = dtype
super(QuantizableDense, self).cast(dtype)
else:
            # Do not cast an already quantized matrix.
logger.warning("Ignoring casting on int8 matrix")
def infer_shape(self, x, *args):
if self._flatten:
num_input = 1
for i in range(1, x.ndim):
num_input *= x.shape[i]
self.weight.shape = (self.weight.shape[0], num_input)
else:
self.weight.shape = (self.weight.shape[0], x.shape[x.ndim - 1])
def forward(self, x):
if self._dtype == C.DTYPE_INT8:
if self.bias is not None:
act = npx.intgemm_fully_connected(x,
weight=self.weight.data(),
scaling=self.scaling.data(),
bias=self.bias.data(), no_bias=False,
num_hidden=self._units,
flatten=self._flatten)
else:
act = npx.intgemm_fully_connected(x,
weight=self.weight.data(),
scaling=self.scaling.data(),
no_bias=True,
num_hidden=self._units,
flatten=self._flatten)
else:
act = npx.fully_connected(x,
weight=self.weight.data(),
bias=self.bias.data() if self.bias else None, no_bias=self.bias is None,
num_hidden=self._units,
flatten=self._flatten)
if self.act is not None:
act = self.act(act)
return act
def __repr__(self):
s = '{name}({layout}, {act})'
shape = self.weight.shape
return s.format(name=self.__class__.__name__,
act=self.act if self.act else 'linear',
layout='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]))
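# A hedged construction sketch for this layer; the sizes below are assumptions
# chosen only for illustration.
def _quantizable_dense_example():
    return QuantizableDense(units=512, dtype=C.DTYPE_FP32, activation='relu', in_units=256)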
def optimize_quantization_mse(tensor, rounds=10):
"""
Minimize mean squared error of quantizing a tensor, returning the top value
(i.e. the one that quantizes to 127). Scaling = 127.0 / return value.
This is a convex optimization problem. EM works but makes slow steps.
Instead of EM, use binary search in the direction minimization suggests.
"""
best_mse = math.inf
best_top = None
maxabs = npx.intgemm_maxabsolute(tensor)
low = 0.0
high = maxabs
for _ in range(rounds):
value = (low + high) / 2.0
quant = npx.intgemm_prepare_data(tensor, value)
quant_float = quant.astype(C.DTYPE_FP32)
mse = (quant_float * (value / 127.0) - tensor).norm().item() / math.sqrt(float(tensor.size))
if mse < best_mse:
best_mse = mse
best_top = value
# This optimizes scaling subject to cluster assignment.
# It can be used for EM but the step is really slow, so use it for direction.
scale = np.sum(quant_float * quant_float) / np.sum(quant_float * tensor)
top = 127.0 / scale.item()
if top < value:
high = value
else:
low = value
return best_top
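# A NumPy sketch of the quantize -> dequantize -> RMSE step that the binary search
# above evaluates for each candidate top value. The helper is hypothetical; the
# exact rounding/clipping done by intgemm_prepare_data is an assumption here.
def _quantization_mse_numpy_sketch(tensor, top_value):
    import numpy as np
    scale = 127.0 / top_value
    quant = np.clip(np.round(tensor * scale), -127, 127)
    dequant = quant / scale
    return float(np.sqrt(np.mean((dequant - tensor) ** 2)))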
def extract_quant_max(tensor_param: mx.gluon.parameter.Parameter, scaling_param: mx.gluon.parameter.Parameter) -> float:
"""
Extract or tune the scaling factor for a parameter.
"""
scaling = scaling_param.data()
if scaling.item() < 0:
# Bogus auto initialized scaling factor.
b_max = optimize_quantization_mse(tensor_param.data())
scaling_param.set_data(b_max / 127.0)
else:
b_max = scaling * 127.0
return b_max
def convert_weights_disk_format(params: C.ParameterDict, dtype_store: str):
"""
Convert weights from float32 MXNet format (B^T in float32) to disk format
(B^T in int8 format).
If dtype_store == 'int8' then compute scaling and quantize the model.
If dtype_store == 'float32' then just annotate with scaling factors.
    :param params: model parameters from model.collect_params() in a float32
        model.
    :param dtype_store: data type to store on disk.
"""
logger.info("Optimizing quantization scaling factors")
for name, param in params.items():
if name.endswith("_weight"):
scaling_name = name[0:-6] + "scaling"
if scaling_name in params:
b_max = extract_quant_max(param, params[scaling_name])
if dtype_store == C.DTYPE_INT8:
quantized = npx.intgemm_prepare_data(param.data(), b_max)
param.set_data(quantized)
param.dtype = C.DTYPE_INT8
def convert_weights_cpu_dependent(params: C.ParameterDict):
"""
Convert weights from disk format to intgemm's CPU-dependent format for
quantized matrix multiplication.
    :param params: model parameters from model.collect_params() in a model that
        came from convert_weights_disk_format.
"""
logger.info("Converting weights to CPU format.")
for name, param in params.items():
if name.endswith("_weight"):
scaling_name = name[0:-6] + "scaling"
if scaling_name in params:
if param.dtype == C.DTYPE_INT8:
# Already fully quantized, just rearrange.
weight = npx.intgemm_prepare_weight(param.data(), already_quantized=True)
else:
# Use offline scaling factor if available.
b_max = extract_quant_max(param, params[scaling_name])
weight = npx.intgemm_prepare_weight(param.data(), b_max)
param.set_data(weight)
param.dtype = C.DTYPE_INT8
|
py | 1a34b1103d003a8250d1fe69187804532272d8be | """
Test label generation for nodes.
"""
from map_machine.map_configuration import LabelMode
from map_machine.text import Label
from tests import SCHEME
__author__ = "Sergey Vartanov"
__email__ = "[email protected]"
def construct_labels(tags: dict[str, str]) -> list[Label]:
"""Construct labels from OSM node tags."""
processed: set[str] = set()
return SCHEME.construct_text(tags, processed, LabelMode.ALL)
def test_1_label() -> None:
"""Test tags that should be converted into single label."""
labels = construct_labels({"name": "Name"})
assert len(labels) == 1
assert labels[0].text == "Name"
def test_1_label_unknown_tags() -> None:
"""
    Test tags mixed with some unknown tags that are still converted into a single label.
"""
labels = construct_labels({"name": "Name", "aaa": "bbb"})
assert len(labels) == 1
assert labels[0].text == "Name"
def test_2_labels() -> None:
"""Test tags that should be converted into two labels."""
labels = construct_labels({"name": "Name", "ref": "5"})
assert len(labels) == 2
assert labels[0].text == "Name"
assert labels[1].text == "5"
|
py | 1a34b16ee0a968f0b0c0efc46b76120f7a3325b4 | """Typing middleware."""
from typing import Any, Callable, Dict, Optional
import falcon
from falcon import Request, Response
from falcontyping.base import (PydanticBaseModel, TypedResource,
TypeValidationError)
from falcontyping.typedjson import DecodingError, ExternalSerializerException
from falcontyping.typedjson import decode as decode_using_hints
_VALID_RESPONSE_TYPES = set([PydanticBaseModel, dict, type(None)])
class TypingMiddleware:
@staticmethod
def _decode_or_raise_error(hint: Any, parameter: Any) -> Any:
"""
Decode value using type hint or fail.
:raises: falcon.HTTPError or ExternalSerializerException
"""
result = decode_using_hints(hint, parameter)
if isinstance(result, DecodingError):
if isinstance(result.reason, ExternalSerializerException):
raise result.reason.exception from None
else:
raise falcon.HTTPError(status=falcon.HTTP_UNPROCESSABLE_ENTITY, # pylint: disable=no-member
description=f'\'{parameter}\' must be of type {hint} not {type(parameter)}')
return result
@staticmethod
def _try_decode_query_or_body(request: falcon.Request, hint: Any) -> Any:
"""Decode values by looking for them in both URI and request body."""
        # An assumption is being made here: that only POST, PUT and PATCH can have bodies.
if request.method.lower() in ['post', 'put', 'patch']:
key = 'media'
else:
key = 'params'
return TypingMiddleware._decode_or_raise_error(hint, getattr(request, key, None))
def process_request(self, request: Request, response: Response) -> None:
"""
Process the request before routing it.
Because Falcon routes each request based on req.path, a
request can be effectively re-routed by setting that
attribute to a new value from within process_request().
:param request: Request object that will eventually be
routed to an on_* responder method.
:param response: Response object that will be routed to
the on_* responder.
"""
...
def process_resource(self, request: Request, response: Response, resource: Any, parameters: Dict) -> None:
"""
Process the request after routing.
This method is only called when the request matches
a route to a resource.
:param request: Request object that will be passed to the
routed responder.
:param response: Response object that will be passed to the
responder.
:param resource: Resource object to which the request was
routed.
:param parameters: A dict-like object representing any additional
parameters derived from the route's URI template fields,
that will be passed to the resource's responder
method as keyword arguments.
"""
if not isinstance(resource, TypedResource):
return
handler: Optional[Callable] = getattr(resource, 'on_%s' % request.method.lower(), None)
if handler:
# Get hints for only those variables that should be passed to the request handler.
hints = resource.hints[handler.__name__]
# Decode values using type hints, All values in parameters will be based as
# Keyword arguments to the request handler.
for parameter in filter(hints.get, parameters):
parameters[parameter] = self._decode_or_raise_error(hints[parameter], parameters.get(parameter))
# Decode body parameter if there is one.
body_parameter = resource.methods_body_parameter[handler.__name__]
if body_parameter:
parameters[body_parameter] = self._try_decode_query_or_body(request, hints[body_parameter])
def process_response(self, request: Request, response: Response, resource: Any, request_succeeded: bool) -> None:
"""
Post-processing of the response (after routing).
:param request: Request object.
:param response: Response object.
:param resource: Resource object to which the request was routed.
May be None if no route was found for the request.
:param request_succeeded: True if no exceptions were raised while the framework processed and
routed the request; otherwise False.
"""
if not (isinstance(resource, TypedResource) and request_succeeded):
return
handler: Optional[Callable] = getattr(resource, 'on_%s' % request.method.lower(), None)
# Get type hint for the return type of the request handler.
hint: Any = resource.hints[handler.__name__].get('return') if handler else None
if hint:
media = getattr(response, 'media', None)
media = decode_using_hints(hint, media)
if not any(isinstance(media, type_) for type_ in _VALID_RESPONSE_TYPES): # type: ignore
                raise TypeValidationError(f'{resource}.{handler} returned an unexpected value. ',
f'Resource methods must return either Nothing, '
f'marshmallow.Schema or pydantic.BaseModel not {type(media)}')
if isinstance(media, PydanticBaseModel):
media = media.dict()
response.media = media
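# A hedged wiring sketch showing how this middleware is typically registered; the
# route and resource below are assumptions for illustration only.
def _example_app():
    app = falcon.API(middleware=[TypingMiddleware()])
    # app.add_route('/items/{item_id}', SomeTypedResource())  # hypothetical TypedResource subclass
    return app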
|
py | 1a34b16fade6b8ecc320ee3507885a5fda74a3a2 | from rest_framework import serializers
from api.models import Hospital, HospitalNetwork, Department
from api.models import Bed, Depression, Cancer
from api.models import Population, PopulationDetailed
class PopulationSerializer(serializers.ModelSerializer):
class Meta:
model = Population
fields = ('id', 'name', 'year', 'amount')
class PopulationDetailedSerializer(serializers.ModelSerializer):
class Meta:
model = PopulationDetailed
fields = ('id', 'name', 'year', 'amount', 'age', 'gender', 'code')
class CancerSerializer(serializers.ModelSerializer):
class Meta:
model = Cancer
fields = ('id', 'agegroup', 'gender', 'region', 'cancer', 'value')
class DepressionSerializer(serializers.ModelSerializer):
class Meta:
model = Depression
fields = ('id', 'gender', 'agegroup', 'crude', 'province', 'year')
class BedSerializer(serializers.ModelSerializer):
class Meta:
model = Bed
fields = ('id', 'year', 'month', 'amount','department')
depth = 1
class DepartmentSerializer(serializers.ModelSerializer):
class Meta:
model = Department
fields = ('id', 'code', 'name')
class HospitalNetworkSerializer(serializers.ModelSerializer):
class Meta:
model = HospitalNetwork
fields = ('id', 'name')
class HospitalSerializer(serializers.ModelSerializer):
class Meta:
model = Hospital
fields = ('id', 'siteNbr', 'name', 'latitude', 'longitude', 'nbBeds', 'address', 'postalCode', 'town', 'telephone', 'website', 'province', 'type','network')
|
py | 1a34b1ef3bcf3e4d9d71225c3db45be27cc14e1b | import networkx as nx
import utils
import sys
import logging
import os
import uuid
def convert(args):
for graph in args.graphs:
if args.nocycles:
g=nx.DiGraph()
else:
g=nx.MultiDiGraph()
g.graph['paths']=[]
g.graph['path2id']=dict()
g.graph['id2path']=dict()
if graph.endswith(".gfa"): #gfa to gml/gfa
utils.read_gfa(graph,None,None,g,minsamples=args.minsamples,
maxsamples=args.maxsamples,
targetsample=args.targetsample,
remap=False)
if args.type=="gfa":
fn=graph.replace(".gfa",".rewrite.gfa")
graph=utils.write_gfa(g,"", outputfile=fn)
logging.info("gfa graph written to: %s"%fn)
elif args.type=="gml":
fn=utils.write_gml(g,"", hwm=args.hwm, outputfile=graph.replace(".gfa",""), partition=args.partition)
logging.info("gml graph written to: %s"%fn)
elif args.type=="maf":
logging.info("Converting graph to maf..")
graph2maf(g,graph.replace(".gfa",".maf"))
elif graph.endswith(".maf"): #multiple alignment format, convert to graph
g=maf2graph(graph)
filename=graph[:graph.rfind(".")]+".gml"
utils.write_gml(g,"", outputfile=filename)
filename=graph[:graph.rfind(".")]+".gfa"
utils.write_gfa(g,"", outputfile=filename)
logging.debug("gfa graph written to: %s"%filename)
elif graph.endswith(".fa") or graph.endswith(".fasta") or graph.endswith(".fna"): #assume fasta to gfa
if args.aligned:
seqs=[]
names=[]
for name,seq in utils.fasta_reader(graph,keepdash=True):
seqs.append(seq)
names.append(name)
g,nid=utils.aln2graph(seqs,names)
else:
i=0
start=uuid.uuid4().hex
end=uuid.uuid4().hex
g.graph['startnodes']=[start]
g.graph['endnodes']=[end]
g.add_node(start,offsets=dict())
g.add_node(end,offsets=dict())
for i,v in enumerate(utils.fasta_reader(graph)):
name,seq=v
g.graph['paths'].append(name)
g.graph['path2id'][name]=i
g.graph['id2path'][i]=name
g.node[start]['offsets'][i]=0
g.node[end]['offsets'][i]=len(seq)
g.add_node(i,offsets={i:0},seq=seq)
g.add_edge(start,i,paths=set([i]))
g.add_edge(i,end,paths=set([i]))
filename=graph[:graph.rfind(".")]+".gfa"
utils.write_gfa(g,"", outputfile=filename)
logging.debug("gfa graph written to: %s"%filename)
else:
logging.fatal("Unknown filetype, need gfa or fasta extension.")
return
#converts a multiple alignment format file to a graph
def maf2graph(maffile):
files=set()
G=nx.MultiDiGraph()
startnode=uuid.uuid4().hex
endnode=uuid.uuid4().hex
G.graph['startnodes']=set([startnode])
G.graph['endnodes']=set([endnode])
G.graph['path2id']=dict()
G.add_node(startnode,offsets=dict())
G.add_node(endnode,offsets=dict())
nid=0
with open(maffile,"r") as maf:
for line in maf:
if line.startswith("#"):
continue
elif line.startswith("a"): #start of an aligned segment
nid+=1
G.add_node(nid,data=dict())
elif line.startswith("s"):
cols=line.rstrip().split()
                if '.' in cols[1]: #TODO: use db parameter to specify a single mfa file with all sequences
file,name=cols[1][:cols[1].find('.')],cols[1][cols[1].find('.')+1:]
files.add(file)
else:
file=None #args.db?
name=cols[1]
if name not in G.graph['path2id']:
G.graph['path2id'][name]=len(G.graph['path2id'])
G.node[startnode]['offsets'][G.graph['path2id'][name]]=0
G.node[nid]['data'][(file,name)]={'start':int(cols[2]),
'end':int(cols[2])+int(cols[3]),
'orientation':cols[4],
'aln':cols[6]
}
nid+=1
remove=[]
for node,d in G.nodes(data=True):
if 'data' in d and len(d['data'])==1: #multiplicity of 1, strictly not an alignment
remove.append(node)
G.remove_nodes_from(remove)
db=dict() #map name to sequence
for file in files:
for name,seq in utils.fasta_reader(file+".fasta"): #guess that the original file has a ".fasta" extension
name=name.split()[0]
key=(file,name)
if key in db:
logging.fatal("Non unique contig-name: %s. quit."%name)
sys.exit(1)
else:
db[key]=seq
remove=[]
#for every sequence, check that none of the alignments overlap, otherwise assignment is not 1-1
for file,name in db:
seq=db[(file,name)]
intvs=[]
for node in G:
if 'data' in G.node[node]: #does the node represent an aligned segment?
if (file,name) in G.node[node]['data']:
intvs.append((G.node[node]['data'][(file,name)]['start'] , G.node[node]['data'][(file,name)]['end'], node))
intvs.sort() #sort by start position
pstart=0
pend=0
pnode=startnode
unaligned=[]
for start,end,node in intvs:
if start>pend:
unaligned.append((pend,start))
G.add_node(nid,intv=(pend,start),seq=seq[pend:start])
G.add_edge(pnode,nid,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
G.add_edge(nid,node,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
nid+=1
elif start<pend:
logging.fatal("Overlapping alignments for sequence: %s.%s --> (%d,%d) and (%d,%d)."%(file,name,pstart,pend,start,end))
remove.append(node)
# sys.exit(1)
else: #no gap, just connect subsequent intervals
G.add_edge(pnode,node,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
pstart,pend,pnode=start,end,node
if len(seq)!=pend:
unaligned.append((pend,len(seq)))
G.add_node(nid,intv=((pend,len(seq))),seq=seq[pend:len(seq)])
G.add_edge(pnode,nid,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
G.add_edge(nid,endnode,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
nid+=1
else:
G.add_edge(pnode,endnode,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
G.remove_nodes_from(remove)
# print "Unaligned segments",unaligned
alignments=[node for node in G if 'data' in G.node[node]]
for node in alignments: #expand all alignments in the graph
if 'data' in G.node[node]:
seqs=[]
names=[]
offsets={}
for file,name in G.node[node]['data']:
seqs.append(G.node[node]['data'][(file,name)]['aln'])
offsets[G.graph['path2id'][name]]=G.node[node]['data'][(file,name)]['start']
names.append(name)
sg,nid=utils.aln2graph(seqs,names,idoffset=nid,path2id=G.graph['path2id'],offsets=offsets)
nid+=1
G.add_nodes_from(sg.nodes(data=True))
G.add_edges_from(sg.edges(data=True))
assert(len(sg.graph['startnodes'])==1)
assert(len(sg.graph['endnodes'])==1)
sgstart=sg.graph['startnodes'][0]
sgend=sg.graph['endnodes'][0]
for v,t,d in G.in_edges(node,data=True):
G.add_edge(v,sgstart,paths=d['paths'],ofrom="+",oto="+")
for v,t,d in G.out_edges(node,data=True):
G.add_edge(sgend,t,paths=d['paths'],ofrom="+",oto="+")
#hack this in here so we can continue
G.node[sgstart]['seq']=""
G.node[sgend]['seq']=""
nx.relabel_nodes(G,{sgstart: nid, sgend: nid+1},copy=False)
nid+=2
G.remove_node(node)
return G
def graph2maf(G,filename):
if isinstance(G,nx.MultiDiGraph):
#TODO: decompose global alignment into local alignments by deconnecting structure edges
#determine set of structure edges
orgpaths=set([G.graph['path2id'][p] for p in G.graph['paths'] if p.startswith('*')])
refpaths=set([G.graph['path2id'][p] for p in G.graph['paths'] if not p.startswith('*')])
es=[]
for e0,e1,d in G.edges(data=True):
if len(d['paths'] & refpaths)==0: #edge that exclusively represents structural event
es.append((e0,e1))
toremove=es
G.remove_edges_from(toremove)
sizes={sid:0 for sid in G.graph['id2path']}
with open(filename,'w') as maf:
for g in nx.weakly_connected_component_subgraphs(G):
longest=0
sids=set()
for node in nx.topological_sort(g):
if type(node)!=str:
go=max([0]+[G.node[pred]['graphoffset']+len(G.node[pred]['seq']) for pred in G.predecessors(node) if type(pred)!=str])
G.node[node]['graphoffset']=go
if go+len(G.node[node]['seq'])>longest:
longest=go+len(G.node[node]['seq'])
for k in G.node[node]['offsets']:
sids.add(k)
if G.node[node]['offsets'][k]+len(G.node[node]['seq'])>sizes[k]:
sizes[k]=G.node[node]['offsets'][k]+len(G.node[node]['seq'])
ml=max([len(p) for p in G.graph['paths']])
maf.write("##maf version=1\n")
maf.write("a\n")
for sid in sids:
path=G.graph['id2path'][sid]
o=0
sl=0
maf.write("s %s %d %d + %-10d "%(path.ljust(ml), 0, sizes[G.graph['path2id'][path]], sizes[G.graph['path2id'][path]]) )
for node in nx.topological_sort(g):
if type(node)!=str and sid in G.node[node]['offsets']:
while o<G.node[node]['graphoffset']:
maf.write("-")
o+=1
sl+=len(G.node[node]['seq'].replace("-",""))
maf.write("%s"%G.node[node]['seq'])
o+=len(G.node[node]['seq'])
maf.write("-"*(longest-o)) #pad with dash so all lines are equally long
maf.write("\n")
maf.write("\n")
|
py | 1a34b286a460eb0def907b07667ade256694e95d | from collections import Counter
from itertools import product
def count_letters(input_):
"""
Given an input_ like "abcdef" return a tuple with the result of the following rules
a letter appears exactly 2 times
a letter appears exactly 3 times
"""
counter = Counter(input_)
two_times = 0
three_times = 0
for quantity in counter.values():
if quantity == 2:
two_times += 1
elif quantity == 3:
three_times += 1
return two_times > 0, three_times > 0
def check_repeated(str1, str2):
"""
    Given 2 strings of equal length, check whether they differ in exactly 1 character.
    If so, return the position of that character; otherwise return None
"""
assert len(str1) == len(str2)
position = None
quantity = 0
for i in range(len(str1)):
if str1[i] != str2[i]:
quantity += 1
position = i
if quantity == 1:
return position
else:
return None
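# Hedged sanity checks for the helpers above; the input strings are illustrative.
def _examples():
    assert count_letters("bababc") == (True, True)   # 'a' appears twice, 'b' three times
    assert check_repeated("fghij", "fguij") == 2      # the strings differ only at index 2
    assert check_repeated("abcde", "axcye") is None   # two differences -> no single position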
def run():
with open("../inputs/day02.txt") as f:
lines = f.readlines()
two_times, three_times = 0, 0
for line in lines:
if not line:
continue
two, three = count_letters(line)
if two:
two_times += 1
if three:
three_times += 1
print(f"The checksum is {two_times * three_times}")
for id_1, id_2 in product(lines, lines):
if id_1 == id_2:
continue
pos = check_repeated(id_1, id_2)
if pos is not None:
res = id_1[0:pos] + id_1[pos + 1:]
print(f"The result is {res}")
|
py | 1a34b2f551d4b37676c06264e17671ae81b91d24 | """Cloud optical properties from ECHAM."""
from os.path import dirname, join
import numpy as np
import xarray as xr
from scipy.interpolate import interp1d
class EchamCloudOptics:
"""Interface to interpolate cloud optical properties used in ECHAM."""
def __init__(self):
self.database = xr.open_dataset(
join(dirname(__file__), "data", "ECHAM6_CldOptProps.nc")
)
def interp_ice_properties(self, particle_size=100.0, kind="linear"):
x = self.database.re_crystal
r = interp1d(
x,
self.database[f"co_albedo_crystal"],
axis=1,
kind=kind,
)
s = interp1d(
x,
self.database[f"asymmetry_factor_crystal"],
axis=1,
kind=kind,
)
t = interp1d(
x,
self.database[f"extinction_per_mass_crystal"],
axis=1,
kind=kind,
)
return (
1 - r(particle_size)[16:], # SW bands
s(particle_size)[16:],
t(particle_size)[16:],
t(particle_size)[:16], # LW bands
)
def interp_liquid_properties(self, particle_size=10.0, kind="linear"):
x = self.database.re_droplet
r = interp1d(
x,
self.database[f"co_albedo_droplet"],
axis=1,
            kind=kind,
)
s = interp1d(
x,
self.database[f"asymmetry_factor_droplet"],
axis=1,
            kind=kind,
)
t = interp1d(
x,
self.database[f"extinction_per_mass_droplet"],
axis=1,
            kind=kind,
)
return (
1 - r(particle_size)[16:],
s(particle_size)[16:],
t(particle_size)[16:],
t(particle_size)[:16],
)
def get_cloud_properties(self, particle_size, water_path, phase="ice"):
if phase == "ice":
ssa, asym, tau_sw, tau_lw = self.interp_ice_properties(particle_size)
elif phase == "liquid":
ssa, asym, tau_sw, tau_lw = self.interp_liquid_properties(particle_size)
else:
raise ValueError('Invalid phase. Allowed values are "ice" and "liquid".')
cld_optics = xr.Dataset(
coords={
"num_shortwave_bands": np.arange(14),
"num_longwave_bands": np.arange(16),
},
)
cld_optics["single_scattering_albedo_due_to_cloud"] = (
("num_shortwave_bands",),
ssa.ravel(),
)
cld_optics["cloud_asymmetry_parameter"] = (
("num_shortwave_bands",),
asym.ravel(),
)
cld_optics["cloud_forward_scattering_fraction"] = (
("num_shortwave_bands",),
asym.ravel() ** 2,
)
cld_optics["shortwave_optical_thickness_due_to_cloud"] = (
("num_shortwave_bands",),
water_path * tau_sw.ravel(),
)
cld_optics["longwave_optical_thickness_due_to_cloud"] = (
("num_longwave_bands",),
water_path * tau_lw.ravel(),
)
return cld_optics
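# Illustrative usage sketch (added; not part of the original module). The particle
# size and water path below are placeholder values, and running this requires the
# bundled ECHAM6_CldOptProps.nc data file to be present.
if __name__ == "__main__":
    optics = EchamCloudOptics()
    ice_cloud = optics.get_cloud_properties(particle_size=80.0, water_path=0.02, phase="ice")
    print(ice_cloud["shortwave_optical_thickness_due_to_cloud"].values)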
|
py | 1a34b34bf93d01223e3b548083bab618ab42a0ef | # Django imports
from rest_framework.serializers import ModelSerializer, ReadOnlyField
# Project imports
from address.serializers import AddressSerializer
from client.serializers import ClientSerializer
from user_address.models import UserAddress
from .models import User
class UserSerializer(ModelSerializer):
addresses = AddressSerializer(read_only=True, many=True)
client = ClientSerializer(read_only=True)
date_joined = ReadOnlyField()
class Meta:
model = User
fields = (
'id',
'email',
'first_name',
'last_name',
'date_joined',
'password',
'addresses',
'client',
)
extra_kwargs = {
'password': {'write_only': True}
}
def to_representation(self, instance):
ret = super().to_representation(instance)
user_addresses = UserAddress.objects.filter(user__id=instance.id).distinct()
if user_addresses is not None:
serialized = AddressSerializer(data=user_addresses, many=True)
ret.update({
'addresses': serialized.data if serialized.is_valid() else [],
})
return ret
|
py | 1a34b3773eb4875536a56804c7966a4e3ab4ae09 | import os, pymysql, logging, matplotlib, sys
from logging.handlers import RotatingFileHandler
from flask import Flask
from config import app_config
from .utils.mattermostdriver import Driver
config_name = os.getenv('FLASK_CONFIG', 'default')
app = Flask(__name__)
# load the color list from matplotlib
color_list = set()
for name, hex in matplotlib.colors.cnames.items():
if name.startswith("light"): color_list.add(hex)
# inherit the configuration object from the config file
app.config.from_object(app_config[config_name])
app_config = app_config[config_name]
logger = logging.getLogger(__name__)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(app_config.LOG_FILE_PATH, maxBytes=10000000, backupCount=5)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
app.logger.addHandler(handler)
try:
mysql = pymysql.connect(host=app_config.MYSQL_HOST,
port=app_config.MYSQL_PORT,
user=app_config.MYSQL_USER,
password=app_config.MYSQL_PASSWORD,
db=app_config.MYSQL_DB,
cursorclass=pymysql.cursors.DictCursor)
except Exception as e:
logger.critical("Not able to connect to MySQL Server. Can't proceed further. Shutting down gracefully", exc_info=True)
sys.exit()
default_options = {
'scheme': app_config.MM_SCHEME,
'url': app_config.MM_URL,
'port': app_config.MM_PORT,
'basepath': '/api/v4',
'verify': True,
'timeout': 30,
'request_timeout': None,
'login_id': None,
'password': None,
'token': app_config.MM_BOT_TOKEN,
'mfa_token': None,
'auth': None,
'debug': False
}
SLASH_TOKEN = app_config.MM_SLASH_TOKEN
mm_client = Driver(default_options)
try:
mm_client.login()
except Exception as e:
    logger.critical("Not able to connect to Mattermost Server. Can't proceed further. \
Shutting down gracefully", exc_info=True)
sys.exit()
DAILY_POINT_LIMIT = app_config.DAILY_POINT_LIMIT
PER_TRANSACTION_POINT_LIMIT = app_config.PER_TRANSACTION_POINT_LIMIT
INSERT_QUERY_STRING = "insert into transaction(channel_id, channel_name, from_user_id, from_user_name, points, to_user_id, to_user_name, post_id, insertionTime, message) values (\"%s\", \"%s\", \"%s\", \"%s\", %d, \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");"
WEEKLY_THRESHOLD = app_config.WEEKLY_THRESHOLD
from .utils.helpers import *
# REGISTER BLUEPRINTS
from app.routes.index import main_service
app.register_blueprint(main_service, url_prefix='/v1/index') |
py | 1a34b3834e3cad770c4e879c4dadd0802ca0535c | version = (0, 4, 0)
info = '.dev1'
__version__ = '.'.join(map(str, version))+info
|
py | 1a34b38dc246ba677df46dfd452b5c70324fcbd0 | from sqlalchemy import Column, Integer, String, ForeignKey, DateTime, Boolean
from sqlalchemy.orm import relationship
import datetime
from models.SessionWorkouts import SessionWorkouts
# from .SessionModel import Session
from database import Base
class Workout(Base):
__tablename__ = "workout"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True, nullable=False)
repetition = Column(Integer, unique=False, nullable=False)
set = Column(Integer, unique=False, nullable=False)
weight = Column(Integer, unique=False, nullable=False)
done = Column(Boolean, default=False)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(DateTime, default=datetime.datetime.utcnow)
sessions = relationship("Session", secondary=SessionWorkouts.__tablename__, backref="workout")
exercise_id = Column(Integer, ForeignKey("exercise.id"), nullable=False)
exercise = relationship("Exercise", back_populates="workout")
# session_has_user = relationship("SessionHasUser", back_populates="workout")
|
py | 1a34b391afa88e14ea7aaa217861372b88aea03b | # -*- coding: utf-8 -*-
import random
from os3.core.item import Os3Item
from os3.utils.console import pprint_list
def name_id_parent_function(elem):
return elem[0], elem[1], elem[2]
def init_tree(process, name_id_parent_fn=None):
name_id_parent_fn = name_id_parent_fn or name_id_parent_function
from treelib import Tree
tree = Tree()
for children in process:
tree.create_node(*name_id_parent_fn(children))
return tree
class Os3List(Os3Item):
default_format = 'list'
_tuple_filters = None
_dict_filters = None
_sort = None # []
_iter = None
_format_interfaces = None # Uses in table
def _add_filters(self, tuple_filters, dict_filters):
self._tuple_filters = self._tuple_filters or ()
self._dict_filters = self._dict_filters or {}
self._tuple_filters += tuple_filters
self._dict_filters.update(dict_filters)
def _set_sort(self, *interfaces):
self._sort = interfaces
def filter(self, *args, **kwargs):
instance = self.clone()
instance._add_filters(args, kwargs)
return instance
def sort(self, *interfaces):
instance = self.clone()
instance._set_sort(*interfaces)
return instance
def group_by(self, interface):
pass
def _get_iter(self):
raise NotImplementedError
def _prepare_iter(self):
it = self._get_iter()
if self._sort:
it = map(self._prepare_next, it)
it = sorted(it, key=lambda x: [x.value(interface) for interface in self._sort])
it = iter(it)
return it
    def _next(self, reset=False):
        """Get the next element of the iteration without applying filters.
        """
if not self._iter and not reset:
self._iter = self._prepare_iter()
return next(self._iter)
    def _prepare_next(self, elem):
        """Return the next element of the iteration, already prepared.
        """
raise NotImplementedError
    def __prepare_next(self, elem):
        """Run _prepare_next only if the element is not already an Os3Item.
        """
if isinstance(elem, Os3Item):
return elem
return self._prepare_next(elem)
def list(self):
return filter(self._elem_is_valid, [self.__prepare_next(elem) for elem in self._prepare_iter()])
    def _elem_is_valid(self, elem):
        """Check whether the element passes the filters and can be returned.
        """
return elem.check_filters(*self._tuple_filters or (), **self._dict_filters or {})
def __iter__(self):
while True:
try:
elem = self.__prepare_next(self._next())
except StopIteration:
self._iter = None
return
if not self._elem_is_valid(elem):
continue
yield elem
def _table_class(self):
from terminaltables import SingleTable
return SingleTable
def print_format(self):
return getattr(self, '{}_format'.format(self.default_format))()
def tree_format(self, roots=None, fn_tree=None):
roots = roots if roots is not None else self
fn_tree = fn_tree or init_tree
forest = [fn_tree(x) for x in roots]
output = ''
for tree in forest:
output += str(tree)
return output
def list_format(self):
return pprint_list([x.print_format() for x in self])
def table_format(self, *interfaces):
interfaces = interfaces or self._format_interfaces
table_class = self._table_class()
data = self.values_list(*interfaces)
data = [interfaces] + data
table = table_class(data)
return table.table
def values(self, *interfaces, **kwargs):
return [n.values(*interfaces, this=True) for n in self.list()]
def values_list(self, *interfaces):
return [n.values_list(*interfaces) for n in self.list()]
def value(self, interface, **kwargs):
return [n.value(interface) for n in self.list()]
def random(self):
return random.choice(list(self))
def tree(self):
return self.clone(default_format='tree')
def table(self, *interfaces):
ls = self.clone(default_format='table')
ls._format_interfaces = interfaces
return ls
def count(self):
return len(list(self))
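# Minimal subclass sketch (added for illustration only; real subclasses live elsewhere
# in the os3 codebase and 'SomeItem' is a hypothetical Os3Item implementation). A
# concrete list only needs to supply the raw iterator and the wrapping step:
#
#     class NumberList(Os3List):
#         def _get_iter(self):
#             return iter(range(10))
#         def _prepare_next(self, elem):
#             return SomeItem(elem)
#
# filter(), sort(), iteration and the *_format helpers then work on top of these hooks.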
|
py | 1a34b40b2280546152857d6aec09d1116b72c157 | #!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019 Antti Lukats <[email protected]>
# Copyright (c) 2019 msloniewski <[email protected]>
# Copyright (c) 2019 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from litex_boards.platforms import c10lprefkit
from litex.soc.cores.clock import Cyclone10LPPLL
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MT48LC16M16
from litedram.phy import GENSDRPHY
from liteeth.phy.mii import LiteEthPHYMII
from litex.soc.cores.hyperbus import HyperRAM
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys_ps = ClockDomain(reset_less=True)
# # #
# Clk / Rst
clk12 = platform.request("clk12")
# PLL
self.submodules.pll = pll = Cyclone10LPPLL(speedgrade="-A7")
self.comb += pll.reset.eq(~platform.request("cpu_reset") | self.rst)
pll.register_clkin(clk12, 12e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys_ps, sys_clk_freq, phase=90)
# SDRAM clock
self.comb += platform.request("sdram_clock").eq(self.cd_sys_ps.clk)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
mem_map = {
"hyperram": 0x20000000,
}
mem_map.update(SoCCore.mem_map)
def __init__(self, sys_clk_freq=int(50e6), with_led_chaser=True,
with_ethernet=False, with_etherbone=False,
**kwargs):
platform = c10lprefkit.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on C10 LP RefKit",
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# HyperRam ---------------------------------------------------------------------------------
self.submodules.hyperram = HyperRAM(platform.request("hyperram"))
self.add_wb_slave(self.mem_map["hyperram"], self.hyperram.bus)
self.add_memory_region("hyperram", self.mem_map["hyperram"], 8*1024*1024)
# SDR SDRAM --------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.sdrphy = GENSDRPHY(platform.request("sdram"), sys_clk_freq)
self.add_sdram("sdram",
phy = self.sdrphy,
module = MT48LC16M16(sys_clk_freq, "1:1"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Ethernet / Etherbone ---------------------------------------------------------------------
if with_ethernet or with_etherbone:
self.submodules.ethphy = LiteEthPHYMII(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
if with_ethernet:
self.add_ethernet(phy=self.ethphy)
if with_etherbone:
self.add_etherbone(phy=self.ethphy)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on C10 LP RefKit")
parser.add_argument("--build", action="store_true", help="Build bitstream.")
parser.add_argument("--load", action="store_true", help="Load bitstream.")
parser.add_argument("--sys-clk-freq", default=50e6, help="System clock frequency.")
parser.add_argument("--with-ethernet", action="store_true", help="Enable Ethernet support.")
parser.add_argument("--with-etherbone", action="store_true", help="Enable Etherbone support.")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
with_ethernet = args.with_ethernet,
with_etherbone = args.with_etherbone,
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".sof"))
if __name__ == "__main__":
main()
|
py | 1a34b44104bd22181072901f479c63bfa6892382 | import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from simupy.systems import LTISystem
from simupy.systems.symbolic import DynamicalSystem, dynamicsymbols
from simupy.block_diagram import BlockDiagram
from sympy.tensor.array import Array
legends = [r'$x_1(t)$', r'$x_2(t)$', r'$x_3(t)$', r'$u(t)$']
tF = 6
"""
This example shows the design of a linear quadratic regulator for a
nonlinear system linearized about the origin. It is stable for some initial
conditions, but not all initial conditions. The region of stability is not
dependent only on the distance from the origin.
"""
# construct system
x = Array(dynamicsymbols('x1:4'))
u = dynamicsymbols('u')
x1, x2, x3 = x
sys = DynamicalSystem(Array([-x1+x2-x3, -x1*x2-x2+u, -x1+u]), x, Array([u]))
# linearization to design LQR
t0 = 0
x0 = np.zeros((3, 1))
u0 = 0
A = sys.state_jacobian_equation_function(t0, x0, u0)
B = sys.input_jacobian_equation_function(t0, x0, u0)
# LQR gain
Q = np.eye(3)
R = np.matrix([1])
S = linalg.solve_continuous_are(A, B, Q, R,)
K = linalg.solve(R, B.T @ S).reshape(1, -1)
ctr_sys = LTISystem(-K)
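# Note (added comment): K above is the standard LQR gain K = R^-1 B^T S, where S solves
# the continuous-time algebraic Riccati equation A^T S + S A - S B R^-1 B^T S + Q = 0,
# so ctr_sys implements the state feedback u = -K x.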
# Construct block diagram
BD = BlockDiagram(sys, ctr_sys)
BD.connect(sys, ctr_sys)
BD.connect(ctr_sys, sys)
# case 1 - un-recoverable
sys.initial_condition = np.r_[1, 1, 2.25]
result1 = BD.simulate(tF)
plt.figure()
plt.plot(result1.t, result1.y)
plt.legend(legends)
plt.title('controlled system with unstable initial conditions')
plt.xlabel('$t$, s')
plt.tight_layout()
plt.show()
# case 2 - recoverable
sys.initial_condition = np.r_[5, -3, 1]
result2 = BD.simulate(tF)
plt.figure()
plt.plot(result2.t, result2.y)
plt.legend(legends)
plt.title('controlled system with stable initial conditions')
plt.xlabel('$t$, s')
plt.tight_layout()
plt.show()
|
py | 1a34b4a8de1ae1538bafa803a09aec0fdc21824d | # -*- coding: utf-8 -*-
# python imports
from __future__ import unicode_literals
# lib imports
from django.db import models
# project imports
from utils.core.managers import TimeStampableMixin
class SkillQueryset(TimeStampableMixin):
pass
class SkillManager(models.Manager):
def get_queryset(self):
return SkillQueryset(self.model, using=self._db)
|
py | 1a34b4d61467938f87f51aa9cfd064d17936887e | """Bit manipulation class."""
import math
from abc import abstractmethod
from copy import deepcopy
from typing import (
Any,
Container,
Iterable,
Iterator,
MutableSequence,
SupportsInt,
Tuple,
Union,
overload,
)
from biterator._biterators import biterate
from biterator.bits_exceptions import SubscriptError
from biterator.const import ONES, ZEROS, DirtyBits, ValidBit
class Bits(MutableSequence[bool]):
"""
Stores bits in a list-like object, supports all bit-wise operators.
Bits can be instantiated with:
* A string of binary e.g. "1010" or "0b1100_0010".
* A prefixed string of hexadecimals e.g. "0x1f 0xb2" or "0xbadc0de".
* A bytes-like object.
* An integer-like object with a specified bit_length.
* An Iterable containing any of: True, False, 0, 1, "0", "1".
    * An Iterable of arbitrary objects specified by 'ones' and 'zeros' collections as arguments.
    The add (+) operator functions as concatenation only, and supports all of
the above schemes. Addition may be done by first casting to int.
Binary and hexadecimal representations may be accessed with the 'bin' and
'hex' properties and the 'decode' method may be used to read the bits as
bytes using a specified codec.
>>> bits = Bits(); bits.extend('1010'); bits.bin() # Concatenate regular strings of binary.
'0b1010'
>>> bits.extend(dict(value=15, bit_length=4)); bits.bin() # Concatenate bits from an integer.
'0b1010_1111'
>>> bits.extend(b"A"); bits.bin(compact=True) # Concatenate bytes-like objects.
'0b1010111101000001'
>>> Bits("0xFF") + "0b1001_1001" # Concatenation directly with (+) operator.
Bits("0b1111111110011001")
>>> Bits("1111 0011 0000 1010")[:8] # Instantiate with binary; slicing is supported.
Bits("0b11110011")
>>> Bits("0xAAAA")[0:8:2] # Instantiate with hex; advanced slicing
Bits("0b1111")
>>> Bits("1111") << 4 # Bitshift operators supported
Bits("0b11110000")
>>> Bits(15, bit_length=4) # Add bits from integers
Bits("0b1111")
>>> Bits(255, -8)
Traceback (most recent call last):
ValueError: 'bit_length' must be provided and must be greater than 0 for integer values
All bitwise operators are supported.
    'NOR' mask example, left and right 'NOR' with each other when the mask is active:
>>> mask_ = Bits('00001111')
>>> left_ = Bits('01010101')
>>> right = Bits('00110011')
>>> ((mask_ ^ left_) & (mask_ | left_) & ~(mask_ & right)).bin()
'0b0101_1000'
"""
# Pylint literally crashes on this line for some reason.
__slots__ = ["__bytes", "__last_byte", "__len_last_byte", "__len"] # pylint: disable=all (literally will crash)
__bytes: bytearray
__len: int
# Contains the trailing (incomplete) byte; has less than the 8 bits of an actual byte.
__last_byte: int
__len_last_byte: int
def __new__(cls, bit_values: Union[Iterable, int] = None, *args, **kwargs):
"""
Copy Bits object if passed as argument to Bits class.
>>> class BitsTest(Bits):
... def copy(self):
... print('copied')
... return super().copy()
>>> bits_1 = BitsTest('1010')
>>> bits_2 = BitsTest(bits_1)
copied
>>> bits_2 += '1111'
>>> bits_1
Bits("0b1010")
>>> bits_2
Bits("0b10101111")
"""
if isinstance(bit_values, cls):
return bit_values.copy()
return super().__new__(cls)
def __init__(
self,
bit_values: Union[Iterable, int] = None,
bit_length: int = None,
ones: Container = None,
zeros: Container = None,
):
"""
        Create a new Bits object from an Iterable of bit-like objects.
Create from a string of binary e.g.: "1010", "0b1001", or "0b1001_1101"
Create from a string of hex values: "0xFA 0xDE", "0XFFAA", "0Xab"
Create from bytes-like objects.
Create from Iterable of arbitrary objects by specifying containers
of objects for 'ones' and 'zeros'.
>>> Bits("10011001")
Bits("0b10011001")
>>> Bits("ffffabababffff", ones={"a"}, zeros={"b"})
Bits("0b101010")
>>> Bits("0xFF")
Bits("0b11111111")
>>> Bits("MPMPEEMP", ones="M", zeros="EP")
Bits("0b10100010")
>>> Bits() + b"Hi"
Bits("0b0100100001101001")
>>> def double_gen(size: int):
... if size:
... yield size % 4 < 2
... yield from double_gen(size - 1)
>>> Bits(double_gen(16)) # Supports generators
Bits("0b1001100110011001")
>>> Bits(255, 8)
Bits("0b11111111")
>>> Bits(255)
Traceback (most recent call last):
ValueError: 'bit_length' must be provided and must be greater than 0 for integer values
:param bit_values: Values to initialize a Bits object with.
:param bit_length: Bit length if an integer is given for bit_values.
:param ones: If set, symbols in this collection will represent True bits.
:param zeros: If set, symbols in this collection will represent False bits.
"""
self.__bytes = bytearray()
self.__len_last_byte = 0
self.__last_byte = 0
self.__len = 0
if bit_values is None and any(arg is not None for arg in (bit_length, ones, zeros)):
raise ValueError("unexpected argument, 'bit_values' must be set or there must be no other args set")
elif bit_values is not None:
for value in biterate(bit_values, bit_length=bit_length, ones=ones, zeros=zeros):
self.append(value)
@classmethod
def _clean_bits(
cls,
dirty_bits: DirtyBits,
ones: Container = None,
zeros: Container = None,
) -> Iterator[bool]:
# noinspection PyUnresolvedReferences
"""
Attempt, by a biterator, to iterate over `dirty_bits`; yields Booleans.
`dirty_bits` can be a dictionary of the form {"value": 15, "bit_length": 4}
to iterate over the bits of an integer.
>>> list(Bits._clean_bits(dict(value=255, bit_length=8))) == [True] * 8
True
>>> "".join("1" if bit else "0" for bit in Bits._clean_bits((1, 0, 0, 1)))
'1001'
>>> list(Bits._clean_bits(dict(value=255)))
Traceback (most recent call last):
ValueError: unsupported dict format {'value': 255}
:param dirty_bits: The bits containing object.
:param ones: If set, symbols in this collection will represent True bits.
:param zeros: If set, symbols in this collection will represent False bits.
"""
# Iterate from another Bits object.
if isinstance(dirty_bits, cls):
yield from dirty_bits
return
# Biterate an integer
if isinstance(dirty_bits, dict):
if "value" in dirty_bits and "bit_length" in dirty_bits:
bit_values = dirty_bits["value"]
bit_length = dirty_bits["bit_length"]
yield from biterate(bit_values=bit_values, bit_length=bit_length)
return
raise ValueError(f"unsupported dict format {repr(dirty_bits)}")
# Biterate other values
yield from biterate(bit_values=dirty_bits, ones=ones, zeros=zeros)
def copy(self) -> "Bits":
"""
Return a deep copy of the Bits object.
>>> bits_1 = Bits('1111')
>>> bits_2 = bits_1.copy()
>>> bits_2 += '1111'
>>> bits_2.bin(True, prefix="")
'11111111'
>>> bits_1.bin(True, prefix="")
'1111'
"""
return deepcopy(self)
def __repr__(self) -> str:
# noinspection PyUnresolvedReferences
"""
Represent the Bits object.
Equivalent to code which would create an identical object
but only up to a size of 64 bytes; after which it is abbreviated.
>>> Bits([1, 0]*32)
Bits("0b1010101010101010101010101010101010101010101010101010101010101010")
>>> exec("bits = " + repr(Bits([1, 0]*32))); bits == Bits([1, 0]*32)
True
>>> Bits('0xCAB00D1E'*6)
Bits(4969887947907717934627081996608040267272832614365316255006, 192)
>>> exec("bits = " + repr(Bits('0xCAB00D1E'*6))); bits == Bits('0xCAB00D1E'*6)
True
>>> Bits('0xBA5EBA11'*10)
Bits("0xBA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11")
>>> exec("bits = " + repr(Bits('0xBA5EBA11'*10))); bits == Bits('0xBA5EBA11'*10)
True
>>> Bits('0xDEADBEEF'*10) + '1'
Bits("0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF80", 321)
>>> exec("bits = " + repr(Bits('0xDEADBEEF'*10) + '1')); bits == Bits('0xDEADBEEF'*10) + '1'
True
>>> Bits('0x0DDBA11'*200)
Bits("0x0D 0xDB 0xA1 ... 0xDD 0xBA 0x11", bit_length=5_600)
>>> Bits('0x0DDBA11'*200) + '1001'
Bits("0x0D 0xDB 0xA1 ... 0xBA 0x11 0x90", bit_length=5_604)
"""
if self.__len <= 64:
return f'Bits("{format(int(self), f"#0{self.__len + 2}b")}")'
largest_possible_decimal = int(math.log(1 << self.__len, 10))
if largest_possible_decimal <= 64:
return f'Bits({format(int(self), f"0{largest_possible_decimal}d")}, {self.__len})'
if self.__len // 8 <= 64:
if self.__len_last_byte > 0:
return f'Bits("{self.hex(compact=True)}", {self.__len})'
return f'Bits("{self.hex(compact=True)}")'
length_str = f"bit_length={self.__len:_d}"
if self.__len_last_byte > 0:
return f'Bits("{self[:24].hex()} ... {self[-(self.__len_last_byte + 16):].hex()}", {length_str})'
return f'Bits("{self[:24].hex()} ... {self[-24:].hex()}", {length_str})'
def iter_bytes(self) -> Iterator[int]:
# noinspection PyUnresolvedReferences
"""
Generate bytes from the bits.
        Yield an integer representation of each byte.
An incomplete byte will be written from the left, for example:
>>> for byte in Bits('10101010 1111').iter_bytes(): print(bin(byte))
0b10101010
0b11110000
:return: The Iterator.
"""
yield from self.__bytes
if self.__len_last_byte:
yield self.__last_byte << 8 - self.__len_last_byte
def __bytes__(self) -> bytes:
r"""
Return bytes object; an incomplete byte is written from the left.
For example:
>>> bytes(Bits('1111'))
b'\xf0'
"""
return bytes(self.iter_bytes())
def decode(self, *args, **kwargs) -> str:
"""
Decode the bytes using the codec registered for encoding.
Wraps the `bytes.decode()` method.
>>> Bits('01101000 01101001 00100000 01101101 01100001 01110010 01101011').decode('utf-8')
'hi mark'
:param args:
:param kwargs:
:return:
"""
return bytes(self).decode(*args, **kwargs)
def __bool__(self):
"""Return true if not empty."""
for byte in self.__bytes:
if byte > 0:
return True
return bool(self.__last_byte)
def _byte_bit_indices(self, index: int) -> Tuple[int, int, int]:
"""
Calculate byte index, bit index (on the byte), and apply clipping to the original index.
:param index: The index to calculate.
:return: The tuple with computed index values.
"""
if index >= self.__len or index < -self.__len:
raise IndexError
# Modulo corrects negative indices
if index < 0:
index %= self.__len
# The first is the index of the byte that the index is within.
# The second is the index of the bit within the byte (counting from the left).
return index // 8, index % 8, index
@classmethod
def _clean_bit(cls, value: ValidBit) -> bool:
"""
Ensure bit is a ValidBit, cast to bool.
>>> Bits._clean_bit('1')
True
>>> Bits._clean_bit(0)
False
>>> Bits._clean_bit('a')
Traceback (most recent call last):
TypeError: could not determine single bit value for 'a'
:param value: The value to check.
:return: The bool representation.
"""
if value in ONES:
return True
if value in ZEROS:
return False
raise TypeError(f"could not determine single bit value for {repr(value)}")
def insert(self, index: int, value: ValidBit) -> None:
"""
Insert a bit at given index.
>>> bits = Bits('001'); bits.insert(0, True); bits.bin()
'0b1001'
>>> for _ in range(4): bits.insert(len(bits), False)
>>> bits.bin()
'0b1001_0000'
>>> bits.insert(5, "1"); bits.bin()
'0b1001_0100 0b0'
>>> bits.insert(-2, 1); bits.bin()
'0b1001_0101 0b00'
>>> bits = Bits('11110000 11110000 11110000 '); bits.insert(5, "1"); bits.bin(prefix="", group=False)
'11110100 01111000 01111000 0'
>>> bits.insert(0, 'g')
Traceback (most recent call last):
TypeError: could not determine single bit value for 'g'
        :param index: The index at which to insert the bit.
:param value: The bit to be inserted.
"""
if not isinstance(value, bool):
value = self._clean_bit(value)
# If the index is above the length, set it to the length.
# If the index is below the negative length, set it to the negative length.
# Then if the new index is negative, take the modulo to get the correct positive index.
if self.__len == 0:
index = 0
else:
if index >= 0:
index = min(self.__len, index)
else:
index = max(-self.__len, index) % self.__len
byte_index, bit_index = index // 8, index % 8
# If appending to the end.
if index == self.__len:
self.__last_byte = (self.__last_byte << 1) | value
self._increment_last_byte()
# If inserting within the last (incomplete) byte.
elif byte_index == len(self.__bytes):
self.__last_byte = self._insert_bit_in_byte(self.__last_byte, self.__len_last_byte, bit_index, value)
self._increment_last_byte()
# If inserting anywhere else.
else:
# Insert the bit then remove the rightmost bit to carry over into the next byte to the right.
new_byte = self._insert_bit_in_byte(self.__bytes[byte_index], 8, bit_index, value)
carry = new_byte & 1
new_byte >>= 1
# Append the byte with the carry over bit removed.
self.__bytes[byte_index] = new_byte
# Repeat for the remaining whole bytes to the right of the index.
for i in range(byte_index + 1, len(self.__bytes)):
new_byte = (carry << 8) | self.__bytes[i]
carry = new_byte & 1
new_byte >>= 1
self.__bytes[i] = new_byte
# Append the last carry bit to the last (incomplete) byte, and increment it's length.
self.__last_byte = (carry << self.__len_last_byte) | self.__last_byte
self._increment_last_byte()
def extend(self, values: DirtyBits) -> None:
"""Override of the mixin to add data validation."""
# Prevent race conditions by copying if extending by self
for v in self.copy() if values is self else self._clean_bits(values):
self.append(v)
@staticmethod
def _insert_bit_in_byte(byte: int, length: int, index: int, value: bool) -> int:
"""
Insert a bit in a byte, indexed from the left.
>>> bin(Bits._insert_bit_in_byte(0b1010010, 7, 4, True))
'0b10101010'
:param byte: Byte in which to insert the bit.
:param length: Length of the Byte.
:param index: Index at which to insert the bit.
:param value: Value to be inserted.
:return: Byte with new bit inserted.
"""
right_index = length - index
left_bits = byte >> right_index
right_bits = byte & ((1 << right_index) - 1)
return (((left_bits << 1) | value) << right_index) | right_bits
def _increment_last_byte(self) -> None:
"""
Call when a bit has been added anywhere in the last (incomplete) byte.
>>> bits = Bits(0b111_1111, 7); bits.last_byte_length
7
>>> bits.append(False); bits.last_byte_length
0
>>> len(bits)
8
"""
self.__len_last_byte += 1
self.__len += 1
if self.__len_last_byte == 8:
self.__bytes.append(self.__last_byte)
self.__last_byte = 0
self.__len_last_byte = 0
@overload
@abstractmethod
def __getitem__(self, i: int) -> bool:
"""Retrieve a bit."""
...
@overload
@abstractmethod
def __getitem__(self, s: slice) -> "Bits":
"""Retrieve a slice of bits."""
...
def __getitem__(self, index):
"""
Retrieve a bit or a slice of bits.
>>> Bits('0001 0000')[3]
True
>>> Bits('0001 0000')[-5]
True
>>> Bits('0001 1000')[3:5]
Bits("0b11")
>>> Bits("00001111 00110011 01010101")[:-16]
Bits("0b00001111")
>>> Bits("00001111 00110011 01010101")[-8:]
Bits("0b01010101")
>>> Bits('01001001')["s"]
Traceback (most recent call last):
biterator.bits_exceptions.SubscriptError: unsupported subscript, 'Bits' does not support 'str' subscripts
:param index: The index or slice to retrieve.
:return: The new Bits object or a bit value.
"""
if isinstance(index, int):
byte_index, bit_index, index = self._byte_bit_indices(index)
# If the index is in the last (incomplete) byte.
if byte_index == len(self.__bytes):
return self._get_bit_from_byte(self.__last_byte, self.__len_last_byte, bit_index)
# If the index is anywhere else.
return self._get_bit_from_byte(self.__bytes[byte_index], 8, bit_index)
if isinstance(index, slice):
start, stop, step = index.indices(self.__len)
# For the case where the slice starts from a whole byte.
if step == 1 and start % 8 == 0:
last_byte_index, last_bit_index = stop // 8, stop % 8
start_byte_index = start // 8
new = type(self)(self.__bytes[start_byte_index:last_byte_index])
# Append any remaining bits.
if last_bit_index:
for i in range(stop - last_bit_index, stop):
# Recurse into the branch for integers
new.append(self[i])
return new
# For all other cases (not particularly efficient).
new = type(self)()
for i in range(start, stop, step):
# Recurse into the branch for integers
new.append(self[i])
return new
raise SubscriptError(self, index)
@staticmethod
def _get_bit_from_byte(byte: int, length: int, index: int) -> bool:
"""
Return the bit value at the given index, indexed from the left.
>>> Bits._get_bit_from_byte(0b00000100, 8, 5)
True
:param byte: Byte from which to get a bit.
:param index: Index of bit to retrieve.
:param length: Length of byte.
:return: The value of the bit.
"""
right_index = length - index - 1
return bool((1 << right_index) & byte)
@overload
@abstractmethod
def __setitem__(self, i: int, o: ValidBit) -> None:
"""Set a bit."""
...
@overload
@abstractmethod
def __setitem__(self, s: slice, o: DirtyBits) -> None:
"""Set a slice of bits."""
...
def __setitem__(self, index, other):
"""
Set a bit or slice of bits.
>>> bits = Bits('1111 1111 1111'); bits[4:8] = '0000'; bits.bin()
'0b1111_0000 0b1111'
>>> bits[4:8] = 15; bits.bin()
'0b1111_1111 0b1111'
>>> bits[-4:] = '0000'; bits.bin()
'0b1111_1111 0b0000'
>>> bits[0] = False; bits.bin()
'0b0111_1111 0b0000'
:param index: The index or slice to modify.
:param other: The bit or bits to replace the old bit or bits.
"""
if isinstance(index, int):
other = self._clean_bit(other)
byte_index, bit_index, index = self._byte_bit_indices(index)
# If the index is in the last (incomplete) byte.
if byte_index == len(self.__bytes):
self.__last_byte = self._set_bit_in_byte(self.__last_byte, self.__len_last_byte, bit_index, other)
# If the index is anywhere else.
else:
self.__bytes[byte_index] = self._set_bit_in_byte(self.__bytes[byte_index], 8, bit_index, other)
elif isinstance(index, slice):
start, stop, step = index.indices(self.__len)
# Cast other to a Bits object
if isinstance(other, int):
other_bit = iter(type(self)(other, stop - start))
else:
other_bit = iter(type(self)(other))
try:
for i in range(start, stop, step):
# Recurse into the branch for integers
self[i] = next(other_bit)
except StopIteration:
pass
else:
raise SubscriptError(self, index)
@classmethod
def _set_bit_in_byte(cls, byte: int, length: int, index: int, value: bool) -> int:
"""
Modify a bit in a byte, indexed from the left.
>>> Bits._set_bit_in_byte(0b11011111, 8, 2, True)
255
:param byte: Byte in which to modify a bit.
:param length: Length of the byte.
:param index: Index of the bit to modify.
:param value: Value to modify the bit to.
:return: The Byte with bit modified.
"""
right_index = length - index - 1
# If the bit is the same, do nothing.
if bool((1 << right_index) & byte) == value:
return byte
# The bit is different, flip it.
return (1 << right_index) ^ byte
@overload
@abstractmethod
def __delitem__(self, i: int) -> None:
"""Remove a single bit."""
...
@overload
@abstractmethod
def __delitem__(self, i: slice) -> None:
"""Remove a slice."""
...
def __delitem__(self, index):
"""
Remove a bit or a slice.
>>> bits = Bits("1000 0000 0000 0100 0001"); del bits[13]; bits.bin()
'0b1000_0000 0b0000_0000 0b001'
>>> bits = Bits("1010 1010 1010 1010 0000"); del bits[1::2]; bits.bin()
'0b1111_1111 0b00'
>>> del bits[8:10]; bits.bin()
'0b1111_1111'
>>> del bits[-4:]; bits.bin()
'0b1111'
:param index: Index or slice to delete.
"""
if isinstance(index, int):
byte_index, bit_index, index = self._byte_bit_indices(index)
            # If the bit deleted is in the last (incomplete) byte.
if byte_index == len(self.__bytes):
self.__last_byte = self._del_bit_from_byte(self.__last_byte, self.__len_last_byte, bit_index)
self._decrement_last_byte()
# All other cases.
else:
# Remove the bit from the target byte, then append the first bit from the next byte.
# Cascade similarly through the list of bytes.
new_byte = self._del_bit_from_byte(self.__bytes[byte_index], 8, bit_index)
for i in range(byte_index + 1, len(self.__bytes)):
first_bit = bool(self.__bytes[i] & 0b1000_0000)
self.__bytes[i - 1] = (new_byte << 1) | first_bit
new_byte = self.__bytes[i] & 0b0111_1111
# If the last (incomplete) byte is not empty, append the first bit from it.
if self.__len_last_byte:
first_bit = bool(self.__last_byte & (1 << self.__len_last_byte - 1))
self.__bytes[-1] = (new_byte << 1) | first_bit
# Truncate the first bit of the last (incomplete) byte.
self.__last_byte &= (1 << self.__len_last_byte - 1) - 1
# If the last (incomplete) byte is empty, remove the last full byte.
else:
self.__bytes.pop()
                    # The former last full byte becomes the last (incomplete) byte with its first bit removed.
self.__last_byte = new_byte
# Decrement the length and last (incomplete) byte length in both cases.
self._decrement_last_byte()
elif isinstance(index, slice):
start, stop, step = index.indices(self.__len)
# NOTE: ***VERY inefficient*** Consider refactor.
# NOTE: Good opportunity to use interval library to remove all deleted bits and concat what remains.
# Always proceeds in reverse order to not mess up the indexing.
removal_indices = sorted(list(range(start, stop, step)), reverse=True)
for i in removal_indices:
del self[i]
else:
raise SubscriptError(self, index)
@staticmethod
def _del_bit_from_byte(byte: int, length: int, index: int) -> int:
"""
Remove a bit from a byte, indexed from the left.
>>> Bits._del_bit_from_byte(0b00010000, 8, 3)
0
:param byte: Byte from which to remove a bit.
:param length: Length of the byte.
:param index: Index of the bit to remove.
:return: The Byte with bit removed.
"""
right_index = length - index
left_bits = (byte >> right_index) << right_index - 1
right_bits = byte & ((1 << right_index - 1) - 1)
return left_bits | right_bits
def _decrement_last_byte(self) -> None:
"""
Call when a bit has been removed anywhere in the last (incomplete) byte.
>>> bits = Bits(0b010001000, 9); bits.last_byte_length
1
>>> del bits[0]; bits.last_byte_length
0
"""
self.__len_last_byte -= 1
self.__len -= 1
if self.__len_last_byte < 0:
self.__len_last_byte = 7
def __invert__(self) -> "Bits":
"""
Return a Bits object with each bit inverted.
>>> (~Bits('01001110')).bin()
'0b1011_0001'
:return: The Bits object with inverted bits.
"""
return type(self)(not bit for bit in self)
def __int__(self) -> int:
"""
Represent the sequence of bits as an int.
>>> int(Bits("0xff"))
255
>>> int(Bits("0xfff"))
4095
>>> int(Bits("0xffff"))
65535
:return: The integer representation.
"""
return (int.from_bytes(self.__bytes, "big") << self.__len_last_byte) | self.__last_byte
def __len__(self) -> int:
"""Total number of bits."""
return self.__len
def __lt__(self, other: SupportsInt) -> bool:
"""Int value of bits is less than the int value of other."""
if isinstance(other, SupportsInt):
return int(self) < int(other)
return NotImplemented
def __le__(self, other: SupportsInt) -> bool:
"""Int value of bits is less than or equal to the int value of other."""
if isinstance(other, SupportsInt):
return int(self) <= int(other)
return NotImplemented
def __eq__(self, other: Any) -> bool:
"""Bits are equal or Int value of Bits are equal to the int value of other."""
if isinstance(other, type(self)):
if all(
(
self.__len == other.__len,
self.__bytes == other.__bytes,
self.__last_byte == other.__last_byte,
),
):
return True
return False
if isinstance(other, SupportsInt):
return int(self) == int(other)
return NotImplemented
def __ne__(self, other: Any) -> bool:
"""Bits are not equal or Int value of Bits are not equal to the int value of other."""
if isinstance(other, type(self)):
if not all(
(
self.__len == other.__len,
self.__bytes == other.__bytes,
self.__last_byte == other.__last_byte,
),
):
return True
return False
if isinstance(other, SupportsInt):
return int(self) != int(other)
return NotImplemented
def __gt__(self, other: SupportsInt) -> bool:
"""Int value of bits is greater than the int value of other."""
if isinstance(other, SupportsInt):
return int(self) > int(other)
return NotImplemented
def __ge__(self, other: SupportsInt) -> bool:
"""Int value of bits is greater than or equal to the int value of other."""
if isinstance(other, SupportsInt):
return int(self) >= int(other)
return NotImplemented
# Concatenate
def __add__(self, other: DirtyBits) -> "Bits":
"""
Concatenate bits; NOT addition.
>>> (Bits("0110") + Bits("1001")).bin()
'0b0110_1001'
>>> (Bits("0110") + "1001").bin()
'0b0110_1001'
>>> (Bits("0110") + dict(value=15, bit_length=4)).bin() # Concat an integer
'0b0110_1111'
>>> bits = Bits('10'*10); bits += bits; bits.bin(True, "")
'1010101010101010101010101010101010101010'
>>> Bits('01000101') + b"Z"
Bits("0b0100010101011010")
>>> Bits('01000101') + "Z"
Traceback (most recent call last):
ValueError: non valid binary 'Z' was found in the string
:param other: Other object to be concatenated.
:return: New Bits object that is a concatenation of the inputs.
"""
if isinstance(other, (Iterable, dict)):
new = self.copy()
new.extend(other)
return new
return NotImplemented
def __radd__(self, other: DirtyBits) -> "Bits":
"""
Right concatenation.
>>> "1001" + Bits("0110")
Bits("0b10010110")
"""
if isinstance(other, (Iterable, dict)):
new = type(self)()
new.extend(other)
new.extend(self)
return new
return NotImplemented
def __iadd__(self, other: DirtyBits) -> "Bits":
"""
Extend in-place.
>>> bits = Bits("1111"); bits += "0000"; bits.bin()
'0b1111_0000'
>>> bits += dict(value=255, bit_length=8); bits.bin()
'0b1111_0000 0b1111_1111'
:param other: Bits to extend.
:return: The Bits object that was modified in place.
"""
if isinstance(other, (Iterable, dict)):
self.extend(other)
return self
return NotImplemented
# Left Bitshift
def __lshift__(self, index: int) -> "Bits":
"""
Left shift the bits.
>>> (Bits("1111") << 4).bin()
'0b1111_0000'
:param index: Number of places to shift
:return: Shifted Bits object
"""
if isinstance(index, SupportsInt):
new = self.copy()
new.extend(type(self)(0, int(index)))
return new
return NotImplemented
def __ilshift__(self, index: int) -> "Bits":
"""
Left bitshift in-place.
>>> bits = Bits("1111"); bits <<= 4; bits.bin()
'0b1111_0000'
:param index: Number of places to shift.
:return: The Bits object that was modified in place.
"""
if isinstance(index, SupportsInt):
self.extend({"value": 0, "bit_length": int(index)})
return self
return NotImplemented
# Right Bitshift
def __rshift__(self, index: int) -> "Bits":
"""
Right shift the bits.
>>> (Bits("11110000") >> 4).bin()
'0b1111'
:param index: Number of places to shift
:return: Shifted Bits object
"""
if isinstance(index, SupportsInt):
return type(self)(self[: -int(index)])
return NotImplemented
def __irshift__(self, index: int) -> "Bits":
"""
Right bitshift in-place.
>>> bits = Bits("1111 1111"); bits >>= 4; bits.bin()
'0b1111'
:param index: Number of places to shift.
:return: The Bits object that was modified in place.
"""
if index:
del self[-index:]
return self
# AND
def __and__(self, other: DirtyBits) -> "Bits":
"""
Bitwise and operation.
>>> (Bits('01111000') & Bits('00011110')).bin()
'0b0001_1000'
>>> (Bits('0111') & Bits('00011110')).bin()
'0b0001'
>>> (Bits("1110") & "0b0111").bin()
'0b0110'
>>> Bits("1110") & dict(value=7, bit_length=4)
Bits("0b0110")
:param other: Other Bits to 'and' with
:return: Combined Bits objects
"""
if isinstance(other, (Iterable, dict)):
return type(self)(a & b for a, b in zip(self, self._clean_bits(other)))
return NotImplemented
__rand__ = __and__
def __iand__(self, other: DirtyBits) -> "Bits":
"""
Bitwise 'and' with other bits; in-place.
>>> bits_ = Bits("1110"); bits_ &= "0111"; bits_.bin()
'0b0110'
:param other: The Iterable bits to 'and' with.
:return: The Bits object that was modified in place.
"""
if isinstance(other, (Iterable, dict)):
len_other = 1
for index, bits in enumerate(zip(self, self._clean_bits(other))):
self[index] = bits[0] & bits[1]
len_other += 1
if self.__len > len_other:
del self[-len_other:]
return self
return NotImplemented
# XOR
def __xor__(self, other: DirtyBits) -> "Bits":
"""
Bitwise xor operation.
>>> (Bits('01111000') ^ Bits('00011110')).bin()
'0b0110_0110'
>>> (Bits('01111000') ^ '0b00011110').bin()
'0b0110_0110'
>>> (Bits("1110") ^ "0111").bin()
'0b1001'
:param other: Other Bits to 'xor' with
:return: Combined Bits objects
"""
if isinstance(other, (Iterable, dict)):
return type(self)(a ^ b for a, b in zip(self, self._clean_bits(other)))
return NotImplemented
__rxor__ = __xor__
def __ixor__(self, other: DirtyBits) -> "Bits":
"""
Bitwise 'xor' with other bits; in-place.
>>> bits_ = Bits("0110"); bits_ ^= "0101"; bits_.bin()
'0b0011'
:param other: The Iterable bits to 'xor' with.
:return: The Bits object that was modified in place.
"""
len_other = 1
for index, bits in enumerate(zip(self, self._clean_bits(other))):
self[index] = bits[0] ^ bits[1]
len_other += 1
if self.__len > len_other:
del self[-len_other:]
return self
# OR
def __or__(self, other: DirtyBits) -> "Bits":
"""
Bitwise or operation.
>>> (Bits('01111000') | Bits('00011110')).bin()
'0b0111_1110'
>>> (Bits("1100") | "0011").bin()
'0b1111'
:param other: Other Bits to 'or' with
:return: Combined Bits objects
"""
return type(self)(a | b for a, b in zip(self, self._clean_bits(other)))
__ror__ = __or__
def __ior__(self, other: DirtyBits) -> "Bits":
"""
Bitwise 'or' with other bits; in-place.
>>> bits_ = Bits("1100"); bits_ |= "0011"; bits_.bin()
'0b1111'
:param other: The Iterable bits to 'or' with.
:return: The Bits object that was modified in place.
"""
len_other = 1
for index, bits in enumerate(zip(self, self._clean_bits(other))):
self[index] = bits[0] | bits[1]
len_other += 1
if self.__len > len_other:
del self[-len_other:]
return self
@property
def last_byte_length(self):
"""
        If the total number of bits is not divisible by 8, get the remainder.
This property gives the length of the last incomplete byte in the object.
>>> bits = Bits("10011001 1010"); bits[-bits.last_byte_length:].bin(True, prefix="")
'1010'
:return: Number of bits in the last incomplete byte.
"""
return self.__len_last_byte
def hex(self, compact: bool = False, prefix: str = "0x", sep: str = " ", fmt: str = None) -> str:
r"""
Return a string with hexadecimal representation of each byte.
NOTE: The prefix argument can be set to the empty string and then
enabled in the formatting argument if that is preferred.
>>> Bits("0b1111").hex()
'0xF0'
>>> Bits("0b00_1111").hex() # Interpreted as 0011_1100
'0x3C'
>>> Bits("0b1111_1111 0b1111").hex()
'0xFF 0xF0'
>>> Bits("0b1111_1111 0b1111_1111").hex(compact=True, prefix=r"\x")
'\\xFFFF'
>>> Bits("0b1011_0001 0b1010_1101 0b1110_0101").hex(prefix="", compact=True)
'B1ADE5'
>>> Bits("0b1111_1111 0b1111_1111").hex(compact=True, prefix='', fmt="4X")
' FF FF'
        :param compact: No separators and only prefixed at the beginning.
:param prefix: Prefix for each byte, default: '0x'.
:param sep: Separator between bytes, default ' '.
:param fmt: Formatting for each byte.
:return: The string representation of the bytes as hexadecimal.
"""
if compact:
ret_str = prefix + "".join(format(byte, fmt or "02X") for byte in self.iter_bytes())
else:
ret_str = sep.join(prefix + format(byte, fmt or "02X") for byte in self.iter_bytes())
return ret_str
def bin(self, compact: bool = False, prefix: str = "0b", sep: str = " ", group: bool = True) -> str:
"""
Return a string with the binary representations of each byte.
        NOTE: The prefix argument can be set to the empty string if a bare
        binary string is preferred.
>>> Bits(255, 8).bin()
'0b1111_1111'
>>> Bits(4095, 12).bin(prefix="")
'1111_1111 1111'
>>> Bits(65535, 16).bin(group=False)
'0b11111111 0b11111111'
>>> Bits("1111 11").bin()
'0b11_1111'
>>> Bits(43690, 16).bin(compact=True, prefix="")
'1010101010101010'
        :param compact: No separators or grouping, only prefixed at the beginning.
:param prefix: Prefix on each byte, default '0b'.
:param sep: Spacer between bytes, default: ' '.
        :param group: Whether to group binary digits with '_' separators, default: True.
:return: The string of the bits in binary representation.
"""
if compact:
ret_str = "".join(format(byte, "08b") for byte in self.__bytes)
if self.__len_last_byte:
ret_str += format(self.__last_byte, f"0{self.__len_last_byte}b")
ret_str = prefix + ret_str
else:
ret_str = sep.join(prefix + format(byte, "09_b" if group else "08b") for byte in self.__bytes)
if self.__len_last_byte:
ret_str += sep if ret_str else ""
if group:
has_group = 1 if self.__len_last_byte > 4 else 0
last_byte_fmt = f"0{self.__len_last_byte + has_group}_b"
else:
last_byte_fmt = f"0{self.__len_last_byte}b"
ret_str += prefix + format(self.__last_byte, last_byte_fmt)
return ret_str
|
py | 1a34b65ba20ed95ee376dde85b4c200441ff2394 | import asyncio
import logging
from zof.event import load_event
LOGGER = logging.getLogger(__package__)
class Protocol(asyncio.SubprocessProtocol):
"""Implements an asyncio Protocol for parsing data received from oftr.
"""
def __init__(self, post_event):
self.post_event = post_event
self.buf = b''
self.exit_future = asyncio.Future()
def pipe_data_received(self, fd, data):
LOGGER.debug('zof.Protocol.pipe_data_received: %d bytes, fd=%d',
len(data), fd)
begin = 0
offset = len(self.buf)
self.buf += data
while True:
offset = self.buf.find(b'\x00', offset)
if offset < 0:
self.buf = self.buf[begin:]
return
if begin != offset:
self.post_event(load_event(self.buf[begin:offset]))
offset += 1
begin = offset
def pipe_connection_lost(self, fd, exc):
if exc is not None:
LOGGER.warning('zof.Protocol.pipe_connection_lost: fd=%d, exc=%r',
fd, exc)
def process_exited(self):
LOGGER.debug('zof.Protocol.process_exited')
self.post_event(load_event(b''))
self.exit_future.set_result(0)
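# Framing note (added comment): oftr messages are NUL-delimited as handled above.
# Feeding b'{"type":"A"}\x00{"ty' and then b'pe":"B"}\x00' posts one event per
# complete frame, while the trailing partial frame stays buffered in self.buf
# until a later pipe_data_received call completes it.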
|
py | 1a34b6ce6db7b0a9fb480bc8c9149afb7d0e339b | """
WRITEME
"""
from __future__ import absolute_import, print_function, division
import logging
import theano
from theano import gof
import theano.gof.vm
from theano.configparser import config
from theano.compile.ops import _output_guard
from six import string_types
_logger = logging.getLogger('theano.compile.mode')
# If a string is passed as the linker argument in the constructor for
# Mode, it will be used as the key to retrieve the real linker in this
# dictionary
predefined_linkers = {
'py': gof.PerformLinker(), # Use allow_gc Theano flag
'c': gof.CLinker(), # Don't support gc. so don't check allow_gc
'c|py': gof.OpWiseCLinker(), # Use allow_gc Theano flag
'c|py_nogc': gof.OpWiseCLinker(allow_gc=False),
'vm': gof.vm.VM_Linker(use_cloop=False), # Use allow_gc Theano flag
'cvm': gof.vm.VM_Linker(use_cloop=True), # Use allow_gc Theano flag
'vm_nogc': gof.vm.VM_Linker(allow_gc=False, use_cloop=False),
'cvm_nogc': gof.vm.VM_Linker(allow_gc=False, use_cloop=True)}
def register_linker(name, linker):
"""Add a `Linker` which can be referred to by `name` in `Mode`."""
if name in predefined_linkers:
raise ValueError('Linker name already taken: %s' % name)
predefined_linkers[name] = linker
# If a string is passed as the optimizer argument in the constructor
# for Mode, it will be used as the key to retrieve the real optimizer
# in this dictionary
exclude = []
if not theano.config.cxx:
exclude = ['cxx_only']
OPT_NONE = gof.Query(include=[], exclude=exclude)
# Even if multiple merge optimizer call will be there, this shouldn't
# impact performance.
OPT_MERGE = gof.Query(include=['merge'], exclude=exclude)
OPT_FAST_RUN = gof.Query(include=['fast_run'], exclude=exclude)
OPT_FAST_RUN_STABLE = OPT_FAST_RUN.requiring('stable')
# We need fast_compile_gpu here. As on the GPU, we don't have all
# operation that exist in fast_compile, but have some that get
# introduced in fast_run, we want those optimization to also run in
# fast_compile+gpu. We can't tag them just as 'gpu', as this would
# exclude them if we exclude 'gpu'.
OPT_FAST_COMPILE = gof.Query(include=['fast_compile', 'fast_compile_gpu'],
exclude=exclude)
OPT_STABILIZE = gof.Query(include=['fast_run'], exclude=exclude)
OPT_STABILIZE.position_cutoff = 1.5000001
OPT_NONE.name = 'OPT_NONE'
OPT_MERGE.name = 'OPT_MERGE'
OPT_FAST_RUN.name = 'OPT_FAST_RUN'
OPT_FAST_RUN_STABLE.name = 'OPT_FAST_RUN_STABLE'
OPT_FAST_COMPILE.name = 'OPT_FAST_COMPILE'
OPT_STABILIZE.name = 'OPT_STABILIZE'
predefined_optimizers = {
None: OPT_NONE,
'None': OPT_NONE,
'merge': OPT_MERGE,
'fast_run': OPT_FAST_RUN,
'fast_run_stable': OPT_FAST_RUN_STABLE,
'fast_compile': OPT_FAST_COMPILE,
'stabilize': OPT_STABILIZE}
def register_optimizer(name, opt):
"""Add a `Optimizer` which can be referred to by `name` in `Mode`."""
if name in predefined_optimizers:
raise ValueError('Optimizer name already taken: %s' % name)
predefined_optimizers[name] = opt
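# Example (added for illustration): a registered optimizer can then be referred to by
# name when building a Mode, e.g.
#   register_optimizer('fast_run_no_inplace', OPT_FAST_RUN.excluding('inplace'))
#   mode = Mode(linker='cvm', optimizer='fast_run_no_inplace')
# The name 'fast_run_no_inplace' is purely illustrative, not predefined here.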
class AddDestroyHandler(gof.Optimizer):
"""
This optimizer performs two important functions:
1) It has a 'requirement' of the destroyhandler. This means that the fgraph
will include it as a feature for this optimization, and keep this feature
enabled for subsequent optimizations. All optimizations that work inplace
on any of their inputs must run *after* this optimization to ensure that
the DestroyHandler has been included in the fgraph.
2) It tries to replace each output with an Op that purports to destroy it
(but it won't I promise). If this replacement succeeds it means that
there is a bug in theano. It should not be possible to destroy outputs.
"""
def apply(self, fgraph):
for o in fgraph.outputs:
try:
fgraph.replace_validate(o, _output_guard(o),
reason='output_guard')
_logger.info("Output variable %s required output_guard, "
"how was this output left unprotected against "
"destructive operations?"
% o)
except gof.InconsistencyError:
# This output is already impossible to destroy.
# No guard necessary
pass
def add_requirements(self, fgraph):
super(AddDestroyHandler, self).add_requirements(fgraph)
fgraph.attach_feature(gof.DestroyHandler())
class AddFeatureOptimizer(gof.Optimizer):
"""
This optimizer adds a provided feature to the function graph.
"""
def __init__(self, feature):
self.feature = feature
def add_requirements(self, fgraph):
super(AddFeatureOptimizer, self).add_requirements(fgraph)
fgraph.attach_feature(self.feature)
class PrintCurrentFunctionGraph(gof.Optimizer):
"""
This optimizer is for debugging.
Toss it into the optimization pipeline to see the state of things at any
given point.
"""
def __init__(self, header):
self.header = header
def apply(self, fgraph):
import theano.printing
print("PrintCurrentFunctionGraph:", self.header)
theano.printing.debugprint(fgraph.outputs)
optdb = gof.SequenceDB()
optdb.register('merge1', gof.MergeOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
# After scan1 opt at 0.5 and before ShapeOpt at 1
# This should only remove nodes.
# The opt should not do anything that need shape inference.
# New nodes that don't have infer_shape need that the original node
# also don't have infer_shape
local_useless = gof.optdb.LocalGroupDB(apply_all_opts=True, profile=True)
optdb.register(
'useless',
gof.optdb.TopoDB(local_useless,
failure_callback=gof.opt.NavigatorOptimizer.warn_inplace),
0.6, 'fast_run', 'fast_compile')
optdb.register('merge1.1', gof.MergeOptimizer(),
0.65, 'fast_run', 'fast_compile', 'merge')
# rearranges elemwise expressions
optdb.register('canonicalize', gof.EquilibriumDB(ignore_newtrees=False),
1, 'fast_run', 'fast_compile', 'canonicalize_db')
# Register in the canonizer Equilibrium as a clean up opt the merge opt.
# Without this, as the equilibrium have ignore_newtrees=False, we
# won't merge all nodes if it is set as a global optimizer with
# final_opt=True.
# We need a new instance of MergeOptimizer to don't have its name
# changed by other usage of it.
optdb['canonicalize'].register("merge", gof.opt.MergeOptimizer(), 'fast_run',
"fast_compile", cleanup=True)
optdb.register('merge1.2', gof.MergeOptimizer(),
1.2, 'fast_run', 'fast_compile', 'merge')
optdb.register('Print1.21', PrintCurrentFunctionGraph('Post-canonicalize'),
1.21,) # 'fast_run', 'fast_compile')
# replace unstable subgraphs
optdb.register('stabilize', gof.EquilibriumDB(),
1.5, 'fast_run')
optdb.register('Print1.51', PrintCurrentFunctionGraph('Post-stabilize'),
1.51,) # 'fast_run', 'fast_compile')
# misc special cases for speed
optdb.register('specialize', gof.EquilibriumDB(),
2, 'fast_run', 'fast_compile_gpu')
# misc special cases for speed that break canonicalization
optdb.register('uncanonicalize', gof.EquilibriumDB(),
3, 'fast_run')
# misc special cases for speed that are dependent on the device.
optdb.register('specialize_device', gof.EquilibriumDB(),
48.6, 'fast_compile', 'fast_run') # must be after gpu stuff at 48.5
# especially constant merge
optdb.register('merge2', gof.MergeOptimizer(),
49, 'fast_run', 'merge')
optdb.register('add_destroy_handler', AddDestroyHandler(),
49.5, 'fast_run', 'inplace')
# final pass just to make sure
optdb.register('merge3', gof.MergeOptimizer(),
100, 'fast_run', 'merge')
class Mode(object):
"""
The Mode represents a way to optimize and then link a computation graph.
Parameters
----------
optimizer : a structure of type Optimizer
An Optimizer may simplify the math, put similar computations together,
improve numerical stability and various other improvements.
linker : a structure of type Linker
A Linker decides which implementations to use (C or Python, for example)
and how to string them together to perform the computation.
See Also
--------
predefined_linkers
predefined_optimizers
predefined_modes
"""
def __init__(self, linker=None, optimizer='default'):
if linker is None:
linker = config.linker
        if optimizer == 'default':
optimizer = config.optimizer
Mode.__setstate__(self, (linker, optimizer))
# self.provided_optimizer - typically the `optimizer` arg.
# But if the `optimizer` arg is keyword corresponding to a predefined
# Query, then this stores the query
# self._optimizer - typically same as provided_optimizer??
# self.__get_optimizer - returns self._optimizer (possibly querying
# optdb with self._optimizer)
# self.optimizer - property that returns __get_optimizer()
def __getstate__(self):
return (self.provided_linker, self.provided_optimizer)
def __setstate__(self, state):
linker, optimizer = state
self.provided_linker = linker
self.provided_optimizer = optimizer
if isinstance(linker, string_types) or linker is None:
linker = predefined_linkers[linker]
self.linker = linker
if isinstance(optimizer, string_types) or optimizer is None:
optimizer = predefined_optimizers[optimizer]
if isinstance(optimizer, gof.Query):
self.provided_optimizer = optimizer
self._optimizer = optimizer
self.call_time = 0
self.fn_time = 0
linker.mode = self # TODO: WHY IS THIS HERE?
def __str__(self):
return "%s(linker = %s, optimizer = %s)" % (self.__class__.__name__,
self.provided_linker,
self.provided_optimizer)
def __get_optimizer(self):
if isinstance(self._optimizer, gof.Query):
return optdb.query(self._optimizer)
else:
return self._optimizer
optimizer = property(__get_optimizer)
def get_linker_optimizer(self, linker, optimizer):
if isinstance(linker, string_types) or linker is None:
linker = predefined_linkers[linker]
if isinstance(optimizer, string_types) or optimizer is None:
optimizer = predefined_optimizers[optimizer]
return (linker, optimizer)
def including(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
# N.B. opt might be a Query instance, not sure what else it might be...
# string? Optimizer? OptDB? who knows???
return self.clone(optimizer=opt.including(*tags))
def register(self, *optimizations):
"""Adds new optimization instances to a mode.
This method adds new optimization instances to a compilation mode. It
works like the `including()` method but takes as inputs optimization
instances to add instead of tags.
Parameters
----------
optimizations :
Every element of `optimizations` is a tuple containing an
optimization instance and a floating point value indicating the
position at which to insert the optimization in the mode.
Returns
-------
Mode
Copy of the current Mode which includes the provided
optimizations.
"""
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
return self.clone(optimizer=opt.register(*optimizations))
def excluding(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
return self.clone(optimizer=opt.excluding(*tags))
def requiring(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
return self.clone(optimizer=opt.requiring(*tags))
def clone(self, link_kwargs=None, optimizer="", **kwargs):
"""
Create a new instance of this Mode.
Keyword arguments can be provided for the linker,
in which case its `clone` method will be called with these
arguments.
"""
if link_kwargs is None:
link_kwargs = {}
new_linker = self.linker.clone(**link_kwargs)
if optimizer == "":
optimizer = self.provided_optimizer
new_mode = type(self)(linker=new_linker,
optimizer=optimizer)
return new_mode
# If a string is passed as the mode argument in function or
# FunctionMaker, the Mode will be taken from this dictionary using the
# string as the key
# Use VM_linker to allow lazy evaluation by default.
FAST_COMPILE = Mode(theano.gof.vm.VM_Linker(use_cloop=False, c_thunks=False),
'fast_compile')
if theano.config.cxx:
FAST_RUN = Mode('cvm', 'fast_run')
else:
FAST_RUN = Mode('vm', 'fast_run')
predefined_modes = {'FAST_COMPILE': FAST_COMPILE,
'FAST_RUN': FAST_RUN,
}
instantiated_default_mode = None
def get_mode(orig_string):
if orig_string is None:
string = config.mode
else:
string = orig_string
if not isinstance(string, string_types):
return string # it is hopefully already a mode...
global instantiated_default_mode
# The default mode is cached. However, config.mode can change
# If instantiated_default_mode has the right class, use it.
if orig_string is None and instantiated_default_mode:
if string in predefined_modes:
default_mode_class = predefined_modes[string].__class__.__name__
else:
default_mode_class = string
if (instantiated_default_mode.__class__.__name__ ==
default_mode_class):
return instantiated_default_mode
if string in ['Mode', 'DebugMode', 'NanGuardMode']:
if string == 'DebugMode':
# need to import later to break circular dependency.
from .debugmode import DebugMode
# DebugMode use its own linker.
ret = DebugMode(optimizer=config.optimizer)
elif string == 'NanGuardMode':
# need to import later to break circular dependency.
from .nanguardmode import NanGuardMode
# NanGuardMode use its own linker.
ret = NanGuardMode(True, True, True, optimizer=config.optimizer)
else:
# TODO: Can't we look up the name and invoke it rather than using eval here?
ret = eval(string +
'(linker=config.linker, optimizer=config.optimizer)')
elif string in predefined_modes:
ret = predefined_modes[string]
else:
raise Exception("No predefined mode exist for string: %s" % string)
if orig_string is None:
# Build and cache the default mode
if theano.config.optimizer_excluding:
ret = ret.excluding(*theano.config.optimizer_excluding.split(':'))
if theano.config.optimizer_including:
ret = ret.including(*theano.config.optimizer_including.split(':'))
if theano.config.optimizer_requiring:
ret = ret.requiring(*theano.config.optimizer_requiring.split(':'))
instantiated_default_mode = ret
return ret
def get_default_mode():
return get_mode(None)
def register_mode(name, mode):
"""
Add a `Mode` which can be referred to by `name` in `function`.
"""
if name in predefined_modes:
raise ValueError('Mode name already taken: %s' % name)
predefined_modes[name] = mode
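# Illustrative usage of the Mode machinery above (a sketch appended for clarity,
# not part of the original module; the name 'MY_MODE' and the particular
# linker/optimizer strings are assumptions chosen for demonstration):
#
#     my_mode = Mode(linker='vm', optimizer='fast_compile').excluding('inplace')
#     register_mode('MY_MODE', my_mode)
#     mode = get_mode('MY_MODE')  # looked up again through predefined_modes
#
# Passing 'MY_MODE' (or the Mode instance itself) as the mode argument of
# theano.function then selects this linker/optimizer pair for compilation.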
|
py | 1a34b70c3be9d379c66fcc762efb0d4c51104e1f | from pydantic import BaseModel
from models.platform import Platform
class PredictionRequest(BaseModel):
url: str
platform: Platform
|
py | 1a34b711e327048517b3420d99019670967b19d1 | #!/usr/bin/env python3
"""This module has the necessary tests of functions"""
import unittest
from id_card_face_access.card_comms import APDU
from id_card_face_access.bac import (
compute_key,
compute_mac,
secure_messaging,
process_rapdu,
)
from id_card_face_access.mrz import calculate_check_digit
class TestMethods(unittest.TestCase):
def test_calculate_check_digit(self):
self.assertEqual(calculate_check_digit("D23145890734"), "9")
self.assertEqual(calculate_check_digit("340712"), "7")
self.assertEqual(calculate_check_digit("950712"), "2")
self.assertEqual(calculate_check_digit("L898902C<"), "3")
self.assertEqual(calculate_check_digit("690806"), "1")
self.assertEqual(calculate_check_digit("940623"), "6")
def test_compute_key(self):
key_seed = bytes.fromhex("239AB9CB282DAF66231DC5A4DF6BFBAE")
self.assertEqual(
compute_key(key_seed, "enc"),
bytes.fromhex("AB94FDECF2674FDFB9B391F85D7F76F2"),
)
self.assertEqual(
compute_key(key_seed, "mac"),
bytes.fromhex("7962D9ECE03D1ACD4C76089DCE131543"),
)
key_seed = bytes.fromhex("0036D272F5C350ACAC50C3F572D23600")
self.assertEqual(
compute_key(key_seed, "enc"),
bytes.fromhex("979EC13B1CBFE9DCD01AB0FED307EAE5"),
)
self.assertEqual(
compute_key(key_seed, "mac"),
bytes.fromhex("F1CB1F1FB5ADF208806B89DC579DC1F8"),
)
def test_compute_mac(self):
key = bytes.fromhex("7962D9ECE03D1ACD4C76089DCE131543")
data = bytes.fromhex(
"72C29C2371CC9BDB65B779B8E8D37B29ECC154AA56A8799FAE2F498F76ED92F2"
)
self.assertEqual(compute_mac(key, data), bytes.fromhex("5F1448EEA8AD90A7"))
data = bytes.fromhex(
"46B9342A41396CD7386BF5803104D7CEDC122B9132139BAF2EEDC94EE178534F"
)
self.assertEqual(compute_mac(key, data), bytes.fromhex("2F2D235D074D7449"))
key = bytes.fromhex("F1CB1F1FB5ADF208806B89DC579DC1F8")
data = bytes.fromhex("887022120C06C2270CA4020C800000008709016375432908C044F6")
self.assertEqual(compute_mac(key, data), bytes.fromhex("BF8B92D635FF24F8"))
data = bytes.fromhex("887022120C06C22899029000")
self.assertEqual(compute_mac(key, data), bytes.fromhex("FA855A5D4C50A8ED"))
def test_secure_messaging(self):
ks_enc = bytes.fromhex("979EC13B1CBFE9DCD01AB0FED307EAE5")
ks_mac = bytes.fromhex("F1CB1F1FB5ADF208806B89DC579DC1F8")
SSC = bytes.fromhex("887022120C06C226")
apdu = APDU(b"\x00", b"\xA4", b"\x02", b"\x0C", Lc=b"\x02", cdata=b"\x01\x1E")
protected_apdu, SSC = secure_messaging(ks_enc, ks_mac, SSC, apdu)
self.assertEqual(
protected_apdu,
bytes.fromhex("0CA4020C158709016375432908C044F68E08BF8B92D635FF24F800"),
)
self.assertEqual(SSC, bytes.fromhex("887022120C06C227"))
SSC = bytes.fromhex("887022120C06C228")
apdu = APDU(b"\x00", b"\xB0", b"\x00", b"\x00", Le=b"\x04")
protected_apdu, SSC = secure_messaging(ks_enc, ks_mac, SSC, apdu)
self.assertEqual(
protected_apdu, bytes.fromhex("0CB000000D9701048E08ED6705417E96BA5500")
)
self.assertEqual(SSC, bytes.fromhex("887022120C06C229"))
SSC = bytes.fromhex("887022120C06C22A")
apdu = APDU(b"\x00", b"\xB0", b"\x00", b"\x04", Le=b"\x12")
protected_apdu, SSC = secure_messaging(ks_enc, ks_mac, SSC, apdu)
self.assertEqual(
protected_apdu, bytes.fromhex("0CB000040D9701128E082EA28A70F3C7B53500")
)
self.assertEqual(SSC, bytes.fromhex("887022120C06C22B"))
def test_process_rapdu(self):
ks_mac = bytes.fromhex("F1CB1F1FB5ADF208806B89DC579DC1F8")
SSC = bytes.fromhex("887022120C06C227")
apdu = APDU(b"\x00", b"\xA4", b"\x02", b"\x0C", Lc=b"\x02", cdata=b"\x01\x1E")
rapdu = bytes.fromhex("990290008E08FA855A5D4C50A8ED9000")
SSC, decrypted_data = process_rapdu(ks_mac, SSC, apdu, rapdu)
self.assertIsNone(decrypted_data)
self.assertEqual(SSC, bytes.fromhex("887022120C06C228"))
ks_enc = ks_enc = bytes.fromhex("979EC13B1CBFE9DCD01AB0FED307EAE5")
SSC = bytes.fromhex("887022120C06C229")
apdu = APDU(b"\x00", b"\xB0", b"\x00", b"\x00", Le=b"\x04")
rapdu = bytes.fromhex("8709019FF0EC34F9922651990290008E08AD55CC17140B2DED9000")
SSC, decrypted_data = process_rapdu(ks_mac, SSC, apdu, rapdu, ks_enc=ks_enc)
self.assertEqual(decrypted_data, bytes.fromhex("60145F01"))
self.assertEqual(SSC, bytes.fromhex("887022120C06C22A"))
SSC = bytes.fromhex("887022120C06C22B")
apdu = APDU(b"\x00", b"\xB0", b"\x00", b"\x04", Le=b"\x12")
rapdu = bytes.fromhex(
"871901FB9235F4E4037F2327DCC8964F1F9B8C30F42C8E2FFF224A990290008E08C8B2787EAEA07D749000"
)
SSC, decrypted_data = process_rapdu(ks_mac, SSC, apdu, rapdu, ks_enc=ks_enc)
self.assertEqual(
decrypted_data, bytes.fromhex("04303130365F36063034303030305C026175")
)
self.assertEqual(SSC, bytes.fromhex("887022120C06C22C"))
if __name__ == "__main__":
unittest.main()
|
bzl | 1a34b833caa13645088558bcadbb5b0f97d8f10c | """Definitions for targets that use the TFLite shims."""
load("//tensorflow/lite:build_def.bzl", "tflite_jni_binary")
load("@build_bazel_rules_android//android:rules.bzl", "android_library")
def alias_with_tflite(name, actual, **kwargs):
"""Defines an alias for a target that uses the TFLite shims.
This rule 'alias_with_tflite' should be used instead of the native
'alias' rule whenever the 'actual' target that is being aliased
is defined using one of the *_with_tflite build macros.
Args:
name: determines the name used for the alias target.
actual: the target that the alias target is aliased to.
**kwargs: additional alias parameters.
"""
native.alias(name, actual, **kwargs)
def android_library_with_tflite(
name,
deps = [],
tflite_deps = [],
exports = [],
tflite_exports = [],
**kwargs):
"""Defines an android_library that uses the TFLite shims.
This is a hook to allow applying different build flags (etc.)
for targets that use the TFLite shims.
Note that this build rule doesn't itself add any dependencies on
TF Lite; this macro should normally be used in conjunction with a
direct or indirect 'tflite_deps' dependency on one of the "shim"
library targets from //third_party/tensorflow/lite/core/shims:*.
Args:
name: as for android_library.
deps: as for android_library.
tflite_deps: dependencies on rules that are themselves defined using
'cc_library_with_tflite' / 'android_library_with_tflite'.
exports: same as for android_library.
tflite_exports: exported dependencies that are themselves defined using
'cc_library_with_tflite' / 'android_library_with_tflite'.
**kwargs: Additional android_library parameters.
"""
android_library(
name = name,
exports = exports + tflite_exports,
deps = deps + tflite_deps,
**kwargs
)
def cc_library_with_tflite(
name,
srcs = [],
tflite_jni_binaries = [],
deps = [],
tflite_deps = [],
**kwargs):
"""Defines a cc_library that uses the TFLite shims.
This is a hook to allow applying different build flags (etc.)
for targets that use the TFLite shims.
Note that this build rule doesn't itself add any dependencies on
TF Lite; this macro should normally be used in conjunction with a
direct or indirect 'tflite_deps' dependency on one of the "shim"
library targets from //tensorflow/lite/core/shims:*.
Args:
name: as for cc_library.
srcs: as for cc_library.
tflite_jni_binaries: dependencies on shared libraries that are defined
using 'jni_binary_with_tflite'.
deps: as for cc_library.
tflite_deps: dependencies on rules that are themselves defined using
'cc_library_with_tflite'.
**kwargs: Additional cc_library parameters.
"""
native.cc_library(
name = name,
srcs = srcs + tflite_jni_binaries,
deps = deps + tflite_deps,
**kwargs
)
def cc_test_with_tflite(
name,
deps = [],
tflite_deps = [],
**kwargs):
"""Defines a cc_test that uses the TFLite shims.
This is a hook to allow applying different build flags (etc.)
for targets that use the TFLite shims.
Note that this build rule doesn't itself add any dependencies on
TF Lite; this macro should normally be used in conjunction with a
direct or indirect 'tflite_deps' dependency on one of the "shim"
library targets from //third_party/tensorflow/lite/core/shims:*.
Args:
name: as for cc_test.
deps: as for cc_test.
tflite_deps: dependencies on rules that are themselves defined using
'cc_library_with_tflite'.
**kwargs: Additional cc_test parameters.
"""
native.cc_test(
name = name,
deps = deps + tflite_deps,
**kwargs
)
def java_library_with_tflite(
name,
deps = [],
tflite_deps = [],
exports = [],
tflite_exports = [],
**kwargs):
"""Defines an java_library that uses the TFLite shims.
This is a hook to allow applying different build flags (etc.)
for targets that use the TFLite shims.
Note that this build rule doesn't itself add any dependencies on
TF Lite; this macro should normally be used in conjunction with a
direct or indirect 'tflite_deps' dependency on one of the "shim"
library targets from //third_party/tensorflow/lite/core/shims:*.
Args:
name: as for java_library.
deps: as for java_library.
tflite_deps: dependencies on rules that are themselves defined using
'cc_library_with_tflite' / 'java_library_with_tflite'.
exports: same as for java_library.
tflite_exports: exported dependencies that are themselves defined using
'cc_library_with_tflite' / 'java_library_with_tflite'.
**kwargs: Additional java_library parameters.
"""
native.java_library(
name = name,
exports = exports + tflite_exports,
deps = deps + tflite_deps,
**kwargs
)
def java_test_with_tflite(
name,
deps = [],
tflite_deps = [],
**kwargs):
"""Defines an java_library that uses the TFLite shims.
This is a hook to allow applying different build flags (etc.)
for targets that use the TFLite shims.
Note that this build rule doesn't itself add any dependencies on
TF Lite; this macro should normally be used in conjunction with a
direct or indirect 'tflite_deps' dependency on one of the "shim"
library targets from //third_party/tensorflow/lite/core/shims:*.
Args:
      name: as for java_test.
      deps: as for java_test.
      tflite_deps: dependencies on rules that are themselves defined using
        'cc_library_with_tflite' / 'java_library_with_tflite'.
      **kwargs: Additional java_test parameters.
"""
native.java_test(
name = name,
deps = deps + tflite_deps,
**kwargs
)
def jni_binary_with_tflite(
name,
deps = [],
tflite_deps = [],
**kwargs):
"""Defines a tflite_jni_binary that uses the TFLite shims.
This is a hook to allow applying different build flags (etc.)
for targets that use the TFLite shims.
Note that this build rule doesn't itself add any dependencies on
TF Lite; this macro should normally be used in conjunction with a
direct or indirect 'tflite_deps' dependency on one of the "shim"
library targets from //third_party/tensorflow/lite/core/shims:*.
Args:
name: as for tflite_jni_binary.
deps: as for tflite_jni_binary.
tflite_deps: dependencies on rules that are themselves defined using
'cc_library_with_tflite'.
**kwargs: Additional tflite_jni_binary parameters.
"""
tflite_jni_binary(
name = name,
deps = deps + tflite_deps,
**kwargs
)
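# Illustrative BUILD usage of the macros above (a sketch, not part of the
# original file; the target names and the shim-library label are assumptions):
#
#     cc_library_with_tflite(
#         name = "my_wrapper",
#         srcs = ["my_wrapper.cc"],
#         tflite_deps = ["//tensorflow/lite/core/shims:framework_shim"],  # hypothetical label
#     )
#
# Each macro simply folds tflite_deps (and tflite_exports) into the regular
# deps/exports, so switching the shim target is the only change needed to build
# against a different TFLite implementation.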
|
py | 1a34b88d36255d227222660655584c29afd38ff5 | '''
Written by Heng Fan
Generate the cropped image pairs in ILSVRC_crops; every frame gets one x (search region) crop and one z (exemplar) crop.
For example:
000000.00.crop.x.jpg
000000.00.crop.z.jpg
'''
import numpy as np
import os
import glob
import xml.etree.ElementTree as ET
import cv2
import datetime
'''
# default setting for cropping
'''
examplar_size = 127.0  # size of the exemplar (template) z
# instance_size = 255.0
instance_size = 271.0  # size of the instance (search region) x
context_amount = 0.5  # context padding ratio: (1/2)*(w+h)
def get_subwindow_avg(im, pos, model_sz, original_sz):
'''
# obtain image patch, padding with avg channel if area goes outside of border
'''
avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
if original_sz is None:
original_sz = model_sz
sz = original_sz
im_sz = im.shape
# make sure the size is not too small
assert (im_sz[0] > 2) & (im_sz[1] > 2), "The size of image is too small!"
c = (sz + 1) / 2
# check out-of-bounds coordinates, and set them to black
context_xmin = round(pos[1] - c) # floor(pos(2) - sz(2) / 2);
context_xmax = context_xmin + sz - 1
context_ymin = round(pos[0] - c) # floor(pos(1) - sz(1) / 2);
context_ymax = context_ymin + sz - 1
left_pad = max(0, 1 - context_xmin) # in python, index starts from 0
top_pad = max(0, 1 - context_ymin)
right_pad = max(0, context_xmax - im_sz[1])
bottom_pad = max(0, context_ymax - im_sz[0])
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
im_R = im[:, :, 0]
im_G = im[:, :, 1]
im_B = im[:, :, 2]
# padding
if (top_pad != 0) | (bottom_pad != 0) | (left_pad != 0) | (right_pad != 0):
im_R = np.pad(im_R, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
constant_values=avg_chans[0])
im_G = np.pad(im_G, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
constant_values=avg_chans[1])
im_B = np.pad(im_B, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
constant_values=avg_chans[2])
im = np.stack((im_R, im_G, im_B), axis=2)
im_patch_original = im[int(context_ymin) - 1:int(context_ymax), int(context_xmin) - 1:int(context_xmax), :]
if model_sz != original_sz:
im_patch = cv2.resize(im_patch_original, (int(model_sz), int(model_sz)), interpolation=cv2.INTER_CUBIC)
else:
im_patch = im_patch_original
return im_patch
def get_crops(img, bbox, size_z, size_x, context_amount):
'''
# get examplar and search region crops
'''
cx = bbox[0] + bbox[2]/2
cy = bbox[1] + bbox[3]/2
w = bbox[2]
h = bbox[3]
# for examplar
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z
im_crop_z = get_subwindow_avg(img, np.array([cy, cx]), size_z, round(s_z))
# for search region
d_search = (size_x - size_z) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
scale_x = size_x / s_x
im_crop_x = get_subwindow_avg(img, np.array([cy, cx]), size_x, round(s_x))
return im_crop_z, im_crop_x
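# Worked example for get_crops (illustrative numbers, not taken from the dataset):
# for bbox = [x, y, w, h] = [100, 50, 40, 60] and context_amount = 0.5:
#   wc_z = 40 + 0.5*(40+60) = 90,  hc_z = 60 + 0.5*(40+60) = 110
#   s_z  = sqrt(90*110) ≈ 99.5           -> resized to 127x127 for the exemplar z
#   scale_z = 127/99.5 ≈ 1.276,  pad = ((271-127)/2)/1.276 ≈ 56.4
#   s_x  = 99.5 + 2*56.4 ≈ 212.3         -> resized to 271x271 for the search region x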
def generate_image_crops(vid_root_path, vid_curated_path):
'''
# save image crops to the vid_curated_path
'''
anno_str = "Annotations/VID/train/"
data_str = "Data/VID/train/"
vid_anno_path = os.path.join(vid_root_path, anno_str)
vid_data_path = os.path.join(vid_root_path, data_str)
cur_procesed_fraem = 0
start_time = datetime.datetime.now()
total_time = 0
# dirs of level1: e.g., a/, b/, ...
all_dirs_level1 = os.listdir(vid_anno_path)
for i in range(len(all_dirs_level1)):
all_dirs_level2 = os.listdir(os.path.join(vid_anno_path, all_dirs_level1[i]))
# dirs of level2: e.g., a/ILSVRC2015_train_00000000/, a/ILSVRC2015_train_00001000/, ...
for j in range(len(all_dirs_level2)):
frame_list = glob.glob(os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], "*.xml"))
frame_list.sort()
# level3: frame level
for k in range(len(frame_list)):
frame_xml_name = os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], frame_list[k])
frame_xml_tree = ET.parse(frame_xml_name)
frame_xml_root = frame_xml_tree.getroot()
# image file path
frame_img_name = (frame_list[k].replace(".xml", ".JPEG")).replace(vid_anno_path, vid_data_path)
img = cv2.imread(frame_img_name)
if img is None:
print("Cannot find %s!"%frame_img_name)
exit(0)
# image file name
frame_filename = frame_xml_root.find('filename').text
# process (all objects in) each frame
for object in frame_xml_root.iter("object"):
# get trackid
id = object.find("trackid").text
# get bounding box
bbox_node = object.find("bndbox")
xmax = float(bbox_node.find('xmax').text)
xmin = float(bbox_node.find('xmin').text)
ymax = float(bbox_node.find('ymax').text)
ymin = float(bbox_node.find('ymin').text)
width = xmax - xmin + 1
height = ymax - ymin + 1
bbox = np.array([xmin, ymin, width, height])
# print("processing %s, %s, %s, %s ..." % (all_dirs_level1[i], all_dirs_level2[j], frame_filename+".JPEG", id))
# get crops
im_crop_z, im_crop_x = get_crops(img, bbox, examplar_size, instance_size, context_amount)
# save crops
save_path = os.path.join(vid_curated_path, data_str, all_dirs_level1[i], all_dirs_level2[j])
if not os.path.exists(save_path):
os.makedirs(save_path)
savename_crop_z = os.path.join(save_path, '{}.{:02d}.crop.z.jpg'.format(frame_filename, int(id)))
savename_crop_x = os.path.join(save_path, '{}.{:02d}.crop.x.jpg'.format(frame_filename, int(id)))
cv2.imwrite(savename_crop_z, im_crop_z, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cv2.imwrite(savename_crop_x, im_crop_x, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cur_procesed_fraem = cur_procesed_fraem + 1
if cur_procesed_fraem % 1000 == 0:
end_time = datetime.datetime.now()
total_time = total_time + int((end_time-start_time).seconds)
print("finished processing %d frames in %d seconds (FPS: %d ) ..." % (cur_procesed_fraem, total_time, int(1000/(end_time-start_time).seconds)))
start_time = datetime.datetime.now()
def generate_image_crops4otb(vid_root_path, vid_curated_path):
'''
# save image crops to the vid_curated_path
'''
anno_str = "groundtruth_rect.txt"
data_str = "img"
vid_anno_path = os.path.join(vid_root_path, anno_str)
vid_data_path = os.path.join(vid_root_path, data_str)
cur_procesed_fraem = 0
start_time = datetime.datetime.now()
total_time = 0
bboxs = [list(map(float,x.split(','))) for x in open(vid_anno_path, 'r').readlines()]
img_list = glob.glob(os.path.join(vid_data_path,"*.jpg"))
for i in range(len(img_list)):
# image file path
img_path = img_list[i]
img = cv2.imread(img_path)
if img is None:
print("Cannot find %s!" % img_path)
exit(0)
img_name = img_path.split('\\')[-1]
# get bounding box
bbox = bboxs[i]
xmax = bbox[0]+bbox[2]
xmin = bbox[0]
ymax = bbox[1]+bbox[3]
ymin = bbox[1]
width = bbox[2]
height = bbox[3]
new_bbox = np.array([xmin, ymin, width, height])
# print("processing %s, %s, %s, %s ..." % (all_dirs_level1[i], all_dirs_level2[j], frame_filename+".JPEG", id))
# get crops
im_crop_z, im_crop_x = get_crops(img, new_bbox, examplar_size, instance_size, context_amount)
# save crops
save_path = os.path.join(vid_curated_path, data_str)
if not os.path.exists(save_path):
os.makedirs(save_path)
savename_crop_z = os.path.join(save_path, '{}.crop.z.jpg'.format(img_name))
savename_crop_x = os.path.join(save_path, '{}.crop.x.jpg'.format(img_name))
cv2.imwrite(savename_crop_z, im_crop_z, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cv2.imwrite(savename_crop_x, im_crop_x, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cur_procesed_fraem = cur_procesed_fraem + 1
if cur_procesed_fraem % 1000 == 0:
end_time = datetime.datetime.now()
total_time = total_time + int((end_time-start_time).seconds)
print("finished processing %d frames in %d seconds (FPS: %d ) ..." % (cur_procesed_fraem, total_time, int(1000/(end_time-start_time).seconds)))
start_time = datetime.datetime.now()
if __name__ == "__main__":
# path to your VID dataset
# vid_root_path = "/home/hfan/Dataset/ILSVRC2015"
# vid_curated_path = "/home/hfan/Dataset/ILSVRC2015_crops"
# Windows ILSVRC
vid_root_path = r"D:\workspace\MachineLearning\asimo\ILSVRC"
vid_curated_path = r"D:\workspace\MachineLearning\asimo\ILSVRC_crops"
# Windows OTB
vid_root_path = r"D:\workspace\MachineLearning\asimo\OTB_Train"
vid_curated_path = r"D:\workspace\MachineLearning\asimo\OTB_Train_crops"
# Linux
# vid_root_path = r"/home/zzx/vot/VGG/ILSVRC"
# vid_curated_path = r"/home/sjl/dataset/ILSVRC_crops"
if not os.path.exists(vid_curated_path):
os.mkdir(vid_curated_path)
# generate_image_crops(vid_root_path, vid_curated_path)
generate_image_crops4otb(vid_root_path, vid_curated_path)
|
py | 1a34b97b67740ba3dbd5f43cba11571227f67a05 | '''
Created on 22 Sep 2016
@author: andrew
'''
DATA_DIR = '/home/andrew/workspace/BKData/'
TESTS_DIR = DATA_DIR + 'tests/'
CIRCUIT_TEST_DIR = TESTS_DIR + 'circuit/'
CIRCUIT_ANGLE_TEST_DIR = CIRCUIT_TEST_DIR + 'angles/'
DEFAULT_CUTOFF = 1e-14
from yaferp.analysis import analyser
import cPickle
import scipy.sparse
import os
def loadCircuitAngles(fileName,boolJWorBK,cutoff=DEFAULT_CUTOFF):
if cutoff != None:
if boolJWorBK:
path = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) + '/BK/' + fileName + '.angs'
else:
path = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) +'/JW/' + fileName + '.angs'
fred = loadDict(path)
return fred
def saveDict(thing,dictPath):
with open(dictPath,'wb') as f:
cPickle.dump(thing,f,cPickle.HIGHEST_PROTOCOL)
def storeInDict(dictPath,theKey,theValue,rewrite=0):
thing = loadDict(dictPath)
if thing == None:
thing = {}
if(theKey in thing) and rewrite==0:
return thing
else:
thing[theKey] = theValue
saveDict(thing,dictPath)
return thing
def loadDict(dictPath):
if not os.path.isfile(dictPath):
return None
else:
with open(dictPath,'rb') as f:
it = cPickle.load(f)
return it
def calculateCircuitAngle(filename,boolJWorBK,cutoff=DEFAULT_CUTOFF,circuitType='normal',overwrite=0):
    '''!!! this WILL FAIL if eigenvectors have not been previously generated !!! (also fails if cutoff == None)'''
circ = analyser.generateCircuit(filename, boolJWorBK, cutoff, circuitType, overwrite)
eigvec = analyser.readEigenvector(filename, boolJWorBK, cutoff)
# print(eigvec)
if circuitType in ['ancilla','ancillaOptimised']:
testVec = scipy.sparse.kron([[1.],[0.]],eigvec)
else:
testVec = eigvec
ang = circ.angle(testVec)
return ang
def generateCircuitAngle(filename,boolJWorBK,cutoff=DEFAULT_CUTOFF,circuitType='normal',overwrite=0):
if cutoff != None:
if boolJWorBK:
outputPath = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) + '/BK/' + filename + '.angs'
else:
outputPath = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) +'/JW/' + filename + '.angs'
ang = calculateCircuitAngle(filename,boolJWorBK,cutoff,circuitType,overwrite)
storeInDict(outputPath,circuitType,ang,overwrite)
return ang
def generateManyCircuitAngles(filename,boolJWorBK,cutoff=DEFAULT_CUTOFF,listCircuitTypes='all',overwrite=0):
ALL_CIRCUIT_TYPES=['normal',
'optimised',
'interior',
'interiorOptimised',
'ancilla',
'ancillaOptimised']
if listCircuitTypes == 'all':
listCircuitTypes = ALL_CIRCUIT_TYPES
angles = {}
for circuitType in listCircuitTypes:
thisAngle = generateCircuitAngle(filename,boolJWorBK,cutoff,circuitType,overwrite)
angles[circuitType] = thisAngle
return angles
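# Illustrative usage (a sketch; the filename below is an assumption and must refer
# to a Hamiltonian whose eigenvectors were already generated via analyser):
#
#     angles = generateManyCircuitAngles('HeH+_STO-3G', boolJWorBK=0,
#                                        listCircuitTypes=['normal', 'optimised'])
#
# Each angle is also cached on disk in the corresponding .angs dictionary under
# CIRCUIT_ANGLE_TEST_DIR, keyed by circuit type.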
|
py | 1a34b994a329a7455e6dc073092eac367f29157d | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import random
from numbers import Number
from functools import partial
from operator import methodcaller
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
import numpy as np
import cv2
import imghdr
from PIL import Image
import paddlers
from .functions import normalize, horizontal_flip, permute, vertical_flip, center_crop, is_poly, \
horizontal_flip_poly, horizontal_flip_rle, vertical_flip_poly, vertical_flip_rle, crop_poly, \
crop_rle, expand_poly, expand_rle, resize_poly, resize_rle, de_haze, pca, select_bands, \
to_intensity, to_uint8, img_flip, img_simple_rotate
__all__ = [
"Compose",
"ImgDecoder",
"Resize",
"RandomResize",
"ResizeByShort",
"RandomResizeByShort",
"ResizeByLong",
"RandomHorizontalFlip",
"RandomVerticalFlip",
"Normalize",
"CenterCrop",
"RandomCrop",
"RandomScaleAspect",
"RandomExpand",
"Padding",
"MixupImage",
"RandomDistort",
"RandomBlur",
"RandomSwap",
"Defogging",
"DimReducing",
"BandSelecting",
"ArrangeSegmenter",
"ArrangeChangeDetector",
"ArrangeClassifier",
"ArrangeDetector",
"RandomFlipOrRotation",
]
interp_dict = {
'NEAREST': cv2.INTER_NEAREST,
'LINEAR': cv2.INTER_LINEAR,
'CUBIC': cv2.INTER_CUBIC,
'AREA': cv2.INTER_AREA,
'LANCZOS4': cv2.INTER_LANCZOS4
}
class Transform(object):
"""
Parent class of all data augmentation operations
"""
def __init__(self):
pass
def apply_im(self, image):
pass
def apply_mask(self, mask):
pass
def apply_bbox(self, bbox):
pass
def apply_segm(self, segms):
pass
def apply(self, sample):
if 'image' in sample:
sample['image'] = self.apply_im(sample['image'])
else: # image_tx
sample['image'] = self.apply_im(sample['image_t1'])
sample['image2'] = self.apply_im(sample['image_t2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'gt_bbox' in sample:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
return sample
def __call__(self, sample):
if isinstance(sample, Sequence):
sample = [self.apply(s) for s in sample]
else:
sample = self.apply(sample)
return sample
class ImgDecoder(Transform):
"""
Decode image(s) in input.
Args:
        to_rgb (bool, optional): If True, convert input images from BGR format to RGB format. Defaults to True.
        to_uint8 (bool, optional): If True, convert the decoded image to uint8. Defaults to True.
"""
def __init__(self, to_rgb=True, to_uint8=True):
super(ImgDecoder, self).__init__()
self.to_rgb = to_rgb
self.to_uint8 = to_uint8
def read_img(self, img_path, input_channel=3):
img_format = imghdr.what(img_path)
name, ext = os.path.splitext(img_path)
if img_format == 'tiff' or ext == '.img':
try:
import gdal
except:
try:
from osgeo import gdal
except:
                    raise Exception(
                        "Failed to import gdal! You can try using conda to install gdal."
                    )
dataset = gdal.Open(img_path)
if dataset == None:
raise Exception('Can not open', img_path)
im_data = dataset.ReadAsArray()
if im_data.ndim == 2:
                im_data = to_intensity(im_data)  # single-band input, assumed SAR; convert to intensity
im_data = im_data[:, :, np.newaxis]
elif im_data.ndim == 3:
im_data = im_data.transpose((1, 2, 0))
return im_data
elif img_format in ['jpeg', 'bmp', 'png', 'jpg']:
if input_channel == 3:
return cv2.imread(img_path, cv2.IMREAD_ANYDEPTH |
cv2.IMREAD_ANYCOLOR | cv2.IMREAD_COLOR)
else:
return cv2.imread(img_path, cv2.IMREAD_ANYDEPTH |
cv2.IMREAD_ANYCOLOR)
elif ext == '.npy':
return np.load(img_path)
else:
raise Exception('Image format {} is not supported!'.format(ext))
def apply_im(self, im_path):
if isinstance(im_path, str):
try:
image = self.read_img(im_path)
except:
raise ValueError('Cannot read the image file {}!'.format(
im_path))
else:
image = im_path
if self.to_rgb and image.shape[-1] == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.to_uint8:
image = to_uint8(image)
return image
def apply_mask(self, mask):
try:
mask = np.asarray(Image.open(mask))
except:
raise ValueError("Cannot read the mask file {}!".format(mask))
if len(mask.shape) != 2:
raise Exception(
"Mask should be a 1-channel image, but recevied is a {}-channel image.".
format(mask.shape[2]))
return mask
def apply(self, sample):
"""
Args:
sample (dict): Input sample.
Returns:
dict: Decoded sample.
"""
if 'image' in sample:
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'image_t1' in sample and not 'image' in sample:
if not ('image_t2' in sample and 'image2' not in sample):
raise ValueError
sample['image'] = self.apply_im(sample['image_t1'])
sample['image2'] = self.apply_im(sample['image_t2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
im_height, im_width, _ = sample['image'].shape
se_height, se_width = sample['mask'].shape
if im_height != se_height or im_width != se_width:
raise Exception(
"The height or width of the im is not same as the mask")
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
# TODO: check the shape of auxiliary masks
sample['im_shape'] = np.array(
sample['image'].shape[:2], dtype=np.float32)
sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return sample
class Compose(Transform):
"""
    Apply a series of data augmentations to the input.
    All input images are in Height-Width-Channel ([H, W, C]) format.
    Args:
        transforms (List[paddlers.transforms.Transform]): List of data preprocessing or augmentation operators.
Raises:
TypeError: Invalid type of transforms.
ValueError: Invalid length of transforms.
"""
def __init__(self, transforms):
super(Compose, self).__init__()
if not isinstance(transforms, list):
raise TypeError(
'Type of transforms is invalid. Must be List, but received is {}'
.format(type(transforms)))
if len(transforms) < 1:
raise ValueError(
'Length of transforms must not be less than 1, but received is {}'
.format(len(transforms)))
self.transforms = transforms
self.decode_image = ImgDecoder()
self.arrange_outputs = None
self.apply_im_only = False
def __call__(self, sample):
if self.apply_im_only:
if 'mask' in sample:
mask_backup = copy.deepcopy(sample['mask'])
del sample['mask']
if 'aux_masks' in sample:
aux_masks = copy.deepcopy(sample['aux_masks'])
sample = self.decode_image(sample)
for op in self.transforms:
# skip batch transforms amd mixup
if isinstance(op, (paddlers.transforms.BatchRandomResize,
paddlers.transforms.BatchRandomResizeByShort,
MixupImage)):
continue
sample = op(sample)
if self.arrange_outputs is not None:
if self.apply_im_only:
sample['mask'] = mask_backup
if 'aux_masks' in locals():
sample['aux_masks'] = aux_masks
sample = self.arrange_outputs(sample)
return sample
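# Illustrative usage of Compose (a sketch; the file path below is an assumption):
#
#     transforms = Compose([
#         Resize(target_size=256),
#         RandomHorizontalFlip(prob=0.5),
#         Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#     ])
#     sample = transforms({'image': 'path/to/scene.tif'})
#
# Compose first decodes the sample with ImgDecoder and then applies each
# transform in order, skipping batch transforms and MixupImage as noted above.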
class Resize(Transform):
"""
Resize input.
- If target_size is an int, resize the image(s) to (target_size, target_size).
- If target_size is a list or tuple, resize the image(s) to target_size.
    Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
target_size (int, List[int] or Tuple[int]): Target size. If int, the height and width share the same target_size.
Otherwise, target_size represents [target height, target width].
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional):
Interpolation method of resize. Defaults to 'LINEAR'.
        keep_ratio (bool): If True, width and height are scaled by the same factor so that the resized
            width/height is not greater than the target width/height. Defaults to False.
Raises:
TypeError: Invalid type of target_size.
ValueError: Invalid interpolation method.
"""
def __init__(self, target_size, interp='LINEAR', keep_ratio=False):
super(Resize, self).__init__()
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
if isinstance(target_size, int):
target_size = (target_size, target_size)
else:
if not (isinstance(target_size,
(list, tuple)) and len(target_size) == 2):
raise TypeError(
"target_size should be an int or a list of length 2, but received {}".
format(target_size))
# (height, width)
self.target_size = target_size
self.interp = interp
self.keep_ratio = keep_ratio
def apply_im(self, image, interp, target_size):
flag = image.shape[2] == 1
image = cv2.resize(image, target_size, interpolation=interp)
if flag:
image = image[:, :, np.newaxis]
return image
def apply_mask(self, mask, target_size):
mask = cv2.resize(mask, target_size, interpolation=cv2.INTER_NEAREST)
return mask
def apply_bbox(self, bbox, scale, target_size):
im_scale_x, im_scale_y = scale
bbox[:, 0::2] *= im_scale_x
bbox[:, 1::2] *= im_scale_y
bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, target_size[0])
bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, target_size[1])
return bbox
def apply_segm(self, segms, im_size, scale):
im_h, im_w = im_size
im_scale_x, im_scale_y = scale
resized_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
resized_segms.append([
resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
])
else:
# RLE format
resized_segms.append(
resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
return resized_segms
def apply(self, sample):
if self.interp == "RANDOM":
interp = random.choice(list(interp_dict.values()))
else:
interp = interp_dict[self.interp]
im_h, im_w = sample['image'].shape[:2]
im_scale_y = self.target_size[0] / im_h
im_scale_x = self.target_size[1] / im_w
target_size = (self.target_size[1], self.target_size[0])
if self.keep_ratio:
scale = min(im_scale_y, im_scale_x)
target_w = int(round(im_w * scale))
target_h = int(round(im_h * scale))
target_size = (target_w, target_h)
im_scale_y = target_h / im_h
im_scale_x = target_w / im_w
sample['image'] = self.apply_im(sample['image'], interp, target_size)
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], interp,
target_size)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], target_size)
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(partial(
self.apply_mask, target_size=target_size),
sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(
sample['gt_bbox'], [im_scale_x, im_scale_y], target_size)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(
sample['gt_poly'], [im_h, im_w], [im_scale_x, im_scale_y])
sample['im_shape'] = np.asarray(
sample['image'].shape[:2], dtype=np.float32)
if 'scale_factor' in sample:
scale_factor = sample['scale_factor']
sample['scale_factor'] = np.asarray(
[scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
dtype=np.float32)
return sample
class RandomResize(Transform):
"""
Resize input to random sizes.
    Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
target_sizes (List[int], List[list or tuple] or Tuple[list or tuple]):
Multiple target sizes, each target size is an int or list/tuple.
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional):
Interpolation method of resize. Defaults to 'LINEAR'.
Raises:
TypeError: Invalid type of target_size.
ValueError: Invalid interpolation method.
See Also:
Resize input to a specific size.
"""
def __init__(self, target_sizes, interp='LINEAR'):
super(RandomResize, self).__init__()
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
self.interp = interp
assert isinstance(target_sizes, list), \
"target_size must be List"
for i, item in enumerate(target_sizes):
if isinstance(item, int):
target_sizes[i] = (item, item)
self.target_size = target_sizes
def apply(self, sample):
height, width = random.choice(self.target_size)
resizer = Resize((height, width), interp=self.interp)
sample = resizer(sample)
return sample
class ResizeByShort(Transform):
"""
    Resize input while keeping the aspect ratio.
    Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
short_size (int): Target size of the shorter side of the image(s).
max_size (int, optional): The upper bound of longer side of the image(s). If max_size is -1, no upper bound is applied. Defaults to -1.
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional): Interpolation method of resize. Defaults to 'LINEAR'.
Raises:
ValueError: Invalid interpolation method.
"""
def __init__(self, short_size=256, max_size=-1, interp='LINEAR'):
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
super(ResizeByShort, self).__init__()
self.short_size = short_size
self.max_size = max_size
self.interp = interp
def apply(self, sample):
im_h, im_w = sample['image'].shape[:2]
im_short_size = min(im_h, im_w)
im_long_size = max(im_h, im_w)
scale = float(self.short_size) / float(im_short_size)
if 0 < self.max_size < np.round(scale * im_long_size):
scale = float(self.max_size) / float(im_long_size)
target_w = int(round(im_w * scale))
target_h = int(round(im_h * scale))
sample = Resize(
target_size=(target_h, target_w), interp=self.interp)(sample)
return sample
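    # Worked example (illustrative numbers): for a 600x1000 (HxW) image with
    # short_size=256 and max_size=-1, scale = 256/600 ≈ 0.427, giving a 256x427
    # output; with max_size=400 the scaled longer side (≈427) would exceed 400,
    # so the scale is clamped to 400/1000 = 0.4 and the output becomes 240x400.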
class RandomResizeByShort(Transform):
"""
    Resize input to random sizes while keeping the aspect ratio.
    Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
short_sizes (List[int]): Target size of the shorter side of the image(s).
max_size (int, optional): The upper bound of longer side of the image(s). If max_size is -1, no upper bound is applied. Defaults to -1.
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional): Interpolation method of resize. Defaults to 'LINEAR'.
Raises:
TypeError: Invalid type of target_size.
ValueError: Invalid interpolation method.
See Also:
ResizeByShort: Resize image(s) in input with keeping the aspect ratio.
"""
def __init__(self, short_sizes, max_size=-1, interp='LINEAR'):
super(RandomResizeByShort, self).__init__()
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
self.interp = interp
assert isinstance(short_sizes, list), \
"short_sizes must be List"
self.short_sizes = short_sizes
self.max_size = max_size
def apply(self, sample):
short_size = random.choice(self.short_sizes)
resizer = ResizeByShort(
short_size=short_size, max_size=self.max_size, interp=self.interp)
sample = resizer(sample)
return sample
class ResizeByLong(Transform):
def __init__(self, long_size=256, interp='LINEAR'):
super(ResizeByLong, self).__init__()
self.long_size = long_size
self.interp = interp
def apply(self, sample):
im_h, im_w = sample['image'].shape[:2]
im_long_size = max(im_h, im_w)
scale = float(self.long_size) / float(im_long_size)
target_h = int(round(im_h * scale))
target_w = int(round(im_w * scale))
sample = Resize(
target_size=(target_h, target_w), interp=self.interp)(sample)
return sample
class RandomFlipOrRotation(Transform):
"""
Flip or Rotate an image in different ways with a certain probability.
Args:
probs (list of float): Probabilities of flipping and rotation. Default: [0.35,0.25].
        probsf (list of float): Probabilities of the 5 flipping modes
            (horizontal, vertical, both horizontal and vertical, diagonal, anti-diagonal).
            Default: [0.3, 0.3, 0.2, 0.1, 0.1].
        probsr (list of float): Probabilities of the 3 rotation modes (90°, 180°, 270° clockwise). Default: [0.25, 0.5, 0.25].
Examples:
from paddlers import transforms as T
        # define the data augmentations
        train_transforms = T.Compose([
            T.RandomFlipOrRotation(
                probs  = [0.3, 0.2]             # probability of a flip is 0.3, of a rotation 0.2, of no change 0.5
                probsf = [0.3, 0.25, 0, 0, 0]   # when flipping: horizontal 0.3, vertical 0.25, the other flip modes 0, no change 0.45
                probsr = [0, 0.65, 0]),         # when rotating: 90° clockwise 0, 180° 0.65, 270° 0, no change 0.35
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
"""
def __init__(self,
probs=[0.35, 0.25],
probsf=[0.3, 0.3, 0.2, 0.1, 0.1],
probsr=[0.25, 0.5, 0.25]):
super(RandomFlipOrRotation, self).__init__()
# Change various probabilities into probability intervals, to judge in which mode to flip or rotate
self.probs = [probs[0], probs[0] + probs[1]]
self.probsf = self.get_probs_range(probsf)
self.probsr = self.get_probs_range(probsr)
def apply_im(self, image, mode_id, flip_mode=True):
if flip_mode:
image = img_flip(image, mode_id)
else:
image = img_simple_rotate(image, mode_id)
return image
def apply_mask(self, mask, mode_id, flip_mode=True):
if flip_mode:
mask = img_flip(mask, mode_id)
else:
mask = img_simple_rotate(mask, mode_id)
return mask
def get_probs_range(self, probs):
'''
Change various probabilities into cumulative probabilities
Args:
probs(list of float): probabilities of different mode, shape:[n]
Returns:
probability intervals(list of binary list): shape:[n, 2]
'''
ps = []
last_prob = 0
for prob in probs:
p_s = last_prob
cur_prob = prob / sum(probs)
last_prob += cur_prob
p_e = last_prob
ps.append([p_s, p_e])
return ps
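    # Worked example (illustrative): get_probs_range([0.3, 0.3, 0.2, 0.1, 0.1])
    # returns the cumulative intervals
    # [[0.0, 0.3], [0.3, 0.6], [0.6, 0.8], [0.8, 0.9], [0.9, 1.0]], so a random
    # p strictly inside (0.3, 0.6) makes judge_probs_range return mode id 1.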
def judge_probs_range(self, p, probs):
'''
Judge whether a probability value falls within the given probability interval
Args:
p(float): probability
probs(list of binary list): probability intervals, shape:[n, 2]
Returns:
mode id(int):the probability interval number where the input probability falls,
if return -1, the image will remain as it is and will not be processed
'''
for id, id_range in enumerate(probs):
if p > id_range[0] and p < id_range[1]:
return id
return -1
def apply(self, sample):
p_m = random.random()
if p_m < self.probs[0]:
mode_p = random.random()
mode_id = self.judge_probs_range(mode_p, self.probsf)
sample['image'] = self.apply_im(sample['image'], mode_id, True)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], mode_id, True)
elif p_m < self.probs[1]:
mode_p = random.random()
mode_id = self.judge_probs_range(mode_p, self.probsr)
sample['image'] = self.apply_im(sample['image'], mode_id, False)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], mode_id, False)
return sample
class RandomHorizontalFlip(Transform):
"""
Randomly flip the input horizontally.
Args:
prob(float, optional): Probability of flipping the input. Defaults to .5.
"""
def __init__(self, prob=0.5):
super(RandomHorizontalFlip, self).__init__()
self.prob = prob
def apply_im(self, image):
image = horizontal_flip(image)
return image
def apply_mask(self, mask):
mask = horizontal_flip(mask)
return mask
def apply_bbox(self, bbox, width):
oldx1 = bbox[:, 0].copy()
oldx2 = bbox[:, 2].copy()
bbox[:, 0] = width - oldx2
bbox[:, 2] = width - oldx1
return bbox
def apply_segm(self, segms, height, width):
flipped_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
flipped_segms.append(
[horizontal_flip_poly(poly, width) for poly in segm])
else:
# RLE format
flipped_segms.append(horizontal_flip_rle(segm, height, width))
return flipped_segms
def apply(self, sample):
if random.random() < self.prob:
im_h, im_w = sample['image'].shape[:2]
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], im_w)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_h,
im_w)
return sample
class RandomVerticalFlip(Transform):
"""
Randomly flip the input vertically.
Args:
prob(float, optional): Probability of flipping the input. Defaults to .5.
"""
def __init__(self, prob=0.5):
super(RandomVerticalFlip, self).__init__()
self.prob = prob
def apply_im(self, image):
image = vertical_flip(image)
return image
def apply_mask(self, mask):
mask = vertical_flip(mask)
return mask
def apply_bbox(self, bbox, height):
oldy1 = bbox[:, 1].copy()
oldy2 = bbox[:, 3].copy()
        bbox[:, 1] = height - oldy2
        bbox[:, 3] = height - oldy1
return bbox
def apply_segm(self, segms, height, width):
flipped_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
flipped_segms.append(
[vertical_flip_poly(poly, height) for poly in segm])
else:
# RLE format
flipped_segms.append(vertical_flip_rle(segm, height, width))
return flipped_segms
def apply(self, sample):
if random.random() < self.prob:
im_h, im_w = sample['image'].shape[:2]
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], im_h)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_h,
im_w)
return sample
class Normalize(Transform):
"""
Apply min-max normalization to the image(s) in input.
1. im = (im - min_value) * 1 / (max_value - min_value)
2. im = im - mean
3. im = im / std
Args:
mean(List[float] or Tuple[float], optional): Mean of input image(s). Defaults to [0.485, 0.456, 0.406].
std(List[float] or Tuple[float], optional): Standard deviation of input image(s). Defaults to [0.229, 0.224, 0.225].
min_val(List[float] or Tuple[float], optional): Minimum value of input image(s). Defaults to [0, 0, 0, ].
max_val(List[float] or Tuple[float], optional): Max value of input image(s). Defaults to [255., 255., 255.].
"""
def __init__(self,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
min_val=None,
max_val=None):
super(Normalize, self).__init__()
channel = len(mean)
if min_val is None:
min_val = [0] * channel
if max_val is None:
max_val = [255.] * channel
from functools import reduce
if reduce(lambda x, y: x * y, std) == 0:
raise ValueError(
'Std should not contain 0, but received is {}.'.format(std))
if reduce(lambda x, y: x * y,
[a - b for a, b in zip(max_val, min_val)]) == 0:
raise ValueError(
'(max_val - min_val) should not contain 0, but received is {}.'.
format((np.asarray(max_val) - np.asarray(min_val)).tolist()))
self.mean = mean
self.std = std
self.min_val = min_val
self.max_val = max_val
def apply_im(self, image):
image = image.astype(np.float32)
mean = np.asarray(
self.mean, dtype=np.float32)[np.newaxis, np.newaxis, :]
std = np.asarray(self.std, dtype=np.float32)[np.newaxis, np.newaxis, :]
image = normalize(image, mean, std, self.min_val, self.max_val)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
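    # Worked example (illustrative): with the default mean/std/min_val/max_val,
    # a raw value of 127.5 in channel 0 becomes (127.5 - 0) / (255 - 0) = 0.5,
    # then (0.5 - 0.485) / 0.229 ≈ 0.066 after mean/std normalization.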
class CenterCrop(Transform):
"""
Crop the input at the center.
1. Locate the center of the image.
2. Crop the sample.
Args:
crop_size(int, optional): target size of the cropped image(s). Defaults to 224.
"""
def __init__(self, crop_size=224):
super(CenterCrop, self).__init__()
self.crop_size = crop_size
def apply_im(self, image):
image = center_crop(image, self.crop_size)
return image
def apply_mask(self, mask):
mask = center_crop(mask, self.crop_size)
return mask
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
return sample
class RandomCrop(Transform):
"""
Randomly crop the input.
1. Compute the height and width of cropped area according to aspect_ratio and scaling.
2. Locate the upper left corner of cropped area randomly.
3. Crop the image(s).
4. Resize the cropped area to crop_size by crop_size.
Args:
crop_size(int, List[int] or Tuple[int]): Target size of the cropped area. If None, the cropped area will not be
resized. Defaults to None.
aspect_ratio (List[float], optional): Aspect ratio of cropped region in [min, max] format. Defaults to [.5, 2.].
thresholds (List[float], optional): Iou thresholds to decide a valid bbox crop.
Defaults to [.0, .1, .3, .5, .7, .9].
scaling (List[float], optional): Ratio between the cropped region and the original image in [min, max] format.
Defaults to [.3, 1.].
num_attempts (int, optional): The number of tries before giving up. Defaults to 50.
allow_no_crop (bool, optional): Whether returning without doing crop is allowed. Defaults to True.
cover_all_box (bool, optional): Whether to ensure all bboxes are covered in the final crop. Defaults to False.
"""
def __init__(self,
crop_size=None,
aspect_ratio=[.5, 2.],
thresholds=[.0, .1, .3, .5, .7, .9],
scaling=[.3, 1.],
num_attempts=50,
allow_no_crop=True,
cover_all_box=False):
super(RandomCrop, self).__init__()
self.crop_size = crop_size
self.aspect_ratio = aspect_ratio
self.thresholds = thresholds
self.scaling = scaling
self.num_attempts = num_attempts
self.allow_no_crop = allow_no_crop
self.cover_all_box = cover_all_box
def _generate_crop_info(self, sample):
im_h, im_w = sample['image'].shape[:2]
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
thresholds = self.thresholds
if self.allow_no_crop:
thresholds.append('no_crop')
np.random.shuffle(thresholds)
for thresh in thresholds:
if thresh == 'no_crop':
return None
for i in range(self.num_attempts):
crop_box = self._get_crop_box(im_h, im_w)
if crop_box is None:
continue
iou = self._iou_matrix(
sample['gt_bbox'],
np.array(
[crop_box], dtype=np.float32))
if iou.max() < thresh:
continue
if self.cover_all_box and iou.min() < thresh:
continue
cropped_box, valid_ids = self._crop_box_with_center_constraint(
sample['gt_bbox'], np.array(
crop_box, dtype=np.float32))
if valid_ids.size > 0:
return crop_box, cropped_box, valid_ids
else:
for i in range(self.num_attempts):
crop_box = self._get_crop_box(im_h, im_w)
if crop_box is None:
continue
return crop_box, None, None
return None
def _get_crop_box(self, im_h, im_w):
scale = np.random.uniform(*self.scaling)
if self.aspect_ratio is not None:
min_ar, max_ar = self.aspect_ratio
aspect_ratio = np.random.uniform(
max(min_ar, scale**2), min(max_ar, scale**-2))
h_scale = scale / np.sqrt(aspect_ratio)
w_scale = scale * np.sqrt(aspect_ratio)
else:
h_scale = np.random.uniform(*self.scaling)
w_scale = np.random.uniform(*self.scaling)
crop_h = im_h * h_scale
crop_w = im_w * w_scale
if self.aspect_ratio is None:
if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:
return None
crop_h = int(crop_h)
crop_w = int(crop_w)
crop_y = np.random.randint(0, im_h - crop_h)
crop_x = np.random.randint(0, im_w - crop_w)
return [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
def _iou_matrix(self, a, b):
tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])
br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
area_o = (area_a[:, np.newaxis] + area_b - area_i)
return area_i / (area_o + 1e-10)
def _crop_box_with_center_constraint(self, box, crop):
cropped_box = box.copy()
cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])
cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])
cropped_box[:, :2] -= crop[:2]
cropped_box[:, 2:] -= crop[:2]
centers = (box[:, :2] + box[:, 2:]) / 2
valid = np.logical_and(crop[:2] <= centers,
centers < crop[2:]).all(axis=1)
valid = np.logical_and(
valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))
return cropped_box, np.where(valid)[0]
def _crop_segm(self, segms, valid_ids, crop, height, width):
crop_segms = []
for id in valid_ids:
segm = segms[id]
if is_poly(segm):
# Polygon format
crop_segms.append(crop_poly(segm, crop))
else:
# RLE format
crop_segms.append(crop_rle(segm, crop, height, width))
return crop_segms
def apply_im(self, image, crop):
x1, y1, x2, y2 = crop
return image[y1:y2, x1:x2, :]
def apply_mask(self, mask, crop):
x1, y1, x2, y2 = crop
return mask[y1:y2, x1:x2, ...]
def apply(self, sample):
crop_info = self._generate_crop_info(sample)
if crop_info is not None:
crop_box, cropped_box, valid_ids = crop_info
im_h, im_w = sample['image'].shape[:2]
sample['image'] = self.apply_im(sample['image'], crop_box)
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], crop_box)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
crop_polys = self._crop_segm(
sample['gt_poly'],
valid_ids,
np.array(
crop_box, dtype=np.int64),
im_h,
im_w)
if [] in crop_polys:
delete_id = list()
valid_polys = list()
for idx, poly in enumerate(crop_polys):
                        if not poly:  # filter polygons emptied by the crop
delete_id.append(idx)
else:
valid_polys.append(poly)
valid_ids = np.delete(valid_ids, delete_id)
if not valid_polys:
return sample
sample['gt_poly'] = valid_polys
else:
sample['gt_poly'] = crop_polys
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
sample['gt_class'] = np.take(
sample['gt_class'], valid_ids, axis=0)
if 'gt_score' in sample:
sample['gt_score'] = np.take(
sample['gt_score'], valid_ids, axis=0)
if 'is_crowd' in sample:
sample['is_crowd'] = np.take(
sample['is_crowd'], valid_ids, axis=0)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], crop_box)
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(partial(
self.apply_mask, crop=crop_box),
sample['aux_masks']))
if self.crop_size is not None:
sample = Resize(self.crop_size)(sample)
return sample
class RandomScaleAspect(Transform):
"""
Crop input image(s) and resize back to original sizes.
Args:
min_scale (float): Minimum ratio between the cropped region and the original image.
If 0, image(s) will not be cropped. Defaults to .5.
aspect_ratio (float): Aspect ratio of cropped region. Defaults to .33.
"""
def __init__(self, min_scale=0.5, aspect_ratio=0.33):
super(RandomScaleAspect, self).__init__()
self.min_scale = min_scale
self.aspect_ratio = aspect_ratio
def apply(self, sample):
if self.min_scale != 0 and self.aspect_ratio != 0:
img_height, img_width = sample['image'].shape[:2]
sample = RandomCrop(
crop_size=(img_height, img_width),
aspect_ratio=[self.aspect_ratio, 1. / self.aspect_ratio],
scaling=[self.min_scale, 1.],
num_attempts=10,
allow_no_crop=False)(sample)
return sample
class RandomExpand(Transform):
"""
Randomly expand the input by padding according to random offsets.
Args:
upper_ratio(float, optional): The maximum ratio to which the original image is expanded. Defaults to 4..
        prob(float, optional): The probability of applying expansion. Defaults to .5.
        im_padding_value(float or Sequence[float], optional): Filling value(s) for the image; a scalar is broadcast to all bands. Defaults to 127.5.
label_padding_value(int, optional): Filling value for the mask. Defaults to 255.
See Also:
paddlers.transforms.Padding
"""
def __init__(self,
upper_ratio=4.,
prob=.5,
im_padding_value=127.5,
label_padding_value=255):
super(RandomExpand, self).__init__()
assert upper_ratio > 1.01, "expand ratio must be larger than 1.01"
self.upper_ratio = upper_ratio
self.prob = prob
assert isinstance(im_padding_value, (Number, Sequence)), \
"fill value must be either float or sequence"
self.im_padding_value = im_padding_value
self.label_padding_value = label_padding_value
def apply(self, sample):
if random.random() < self.prob:
im_h, im_w = sample['image'].shape[:2]
ratio = np.random.uniform(1., self.upper_ratio)
h = int(im_h * ratio)
w = int(im_w * ratio)
if h > im_h and w > im_w:
y = np.random.randint(0, h - im_h)
x = np.random.randint(0, w - im_w)
target_size = (h, w)
offsets = (x, y)
sample = Padding(
target_size=target_size,
pad_mode=-1,
offsets=offsets,
im_padding_value=self.im_padding_value,
label_padding_value=self.label_padding_value)(sample)
return sample
class Padding(Transform):
def __init__(self,
target_size=None,
pad_mode=0,
offsets=None,
im_padding_value=127.5,
label_padding_value=255,
size_divisor=32):
"""
Pad image to a specified size or multiple of size_divisor.
Args:
target_size(int, Sequence, optional): Image target size, if None, pad to multiple of size_divisor. Defaults to None.
            pad_mode({-1, 0, 1, 2}, optional): Pad mode, currently only supports four modes [-1, 0, 1, 2]. If -1, pad according to the specified offsets.
                If 0, only pad to right and bottom. If 1, pad according to center. If 2, only pad left and top. Defaults to 0.
            im_padding_value(float or Sequence[float], optional): Filling value(s) for the image; a scalar is broadcast to all bands. Defaults to 127.5.
            label_padding_value(int, optional): Filling value for the mask. Defaults to 255.
            size_divisor(int): Image width and height after padding are multiples of size_divisor. Defaults to 32.
"""
super(Padding, self).__init__()
if isinstance(target_size, (list, tuple)):
if len(target_size) != 2:
raise ValueError(
'`target_size` should include 2 elements, but it is {}'.
format(target_size))
if isinstance(target_size, int):
target_size = [target_size] * 2
assert pad_mode in [
-1, 0, 1, 2
], 'currently only supports four modes [-1, 0, 1, 2]'
if pad_mode == -1:
assert offsets, 'if pad_mode is -1, offsets should not be None'
self.target_size = target_size
self.size_divisor = size_divisor
self.pad_mode = pad_mode
self.offsets = offsets
self.im_padding_value = im_padding_value
self.label_padding_value = label_padding_value
def apply_im(self, image, offsets, target_size):
x, y = offsets
h, w = target_size
im_h, im_w, channel = image.shape[:3]
canvas = np.ones((h, w, channel), dtype=np.float32)
canvas *= np.array(self.im_padding_value, dtype=np.float32)
canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
return canvas
def apply_mask(self, mask, offsets, target_size):
x, y = offsets
im_h, im_w = mask.shape[:2]
h, w = target_size
canvas = np.ones((h, w), dtype=np.float32)
canvas *= np.array(self.label_padding_value, dtype=np.float32)
canvas[y:y + im_h, x:x + im_w] = mask.astype(np.float32)
return canvas
def apply_bbox(self, bbox, offsets):
return bbox + np.array(offsets * 2, dtype=np.float32)
def apply_segm(self, segms, offsets, im_size, size):
x, y = offsets
height, width = im_size
h, w = size
expanded_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
expanded_segms.append(
[expand_poly(poly, x, y) for poly in segm])
else:
# RLE format
expanded_segms.append(
expand_rle(segm, x, y, height, width, h, w))
return expanded_segms
def apply(self, sample):
im_h, im_w = sample['image'].shape[:2]
if self.target_size:
h, w = self.target_size
assert (
im_h <= h and im_w <= w
), 'target size ({}, {}) cannot be less than image size ({}, {})'\
.format(h, w, im_h, im_w)
else:
h = (np.ceil(im_h / self.size_divisor) *
self.size_divisor).astype(int)
w = (np.ceil(im_w / self.size_divisor) *
self.size_divisor).astype(int)
if h == im_h and w == im_w:
return sample
if self.pad_mode == -1:
offsets = self.offsets
elif self.pad_mode == 0:
offsets = [0, 0]
elif self.pad_mode == 1:
offsets = [(w - im_w) // 2, (h - im_h) // 2]
else:
offsets = [w - im_w, h - im_h]
sample['image'] = self.apply_im(sample['image'], offsets, (h, w))
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], offsets, (h, w))
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], offsets, (h, w))
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(partial(
self.apply_mask, offsets=offsets, target_size=(h, w)),
sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], offsets)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(
sample['gt_poly'], offsets, im_size=[im_h, im_w], size=[h, w])
return sample
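# Illustrative usage sketch (added; not part of the original API): assuming
# `sample` follows this module's convention of an H x W x C ndarray stored
# under the 'image' key, padding to the next multiple of 32 looks like:
#
#     sample = {'image': np.zeros((500, 375, 3), dtype=np.uint8)}
#     padded = Padding(pad_mode=0, size_divisor=32)(sample)
#     # padded['image'].shape == (512, 384, 3); with pad_mode=0 the original
#     # pixels stay at the top-left and the new area is filled with
#     # im_padding_value (127.5 by default).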
class MixupImage(Transform):
def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
"""
        Mixup two images and their gt_bbox/gt_score.
Args:
alpha (float, optional): Alpha parameter of beta distribution. Defaults to 1.5.
beta (float, optional): Beta parameter of beta distribution. Defaults to 1.5.
"""
super(MixupImage, self).__init__()
if alpha <= 0.0:
raise ValueError("alpha should be positive in {}".format(self))
if beta <= 0.0:
raise ValueError("beta should be positive in {}".format(self))
self.alpha = alpha
self.beta = beta
self.mixup_epoch = mixup_epoch
def apply_im(self, image1, image2, factor):
h = max(image1.shape[0], image2.shape[0])
w = max(image1.shape[1], image2.shape[1])
img = np.zeros((h, w, image1.shape[2]), 'float32')
img[:image1.shape[0], :image1.shape[1], :] = \
image1.astype('float32') * factor
img[:image2.shape[0], :image2.shape[1], :] += \
image2.astype('float32') * (1.0 - factor)
return img.astype('uint8')
def __call__(self, sample):
if not isinstance(sample, Sequence):
return sample
assert len(sample) == 2, 'mixup need two samples'
factor = np.random.beta(self.alpha, self.beta)
factor = max(0.0, min(1.0, factor))
if factor >= 1.0:
return sample[0]
if factor <= 0.0:
return sample[1]
image = self.apply_im(sample[0]['image'], sample[1]['image'], factor)
result = copy.deepcopy(sample[0])
result['image'] = image
# apply bbox and score
if 'gt_bbox' in sample[0]:
gt_bbox1 = sample[0]['gt_bbox']
gt_bbox2 = sample[1]['gt_bbox']
gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
result['gt_bbox'] = gt_bbox
if 'gt_poly' in sample[0]:
gt_poly1 = sample[0]['gt_poly']
gt_poly2 = sample[1]['gt_poly']
gt_poly = gt_poly1 + gt_poly2
result['gt_poly'] = gt_poly
if 'gt_class' in sample[0]:
gt_class1 = sample[0]['gt_class']
gt_class2 = sample[1]['gt_class']
gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
result['gt_class'] = gt_class
gt_score1 = np.ones_like(sample[0]['gt_class'])
gt_score2 = np.ones_like(sample[1]['gt_class'])
gt_score = np.concatenate(
(gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
result['gt_score'] = gt_score
if 'is_crowd' in sample[0]:
is_crowd1 = sample[0]['is_crowd']
is_crowd2 = sample[1]['is_crowd']
is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
result['is_crowd'] = is_crowd
if 'difficult' in sample[0]:
is_difficult1 = sample[0]['difficult']
is_difficult2 = sample[1]['difficult']
is_difficult = np.concatenate(
(is_difficult1, is_difficult2), axis=0)
result['difficult'] = is_difficult
return result
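    # Note (added): `factor` is drawn from Beta(alpha, beta); with the default
    # alpha = beta = 1.5 it concentrates around 0.5, so both images usually
    # contribute, and the gt boxes/classes of both samples are concatenated.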
class RandomDistort(Transform):
"""
Random color distortion.
Args:
brightness_range(float, optional): Range of brightness distortion. Defaults to .5.
brightness_prob(float, optional): Probability of brightness distortion. Defaults to .5.
contrast_range(float, optional): Range of contrast distortion. Defaults to .5.
contrast_prob(float, optional): Probability of contrast distortion. Defaults to .5.
saturation_range(float, optional): Range of saturation distortion. Defaults to .5.
saturation_prob(float, optional): Probability of saturation distortion. Defaults to .5.
hue_range(float, optional): Range of hue distortion. Defaults to .5.
hue_prob(float, optional): Probability of hue distortion. Defaults to .5.
random_apply (bool, optional): whether to apply in random (yolo) or fixed (SSD)
order. Defaults to True.
count (int, optional): the number of doing distortion. Defaults to 4.
shuffle_channel (bool, optional): whether to swap channels randomly. Defaults to False.
"""
def __init__(self,
brightness_range=0.5,
brightness_prob=0.5,
contrast_range=0.5,
contrast_prob=0.5,
saturation_range=0.5,
saturation_prob=0.5,
hue_range=18,
hue_prob=0.5,
random_apply=True,
count=4,
shuffle_channel=False):
super(RandomDistort, self).__init__()
self.brightness_range = [1 - brightness_range, 1 + brightness_range]
self.brightness_prob = brightness_prob
self.contrast_range = [1 - contrast_range, 1 + contrast_range]
self.contrast_prob = contrast_prob
self.saturation_range = [1 - saturation_range, 1 + saturation_range]
self.saturation_prob = saturation_prob
self.hue_range = [1 - hue_range, 1 + hue_range]
self.hue_prob = hue_prob
self.random_apply = random_apply
self.count = count
self.shuffle_channel = shuffle_channel
def apply_hue(self, image):
low, high = self.hue_range
if np.random.uniform(0., 1.) < self.hue_prob:
return image
        # it works, but results differ from the HSV version
delta = np.random.uniform(low, high)
u = np.cos(delta * np.pi)
w = np.sin(delta * np.pi)
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]])
ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]])
t = np.dot(np.dot(ityiq, bt), tyiq).T
res_list = []
channel = image.shape[2]
for i in range(channel // 3):
            sub_img = image[:, :, 3 * i:3 * (i + 1)]
            sub_img = sub_img.astype(np.float32)
            # Rotate each 3-band group (not the whole image) in YIQ space.
            sub_img = np.dot(sub_img, t)
            res_list.append(sub_img)
if channel % 3 != 0:
i = channel % 3
res_list.append(image[:, :, -i:])
return np.concatenate(res_list, axis=2)
def apply_saturation(self, image):
low, high = self.saturation_range
delta = np.random.uniform(low, high)
if np.random.uniform(0., 1.) < self.saturation_prob:
return image
res_list = []
channel = image.shape[2]
for i in range(channel // 3):
sub_img = image[:, :, 3 * i:3 * (i + 1)]
sub_img = sub_img.astype(np.float32)
            # it works, but results differ from the HSV version
gray = sub_img * np.array(
[[[0.299, 0.587, 0.114]]], dtype=np.float32)
gray = gray.sum(axis=2, keepdims=True)
gray *= (1.0 - delta)
sub_img *= delta
sub_img += gray
res_list.append(sub_img)
if channel % 3 != 0:
i = channel % 3
res_list.append(image[:, :, -i:])
return np.concatenate(res_list, axis=2)
def apply_contrast(self, image):
low, high = self.contrast_range
if np.random.uniform(0., 1.) < self.contrast_prob:
return image
delta = np.random.uniform(low, high)
image = image.astype(np.float32)
image *= delta
return image
def apply_brightness(self, image):
low, high = self.brightness_range
if np.random.uniform(0., 1.) < self.brightness_prob:
return image
delta = np.random.uniform(low, high)
image = image.astype(np.float32)
image += delta
return image
def apply(self, sample):
if self.random_apply:
functions = [
self.apply_brightness, self.apply_contrast,
self.apply_saturation, self.apply_hue
]
distortions = np.random.permutation(functions)[:self.count]
for func in distortions:
sample['image'] = func(sample['image'])
if 'image2' in sample:
sample['image2'] = func(sample['image2'])
return sample
sample['image'] = self.apply_brightness(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_brightness(sample['image2'])
mode = np.random.randint(0, 2)
if mode:
sample['image'] = self.apply_contrast(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_contrast(sample['image2'])
sample['image'] = self.apply_saturation(sample['image'])
sample['image'] = self.apply_hue(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_saturation(sample['image2'])
sample['image2'] = self.apply_hue(sample['image2'])
if not mode:
sample['image'] = self.apply_contrast(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_contrast(sample['image2'])
if self.shuffle_channel:
if np.random.randint(0, 2):
sample['image'] = sample['image'][..., np.random.permutation(3)]
if 'image2' in sample:
sample['image2'] = sample['image2'][
..., np.random.permutation(3)]
return sample
class RandomBlur(Transform):
"""
Randomly blur input image(s).
Args:
prob (float): Probability of blurring.
"""
def __init__(self, prob=0.1):
super(RandomBlur, self).__init__()
self.prob = prob
def apply_im(self, image, radius):
image = cv2.GaussianBlur(image, (radius, radius), 0, 0)
return image
def apply(self, sample):
if self.prob <= 0:
n = 0
elif self.prob >= 1:
n = 1
else:
n = int(1.0 / self.prob)
if n > 0:
if np.random.randint(0, n) == 0:
radius = np.random.randint(3, 10)
if radius % 2 != 1:
radius = radius + 1
if radius > 9:
radius = 9
sample['image'] = self.apply_im(sample['image'], radius)
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], radius)
return sample
class Defogging(Transform):
"""
Defog input image(s).
Args:
gamma (bool, optional): Use gamma correction or not. Defaults to False.
"""
def __init__(self, gamma=False):
super(Defogging, self).__init__()
self.gamma = gamma
def apply_im(self, image):
image = de_haze(image, self.gamma)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
class DimReducing(Transform):
"""
Use PCA to reduce input image(s) dimension.
Args:
dim (int, optional): Reserved dimensions. Defaults to 3.
whiten (bool, optional): PCA whiten or not. Defaults to True.
"""
def __init__(self, dim=3, whiten=True):
super(DimReducing, self).__init__()
self.dim = dim
self.whiten = whiten
def apply_im(self, image):
image = pca(image, self.dim, self.whiten)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
class BandSelecting(Transform):
"""
Select the band of the input image(s).
Args:
        band_list (list, optional): Bands to select, using 1-based indices. Defaults to [1, 2, 3].
"""
def __init__(self, band_list=[1, 2, 3]):
super(BandSelecting, self).__init__()
self.band_list = band_list
def apply_im(self, image):
image = select_bands(image, self.band_list)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
class _PadBox(Transform):
def __init__(self, num_max_boxes=50):
"""
Pad zeros to bboxes if number of bboxes is less than num_max_boxes.
Args:
num_max_boxes (int, optional): the max number of bboxes. Defaults to 50.
"""
self.num_max_boxes = num_max_boxes
super(_PadBox, self).__init__()
def apply(self, sample):
gt_num = min(self.num_max_boxes, len(sample['gt_bbox']))
num_max = self.num_max_boxes
pad_bbox = np.zeros((num_max, 4), dtype=np.float32)
if gt_num > 0:
pad_bbox[:gt_num, :] = sample['gt_bbox'][:gt_num, :]
sample['gt_bbox'] = pad_bbox
if 'gt_class' in sample:
pad_class = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]
sample['gt_class'] = pad_class
if 'gt_score' in sample:
pad_score = np.zeros((num_max, ), dtype=np.float32)
if gt_num > 0:
pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]
sample['gt_score'] = pad_score
        # In training, ops such as ExpandImage expand the bbox and gt_class
        # fields but not the difficult field, so we judge by its length.
if 'difficult' in sample:
pad_diff = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]
sample['difficult'] = pad_diff
if 'is_crowd' in sample:
pad_crowd = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]
sample['is_crowd'] = pad_crowd
return sample
class _NormalizeBox(Transform):
def __init__(self):
super(_NormalizeBox, self).__init__()
def apply(self, sample):
height, width = sample['image'].shape[:2]
for i in range(sample['gt_bbox'].shape[0]):
sample['gt_bbox'][i][0] = sample['gt_bbox'][i][0] / width
sample['gt_bbox'][i][1] = sample['gt_bbox'][i][1] / height
sample['gt_bbox'][i][2] = sample['gt_bbox'][i][2] / width
sample['gt_bbox'][i][3] = sample['gt_bbox'][i][3] / height
return sample
class _BboxXYXY2XYWH(Transform):
"""
Convert bbox XYXY format to XYWH format.
"""
def __init__(self):
super(_BboxXYXY2XYWH, self).__init__()
def apply(self, sample):
bbox = sample['gt_bbox']
bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]
bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.
sample['gt_bbox'] = bbox
return sample
class _Permute(Transform):
def __init__(self):
super(_Permute, self).__init__()
def apply(self, sample):
sample['image'] = permute(sample['image'], False)
if 'image2' in sample:
sample['image2'] = permute(sample['image2'], False)
return sample
class RandomSwap(Transform):
"""
Randomly swap multi-temporal images.
Args:
prob (float, optional): Probability of swapping the input images. Default: 0.2.
"""
def __init__(self, prob=0.2):
super(RandomSwap, self).__init__()
self.prob = prob
def apply(self, sample):
if 'image2' not in sample:
raise ValueError('image2 is not found in the sample.')
if random.random() < self.prob:
sample['image'], sample['image2'] = sample['image2'], sample[
'image']
return sample
class ArrangeSegmenter(Transform):
def __init__(self, mode):
super(ArrangeSegmenter, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
if 'mask' in sample:
mask = sample['mask']
image = permute(sample['image'], False)
if self.mode == 'train':
mask = mask.astype('int64')
return image, mask
if self.mode == 'eval':
mask = np.asarray(Image.open(mask))
mask = mask[np.newaxis, :, :].astype('int64')
return image, mask
if self.mode == 'test':
return image,
class ArrangeChangeDetector(Transform):
def __init__(self, mode):
super(ArrangeChangeDetector, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
if 'mask' in sample:
mask = sample['mask']
image_t1 = permute(sample['image'], False)
image_t2 = permute(sample['image2'], False)
if self.mode == 'train':
mask = mask.astype('int64')
masks = [mask]
if 'aux_masks' in sample:
masks.extend(
map(methodcaller('astype', 'int64'), sample['aux_masks']))
return (
image_t1,
image_t2, ) + tuple(masks)
if self.mode == 'eval':
mask = np.asarray(Image.open(mask))
mask = mask[np.newaxis, :, :].astype('int64')
return image_t1, image_t2, mask
if self.mode == 'test':
return image_t1, image_t2,
class ArrangeClassifier(Transform):
def __init__(self, mode):
super(ArrangeClassifier, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
image = permute(sample['image'], False)
if self.mode in ['train', 'eval']:
return image, sample['label']
else:
return image
class ArrangeDetector(Transform):
def __init__(self, mode):
super(ArrangeDetector, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
if self.mode == 'eval' and 'gt_poly' in sample:
del sample['gt_poly']
return sample
|
py | 1a34ba92fdada091b4d5e377b25061d9d016bf6a | from datetime import datetime
from enum import Enum
from functools import wraps
from typing import Union
import json
import pytz
class DatetimeFormats(Enum):
FULLDATETIME = '%Y-%m-%dT%H:%M:%S.%f%z'
DATE = '%Y-%m-%d'
YMDHMSmS = '%Y-%m-%dT%H:%M:%S.%f%z'
YMDHMS = '%Y-%m-%dT%H:%M:%S%z'
YMD = '%Y-%m-%d'
def str_to_datetime(date: str, format_str: str) -> Union[datetime, None]:
"""Change date format to datetime, based on `format_str` provided.
If `date` is already a datetime object, return it. Make the date aware."""
if date is None:
return date
if not isinstance(date, datetime):
if format_str.find('%z') != -1:
# Localization needed. Check if provided.
if date[-1] == 'Z':
date = date[:-1]
format_str = format_str[:-2]
elif date.find('+') == -1:
format_str = format_str[:-2]
date = datetime.strptime(date, format_str)
try: # Localize to utc if not yet localized
return pytz.utc.localize(date)
except ValueError: # Already localized
return date
def _return_with_miliseconds(func):
"""If the date has milliseconds, return only 3 decimal places. Older servers
    need this to work; newer ones can parse both 3 and 6 decimal places."""
@wraps(func)
def inner(*args, **kwargs):
res = func(*args, **kwargs)
if isinstance(res, str) and res.find('.') != -1:
plus_pos = res.find('+')
res = f"{res[:res.find('.') + 4]}{res[plus_pos:] if plus_pos != -1 else ''}"
return res
return inner
@_return_with_miliseconds
def datetime_to_str(date: datetime, format_str: str) -> Union[str, None]:
"""Get date string from datetime, based on `format_str` provided.
If `date` is already a string, return it. Make the date aware."""
if isinstance(date, str):
return date
try:
# We need the date to be aware, or the string won't be accepted by API
try:
return pytz.utc.localize(date).strftime(format_str)
except ValueError: # Already localized
return date.strftime(format_str)
except (TypeError, AttributeError):
return None
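# Illustrative round trip (added sketch):
#     dt = str_to_datetime('2021-03-04T05:06:07.123456+0000',
#                          DatetimeFormats.YMDHMSmS.value)
#     datetime_to_str(dt, DatetimeFormats.YMDHMSmS.value)
#     # -> '2021-03-04T05:06:07.123+0000'
# The _return_with_miliseconds wrapper trims microseconds to 3 decimal places
# on the way out, which older servers require.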
def _get_only_datetimeformat_map(string_to_date_map: dict) -> dict:
"""Return all entries that are of `DATETIMEFORMAT` enum type."""
return {
key: value
for (key, value) in string_to_date_map.items()
if isinstance(value, DatetimeFormats)
}
def _solve_prefix_and_convert_date(func, name: str, date: str, string_to_date_map: dict,
only_datetimefomat: bool = True):
if only_datetimefomat:
string_to_date_map = _get_only_datetimeformat_map(string_to_date_map)
if f'_{name}' in string_to_date_map:
date_format = string_to_date_map[f'_{name}'].value if isinstance(
string_to_date_map[f'_{name}'], DatetimeFormats) else string_to_date_map[f'_{name}']
return func(date, date_format)
elif name in string_to_date_map:
date_format = string_to_date_map[name].value if isinstance(
string_to_date_map[name], DatetimeFormats) else string_to_date_map[name]
return func(date, date_format)
return date
def map_str_to_datetime(name: str, date: str, string_to_date_map: dict,
only_datetimefomat: bool = True) -> datetime:
"""Change date format to datetime, based on `string_to_date_map`
conversion dict. All occurrences of `DATETIMEFORMAT` Enum in
`string_to_date_map` are converted to corresponding string values.
If name is not found in `string_to_date_map`, returns date without changes.
"""
return _solve_prefix_and_convert_date(str_to_datetime, name, date, string_to_date_map,
only_datetimefomat)
def map_datetime_to_str(name: str, date: datetime, string_to_date_map: dict,
only_datetimefomat: bool = True) -> str:
"""Change date format to string, based on `string_to_date_map`
conversion dict. All occurrences of `DATETIMEFORMAT` Enum in
`string_to_date_map` are converted to corresponding string values.
If name is not found in `string_to_date_map`, returns date without changes.
"""
return _solve_prefix_and_convert_date(datetime_to_str, name, date, string_to_date_map,
only_datetimefomat)
def bulk_str_to_datetime(source: dict, string_to_date_map: dict,
only_datetimefomat: bool = True) -> dict:
"""Change all dates from `source` found in `string_to_date_map`
to datetime format. If parameter is not found in `string_to_date_map`,
it is returned without changes."""
for key, val in source.items():
source[key] = map_str_to_datetime(key, val, string_to_date_map, only_datetimefomat)
return source
def bulk_datetime_to_str(source: dict, string_to_date_map: dict,
only_datetimefomat: bool = True) -> dict:
"""Change all dates from `source` found in `string_to_date_map`
to string format. If parameter is not found in `string_to_date_map`,
it is returned without changes."""
for key, val in source.items():
source[key] = map_datetime_to_str(key, val, string_to_date_map, only_datetimefomat)
return source
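# Example sketch (added): a typical conversion map keys attribute names to
# DatetimeFormats members; plain-string format entries are skipped unless
# only_datetimefomat is set to False.
#     conversion_map = {'date_created': DatetimeFormats.YMDHMS,
#                       'date_modified': DatetimeFormats.YMDHMS}
#     parsed = bulk_str_to_datetime(
#         {'date_created': '2021-03-04T05:06:07+0000', 'name': 'report'},
#         conversion_map)
#     # parsed['date_created'] is an aware datetime; 'name' is left untouched.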
def override_datetime_format(original_format: str, expected_format: str, fields: tuple,
to_unpack=None):
"""A decorator designed to override the datetime format
of some dates in responses from REST server as they can be
a bit crazy sometimes (e.g. two different formats for one object)
Args:
original_format: original format of a datetime
expected_format: the format you want to convert to
fields: fields of the object - e.g. dateModified, dateCreated
to_unpack: when response returns a list of objects
probably they need to be unpacked
"""
def decorator_datetime(func):
@wraps(func)
def wrapped(*args, **kwargs):
response = func(*args, **kwargs)
response_json = response.json()
try:
iterable = response_json[to_unpack] if to_unpack else [response_json]
except KeyError:
iterable = []
for obj in iterable:
for field in fields:
datetime_obj = str_to_datetime(obj[field], original_format)
obj[field] = datetime_to_str(datetime_obj, expected_format)
response.encoding, response._content = 'utf-8', json.dumps(response_json).encode(
'utf-8')
return response
return wrapped
return decorator_datetime
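# Illustrative usage (added sketch; the endpoint and field names below are
# hypothetical, not taken from the surrounding codebase):
#     @override_datetime_format(DatetimeFormats.YMDHMSmS.value,
#                               DatetimeFormats.YMDHMS.value,
#                               fields=('dateCreated', 'dateModified'),
#                               to_unpack='objects')
#     def list_objects(connection):
#         return connection.get('/api/objects')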
|
py | 1a34bb604daf72b8646720efefb394f755aa2ebb | """Support for Aurora Forecast sensor."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import PERCENTAGE
from . import AuroraEntity
from .const import COORDINATOR, DOMAIN
async def async_setup_entry(hass, entry, async_add_entries):
"""Set up the sensor platform."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
entity = AuroraSensor(
coordinator=coordinator,
name=f"{coordinator.name} Aurora Visibility %",
icon="mdi:gauge",
)
async_add_entries([entity])
class AuroraSensor(SensorEntity, AuroraEntity):
"""Implementation of an aurora sensor."""
@property
def state(self):
"""Return % chance the aurora is visible."""
return self.coordinator.data
@property
def unit_of_measurement(self):
"""Return the unit of measure."""
return PERCENTAGE
|
py | 1a34bd4bae1e0a6c1c8e1eb0f5e2b5eb6f69d0c4 | import csv
import os
from statistics import mean, median, quantiles
def process(fqp, resultsfile):
# gather the max per line of file of round 1
prev_fqp = fqp.replace("Round2", "Round1")
r1max = []
with open(prev_fqp, "r") as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
titles = next(datareader)
total_pos = [_ for _, y in enumerate(titles) if y == "Total"]
for row in datareader:
r1max.append(max([float(row[_]) for _ in total_pos]))
print(r1max)
# parse file of round 2
threads = -1
category = -1
senders = -1
totals = []
with open(fqp, "r") as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
titles = next(datareader)
total_pos = [_ for _, y in enumerate(titles) if y == "Total"]
node_pos = [_ for _, y in enumerate(titles) if y.startswith("Node")]
for row in datareader:
if threads == -1:
threads = int(row[1])
category = row[0][0]
senders = [row[_] for _ in node_pos].count("sending")
prev_max = r1max.pop(0)
totals.extend([float(row[_])+prev_max for _ in total_pos])
nodes = len(node_pos)
## calculate statistics
mind = min(totals)
q1 = quantiles(totals)[0]
medi = median(totals)
avrg = mean(totals)
q3 = quantiles(totals)[2]
maxd = max(totals)
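    # Note (added): statistics.quantiles defaults to n=4, returning the three
    # quartile cut points [Q1, median, Q3]; indices 0 and 2 above are
    # therefore the first and third quartiles of the combined totals.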
## write results
if not DEBUG:
with open(resultsfile, "a") as f:
f.write(f"{category},{nodes},{threads},{senders},{mind},{q1},{medi},{avrg},{q3},{maxd}\n")
with open(resultsfile.replace(".csv", "all_totals.csv"), "a") as f:
f.write(f"{category},{nodes},{threads},{senders},"+",".join(map(str, totals))+"\n")
print(f"{category},{nodes},{threads},{senders},{mind},{q1},{medi},{avrg},{q3},{maxd}")
# values:
# experiment = "threads"
# experiment = "nodes"
# experiment = "messages"
experiment = "messages"
## file where to write the aggregation results
## all totals will be written to experiment+"all_totals.csv" based on this filename
resultfile = experiment+".csv"
## basefolder where the experiment results can be found
basefolder = "C:\\epc2a\\"+experiment
## output to console instead of writing to file
DEBUG = False
if not DEBUG:
with open(resultfile, "w") as f:
f.write("category,nodes,threads,senders,mind,q1,medi,avrg,q3,maxd\n")
with open(resultfile.replace(".csv", "all_totals.csv"), "w") as f:
f.write("category,nodes,threads,senders,totals...\n")
for r, ds, fs in os.walk(basefolder):
for fn in [_ for _ in fs if _.endswith("Round2.csv")]:
fqp = r+"\\"+fn
process(fqp, resultfile)
|
py | 1a34befc69beb77163d3fd242e44d131be141730 | # Main differences in this ablation:
# - there is no optimism
# - the novelty Q is trained only between episodes
# - the novelty Q is trained on _logged_ novelty rewards, not live ones
import time
import os
import math
import pickle
import queue
from typing import Any
import numpy as np
import matplotlib.pyplot as plt
import jax
from jax import numpy as jnp, random, lax
import flax
from flax import nn, optim, struct
from dm_control import suite
import dmcontrol_gridworld
import replay_buffer
import q_learning
import tabular_density as density
import utils
from observation_domains import DOMAINS
import jax_specs
import point
R_MAX = 100
@struct.dataclass
class ExplorationState():
"""The pure-JAX components that can be jitted/vmapped.
"""
novq_state: q_learning.QLearnerState
target_novq_state: q_learning.QLearnerState
density_state: density.DensityState
temperature: float
update_temperature: float
prior_count: float
optimistic_updates: bool
target_network: bool
# density_fns: Any
@struct.dataclass
class AgentState():
"""A container for the entire state; not jittable.
"""
exploration_state: ExplorationState
policy_state: Any = struct.field(pytree_node=False)
replay: Any = struct.field(pytree_node=False)
policy_replay: Any = struct.field(pytree_node=False)
n_candidates: int
n_update_candidates: int
prioritized_update: bool
update_target_every: int
warmup_steps: int
optimistic_actions: bool
steps_since_tupdate: int = 0
# policy_fns: Any = struct.field(pytree_node=False)
@jax.jit
def compute_novelty_reward(exploration_state, states, actions):
"""Returns a novelty reward in [0, 1] for each (s, a) pair."""
counts = density.get_count_batch(
exploration_state.density_state, states, actions)
ones = jnp.ones(jnp.array(counts).shape)
rewards = (counts + 1e-8) ** (-0.5)
options = jnp.stack([ones, rewards], axis=1)
# Clip rewards to be at most 1 (when count is 0)
return jnp.min(options, axis=1)
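# Added note: this is the usual count-based bonus r(s, a) ~= 1 / sqrt(N(s, a)),
# clipped at 1 so that unvisited pairs (count 0) receive the maximum reward.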
@jax.profiler.trace_function
@jax.partial(jax.jit, static_argnums=(3, 4))
def train_step_candidates(exploration_state: ExplorationState,
transitions,
candidate_next_actions,
use_target_network,
use_optimistic_updates):
"""The jittable component of the exploration Q function training step."""
# these transitions come from agent_state.replay, not policy_replay,
# so they contain *novelty* rewards
states, actions, next_states, novelty_reward = transitions
discount = exploration_state.novq_state.discount
temp = exploration_state.update_temperature
if use_optimistic_updates:
next_values = predict_optimistic_values_batch(
exploration_state.novq_state,
exploration_state.density_state,
exploration_state.prior_count,
next_states, candidate_next_actions)
next_values_target = predict_optimistic_values_batch(
exploration_state.target_novq_state,
exploration_state.density_state,
exploration_state.prior_count,
next_states, candidate_next_actions)
else:
next_values = q_learning.predict_action_values_batch(
exploration_state.novq_state,
next_states,
candidate_next_actions)
next_values_target = q_learning.predict_action_values_batch(
exploration_state.target_novq_state,
next_states,
candidate_next_actions)
# double DQN rule:
# - select next action according to current Q
# - evaluate it according to target Q
next_value_probs = nn.softmax(next_values / temp, axis=1)
next_value_elements = (next_value_probs * next_values_target)
expected_next_values = next_value_elements.sum(axis=1)
expected_next_values = expected_next_values.reshape(novelty_reward.shape)
# compute targets and update
q_targets = novelty_reward + discount * expected_next_values
# clip targets to be within the feasible set
q_targets = jnp.minimum(q_targets, R_MAX)
novq_state, losses = q_functions.train_step(
exploration_state.novq_state,
states, actions, q_targets)
return exploration_state.replace(novq_state=novq_state), losses
@jax.profiler.trace_function
def train_step(agent_state, transitions):
"""A full (optimistic) training step for the exploration Q function."""
states, actions, next_states, rewards = transitions
# candidate actions should be (bsize x n_update_candidates x *action_shape)
with jax.profiler.TraceContext("get candidates"):
policy_state, candidate_next_actions = policy.action_fn(
agent_state.policy_state, next_states,
int(agent_state.n_update_candidates), True)
agent_state = agent_state.replace(policy_state=policy_state)
# somehow if I don't cast these to bool JAX will recompile the jitted
# function train_step_candidates on every call...
with jax.profiler.TraceContext("train_step_candidates"):
exploration_state, losses = train_step_candidates(
agent_state.exploration_state,
transitions,
candidate_next_actions,
bool(agent_state.exploration_state.target_network),
bool(agent_state.exploration_state.optimistic_updates))
agent_state = agent_state.replace(exploration_state=exploration_state)
return agent_state, losses
def update_target_q(agent_state: AgentState):
exploration_state = agent_state.exploration_state.replace(
target_novq_state=agent_state.exploration_state.novq_state)
agent_state = agent_state.replace(exploration_state=exploration_state,
steps_since_tupdate=0)
return agent_state
def uniform_update(agent_state, rng, n=10):
for _ in range(n):
transitions = tuple((jnp.array(el)
for el in agent_state.replay.sample(128)))
agent_state, losses = train_step(agent_state, transitions)
agent_state = agent_state.replace(
steps_since_tupdate=agent_state.steps_since_tupdate + 1)
if agent_state.steps_since_tupdate >= agent_state.update_target_every:
agent_state = update_target_q(agent_state)
return agent_state
@jax.profiler.trace_function
def update_exploration(agent_state, rng, transition_id):
s, a, sp, r = agent_state.replay.get_transitions(transition_id)
# update density on new observations
with jax.profiler.TraceContext("update density"):
exploration_state = agent_state.exploration_state
density_state = density.update_batch(exploration_state.density_state,
jnp.expand_dims(s, axis=0),
jnp.expand_dims(a, axis=0))
exploration_state = exploration_state.replace(
density_state=density_state)
agent_state = agent_state.replace(exploration_state=exploration_state)
return agent_state
def compute_weight(prior_count, count):
root_real_count = count ** 0.5
# root_prior_count = prior_count ** 0.5
# return root_real_count / (root_real_count + root_prior_count)
root_total_count = (count + prior_count) ** 0.5
return root_real_count / root_total_count
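# Added worked example: with prior_count = 1e-3, an unvisited pair (count = 0)
# gets weight 0, so predict_optimistic_value below falls back to R_MAX; after
# one visit the weight is (1 / 1.001) ** 0.5 ~= 0.9995 and the estimate is
# dominated by the learned Q value.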
@jax.profiler.trace_function
@jax.jit
def predict_optimistic_value(novq_state, density_state, prior_count,
state, action):
expanded_state = jnp.expand_dims(state, axis=0)
expanded_action = jnp.expand_dims(action, axis=0)
predicted_value = q_learning.predict_value(novq_state,
expanded_state,
expanded_action)
predicted_value = predicted_value.reshape(tuple())
count = density.get_count(density_state,
state, action)
weight = compute_weight(prior_count, count)
optimistic_value = weight * predicted_value + (1 - weight) * R_MAX
return optimistic_value
predict_optimistic_value_batch = jax.vmap( # noqa: E305
predict_optimistic_value, in_axes=(None, None, None, 0, 0))
predict_optimistic_values = jax.vmap(
predict_optimistic_value, in_axes=(None, None, None, None, 0))
predict_optimistic_values_batch = jax.vmap( # noqa: E305
predict_optimistic_values, in_axes=(None, None, None, 0, 0))
@jax.profiler.trace_function
@jax.jit
def select_candidate_optimistic(exploration_state, rng,
state, candidate_actions):
optimistic_values = predict_optimistic_values(
exploration_state.novq_state,
exploration_state.density_state,
exploration_state.prior_count,
state, candidate_actions).reshape(-1)
return q_learning.sample_boltzmann(
rng, optimistic_values, candidate_actions,
exploration_state.temperature)
@jax.profiler.trace_function
def sample_exploration_action(agent_state: AgentState, rng, s, train=True):
# during test, take only one action sample from the task policy
# -> will follow the task policy
n = agent_state.n_candidates if train else 1
with jax.profiler.TraceContext("sample candidate actions"):
s_batch = jnp.expand_dims(s, axis=0)
policy_state, candidate_actions = policy.action_fn(
agent_state.policy_state, s_batch, n, train)
# policy.action_fn deals with batches and we only have one element
candidate_actions = candidate_actions[0]
agent_state = agent_state.replace(policy_state=policy_state)
with jax.profiler.TraceContext("select from candidates"):
if agent_state.optimistic_actions:
a, h = select_candidate_optimistic(agent_state.exploration_state,
rng, s, candidate_actions)
else:
a, _, h = q_learning.sample_action_boltzmann(
agent_state.exploration_state.novq_state, rng,
s, candidate_actions,
agent_state.exploration_state.temperature)
flag = 'train' if train else 'test'
logger.update(f'{flag}/explore_entropy', h)
return agent_state, a
def update_agent(agent_state: AgentState, rng, transition):
# add transition to replay
transition_id = agent_state.replay.append(*transition)
# update the density with the observed transition
agent_state = update_exploration(agent_state, rng, transition_id)
return agent_state
def run_episode(agent_state: AgentState, rng, env,
train=True, max_steps=None):
timestep = env.reset()
score, novelty_score = 0, 0
i = 0
while not timestep.last():
rng, action_rng = random.split(rng)
s = utils.flatten_observation(timestep.observation)
# put some random steps in the replay buffer
if len(agent_state.replay) < agent_state.warmup_steps:
action_spec = jax_specs.convert_dm_spec(env.action_spec())
a = utils.sample_uniform_actions(action_spec, action_rng, 1)[0]
flag = 'train' if train else 'test'
logger.update(f'{flag}/policy_entropy', 0)
logger.update(f'{flag}/explore_entropy', 0)
else:
agent_state, a = sample_exploration_action(
agent_state, action_rng, s, train)
timestep = env.step(a)
sp = utils.flatten_observation(timestep.observation)
r = timestep.reward
novelty_reward = compute_novelty_reward(agent_state.exploration_state,
jnp.expand_dims(s, axis=0),
jnp.expand_dims(a, axis=0))
score += r
novelty_score += float(novelty_reward)
if train:
novelty_transition = (s, a, sp, novelty_reward)
task_transition = (s, a, sp, r)
agent_state.policy_replay.append(*task_transition)
rng, update_rng = random.split(rng)
agent_state = update_agent(agent_state, update_rng, novelty_transition)
i += 1
if max_steps is not None and i >= max_steps:
break
return agent_state, env, score, novelty_score
# ----- Visualizations for gridworld ---------------------------------
def display_state(agent_state: AgentState, ospec, aspec,
max_steps=100, bins=20,
rendering='local', savedir=None, episode=None):
exploration_state = agent_state.exploration_state
policy_state = agent_state.policy_state
# min_count_map = dmcontrol_gridworld.render_function(
# jax.partial(density.get_count_batch, exploration_state.density_state),
# env, reduction=jnp.min)
count_map = utils.render_function(
jax.partial(density.get_count_batch, exploration_state.density_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
novq_map = utils.render_function(
jax.partial(q_learning.predict_value, exploration_state.novq_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
optimistic_novq_map = utils.render_function(
jax.partial(predict_optimistic_value_batch,
exploration_state.novq_state,
exploration_state.density_state,
exploration_state.prior_count),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
novelty_reward_map = utils.render_function(
jax.partial(compute_novelty_reward, exploration_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
traj_map = replay_buffer.render_trajectory(
agent_state.replay, max_steps, ospec, bins=bins)
subfigs = [
# (min_count_map, "Visit count (min)"),
(count_map, "Visit count (max)"),
(novq_map, "Novelty value (max)"),
(optimistic_novq_map, "Optimistic novelty value (max)"),
(novelty_reward_map, "Novelty reward (max)"),
(traj_map, "Last trajectory"),
]
q_policies = ['policies.deep_q_policy', 'policies.tabular_q_policy']
if policy.__name__ in q_policies:
taskq_map = utils.render_function(
jax.partial(q_learning.predict_value, policy_state.q_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
subfigs.append((taskq_map, "Task value (max)"))
# dump the raw data for later rendering
raw_path = f"{savedir}/data/{episode}.pkl"
os.makedirs(os.path.dirname(raw_path), exist_ok=True)
with open(raw_path, 'wb') as f:
pickle.dump(subfigs, f, protocol=4)
fig, axs = plt.subplots(1, len(subfigs))
for ax, subfig in zip(axs, subfigs):
render, title = subfig
img = ax.imshow(render)
fig.colorbar(img, ax=ax)
ax.set_title(title)
fig.set_size_inches(4 * len(subfigs), 3)
fig_path = f"{savedir}/{episode}.png"
utils.display_figure(fig, rendering, savepath=fig_path)
# -------------------------------------------------------------------
def main(args):
rng = random.PRNGKey(args.seed)
if args.env == 'gridworld':
env = dmcontrol_gridworld.GridWorld(args.env_size, args.max_steps)
observation_spec = env.observation_spec()
else:
env = suite.load(args.env, args.task)
observation_spec = DOMAINS[args.env][args.task]
action_spec = env.action_spec()
j_action_spec = jax_specs.convert_dm_spec(action_spec)
j_observation_spec = jax_specs.convert_dm_spec(observation_spec)
state_shape = utils.flatten_spec_shape(j_observation_spec)
action_shape = action_spec.shape
batch_size = 128
# drawing only one candidate action sample from the policy
# will result in following the policy directly
n_candidates = 64 if args.use_exploration else 1
novq_state = q_functions.init_fn(args.seed,
observation_spec,
action_spec,
# env_size=env.size,
discount=0.97,
max_value=R_MAX)
density_state = density.new(observation_spec, action_spec,
state_bins=args.n_state_bins,
action_bins=args.n_action_bins)
replay = replay_buffer.LowPrecisionTracingReplay(
state_shape, action_shape, min_s=0, max_s=1, n_bins=2)
policy_replay = replay_buffer.LowPrecisionTracingReplay(
state_shape, action_shape, min_s=0, max_s=1, n_bins=2)
policy_state = policy.init_fn(observation_spec, action_spec, args.seed,
lr=args.policy_lr,
update_rule=args.policy_update)
exploration_state = ExplorationState(
novq_state=novq_state,
target_novq_state=novq_state,
density_state=density_state,
temperature=args.temperature,
update_temperature=args.update_temperature,
prior_count=args.prior_count,
optimistic_updates=args.optimistic_updates,
target_network=args.target_network)
agent_state = AgentState(exploration_state=exploration_state,
policy_state=policy_state,
replay=replay,
policy_replay=policy_replay,
n_candidates=n_candidates,
n_update_candidates=args.n_update_candidates,
prioritized_update=args.prioritized_update,
update_target_every=args.update_target_every,
warmup_steps=args.warmup_steps,
optimistic_actions=args.optimistic_actions,)
for episode in range(1, 1000):
# run an episode
rng, episode_rng = random.split(rng)
agent_state, env, score, novelty_score = run_episode(
agent_state, episode_rng, env, train=True, max_steps=args.max_steps)
logger.update('train/episode', episode)
logger.update('train/score', score)
logger.update('train/novelty_score', novelty_score)
# update the task policy
# TODO: pull this loop inside the policy.update_fn
n_updates = args.max_steps // 2
policy_state = agent_state.policy_state
for _ in range(n_updates):
transitions = agent_state.policy_replay.sample(batch_size)
transitions = tuple((jnp.array(el) for el in transitions))
policy_state = policy.update_fn(
policy_state, transitions)
agent_state = agent_state.replace(policy_state=policy_state)
rng, update_rng = random.split(rng)
agent_state = uniform_update(agent_state, update_rng, n=n_updates)
# output / visualize
if episode % args.eval_every == 0:
rng, episode_rng = random.split(rng)
_, _, test_score, test_novelty_score = run_episode(
agent_state, episode_rng, env,
train=False, max_steps=args.max_steps)
logger.update('test/episode', episode)
logger.update('test/score', test_score)
logger.update('test/novelty_score', test_novelty_score)
logger.write_all()
if args.vis != 'none':
# savepath = f"{args.save_dir}/{episode}"
display_state(agent_state, observation_spec, action_spec,
max_steps=args.max_steps, bins=args.n_state_bins,
rendering=args.vis, savedir=args.save_dir,
episode=episode)
if episode % args.save_replay_every == 0:
replay_path = f"{args.save_dir}/replay.pkl"
replay_buffer.save(agent_state.replay, replay_path)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='default')
parser.add_argument('--env', default='gridworld')
parser.add_argument('--task', default='default')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--env_size', type=int, default=20)
parser.add_argument('--max_steps', type=int, default=1000)
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--vis', default='disk')
parser.add_argument('--eval_every', type=int, default=10)
parser.add_argument('--save_replay_every', type=int, default=10)
parser.add_argument('--policy', type=str, default='deep')
parser.add_argument('--policy_update', type=str, default='ddqn')
parser.add_argument('--policy_lr', type=float, default=1e-3)
parser.add_argument('--novelty_q_function', type=str, default='deep')
parser.add_argument('--temperature', type=float, default=1e-1)
parser.add_argument('--update_temperature', type=float, default=None)
parser.add_argument('--prior_count', type=float, default=1e-3)
parser.add_argument('--n_update_candidates', type=int, default=64)
parser.add_argument('--n_state_bins', type=int, default=4)
parser.add_argument('--n_action_bins', type=int, default=2)
parser.add_argument('--optimistic_updates', dest='optimistic_updates',
action='store_true', default=False)
parser.add_argument('--optimistic_actions', dest='optimistic_actions',
action='store_true', default=False)
parser.add_argument('--target_network', action='store_true', default=True)
parser.add_argument('--no_target_network', dest='target_network',
action='store_false')
parser.add_argument('--update_target_every', type=int, default=10)
parser.add_argument('--warmup_steps', type=int, default=128)
parser.add_argument('--no_exploration', dest='use_exploration',
action='store_false', default=True)
parser.add_argument('--prioritized_update', dest='prioritized_update',
action='store_true', default=False)
parser.add_argument('--no_prioritized_update', dest='prioritized_update',
action='store_false')
args = parser.parse_args()
print(args)
if args.update_temperature is None:
print("Using --temperature as --update_temperature.")
args.update_temperature = args.temperature
args.save_dir = f"results/slow/{args.name}"
os.makedirs(args.save_dir, exist_ok=True)
import experiment_logging
experiment_logging.setup_default_logger(args.save_dir)
from experiment_logging import default_logger as logger
import json
with open(args.save_dir + '/args.json', 'w') as argfile:
json.dump(args.__dict__, argfile, indent=4)
if args.novelty_q_function == 'deep':
import deep_q_functions as q_functions
elif args.novelty_q_function == 'sigmoid':
import sigmoid_q_functions as q_functions
elif args.novelty_q_function == 'tabular':
import tabular_q_functions as q_functions
else:
raise Exception("Argument --novelty_q_function was invalid.")
if args.policy == 'deep':
import policies.deep_q_policy as policy
elif args.policy == 'uniform':
import policies.uniform_policy as policy
elif args.policy == 'tabular':
import policies.tabular_q_policy as policy
else:
raise Exception("Argument --policy was invalid.")
jit = not args.debug
if jit:
main(args)
else:
with jax.disable_jit():
main(args)
|
py | 1a34bf18d33d55530a995ec04323940af92cdb9f | #
# @lc app=leetcode id=445 lang=python
#
# [445] Add Two Numbers II
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
from LeetCode.Python.BaseListNode import MakeListNodes, PrintListNode, ListNode
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
"""
        :type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
l1_list = list()
l2_list = list()
while l1 is not None or l2 is not None:
if l1 is not None:
l1_list.append(l1)
l1 = l1.next
if l2 is not None:
l2_list.append(l2)
l2 = l2.next
carry = 0
result = None
while l1_list or l2_list or carry:
p = l1_list.pop().val if l1_list else 0
q = l2_list.pop().val if l2_list else 0
sum_tmp = p + q + carry
carry = int(sum_tmp // 10)
result_tmp = ListNode(sum_tmp % 10)
result_tmp.next = result
result = result_tmp
return result
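        # Added note: the two stacks implicitly reverse the input lists, so
        # digits are combined least-significant first without modifying the
        # inputs; time and extra space are both O(max(m, n)).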
if __name__ == '__main__':
s = Solution()
l_1 = MakeListNodes([2, 4, 3])
l_2 = MakeListNodes([5, 6, 4])
PrintListNode(s.addTwoNumbers(l_1, l_2))
PrintListNode(s.addTwoNumbers(ListNode(5), ListNode(5)))
l_1 = MakeListNodes([2, 4])
l_2 = MakeListNodes([5])
PrintListNode(s.addTwoNumbers(l_1, l_2))
|
py | 1a34bf455af6fa3f1f4d3e87926734e8e1c56fd2 | """
Production Settings & Configuration
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
# flake8: noqa
from .base import *
# ============================== #
# Core Settings #
# ============================== #
ALLOWED_HOSTS = [
'www.example.com',
'subdomain.example.com',
]
CSRF_COOKIE_AGE = 31449600
CSRF_COOKIE_DOMAIN = '.example.com'
CSRF_COOKIE_NAME = 'X_CSRF_Token'
CSRF_COOKIE_SECURE = True
CSRF_USE_SESSIONS = True
CSRF_HEADER_NAME = 'HTTP_X_XSRF_TOKEN'
CSRF_TRUSTED_ORIGINS = [
'.example.com'
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'my_database_name',
'USER': 'my_database_user',
'PASSWORD': 'my_database_password',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
DEBUG = False
DEFAULT_FROM_EMAIL = '[email protected]'
DISALLOWED_USER_AGENTS = []
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'my_email_host@_provider.com'
EMAIL_HOST_PASSWORD = 'email_host_user_password'
EMAIL_HOST_USER = 'email_host_user'
EMAIL_PORT = 587
EMAIL_SUBJECT_PREFIX = '[MY_APP_NAME] '
EMAIL_USE_SSL = False
EMAIL_USE_TLS = True
EMAIL_SSL_CERTFILE = '/path/to/ssl_certfile.cer'
EMAIL_SSL_KEYFILE = '/path/to/ssl_keyfile.key'
INTERNAL_IPS = [
'10.10.10.1'
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'root': {'level': 'INFO', 'handlers': ['console']},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True,
},
},
}
MANAGERS = [
('John Doe', '[email protected]'),
('Jane Doe', '[email protected]'),
]
try:
SECRET_KEY = os.environ['SECRET_KEY']
except KeyError as exc:
raise KeyError(
'Failed to load Django Secret Key!'
    ) from exc
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
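# Note (added): SECURE_PROXY_SSL_HEADER is only safe when the proxy always
# overwrites X-Forwarded-Proto; otherwise clients could spoof an HTTPS origin.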
# ============================== #
# Sessions #
# ============================== #
SESSION_COOKIE_AGE = 1209600
SESSION_COOKIE_DOMAIN = CSRF_COOKIE_DOMAIN
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
|
py | 1a34bf6fc063776cde834b3c46f5409ba4b2f880 | import pyglet, math
from pyglet.window import key
from . import bullet, physicalobject, resources
class Player(physicalobject.PhysicalObject):
"""Physical object that responds to user input"""
def __init__(self, *args, **kwargs):
super(Player, self).__init__(img=resources.player_image, *args, **kwargs)
# Create a child sprite to show when the ship is thrusting
self.engine_sprite = pyglet.sprite.Sprite(img=resources.engine_image, *args, **kwargs)
self.engine_sprite.visible = False
# Set some easy-to-tweak constants
self.thrust = 300.0
self.rotate_speed = 200.0
self.bullet_speed = 700.0
# Player should not collide with own bullets
self.reacts_to_bullets = False
# Tell the game handler about any event handlers
self.key_handler = key.KeyStateHandler()
self.event_handlers = [self, self.key_handler]
def update(self, dt):
# Do all the normal physics stuff
super(Player, self).update(dt)
if self.key_handler[key.LEFT]:
self.rotation -= self.rotate_speed * dt
if self.key_handler[key.RIGHT]:
self.rotation += self.rotate_speed * dt
if self.key_handler[key.UP]:
# Note: pyglet's rotation attributes are in "negative degrees"
angle_radians = -math.radians(self.rotation)
force_x = math.cos(angle_radians) * self.thrust * dt
force_y = math.sin(angle_radians) * self.thrust * dt
self.velocity_x += force_x
self.velocity_y += force_y
# If thrusting, update the engine sprite
self.engine_sprite.rotation = self.rotation
self.engine_sprite.x = self.x
self.engine_sprite.y = self.y
self.engine_sprite.visible = True
else:
# Otherwise, hide it
self.engine_sprite.visible = False
def on_key_press(self, symbol, modifiers):
if symbol == key.SPACE:
self.fire()
def fire(self):
# Note: pyglet's rotation attributes are in "negative degrees"
angle_radians = -math.radians(self.rotation)
# Create a new bullet just in front of the player
ship_radius = self.image.width / 2
bullet_x = self.x + math.cos(angle_radians) * ship_radius
bullet_y = self.y + math.sin(angle_radians) * ship_radius
new_bullet = bullet.Bullet(bullet_x, bullet_y, batch=self.batch)
# Give it some speed
bullet_vx = self.velocity_x + math.cos(angle_radians) * self.bullet_speed
bullet_vy = self.velocity_y + math.sin(angle_radians) * self.bullet_speed
new_bullet.velocity_x, new_bullet.velocity_y = bullet_vx, bullet_vy
# Add it to the list of objects to be added to the game_objects list
self.new_objects.append(new_bullet)
# Play the bullet sound
resources.bullet_sound.play()
def delete(self):
# We have a child sprite which must be deleted when this object
# is deleted from batches, etc.
self.engine_sprite.delete()
super(Player, self).delete()
|
py | 1a34bf8f94b2eb8b65d31e930bba58445208dbba | # Copyright 2017: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.deployment import credential
from rally import osclients
@credential.configure("openstack")
class OpenStackCredential(credential.Credential):
"""Credential for OpenStack."""
def __init__(self, auth_url, username, password, tenant_name=None,
project_name=None,
permission=consts.EndpointPermission.USER,
region_name=None, endpoint_type=None,
domain_name=None, endpoint=None, user_domain_name=None,
project_domain_name=None,
https_insecure=False, https_cacert=None):
self.auth_url = auth_url
self.username = username
self.password = password
self.tenant_name = tenant_name or project_name
self.permission = permission
self.region_name = region_name
self.endpoint_type = endpoint_type
self.domain_name = domain_name
self.user_domain_name = user_domain_name
self.project_domain_name = project_domain_name
self.endpoint = endpoint
self.https_insecure = https_insecure
self.https_cacert = https_cacert
self._clients_cache = {}
# backward compatibility
@property
def insecure(self):
return self.https_insecure
# backward compatibility
@property
def cacert(self):
return self.https_cacert
def to_dict(self):
return {"auth_url": self.auth_url,
"username": self.username,
"password": self.password,
"tenant_name": self.tenant_name,
"region_name": self.region_name,
"endpoint_type": self.endpoint_type,
"domain_name": self.domain_name,
"endpoint": self.endpoint,
"https_insecure": self.https_insecure,
"https_cacert": self.https_cacert,
"user_domain_name": self.user_domain_name,
"project_domain_name": self.project_domain_name,
"permission": self.permission}
def verify_connection(self):
if self.permission == consts.EndpointPermission.ADMIN:
self.clients().verified_keystone()
else:
self.clients().keystone()
def list_services(self):
return self.clients().services()
def clients(self, api_info=None):
return osclients.Clients(self, api_info=api_info,
cache=self._clients_cache)
@credential.configure_builder("openstack")
class OpenStackCredentialBuilder(credential.CredentialBuilder):
"""Builds credentials provided by ExistingCloud config."""
USER_SCHEMA = {
"type": "object",
"oneOf": [
{
"description": "Keystone V2.0",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"tenant_name": {"type": "string"},
},
"required": ["username", "password", "tenant_name"],
"additionalProperties": False
},
{
"description": "Keystone V3.0",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"domain_name": {"type": "string"},
"user_domain_name": {"type": "string"},
"project_name": {"type": "string"},
"project_domain_name": {"type": "string"},
},
"required": ["username", "password", "project_name"],
"additionalProperties": False
}
],
}
CONFIG_SCHEMA = {
"type": "object",
"properties": {
"admin": USER_SCHEMA,
"users": {"type": "array", "items": USER_SCHEMA},
"auth_url": {"type": "string"},
"region_name": {"type": "string"},
# NOTE(andreykurilin): it looks like we do not use endpoint
# var at all
"endpoint": {"type": ["string", "null"]},
"endpoint_type": {
"enum": [consts.EndpointType.ADMIN,
consts.EndpointType.INTERNAL,
consts.EndpointType.PUBLIC,
None]},
"https_insecure": {"type": "boolean"},
"https_cacert": {"type": "string"},
},
"required": ["auth_url", "admin"],
"additionalProperties": False
}
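# Example config accepted by this builder (illustrative values only; a real
# deployment will have its own endpoints and credentials):
# {
#     "auth_url": "https://keystone.example.com:5000/v3",
#     "region_name": "RegionOne",
#     "admin": {"username": "admin", "password": "secret", "project_name": "admin"},
#     "users": [{"username": "demo", "password": "secret", "tenant_name": "demo"}]
# }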
def _create_credential(self, common, user, permission):
cred = OpenStackCredential(
auth_url=common["auth_url"],
username=user["username"],
password=user["password"],
tenant_name=user.get("project_name", user.get("tenant_name")),
permission=permission,
region_name=common.get("region_name"),
endpoint_type=common.get("endpoint_type"),
endpoint=common.get("endpoint"),
domain_name=user.get("domain_name"),
user_domain_name=user.get("user_domain_name", None),
project_domain_name=user.get("project_domain_name", None),
https_insecure=common.get("https_insecure", False),
https_cacert=common.get("https_cacert"))
return cred.to_dict()
def build_credentials(self):
permissions = consts.EndpointPermission
users = [self._create_credential(self.config, user, permissions.USER)
for user in self.config.get("users", [])]
admin = self._create_credential(self.config,
self.config.get("admin"),
permissions.ADMIN)
return {"admin": admin, "users": users}
# NOTE(astudenov): Let's consider moving rally.osclients here
|
py | 1a34c1385eb56a091d102e47cc7997be38070c12 | from output.models.sun_data.attr_decl.ad_name.ad_name00109m.ad_name00109m2_xsd.ad_name00109m2 import Root
__all__ = [
"Root",
]
|
py | 1a34c1926ac3121e9e4f91f23dbf3770601c8511 | import numpy as np
def predict_one_vs_all(all_theta, X):
m = X.shape[0]
num_labels = all_theta.shape[0]
# You need to return the following variable correctly;
p = np.zeros(m)
# Add ones to the X data matrix
X = np.c_[np.ones(m), X]
# ===================== Your Code Here =====================
# Instructions : Complete the following code to make predictions using
# your learned logistic regression parameters (one vs all).
# You should set p to a vector of predictions (from 1 to
# num_labels)
#
# Hint : This code can be done all vectorized using the max function
# In particular, the max function can also return the index of the
# max element, for more information see 'np.argmax' function.
#
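# For example (illustrative): np.argmax([[0.1, 0.9], [0.8, 0.2]], axis=1)
# returns array([1, 0]) -- the column index of the largest value in each row.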
results = np.dot(X, all_theta.T)
p = np.argmax(results, axis=1)
p = np.array([x if x != 0 else 10 for x in p])
print('p {}'.format(p))
return p
|
py | 1a34c24db102ff20162c85704d451e495ed725ec | from tir import Webapp
import unittest
class CTBA161(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGACTB", "01/01/2019", "T1", "D MG 01", "34")
inst.oHelper.Program("CTBA161")
def test_CTBA161_001(self):
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch("D MG 01 ")
self.oHelper.SetValue("CTS_CODPLA", "T01")
OrdemAut = self.oHelper.GetValue("CTS_ORDEM")
EntiGerenAut = self.oHelper.GetValue("CTS_CONTAG")
self.oHelper.CheckResult("CTS_ORDEM", OrdemAut)
self.oHelper.CheckResult("CTS_CONTAG", EntiGerenAut)
self.oHelper.SetValue("CTS_CTASUP", "")
self.oHelper.SetValue("CTS_DESCCG", "ENTIDADE TIR 01 INCLUIR")
self.oHelper.SetValue("CTS_DETHCG", "TIR")
self.oHelper.SetValue("CTS_NORMAL", "2 - Credito")
self.oHelper.SetValue("CTS_COLUNA", "0")
self.oHelper.SetValue("CTS_CLASSE", "1 - Sintetica")
self.oHelper.SetValue("CTS_NOME", "TIR INCS")
self.oHelper.SetValue("CTS_VISENT", "1 - Sim")
self.oHelper.SetValue("CTS_FATSLD", "1 - Mantem")
self.oHelper.SetValue("CTS_TOTVIS", "1 - Sim")
self.oHelper.CheckView("Identificadores")
self.oHelper.ClickCheckBox("Total Geral")
self.oHelper.SetButton("Salvar")
self.oHelper.SetButton("Cancelar")
chave = "T010000000001001"
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("CTS_CODPLA", "T01")
self.oHelper.CheckResult("CTS_ORDEM", OrdemAut)
self.oHelper.CheckResult("CTS_CONTAG", EntiGerenAut)
self.oHelper.CheckResult("CTS_CTASUP", "")
self.oHelper.CheckResult("CTS_DESCCG", "ENTIDADE TIR 01 INCLUIR")
self.oHelper.CheckResult("CTS_DETHCG", "TIR")
self.oHelper.CheckResult("CTS_NORMAL", "2 - Credito")
self.oHelper.CheckResult("CTS_COLUNA", "0")
self.oHelper.CheckResult("CTS_CLASSE", "1 - Sintetica")
self.oHelper.CheckResult("CTS_NOME", "TIR INCS")
self.oHelper.CheckResult("CTS_VISENT", "1 - Sim")
self.oHelper.CheckResult("CTS_FATSLD", "1 - Mantem")
self.oHelper.CheckResult("CTS_TOTVIS", "1 - Sim")
self.oHelper.SetButton("Cancelar")
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
|
py | 1a34c30bde4bea208baa0f1f642e87b66793a021 | from typing import Dict, List, Optional, Tuple
from blspy import AugSchemeMPL, G2Element, PrivateKey
from kiwi.consensus.constants import ConsensusConstants
from kiwi.util.hash import std_hash
from kiwi.types.announcement import Announcement
from kiwi.types.blockchain_format.coin import Coin
from kiwi.types.blockchain_format.program import Program
from kiwi.types.blockchain_format.sized_bytes import bytes32
from kiwi.types.coin_spend import CoinSpend
from kiwi.types.condition_opcodes import ConditionOpcode
from kiwi.types.condition_with_args import ConditionWithArgs
from kiwi.types.spend_bundle import SpendBundle
from kiwi.util.clvm import int_from_bytes, int_to_bytes
from kiwi.util.condition_tools import conditions_by_opcode, conditions_for_solution, pkm_pairs_for_conditions_dict
from kiwi.util.ints import uint32, uint64
from kiwi.wallet.derive_keys import master_sk_to_wallet_sk
from kiwi.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE_HASH,
calculate_synthetic_secret_key,
puzzle_for_pk,
solution_for_conditions,
)
DEFAULT_SEED = b"seed" * 8
assert len(DEFAULT_SEED) == 32
class WalletTool:
next_address = 0
pubkey_num_lookup: Dict[bytes, uint32] = {}
def __init__(self, constants: ConsensusConstants, sk: Optional[PrivateKey] = None):
self.constants = constants
self.current_balance = 0
self.my_utxos: set = set()
if sk is not None:
self.private_key = sk
else:
self.private_key = AugSchemeMPL.key_gen(DEFAULT_SEED)
self.generator_lookups: Dict = {}
self.puzzle_pk_cache: Dict = {}
self.get_new_puzzle()
def get_next_address_index(self) -> uint32:
self.next_address = uint32(self.next_address + 1)
return self.next_address
def get_private_key_for_puzzle_hash(self, puzzle_hash: bytes32) -> PrivateKey:
if puzzle_hash in self.puzzle_pk_cache:
child = self.puzzle_pk_cache[puzzle_hash]
private = master_sk_to_wallet_sk(self.private_key, uint32(child))
# pubkey = private.get_g1()
return private
else:
for child in range(self.next_address):
pubkey = master_sk_to_wallet_sk(self.private_key, uint32(child)).get_g1()
if puzzle_hash == puzzle_for_pk(bytes(pubkey)).get_tree_hash():
return master_sk_to_wallet_sk(self.private_key, uint32(child))
raise ValueError(f"Do not have the keys for puzzle hash {puzzle_hash}")
def puzzle_for_pk(self, pubkey: bytes) -> Program:
return puzzle_for_pk(pubkey)
def get_new_puzzle(self) -> bytes32:
next_address_index: uint32 = self.get_next_address_index()
pubkey = master_sk_to_wallet_sk(self.private_key, next_address_index).get_g1()
self.pubkey_num_lookup[bytes(pubkey)] = next_address_index
puzzle = puzzle_for_pk(bytes(pubkey))
self.puzzle_pk_cache[puzzle.get_tree_hash()] = next_address_index
return puzzle
def get_new_puzzlehash(self) -> bytes32:
puzzle = self.get_new_puzzle()
return puzzle.get_tree_hash()
def sign(self, value: bytes, pubkey: bytes) -> G2Element:
privatekey: PrivateKey = master_sk_to_wallet_sk(self.private_key, self.pubkey_num_lookup[pubkey])
return AugSchemeMPL.sign(privatekey, value)
def make_solution(self, condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]]) -> Program:
ret = []
for con_list in condition_dic.values():
for cvp in con_list:
ret.append([cvp.opcode.value] + cvp.vars)
return solution_for_conditions(Program.to(ret))
def generate_unsigned_transaction(
self,
amount: uint64,
new_puzzle_hash: bytes32,
coins: List[Coin],
condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]],
fee: int = 0,
secret_key: Optional[PrivateKey] = None,
additional_outputs: Optional[List[Tuple[bytes32, int]]] = None,
) -> List[CoinSpend]:
spends = []
spend_value = sum([c.amount for c in coins])
if ConditionOpcode.CREATE_COIN not in condition_dic:
condition_dic[ConditionOpcode.CREATE_COIN] = []
if ConditionOpcode.CREATE_COIN_ANNOUNCEMENT not in condition_dic:
condition_dic[ConditionOpcode.CREATE_COIN_ANNOUNCEMENT] = []
output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [new_puzzle_hash, int_to_bytes(amount)])
condition_dic[output.opcode].append(output)
if additional_outputs is not None:
for o in additional_outputs:
out = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [o[0], int_to_bytes(o[1])])
condition_dic[out.opcode].append(out)
amount_total = sum(int_from_bytes(cvp.vars[1]) for cvp in condition_dic[ConditionOpcode.CREATE_COIN])
change = spend_value - amount_total - fee
if change > 0:
change_puzzle_hash = self.get_new_puzzlehash()
change_output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [change_puzzle_hash, int_to_bytes(change)])
condition_dic[output.opcode].append(change_output)
secondary_coins_cond_dic: Dict[ConditionOpcode, List[ConditionWithArgs]] = dict()
secondary_coins_cond_dic[ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT] = []
for n, coin in enumerate(coins):
puzzle_hash = coin.puzzle_hash
if secret_key is None:
secret_key = self.get_private_key_for_puzzle_hash(puzzle_hash)
pubkey = secret_key.get_g1()
puzzle = puzzle_for_pk(bytes(pubkey))
if n == 0:
message_list = [c.name() for c in coins]
for outputs in condition_dic[ConditionOpcode.CREATE_COIN]:
message_list.append(Coin(coin.name(), outputs.vars[0], int_from_bytes(outputs.vars[1])).name())
message = std_hash(b"".join(message_list))
condition_dic[ConditionOpcode.CREATE_COIN_ANNOUNCEMENT].append(
ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [message])
)
primary_announcement_hash = Announcement(coin.name(), message).name()
secondary_coins_cond_dic[ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT].append(
ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [primary_announcement_hash])
)
main_solution = self.make_solution(condition_dic)
spends.append(CoinSpend(coin, puzzle, main_solution))
else:
spends.append(CoinSpend(coin, puzzle, self.make_solution(secondary_coins_cond_dic)))
return spends
def sign_transaction(self, coin_spends: List[CoinSpend]) -> SpendBundle:
signatures = []
solution: Program
puzzle: Program
for coin_spend in coin_spends: # type: ignore # noqa
secret_key = self.get_private_key_for_puzzle_hash(coin_spend.coin.puzzle_hash)
synthetic_secret_key = calculate_synthetic_secret_key(secret_key, DEFAULT_HIDDEN_PUZZLE_HASH)
err, con, cost = conditions_for_solution(
coin_spend.puzzle_reveal, coin_spend.solution, self.constants.MAX_BLOCK_COST_CLVM
)
if not con:
raise ValueError(err)
conditions_dict = conditions_by_opcode(con)
for _, msg in pkm_pairs_for_conditions_dict(
conditions_dict, bytes(coin_spend.coin.name()), self.constants.AGG_SIG_ME_ADDITIONAL_DATA
):
signature = AugSchemeMPL.sign(synthetic_secret_key, msg)
signatures.append(signature)
aggsig = AugSchemeMPL.aggregate(signatures)
spend_bundle = SpendBundle(coin_spends, aggsig)
return spend_bundle
def generate_signed_transaction(
self,
amount: uint64,
new_puzzle_hash: bytes32,
coin: Coin,
condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]] = None,
fee: int = 0,
additional_outputs: Optional[List[Tuple[bytes32, int]]] = None,
) -> SpendBundle:
if condition_dic is None:
condition_dic = {}
transaction = self.generate_unsigned_transaction(
amount, new_puzzle_hash, [coin], condition_dic, fee, additional_outputs=additional_outputs
)
assert transaction is not None
return self.sign_transaction(transaction)
def generate_signed_transaction_multiple_coins(
self,
amount: uint64,
new_puzzle_hash: bytes32,
coins: List[Coin],
condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]] = None,
fee: int = 0,
additional_outputs: Optional[List[Tuple[bytes32, int]]] = None,
) -> SpendBundle:
if condition_dic is None:
condition_dic = {}
transaction = self.generate_unsigned_transaction(
amount, new_puzzle_hash, coins, condition_dic, fee, additional_outputs=additional_outputs
)
assert transaction is not None
return self.sign_transaction(transaction)
|
py | 1a34c418c7a0217c6137730e31375a0f48f3deca | from django.apps import AppConfig
class ResourcesConfig(AppConfig):
name = 'Resources'
|
py | 1a34c478d8fe66a929bbf4ebbf8043ec5848fc84 | from django import template
from django.template import Library, Node
register = Library()
@register.filter
def adjust_date(my_date, user):
from datetime import timedelta
return my_date - timedelta(minutes=user.timezone_offset_mins)
@register.tag
def sorting_header(parser, token):
try:
tag_name, my_name, my_sorting = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires exactly two arguments" % token.contents.split()[0])
return SortingHeader(my_name[1:-1], my_sorting[1:-1])
class SortingHeader(Node):
def __init__(self, my_name, my_sorting):
self.my_name = my_name
self.my_sorting = my_sorting
def render(self, context):
url = "#order=%s" % self.my_sorting
return '<th class="%s"><a href="%s">%s</a></th>' % (self.my_sorting, url, self.my_name)
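# Example template usage (illustrative): {% sorting_header "Name" "name" %}
# renders: <th class="name"><a href="#order=name">Name</a></th>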
|
py | 1a34c6b298c3ad1bf541bd04eddd54ea9f47094e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class SettingsOperations(object):
"""SettingsOperations operations.
You should not instantiate this class directly; instead, create a Client instance, which will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def list(
self, app_id, version_id, custom_headers=None, raw=False, **operation_config):
"""Gets the settings in a version of the application.
:param app_id: The application ID.
:type app_id: str
:param version_id: The version ID.
:type version_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~azure.cognitiveservices.language.luis.authoring.models.AppVersionSettingObject]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str'),
'versionId': self._serialize.url("version_id", version_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[AppVersionSettingObject]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list.metadata = {'url': '/apps/{appId}/versions/{versionId}/settings'}
def update(
self, app_id, version_id, list_of_app_version_setting_object, custom_headers=None, raw=False, **operation_config):
"""Updates the settings in a version of the application.
:param app_id: The application ID.
:type app_id: str
:param version_id: The version ID.
:type version_id: str
:param list_of_app_version_setting_object: A list of the updated
application version settings.
:type list_of_app_version_setting_object:
list[~azure.cognitiveservices.language.luis.authoring.models.AppVersionSettingObject]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationStatus or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.language.luis.authoring.models.OperationStatus
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.language.luis.authoring.models.ErrorResponseException>`
"""
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'appId': self._serialize.url("app_id", app_id, 'str'),
'versionId': self._serialize.url("version_id", version_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(list_of_app_version_setting_object, '[AppVersionSettingObject]')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update.metadata = {'url': '/apps/{appId}/versions/{versionId}/settings'}
|
py | 1a34c914e04c4359f083b32c7a871dfd56e113aa | import glob
import sys
import os
import xml.etree.ElementTree as ET
from random import random
def main(filename):
# ratio to divide up the images
train = 0.7
val = 0.2
test = 0.1
if (train + test + val) != 1.0:
print("probabilities must equal 1")
exit()
# get the labels
labels = []
imgnames = []
annotations = {}
with open(filename, 'r') as labelfile:
label_string = ""
for line in labelfile:
label_string += line.rstrip()
labels = label_string.split(',')
labels = [elem.replace(" ", "") for elem in labels]
# get image names
for filename in os.listdir("./JPEGImages"):
if filename.endswith(".jpg"):
img = filename.rstrip('.jpg')
imgnames.append(img)
print("Labels:", labels, "imgcnt:", len(imgnames))
# initialise annotation list
for label in labels:
annotations[label] = []
# Scan the annotations for the labels
for img in imgnames:
annote = "Annotations/" + img + '.xml'
if os.path.isfile(annote):
tree = ET.parse(annote)
root = tree.getroot()
annote_labels = []
for labelname in root.findall('*/name'):
labelname = labelname.text
annote_labels.append(labelname)
if labelname in labels:
annotations[labelname].append(img)
annotations[img] = annote_labels
else:
print("Missing annotation for ", annote)
exit()
# divvy up the images to the different sets
sampler = imgnames.copy()
train_list = []
val_list = []
test_list = []
while len(sampler) > 0:
dice = random()
elem = sampler.pop()
if dice <= test:
test_list.append(elem)
elif dice <= (test + val):
val_list.append(elem)
else:
train_list.append(elem)
print("Training set:", len(train_list), "validation set:", len(val_list), "test set:", len(test_list))
# create the dataset files
create_folder("./ImageSets/Main/")
with open("./ImageSets/Main/train.txt", 'w') as outfile:
for name in train_list:
outfile.write(name + "\n")
with open("./ImageSets/Main/val.txt", 'w') as outfile:
for name in val_list:
outfile.write(name + "\n")
with open("./ImageSets/Main/trainval.txt", 'w') as outfile:
for name in train_list:
outfile.write(name + "\n")
for name in val_list:
outfile.write(name + "\n")
with open("./ImageSets/Main/test.txt", 'w') as outfile:
for name in test_list:
outfile.write(name + "\n")
# create the individual files for each label
for label in labels:
with open("./ImageSets/Main/"+ label +"_train.txt", 'w') as outfile:
for name in train_list:
if label in annotations[name]:
outfile.write(name + " 1\n")
else:
outfile.write(name + " -1\n")
with open("./ImageSets/Main/"+ label +"_val.txt", 'w') as outfile:
for name in val_list:
if label in annotations[name]:
outfile.write(name + " 1\n")
else:
outfile.write(name + " -1\n")
with open("./ImageSets/Main/"+ label +"_test.txt", 'w') as outfile:
for name in test_list:
if label in annotations[name]:
outfile.write(name + " 1\n")
else:
outfile.write(name + " -1\n")
def create_folder(foldername):
if os.path.exists(foldername):
print('folder already exists:', foldername)
else:
os.makedirs(foldername)
if __name__=='__main__':
if len(sys.argv) < 2:
print("usage: python generate_vocdata.py <labelfile>")
exit()
main(sys.argv[1])
|
py | 1a34c977e3a97e1415dbe02956700924ba0bd873 | """
2019/11/17 15:57
92. [Python OOP] Class methods and static methods
"""
"""
Class method: the first parameter must be cls, and cls refers to the current class itself.
Static method: a static method belongs to the class and is usually called through the class name.
Class attributes cannot be read inside a static method except through the class name,
and neither the instance (self) nor the class (cls) is passed in automatically.
Use a static method when the method does not need to modify class or instance attributes,
but keeping it inside the class makes the code easier to organize.
"""
class Person(object):
country = 'china'
def eat(self):
print('hello world!')
@classmethod
def greet(cls):
cls.country = 'earth'
print('calling the class method...')
@staticmethod
def static_method():
print('calling the static method...')
Person.country = 'usa'
# TODO: 1. Instance method
p1 = Person()
p1.eat()
# TODO: TypeError: eat() missing 1 required positional argument: 'self'
# Person.eat()
# TODO: 2. Class method
# TODO: call via an instance
p1.greet()
# TODO: call via the class
Person.greet()
# TODO: 3. Static method
# TODO: call via an instance
p1.static_method()
# TODO: call via the class
Person.static_method()
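# Note: the print below shows 'usa' -- greet() set cls.country to 'earth',
# and static_method() then overwrote it via Person.country = 'usa'.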
print(Person.country) |
py | 1a34c9ccf39cbad7d99b0298873d9abe61615231 | # NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2021 Neongecko.com Inc.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
DEFAULT_LIBRE_HOST = "http://translate.neon.ai:5000"
|
py | 1a34c9cd8fdc28e603fc0a0c6bac361d62e80771 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow.compiler.mlir.tfr.integration.node_expansion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.compiler.mlir.tfr.resources import gen_composite_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
_lib_dir = os.path.dirname(gen_composite_ops.__file__)
_lib_name = os.path.basename(gen_composite_ops.__file__)[4:].replace(
'.py', '.so')
load_library.load_op_library(os.path.join(_lib_dir, _lib_name))
class NodeExpansionTest(test.TestCase):
def testAddN(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq1 = gen_composite_ops.my_add_n([t1])
sq2 = gen_composite_ops.my_add_n([t1, t2])
sq3 = gen_composite_ops.my_add_n([t1, t2, t3])
self.assertAllEqual(sq1.numpy().reshape(-1), [1, 2, 3, 4])
self.assertAllEqual(sq2.numpy().reshape(-1), [2, 4, 6, 8])
self.assertAllEqual(sq3.numpy().reshape(-1), [3, 6, 9, 12])
def testBiasedDense(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = gen_composite_ops.my_biased_dense(t1, t2, t3)
self.assertAllEqual(sq.numpy().reshape(-1), [-3, 0, 5, 12])
def testBiasedDenseRelu(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
def testWithKnownKernel(self):
def biasd_dense_elu(x, y, z):
dot = gen_composite_ops.my_biased_dense(x, y, z)
return nn_ops.elu(dot) # with known kernel, should not expand.
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = biasd_dense_elu(t1, t2, t3)
self.assertAllClose(sq.numpy().reshape(-1), [-0.950213, 0, 5, 12])
# Regression test for an issue where VarHandleOp wasn't being properly
# imported into MLIR for "no-op" node expansion.
def testVarHandleOp(self):
x = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
# Note: we purposely make multiple calls to VarHandleOp to exercise the
# cached kernel lookup path that was exhibiting the VarHandleOp import
# issue.
unused_ = gen_resource_variable_ops.VarHandleOp(
dtype=dtypes.float32, shape=[3, 2])
handle = gen_resource_variable_ops.VarHandleOp(
dtype=dtypes.float32, shape=[3, 2])
gen_resource_variable_ops.AssignVariableOp(resource=handle, value=x)
self.assertAllEqual(
x,
gen_resource_variable_ops.ReadVariableOp(
resource=handle, dtype=dtypes.float32))
if __name__ == '__main__':
os.environ['TF_MLIR_TFR_LIB_DIR'] = 'tensorflow/compiler/mlir/tfr/resources'
ops.enable_eager_execution()
test.main()
|
py | 1a34ca1f2910270fabeaafcfdbc635eaa86c0ae1 | import typing
from typing import Optional, Any
import gym
import gym_minigrid.minigrid
import numpy as np
import torch
from babyai.utils.format import InstructionsPreprocessor
from gym_minigrid.minigrid import MiniGridEnv
from core.base_abstractions.sensor import Sensor, prepare_locals_for_super
from core.base_abstractions.task import Task, SubTaskType
# fmt: off
ALL_VOCAB_TOKENS = [
"a", "after", "and", "ball", "behind", "blue", "box",
"door", "front", "go", "green", "grey", "in", "key",
"left", "next", "of", "on", "open", "pick", "purple",
"put", "red", "right", "the", "then", "to", "up", "yellow",
"you", "your",
]
# fmt: on
class EgocentricMiniGridSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(
self,
agent_view_size: int,
view_channels: int = 1,
uuid: str = "minigrid_ego_image",
**kwargs: Any
):
self.agent_view_size = agent_view_size
self.view_channels = view_channels
self.num_objects = (
typing.cast(
int, max(map(abs, gym_minigrid.minigrid.OBJECT_TO_IDX.values())) # type: ignore
)
+ 1
)
self.num_colors = (
typing.cast(int, max(map(abs, gym_minigrid.minigrid.COLOR_TO_IDX.values()))) # type: ignore
+ 1
)
self.num_states = (
typing.cast(int, max(map(abs, gym_minigrid.minigrid.STATE_TO_IDX.values()))) # type: ignore
+ 1
)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=max(self.num_objects, self.num_colors, self.num_states) - 1,
shape=(self.agent_view_size, self.agent_view_size, self.view_channels),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is not None and minigrid_output_obs["image"].shape == (
self.agent_view_size,
self.agent_view_size,
):
img = minigrid_output_obs["image"][:, :, : self.view_channels]
else:
env.agent_view_size = self.agent_view_size
img = env.gen_obs()["image"][:, :, : self.view_channels]
assert img.dtype == np.uint8
return img
class MiniGridMissionSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(self, instr_len: int, uuid: str = "minigrid_mission", **kwargs: Any):
self.instr_preprocessor = InstructionsPreprocessor(
model_name="TMP_SENSOR", load_vocab_from=None
)
# We initialize the vocabulary with a fixed collection of tokens
# and then ensure that the size cannot exceed this number. This
# guarantees that sensors on all processes will produce the same
# values.
for token in ALL_VOCAB_TOKENS:
_ = self.instr_preprocessor.vocab[token]
self.instr_preprocessor.vocab.max_size = len(ALL_VOCAB_TOKENS)
self.instr_len = instr_len
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=self.instr_preprocessor.vocab.max_size,
shape=(self.instr_len,),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is None:
minigrid_output_obs = env.gen_obs()
out = self.instr_preprocessor([minigrid_output_obs]).view(-1)
n: int = out.shape[0]
if n > self.instr_len:
out = out[: self.instr_len]
elif n < self.instr_len:
out = torch.nn.functional.pad(
input=out, pad=[0, self.instr_len - n], value=0,
)
return out.long().numpy()
|
py | 1a34ca23902cc8d366e6e6bc5ffb0a5a06bb1fad | from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/webhooks/stripe', methods=['POST'])
def receive_stripe_webhook():
"""Receives a webhook payload from Stripe.
"""
# Try to parse a webhook payload, get upset if we couldn't
# parse any JSON in the body:
stripe_payload = request.json
if not stripe_payload:
return jsonify(message="Could not parse webhook payload"), 400
event = stripe_payload.get('type')
if not event:
return jsonify(message="Could not determine event type"), 400
if event == 'charge.succeeded':
# Pull fields out of payload:
data_object = stripe_payload.get('data').get('object')
customer_id = data_object.get('customer')
amount = data_object.get('amount')
# Here we just log the transaction, but at this point we can do
# anything! (Provision accounts, push to a database, etc.)
print(f'Customer {customer_id} made a purchase of {amount} cents!')
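# At this point you could hand the event off to your own systems instead of
# just logging it, e.g. (hypothetical helper, not part of this app):
#     payments.record_charge(customer_id=customer_id, amount_cents=amount)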
return jsonify(message="Webhook received"), 200 |
py | 1a34cacab2e387b3c5a5a10e788aca5863473c00 | import json
import sys
from wsgiref.simple_server import make_server
from . import NAME, VERSION, Kaa, KaaServer
from .openapi import OpenApi
from .server import Server
class Cli():
def __init__(self):
self.host = '127.0.0.1'
self.port = 8086
self.argv = sys.argv[:]
def execute(self):
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help'
if subcommand == 'version':
msg = self.__get_version()
elif subcommand == 'help':
msg = self.__get_help()
elif subcommand == 'serve':
self.__serve()
return
elif subcommand == 'openapi':
server:KaaServer = Server().get_server()
kaa:Kaa = server.get_kaa({
'REQUEST_METHOD': '',
'PATH_INFO': '',
'REMOTE_ADDR': '',
'QUERY_STRING': ''
}, None)
msg = json.dumps(OpenApi().generate(kaa))
else:
msg = 'Invalid command. Try help'
sys.stdout.write(msg + '\n')
def __get_name(self):
return NAME
def __get_version(self):
return VERSION
def __get_help(self):
commands = [
('version', 'Returns Kaa version'),
('serve', 'Starts a server for development'),
('openapi', 'Generates openapi json')
]
return '\n'.join(['{}\t\t{}'.format(*cmd) for cmd in commands])
def __serve(self):
self.__set_host_port()
sys.stdout.write('{} version {}\n'.format(self.__get_name(), self.__get_version()))
sys.stdout.write('Server started at {}:{}\n\n'.format(self.host, self.port))
server:KaaServer = Server().get_server()
make_server(
host=self.host,
port=int(self.port),
app=lambda env, start_response: server.get_kaa(env, start_response).serve()
).serve_forever()
def __set_host_port(self):
try:
porthost = self.argv[2].split(':')
if len(porthost) == 1:
self.port = porthost[0]
elif len(porthost) == 2:
self.host = porthost[0]
self.port = porthost[1]
else:
sys.stdout.write('Invalid host:port' + '\n')
sys.exit(1)
except IndexError:
pass
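# Accepts either "port" or "host:port" (e.g. "serve 8080" or
# "serve 0.0.0.0:8080"); defaults to 127.0.0.1:8086 when neither is given.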
|