ext (stringclasses 9 values) | sha (stringlengths 40–40) | content (stringlengths 3–1.04M) |
---|---|---|
py | 1a3a622e103e166c38fbe588713af2de91d50b75 | def test_logsources_active(self):
"""
Check that the security log source is active once it has been instantiated.
Returns:
"""
log_source = LogSources.objects.get(Active=1)
self.assertEqual(log_source.get_active(), 1)
|
py | 1a3a63c249fa7a8cc9040829f811a9168f4b507e | import random
import numpy as np
import skimage.color as sc
import torch
def get_patch(*args, patch_size=96, scale=2, multi=False, input_large=False):
ih, iw = args[0].shape[:2]
if not input_large:
p = scale if multi else 1
tp = p * patch_size
ip = tp // scale
else:
tp = patch_size
ip = patch_size
ix = random.randrange(0, iw - ip + 1)
iy = random.randrange(0, ih - ip + 1)
if not input_large:
tx, ty = scale * ix, scale * iy
else:
tx, ty = ix, iy
ret = [
args[0][iy : iy + ip, ix : ix + ip, :],
*[a[ty : ty + tp, tx : tx + tp, :] for a in args[1:]],
]
return ret
def set_channel(*args, n_channels=3):
def _set_channel(img):
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
c = img.shape[2]
if n_channels == 1 and c == 3:
img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
elif n_channels == 3 and c == 1:
img = np.concatenate([img] * n_channels, 2)
return img
return [_set_channel(a) for a in args]
def np2Tensor(*args, rgb_range=255):
def _np2Tensor(img):
np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))
tensor = torch.from_numpy(np_transpose).float()
tensor.mul_(rgb_range / 255)
return tensor
return [_np2Tensor(a) for a in args]
def augment(*args, hflip=True, rot=True):
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
def _augment(img):
if hflip:
img = img[:, ::-1, :]
if vflip:
img = img[::-1, :, :]
if rot90:
img = img.transpose(1, 0, 2)
return img
return [_augment(a) for a in args]
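# Hedged usage sketch (added for illustration; not part of the original
# module). The array shapes, the x2 scale and the rgb_range below are
# assumptions chosen only to show how the helpers above compose for paired
# low-resolution / high-resolution patches.
if __name__ == '__main__':
    lr = np.random.rand(200, 200, 3)   # hypothetical low-resolution image
    hr = np.random.rand(400, 400, 3)   # matching x2 high-resolution image
    lr_p, hr_p = get_patch(lr, hr, patch_size=96, scale=2)
    lr_p, hr_p = augment(lr_p, hr_p)   # same flips/rotation applied to both
    lr_t, hr_t = np2Tensor(*set_channel(lr_p, hr_p), rgb_range=255)
    print(lr_t.shape, hr_t.shape)  # torch.Size([3, 48, 48]) torch.Size([3, 96, 96])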
|
py | 1a3a640e92cdaa318befb66e0f6591e9d9fb7772 | # Examples of mouse input
import simplegui
import math
# initialize globals
width = 450
height = 300
ball_list = []
ball_radius = 15
ball_color = "Red"
# helper function
def distance(p, q):
return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# define event handler for mouse click, draw
def click(pos):
ball_list.append(pos)
# if distance(ball_pos, pos) < ball_radius:
# if ball_color == "Red":
# ball_color = "Green"
# else:
# ball_pos = [pos[0], pos[1]]
# ball_color = "Red"
def draw(canvas):
for ball_pos in ball_list:
canvas.draw_circle(ball_pos, ball_radius, 1, "Black", ball_color)
# create frame
frame = simplegui.create_frame("Mouse selection", width, height)
frame.set_canvas_background("White")
# register event handler
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
# start frame
frame.start()
|
py | 1a3a64b23cac5bab2f266d868c0775b45a843256 | import time
import shelve
import datetime
import settings
from twython import Twython
from contextlib import contextmanager
@contextmanager
def closing(this):
try:
yield this
finally:
this.close()
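# Note: this helper mirrors the standard-library contextlib.closing context
# manager; shelve objects expose close(), so either could be used below.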
class TwitterStats():
def __init__(self):
# connect to twitter api
self.twitter = Twython(
app_key=settings.consumer_key,
app_secret=settings.consumer_secret,
oauth_token=settings.oauth_token,
oauth_token_secret=settings.oauth_token_secret
)
def init_storage(self):
storage = shelve.open('twitter_stats', writeback=True)
if not storage:
storage['followers'] = set()
storage['unfollowers'] = []
storage['unfollowers_since_last_check'] = None
storage['last_update'] = None
return storage
def get_followers(self):
follower_ids = self.twitter.getFollowersIDs()['ids']
return set(follower_ids)
def show_screen_name(self, user_id):
user = self.twitter.showUser(user_id=user_id)
screen_name = user['screen_name']
return screen_name
def update_unfollower_stats(self):
with closing(self.init_storage()) as storage:
previous_followers = storage['followers']
current_followers = self.get_followers()
new_unfollower_ids = previous_followers - current_followers
unfollowers_since_last_check = []
for follower_id in new_unfollower_ids:
unfollower = {
'id': follower_id,
'screen_name': self.show_screen_name(follower_id),
'timestamp': datetime.datetime.now().strftime('%b %d %Y %H:%M:%S')
}
storage['unfollowers'].append(unfollower)
unfollowers_since_last_check.append(unfollower)
storage['followers'] = current_followers
storage['unfollowers_since_last_check'] = unfollowers_since_last_check
storage['last_update'] = datetime.datetime.now().strftime('%b %d %Y %H:%M:%S')
def main():
twitter_stats = TwitterStats()
while True:
twitter_stats.update_unfollower_stats()
time.sleep(settings.update_interval)
if __name__ == '__main__':
main()
|
py | 1a3a655d66f6bf1c6f604012c636151844803d53 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/D/Scanner.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that the D scanner can return multiple modules imported by
a single statement.
"""
import TestSCons
import sys
from os.path import abspath, dirname, join
sys.path.append(join(dirname(abspath(__file__)), 'Support'))
from executablesSearch import isExecutableOfToolAvailable
test = TestSCons.TestSCons()
_obj = TestSCons._obj
if not isExecutableOfToolAvailable(test, 'dmd'):
test.skip_test("Could not find 'dmd'; skipping test.\n")
test.subdir(['p'])
test.write('SConstruct', """
env = Environment()
env.Program('test1.d')
env.Program('test2.d')
""")
test.write(['test1.d'], """\
import module1;
import module2;
import module3;
import p.submodule1;
import p.submodule2;
int main() {
return 0;
}
""")
test.write(['test2.d'], """\
import
module1,
module2,
module3;
import
p.submodule1,
p.submodule2;
int main() {
return 0;
}
""")
test.write(['ignored.d'], """\
module ignored;
int something;
""")
test.write(['module1.d'], """\
module module1;
int something;
""")
test.write(['module2.d'], """\
module module2;
int something;
""")
test.write(['module3.di'], """\
module module3;
int something;
""")
test.write(['p', 'ignored.d'], """\
module p.ignored;
int something;
""")
test.write(['p', 'submodule1.d'], """\
module p.submodule1;
int something;
""")
test.write(['p', 'submodule2.d'], """\
module p.submodule2;
int something;
""")
arguments = 'test1%(_obj)s test2%(_obj)s' % locals()
test.run(arguments = arguments)
test.up_to_date(arguments = arguments)
test.write(['module2.d'], """\
module module2;
int something_else;
""")
test.not_up_to_date(arguments = arguments)
test.up_to_date(arguments = arguments)
test.write(['p', 'submodule2.d'], """\
module p.submodule2;
int something_else;
""")
test.not_up_to_date(arguments = arguments)
test.up_to_date(arguments = arguments)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py | 1a3a660e33b9cf407a611f628c6eae21e6ce6afa | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Wishart distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class _WishartOperatorPD(distribution.Distribution):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar number of degrees of freedom `df` and
an instance of `OperatorPDBase`, which provides matrix-free access to the
symmetric positive definite operator that defines the scale matrix.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
See `WishartFull`, `WishartCholesky` for examples of initializing and using
this class.
"""
def __init__(self,
df,
scale_operator_pd,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct Wishart distributions.
Args:
df: `float` or `double` tensor, the degrees of freedom of the
distribution(s). `df` must be greater than or equal to `k`.
scale_operator_pd: `float` or `double` instance of `OperatorPDBase`.
cholesky_input_output_matrices: `Boolean`. Any function whose input or
output is a matrix assumes the input is Cholesky factored and returns a
Cholesky factored matrix. For example, `log_pdf` takes a Cholesky factored
input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if scale is not floating-type
TypeError: if scale.dtype != df.dtype
ValueError: if df < k, where scale operator event shape is `(k, k)`
"""
self._cholesky_input_output_matrices = cholesky_input_output_matrices
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[df, scale_operator_pd]):
if not scale_operator_pd.dtype.is_floating:
raise TypeError(
"scale_operator_pd.dtype=%s is not a floating-point type" %
scale_operator_pd.dtype)
self._scale_operator_pd = scale_operator_pd
self._df = ops.convert_to_tensor(
df, dtype=scale_operator_pd.dtype, name="df")
contrib_tensor_util.assert_same_float_dtype(
(self._df, self._scale_operator_pd))
if (self._scale_operator_pd.get_shape().ndims is None or
self._scale_operator_pd.get_shape()[-1].value is None):
self._dimension = math_ops.cast(
self._scale_operator_pd.vector_space_dimension(),
dtype=self._scale_operator_pd.dtype, name="dimension")
else:
self._dimension = ops.convert_to_tensor(
self._scale_operator_pd.get_shape()[-1].value,
dtype=self._scale_operator_pd.dtype, name="dimension")
df_val = tensor_util.constant_value(self._df)
dim_val = tensor_util.constant_value(self._dimension)
if df_val is not None and dim_val is not None:
df_val = np.asarray(df_val)
if not df_val.shape: df_val = (df_val,)
if any(df_val < dim_val):
raise ValueError(
"Degrees of freedom (df = %s) cannot be less than dimension of "
"scale matrix (scale.dimension = %s)"
% (df_val, dim_val))
elif validate_args:
assertions = check_ops.assert_less_equal(
self._dimension, self._df,
message=("Degrees of freedom (df = %s) cannot be less than "
"dimension of scale matrix (scale.dimension = %s)" %
(self._dimension, self._df)))
self._df = control_flow_ops.with_dependencies([assertions], self._df)
super(_WishartOperatorPD, self).__init__(
dtype=self._scale_operator_pd.dtype,
parameters={"df": self._df,
"scale_operator_pd": self._scale_operator_pd,
"dimension": self._dimension},
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
is_reparameterized=True,
name=ns)
@property
def df(self):
"""Wishart distribution degree(s) of freedom."""
return self._df
def scale(self):
"""Wishart distribution scale matrix."""
if self._cholesky_input_output_matrices:
return self.scale_operator_pd.sqrt_to_dense()
else:
return self.scale_operator_pd.to_dense()
@property
def scale_operator_pd(self):
"""Wishart distribution scale matrix as an OperatorPD."""
return self._scale_operator_pd
@property
def cholesky_input_output_matrices(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices
@property
def dimension(self):
"""Dimension of underlying vector space. The `p` in `R^(p*p)`."""
return self._dimension
def _event_shape(self):
s = self.scale_operator_pd.shape()
return array_ops.slice(s, array_ops.shape(s) - 2, [2])
def _get_event_shape(self):
return self.scale_operator_pd.get_shape()[-2:]
def _batch_shape(self):
return self.scale_operator_pd.batch_shape()
def _get_batch_shape(self):
return self.scale_operator_pd.get_batch_shape()
def _sample_n(self, n, seed):
batch_shape = self.batch_shape()
event_shape = self.event_shape()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat(0, ((n,), batch_shape, event_shape))
# Complexity: O(nbk^2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
g = random_ops.random_gamma(shape=(n,),
alpha=self._multi_gamma_sequence(
0.5 * self.df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk^2)
x = array_ops.batch_matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.batch_matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk^2)
perm = array_ops.concat(0, (math_ops.range(1, ndims), (0,)))
x = array_ops.transpose(x, perm)
shape = array_ops.concat(0, (batch_shape, (event_shape[0], -1)))
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for OperatorPDDiag, each matmul is O(k^2), so
# this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
# O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator_pd.sqrt_matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk^2)
shape = array_ops.concat(0, (batch_shape, event_shape, (n,)))
x = array_ops.reshape(x, shape)
perm = array_ops.concat(0, ((ndims-1,), math_ops.range(0, ndims-1)))
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.batch_matmul(x, x, adj_y=True)
return x
def _log_prob(self, x):
if self.cholesky_input_output_matrices:
x_sqrt = x
else:
# Complexity: O(nbk^3)
x_sqrt = linalg_ops.cholesky(x)
batch_shape = self.batch_shape()
event_shape = self.event_shape()
ndims = array_ops.rank(x_sqrt)
# sample_ndims = ndims - batch_ndims - event_ndims
sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
sample_shape = array_ops.slice(
array_ops.shape(x_sqrt), [0], [sample_ndims])
# We need to be able to pre-multiply each matrix by its corresponding
# batch scale matrix. Since a Distribution Tensor supports multiple
# samples per batch, this means we need to reshape the input matrix `x`
# so that the first b dimensions are batch dimensions and the last two
# are of shape [dimension, dimension * number_of_samples]. Doing these
# gymnastics allows us to do a batch_solve.
#
# After we're done with sqrt_solve (the batch operation) we need to undo
# this reshaping so what we're left with is a Tensor partitionable by
# sample, batch, event dimensions.
# Complexity: O(nbk^2) since transpose must access every element.
scale_sqrt_inv_x_sqrt = x_sqrt
perm = array_ops.concat(0, (math_ops.range(sample_ndims, ndims),
math_ops.range(0, sample_ndims)))
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
shape = array_ops.concat(
0, (batch_shape,
(math_ops.cast(self.dimension, dtype=dtypes.int32), -1)))
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
# Complexity: O(nbM*k) where M is the complexity of the operator solving
# a vector system. E.g., for OperatorPDDiag, each solve is O(k), so
# this complexity is O(nbk^2). For OperatorPDCholesky, each solve is
# O(k^2) so this step has complexity O(nbk^3).
scale_sqrt_inv_x_sqrt = self.scale_operator_pd.sqrt_solve(
scale_sqrt_inv_x_sqrt)
# Undo make batch-op ready.
# Complexity: O(nbk^2)
shape = array_ops.concat(0, (batch_shape, event_shape, sample_shape))
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
perm = array_ops.concat(0, (math_ops.range(ndims - sample_ndims, ndims),
math_ops.range(0, ndims - sample_ndims)))
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
# Write V = SS', X = LL'. Then:
# tr[inv(V) X] = tr[inv(S)' inv(S) L L']
# = tr[inv(S) L L' inv(S)']
# = tr[(inv(S) L) (inv(S) L)']
# = sum_{ik} (inv(S) L)_{ik}^2
# The second equality follows from the cyclic permutation property.
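# (Illustrative sanity check of the identity above, not used by this method:
#  with S = np.linalg.cholesky(V), L = np.linalg.cholesky(X) and
#  A = np.linalg.solve(S, L), np.trace(np.linalg.solve(V, X)) agrees with
#  np.sum(A ** 2), since inv(V) = inv(S)' inv(S) and A = inv(S) L.)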
# Complexity: O(nbk^2)
trace_scale_inv_x = math_ops.reduce_sum(
math_ops.square(scale_sqrt_inv_x_sqrt),
reduction_indices=[-2, -1])
# Complexity: O(nbk)
half_log_det_x = math_ops.reduce_sum(
math_ops.log(array_ops.batch_matrix_diag_part(x_sqrt)),
reduction_indices=[-1])
# Complexity: O(nbk^2)
log_prob = ((self.df - self.dimension - 1.) * half_log_det_x -
0.5 * trace_scale_inv_x -
self.log_normalizing_constant())
# Set shape hints.
# Try to merge what we know from the input then what we know from the
# parameters of this distribution.
if x.get_shape().ndims is not None:
log_prob.set_shape(x.get_shape()[:-2])
if (log_prob.get_shape().ndims is not None and
self.get_batch_shape().ndims is not None and
self.get_batch_shape().ndims > 0):
log_prob.get_shape()[-self.get_batch_shape().ndims:].merge_with(
self.get_batch_shape())
return log_prob
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _entropy(self):
half_dp1 = 0.5 * self.dimension + 0.5
half_df = 0.5 * self.df
return (self.dimension * (half_df + half_dp1 * math.log(2.)) +
half_dp1 * self.scale_operator_pd.log_det() +
self._multi_lgamma(half_df, self.dimension) +
(half_dp1 - half_df) * self._multi_digamma(half_df, self.dimension))
def _mean(self):
if self.cholesky_input_output_matrices:
return math_ops.sqrt(self.df) * self.scale_operator_pd.sqrt_to_dense()
return self.df * self.scale_operator_pd.to_dense()
def _variance(self):
x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
d = array_ops.expand_dims(array_ops.batch_matrix_diag_part(x), -1)
v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
if self.cholesky_input_output_matrices:
return linalg_ops.cholesky(v)
return v
def _std(self):
if self.cholesky_input_output_matrices:
raise ValueError(
"Computing std. dev. when is cholesky_input_output_matrices=True "
"does not make sense.")
return linalg_ops.cholesky(self.variance())
def _mode(self):
s = self.df - self.dimension - 1.
s = math_ops.select(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
return s * self.scale_operator_pd.to_dense()
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
self.scale_operator_pd.log_det())
def log_normalizing_constant(self, name="log_normalizing_constant"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator_pd.sqrt_log_det() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name, values=[a, p]):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = math_ops.linspace(
constant_op.constant(0., dtype=self.dtype),
0.5 - 0.5 * p,
math_ops.cast(p, dtypes.int32))
return seq + array_ops.expand_dims(a, [-1])
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
math_ops.reduce_sum(math_ops.lgamma(seq),
reduction_indices=(-1,)))
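# (Hedged aside, not used by this class: for scalar arguments this matches
#  scipy.special.multigammaln, i.e. multigammaln(a, p) equals
#  0.25 * p * (p - 1) * log(pi) + sum(lgamma(a - 0.5 * j) for j in range(p)).)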
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return math_ops.reduce_sum(math_ops.digamma(seq),
reduction_indices=(-1,))
class WishartCholesky(_WishartOperatorPD):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
lower, triangular Cholesky factor which characterizes the scale matrix.
Using WishartCholesky is a constant-time improvement over WishartFull. It
saves an O(nbk^3) operation, i.e., a matrix-product operation for sampling
and a Cholesky factorization in log_prob. For most use-cases it often saves
another O(nbk^3) operation since most uses of Wishart will also use the
Cholesky factorization.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
```python
# Initialize a single 3x3 Wishart with Cholesky factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
chol_scale = tf.cholesky(...) # Shape is [3, 3].
dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on an observation in R^3, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.pdf(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.pdf(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Cholesky factored scale matrices.
df = [5, 4]
chol_scale = tf.cholesky(...) # Shape is [2, 3, 3].
dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3].
dist.pdf(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tf.contrib.distributions.batch_matrix_diag_transform.
```
"""
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartCholesky"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The Cholesky factorization of
the symmetric positive definite scale matrix of the distribution.
cholesky_input_output_matrices: `Boolean`. Any function whose input or
output is a matrix assumes the input is Cholesky factored and returns a
Cholesky factored matrix. For example, `log_pdf` takes a Cholesky factored
input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name scope to give class member ops.
"""
super(WishartCholesky, self).__init__(
df=df,
scale_operator_pd=operator_pd_cholesky.OperatorPDCholesky(
scale, verify_pd=validate_args),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
class WishartFull(_WishartOperatorPD):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
symmetric, positive definite scale matrix.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations
where `(k, k)` is the event space shape.
#### Mathematical details.
The PDF of this distribution is,
```
f(X) = det(X)^(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / B(scale, df)
```
where `df >= k` denotes the degrees of freedom, `scale` is a symmetric, pd,
`k x k` matrix, and the normalizing constant `B(scale, df)` is given by:
```
B(scale, df) = 2^(0.5 df k) |det(scale)|^(0.5 df) Gamma_k(0.5 df)
```
where `Gamma_k` is the multivariate Gamma function.
#### Examples
```python
# Initialize a single 3x3 Wishart with Full factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
scale = ... # Shape is [3, 3]; positive definite.
dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
# Evaluate this on an observation in R^3, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.pdf(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.pdf(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Full factored scale matrices.
df = [5, 4]
scale = ... # Shape is [2, 3, 3].
dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3]; xi is positive definite.
dist.pdf(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tf.contrib.distributions.batch_matrix_diag_transform.
```
"""
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartFull"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The symmetric positive definite
scale matrix of the distribution.
cholesky_input_output_matrices: `Boolean`. Any function whose input or
output is a matrix assumes the input is Cholesky factored and returns a
Cholesky factored matrix. For example, `log_pdf` takes a Cholesky factored
input and `sample_n` returns a Cholesky factor when
`cholesky_input_output_matrices=True`.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: The name scope to give class member ops.
"""
super(WishartFull, self).__init__(
df=df,
scale_operator_pd=operator_pd_full.OperatorPDFull(
scale, verify_pd=validate_args),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
|
py | 1a3a666875e3f0ebf35c7d89246e9c19e9e4004b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 11:05:24 2017
The oil and sugar separation (pretreatment) section for the baseline lipid cane biorefinery is defined here as System objects. The systems include all streams and units starting from enzyme treatment to purification of the sugar solution and the oil stream.
@author: Yoel
"""
import numpy as np
from biosteam import System, Stream
from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, \
HXutility, RVF, SplitFlash, VibratingScreen, \
MagneticSeparator, Clarifier, MixTank, \
Shredder, ConveyingBelt, Splitter, \
SplitCentrifuge_LLE, Pump, StorageTank
from biorefineries.lipidcane.species import pretreatment_species
from biorefineries.lipidcane.process_settings import price
__all__ = ('pretreatment_sys', 'lipid_cane', 'lipidcane', 'area_100', 'area_200')
# %% Species
Stream.species = pretreatment_species
psp = ('Ash', 'CaO', 'Cellulose', 'Ethanol', 'Flocculant',
'Glucose', 'Hemicellulose', 'Lignin', 'Lipid',
'Solids', 'H3PO4', 'Sucrose', 'Water')
psp1 = ('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
'Lignin', 'Lipid', 'Solids', 'Sucrose', 'Water')
psp2 = ('Ash', 'CaO', 'Cellulose', 'Flocculant', 'Glucose',
'Hemicellulose', 'Lignin', 'Lipid',
'H3PO4', 'Sucrose', 'Water')
# %% Streams
f1 = (2000.042, 26986.69 , 2007.067, 15922.734, 14459.241,
10035.334, 5017.667, 22746.761, 234157.798)
lipidcane = lipid_cane = Stream('lipid_cane', f1, psp1, units='kg/hr',
price=price['Lipid cane'])
enzyme = Stream('enzyme', Cellulose=100, Water=900, units='kg/hr',
price=price['Protease'])
imbibition_water = Stream('imbibition_water',
Water=87023.35,
T = 338.15, units='kg/hr')
H3PO4 = Stream('H3PO4', H3PO4=74.23, Water=13.10, units='kg/hr',
price=price['H3PO4']) # to T203
lime = Stream('lime', CaO=333.00, Water=2200.00, units='kg/hr',
price=price['Lime']) # to P5
polymer = Stream('polymer', Flocculant=0.83, units='kg/hr',
price=price['Polymer']) # to T205
rvf_wash_water = Stream('rvf_wash_water',
Water=16770, units='kg/hr',
T=363.15) # to C202
oil_wash_water = Stream('oil_wash_water',
Water=1350, units='kg/hr',
T=358.15) # to T207
# %% Units
Stream.default_ID = 'd'
Stream.default_ID_number = 0
# Stream.default_ID_number = 100
# Feed the shredder
U101 = ConveyingBelt('U101', ins=lipid_cane)
U101.cost_items['Conveying belt'].ub = 2500
# Separate metals
U102 = MagneticSeparator('U102', ins=U101.outs)
# Shredded cane
U103 = Shredder('U103', ins=U102.outs)
# Stream.default_ID_number = 200
# Hydrolyze starch
T201 = EnzymeTreatment('T201', T=323.15) # T=50
# Finely crush lipid cane
U201 = CrushingMill('U201',
split=(0.92, 0.92, 0.04, 0.92, 0.92, 0.04, 0.1, 1),
order=('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
'Lignin', 'Sucrose', 'Lipid', 'Solids'),
moisture_content=0.5)
# Convey out bagasse
U202 = ConveyingBelt('U202', ins=U201.outs[0], outs='Bagasse')
# Mix in water
M201 = Mixer('M201')
# Screen out fibers
S201 = VibratingScreen('S201',
split=(0.35, 0.35, 0.88, 0.35,
0.35, 0.88, 0, 0.88, 0.88),
order=psp1)
# Store juice before treatment
T202 = StorageTank('T202')
T202.tau = 12
# Heat up before adding acid
H201 = HXutility('H201', T=343.15)
# Mix in acid
T203 = MixTank('T203')
# Pump acid solution
P201 = Pump('P201')
# Mix lime solution
T204 = MixTank('T204')
T204.tau = 1
P202 = Pump('P202')
# Blend acid lipid solution with lime
T205 = MixTank('T205')
# Mix recycle
M202 = Mixer('M202')
# Heat before adding flocculant
H202 = HXutility('H202', T=372.15)
# Mix in flocculant
T206 = MixTank('T206')
T206.tau = 1/4
# Separate residual solids
C201 = Clarifier('C201',
split=(0, 0, 0, 0.522, 0.522, 0, 0,
0.98, 0.522, 0.522, 0.522),
order=psp2)
# Remove solids as filter cake
C202 = RVF('C202',
outs=('filter_cake', ''),
moisture_content=0.80,
split=(0.85, 0.85, 0.85, 0.01, 0.85, 0.85, 0.01),
order=('Ash', 'CaO', 'Cellulose', 'Glucose',
'Hemicellulose', 'Lignin', 'Sucrose'))
P203 = Pump('P203')
# Separate oil and sugar
T207 = MixTank('T207', outs=('', ''))
split = np.zeros(len(pretreatment_species), float)
index = pretreatment_species.indices(('Lipid', 'Water'))
split[index] = (1, 0.0001)
T207._split = split
T207._run = lambda : Splitter._run(T207)
del split, index
# Cool the oil
H203 = HXutility('H203', T=343.15)
# Screen out small fibers from sugar stream
S202 = VibratingScreen('S202', outs=('', 'fiber_fines'),
split=1 - np.array((0, 0, 0, 1, 0.002, 0, 0, 0, 0, 0.002, 0.002)),
order=psp2)
sugar = S202-0
S202.mesh_opening = 2
# Add distilled water to wash lipid
T208 = MixTank('T208')
T208.tau = 2
# Centrifuge out water
C203 = SplitCentrifuge_LLE('C203',
split=(0.99, 0.01),
order=('Lipid', 'Water'))
# Vacuum out water
F201 = SplitFlash('F201', T=347.15, P=2026.5,
split=(0.0001, 0.999), order=('Lipid', 'Water'))
lipid = F201.outs[1]
# %% Process specifications
# Specifications dependent on lipid cane flow rate
_enzyme_mass = enzyme.mass[[9, 12]]
_CaO_Water_mass = lime.mass[[7, 12]]
_H3PO4_Water_mass = H3PO4.mass[[1, 12]]
last_lipidcane_massnet = int(lipid_cane.massnet)
def correct_flows():
global last_lipidcane_massnet
massnet = lipid_cane.massnet
if int(massnet) != last_lipidcane_massnet:
# correct enzyme, lime, phosphoric acid, and imbibition water
_enzyme_mass[:] = 0.003 * massnet * np.array([0.1, 0.9])
_CaO_Water_mass[:] = 0.001 * massnet * np.array([0.046, 0.954])
_H3PO4_Water_mass[:] = 0.00025 * massnet
imbibition_water_mass.value = 0.25* massnet
last_lipidcane_massnet = int(massnet)
# Specifications within a system
def correct_lipid_wash_water():
oil_wash_water.mol[12] = H202.outs[0].mol[-2]*100/11
solids_index = Stream.indices(['Ash', 'CaO', 'Cellulose', 'Hemicellulose', 'Lignin'])
def correct_wash_water():
solids = solidsmol[solids_index].sum()
rvf_wash_water.mol[12] = 0.0574*solids
imbibition_water_mass = imbibition_water.mass.item(12)
# %% Pretreatment system set-up
(U103-0, enzyme)-T201
(T201-0, M201-0)-U201-1-S201-0-T202
(S201-1, imbibition_water)-M201
crushing_mill_recycle_sys = System('crushing_mill_recycle_sys',
network=(U201, S201, M201),
recycle=M201-0)
T202-0-H201
(H201-0, H3PO4)-T203-P201
(P201-0, lime-T204-0)-T205-P202
(P202-0, P203-0)-M202-H202
(H202-0, polymer)-T206-C201
(C201-1, rvf_wash_water)-C202-1-P203
clarification_recycle_sys = System('clarification_recycle_sys',
network=(M202, H202, T206, C201, C202, P203),
recycle=C202-1)
C201-0-T207-0-H203
(H203-0, oil_wash_water)-T208-C203-0-F201
T207-1-S202
pretreatment_sys = System('pretreatment_sys',
network=(U101, U102, U103,
correct_flows, T201,
crushing_mill_recycle_sys,
U202, T202, H201, T203,
P201, T204, T205, P202,
correct_wash_water,
clarification_recycle_sys,
T207, H203, S202,
correct_lipid_wash_water,
T208, C203, F201,))
solidsmol = P202.outs[0].mol
area_100 = System('area_100', network=(U101, U102, U103))
units = pretreatment_sys.units.copy()
for i in area_100.network: units.discard(i)
area_200_network = sorted(units, key=lambda x: x.ID)
area_200 = System('area_200', network=area_200_network)
|
py | 1a3a6717ca7e1af642c8b8fb464a273ad6a3b53a | # The content of this file was generated by IBM Cloud
# Do not modify it as it might get overridden
from ibmcloudenv import IBMCloudEnv
from . import service_manager
IBMCloudEnv.init()
from . import service_watson_visual_recognition
def initServices(app):
name, service = service_watson_visual_recognition.getService()
service_manager.set(name, service)
return
|
py | 1a3a677adcd43d4be160abebaa039cb99d1e1288 | from .features_generators import get_available_features_generators, get_features_generator
from .featurization import atom_features, bond_features, BatchMolGraph, get_atom_fdim, get_bond_fdim, mol2graph
from .utils import load_features, save_features
|
py | 1a3a68affce7ae047efbd07617f1df20443be2ec | #!/usr/bin/python3
class Rectangle():
number_of_instances = 0
print_symbol = "#"
def __init__(self, width=0, height=0):
self.height = height
self.width = width
Rectangle.number_of_instances += 1
def area(self):
return self.__height * self.__width
def perimeter(self):
if self.width == 0 or self.height == 0:
perimeter = 0
else:
perimeter = self.__height * 2 + self.__width * 2
return perimeter
@property
def width(self):
return self.__width
@width.setter
def width(self, value):
if not isinstance(value, int):
raise TypeError('width must be an integer')
if value < 0:
raise ValueError('width must be >= 0')
self.__width = value
@property
def height(self):
return self.__height
@height.setter
def height(self, value):
if not isinstance(value, int):
raise TypeError('height must be an integer')
if value < 0:
raise ValueError('height must be >= 0')
self.__height = value
def __str__(self):
rectangle_string = ""
if self.__width == 0 or self.__height == 0:
return rectangle_string
for row in range(self.__height):
for column in range(self.__width):
rectangle_string += str(self.print_symbol)
if row != self.__height - 1:
rectangle_string += "\n"
return rectangle_string
def __repr__(self):
return 'Rectangle({}, {})'.format(self.width, self.height)
def __del__(self):
print('Bye rectangle...')
Rectangle.number_of_instances -= 1
|
py | 1a3a6924f3a92a02fe8c3aa077c5ea7f75ae5aac | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
from mock import patch, Mock
from pyfakefs import fake_filesystem_unittest
from requests import Response
from cloudshell.rest.api import PackagingRestApiClient
from cloudshell.rest.exceptions import ShellNotFoundException, FeatureUnavailable
class TestPackagingRestApiClient(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
@patch('cloudshell.rest.api.urllib2.build_opener')
def test_login(self, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
# Act
PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
# Assert
self.assertTrue(mock_opener.open.called, 'open should be called')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.post')
def test_add_shell(self, mock_post, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.fs.CreateFile('work//NutShell.zip', contents='ZIP CONTENT')
mock_post.return_value = Response()
mock_post.return_value.status_code = 201 # Created
# Act
client.add_shell('work//NutShell.zip')
# Assert
self.assertTrue(mock_post.called, 'Post should be called')
self.assertEqual(mock_post.call_args[0][0], 'http://SERVER:9000/API/Shells')
self.assertEqual(mock_post.call_args[1]['headers']['Authorization'], 'Basic TOKEN')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.put')
def test_update_shell(self, mock_put, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.fs.CreateFile('work//NutShell.zip', contents='ZIP CONTENT')
mock_put.return_value = Response()
mock_put.return_value.status_code = 200 # Ok
# Act
client.update_shell('work//NutShell.zip')
# Assert
self.assertTrue(mock_put.called, 'Post should be called')
self.assertEqual(mock_put.call_args[0][0], 'http://SERVER:9000/API/Shells/NutShell')
self.assertEqual(mock_put.call_args[1]['headers']['Authorization'], 'Basic TOKEN')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.put')
def test_update_shell_name_given(self, mock_put, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.fs.CreateFile('work//NutShell.zip', contents='ZIP CONTENT')
mock_put.return_value = Response()
mock_put.return_value.status_code = 200 # Ok
# Act
client.update_shell('work//NutShell.zip', 'my_amazing_shell')
# Assert
self.assertTrue(mock_put.called, 'Put should be called')
self.assertEqual(mock_put.call_args[0][0], 'http://SERVER:9000/API/Shells/my_amazing_shell')
self.assertEqual(mock_put.call_args[1]['headers']['Authorization'], 'Basic TOKEN')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.put')
def test_update_shell_throws_shell_not_found_exception_when_404_code_returned(self, mock_put, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.fs.CreateFile('work//NutShell.zip', contents='ZIP CONTENT')
mock_put.return_value = Response()
mock_put.return_value.status_code = 404 # Not Found
# Act & Assert
self.assertRaises(ShellNotFoundException, client.update_shell, 'work//NutShell.zip')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.get')
def test_get_installed_standards(self, mock_get, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
mock_get.return_value = Response()
mock_get.return_value._content = "[]" # hack - empty response content
mock_get.return_value.status_code = 200 # Ok
# Act
client.get_installed_standards()
# Assert
self.assertTrue(mock_get.called, 'Get should be called')
self.assertEqual(mock_get.call_args[0][0], 'http://SERVER:9000/API/Standards')
self.assertEqual(mock_get.call_args[1]['headers']['Authorization'], 'Basic TOKEN')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.get')
def test_get_installed_standards_feature_not_install_error_thrown(self, mock_get, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
mock_get.return_value = Response()
mock_get.return_value.status_code = 404 # Not Found
# Act Assert
self.assertRaises(FeatureUnavailable, client.get_installed_standards)
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.get')
def test_get_shell_success(self, mock_get, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_get.return_value = Response()
mock_get.return_value._content = "[]" # hack - empty response content
mock_get.return_value.status_code = 200 # Ok
# Act
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
client.get_shell('shell')
# Assert
self.assertTrue(mock_get.called, 'Get should be called')
self.assertEqual(mock_get.call_args[0][0], 'http://SERVER:9000/API/Shells/shell')
self.assertEqual(mock_get.call_args[1]['headers']['Authorization'], 'Basic TOKEN')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.get')
def test_get_shell_feature_unavailable_raises_error(self, mock_get, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_get.return_value = Response()
mock_get.return_value.status_code = 404 # Not Found
# Act Assert
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.assertRaises(FeatureUnavailable, client.get_shell, 'shell')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.get')
def test_get_shell_feature_unavailable_http_status_405_raises_error(self, mock_get, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_get.return_value = Response()
mock_get.return_value.status_code = 405 # Method Not Allowed
# Act Assert
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.assertRaises(FeatureUnavailable, client.get_shell, 'shell')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.get')
def test_get_shell_shell_not_found_raises_error(self, mock_get, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_get.return_value = Response()
mock_get.return_value.status_code = 400 # Bad Request
# Act Assert
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.assertRaises(ShellNotFoundException, client.get_shell, 'shell')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.delete')
def test_delete_shell_success(self, mock_delete, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_delete.return_value = Response()
mock_delete.return_value._content = "[]" # hack - empty response content
mock_delete.return_value.status_code = 200 # Ok
# Act
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
client.delete_shell("shell")
# Assert
self.assertTrue(mock_delete.called, 'Delete should be called')
self.assertEqual(mock_delete.call_args[0][0], 'http://SERVER:9000/API/Shells/shell')
self.assertEqual(mock_delete.call_args[1]['headers']['Authorization'], 'Basic TOKEN')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.delete')
def test_delete_shell_feature_unavailable_raises_error(self, mock_delete, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_delete.return_value = Response()
mock_delete.return_value.status_code = 404 # Not Found
# Act Assert
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.assertRaises(FeatureUnavailable, client.delete_shell, 'shell')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.delete')
def test_delete_shell_feature_unavailable_http_status_405_raises_error(self, mock_delete, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_delete.return_value = Response()
mock_delete.return_value.status_code = 405 # Method Not Allowed
# Act Assert
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.assertRaises(FeatureUnavailable, client.delete_shell, 'shell')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.delete')
def test_delete_shell_shell_not_found_raises_error(self, mock_delete, mock_build_opener):
# Arrange
mock_url = Mock()
mock_url.read = Mock(return_value='TOKEN')
mock_opener = Mock()
mock_opener.open = Mock(return_value=mock_url)
mock_build_opener.return_value = mock_opener
mock_delete.return_value = Response()
mock_delete.return_value.status_code = 400 # Bad Request
# Act Assert
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
self.assertRaises(ShellNotFoundException, client.delete_shell, 'shell')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.post')
def test_export_package(self, mock_post, mock_build_opener):
# prepare
mock_build_opener.return_value.open.return_value.read.return_value = 'TOKEN'
prepared_response = Response()
prepared_response.status_code = 201
prepared_response._content = 'zip package content'
mock_post.return_value = prepared_response
# act
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
response = client.export_package(['topology_name'])
# verify
mock_post.assert_called_once_with(
'http://SERVER:9000/API/Package/ExportPackage',
headers={'Authorization': 'Basic TOKEN'},
data={'TopologyNames': ['topology_name']},
)
self.assertEqual(response, 'zip package content')
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.post')
def test_import_package(self, mock_post, mock_build_opener):
# prepare
mock_build_opener.return_value.open.return_value.read.return_value = 'TOKEN'
prepared_response = Response()
prepared_response.status_code = 201
prepared_response._content = json.dumps({'Success': True, 'ErrorMessage': None})
mock_post.return_value = prepared_response
package_zip = self.fs.create_file('work//package.zip', contents='ZIP CONTENT')
# act
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
client.import_package('work//package.zip')
# verify
mock_post.assert_called_once()
self.assertEqual(
'http://SERVER:9000/API/Package/ImportPackage',
mock_post.call_args[0][0],
)
self.assertEqual(
{'Authorization': 'Basic TOKEN'},
mock_post.call_args[1]['headers'],
)
file_object = mock_post.call_args[1]['files']['file'].get_object()
self.assertEqual(package_zip, file_object)
@patch('cloudshell.rest.api.urllib2.build_opener')
@patch('cloudshell.rest.api.post')
def test_import_package_error(self, mock_post, mock_build_opener):
# prepare
mock_build_opener.return_value.open.return_value.read.return_value = 'TOKEN'
prepared_response = Response()
prepared_response.status_code = 201
prepared_response._content = json.dumps(
{'Success': False, 'ErrorMessage': 'Fail to find Name script'})
mock_post.return_value = prepared_response
package_zip = self.fs.create_file('work//package.zip', contents='ZIP CONTENT')
# act
client = PackagingRestApiClient('SERVER', 9000, 'USER', 'PASS', 'Global')
# verify
with self.assertRaisesRegexp(Exception, 'Fail to find Name script'):
client.import_package('work//package.zip')
mock_post.assert_called_once()
self.assertEqual(
'http://SERVER:9000/API/Package/ImportPackage',
mock_post.call_args[0][0],
)
self.assertEqual(
{'Authorization': 'Basic TOKEN'},
mock_post.call_args[1]['headers'],
)
file_object = mock_post.call_args[1]['files']['file'].get_object()
self.assertEqual(package_zip, file_object)
|
py | 1a3a6a227f37332c56ff2f816430c7798446988b | # -*- coding: utf-8 -*-
"""
Interpolation
=============
Defines the classes and definitions for interpolating variables.
- :class:`colour.KernelInterpolator`: 1-D function generic interpolation with
arbitrary kernel.
- :class:`colour.NearestNeighbourInterpolator`: 1-D function
nearest-neighbour interpolation.
- :class:`colour.LinearInterpolator`: 1-D function linear interpolation.
- :class:`colour.SpragueInterpolator`: 1-D function fifth-order polynomial
interpolation using *Sprague (1880)* method.
- :class:`colour.CubicSplineInterpolator`: 1-D function cubic spline
interpolation.
- :class:`colour.PchipInterpolator`: 1-D function piecewise cubic Hermite
interpolation.
- :class:`colour.NullInterpolator`: 1-D function null interpolation.
- :func:`colour.lagrange_coefficients`: Computation of
*Lagrange Coefficients*.
- :func:`colour.algebra.table_interpolation_trilinear`: Trilinear
interpolation with table.
- :func:`colour.algebra.table_interpolation_tetrahedral`: Tetrahedral
interpolation with table.
- :attr:`colour.TABLE_INTERPOLATION_METHODS`: Supported table interpolation
methods.
- :func:`colour.table_interpolation`: Interpolation with table using given
method.
References
----------
- :cite:`Bourkeb` : Bourke, P. (n.d.). Trilinear Interpolation. Retrieved
January 13, 2018, from http://paulbourke.net/miscellaneous/interpolation/
- :cite:`Burger2009b` : Burger, W., & Burge, M. J. (2009). Principles of
Digital Image Processing. Springer London. doi:10.1007/978-1-84800-195-4
- :cite:`CIETC1-382005f` : CIE TC 1-38. (2005). 9.2.4 Method of
interpolation for uniformly spaced independent variable. In CIE 167:2005
Recommended Practice for Tabulating Spectral Data for Use in Colour
Computations (pp. 1-27). ISBN:978-3-901906-41-1
- :cite:`CIETC1-382005h` : CIE TC 1-38. (2005). Table V. Values of the
c-coefficients of Equ.s 6 and 7. In CIE 167:2005 Recommended Practice for
Tabulating Spectral Data for Use in Colour Computations (p. 19).
ISBN:978-3-901906-41-1
- :cite:`Fairman1985b` : Fairman, H. S. (1985). The calculation of weight
factors for tristimulus integration. Color Research & Application, 10(4),
199-203. doi:10.1002/col.5080100407
- :cite:`Kirk2006` : Kirk, R. (2006). Truelight Software Library 2.0.
Retrieved July 8, 2017, from
https://www.filmlight.ltd.uk/pdf/whitepapers/FL-TL-TN-0057-SoftwareLib.pdf
- :cite:`Westland2012h` : Westland, S., Ripamonti, C., & Cheung, V. (2012).
Interpolation Methods. In Computational Colour Science Using MATLAB (2nd
ed., pp. 29-37). ISBN:978-0-470-66569-5
- :cite:`Wikipedia2003a` : Wikipedia. (2003). Lagrange polynomial -
Definition. Retrieved January 20, 2016, from
https://en.wikipedia.org/wiki/Lagrange_polynomial#Definition
- :cite:`Wikipedia2005b` : Wikipedia. (2005). Lanczos resampling. Retrieved
October 14, 2017, from https://en.wikipedia.org/wiki/Lanczos_resampling
"""
import itertools
import numpy as np
import scipy.interpolate
from collections import OrderedDict
from collections.abc import Mapping
from functools import reduce
from colour.constants import DEFAULT_FLOAT_DTYPE, DEFAULT_INT_DTYPE
from colour.utilities import (
CaseInsensitiveMapping, as_float_array, as_float, closest_indexes,
interval, is_integer, is_numeric, runtime_warning, tsplit, validate_method)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'kernel_nearest_neighbour', 'kernel_linear', 'kernel_sinc',
'kernel_lanczos', 'kernel_cardinal_spline', 'KernelInterpolator',
'NearestNeighbourInterpolator', 'LinearInterpolator',
'SpragueInterpolator', 'CubicSplineInterpolator', 'PchipInterpolator',
'NullInterpolator', 'lagrange_coefficients',
'vertices_and_relative_coordinates', 'table_interpolation_trilinear',
'table_interpolation_tetrahedral', 'TABLE_INTERPOLATION_METHODS',
'table_interpolation'
]
def kernel_nearest_neighbour(x):
"""
Returns the *nearest-neighbour* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *nearest-neighbour* kernel.
Returns
-------
ndarray
The *nearest-neighbour* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_nearest_neighbour(np.linspace(0, 1, 10))
array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
"""
return np.where(np.abs(x) < 0.5, 1, 0)
def kernel_linear(x):
"""
Returns the *linear* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *linear* kernel.
Returns
-------
ndarray
The *linear* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_linear(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1. , 0.8888888..., 0.7777777..., \
0.6666666..., 0.5555555...,
0.4444444..., 0.3333333..., 0.2222222..., \
0.1111111..., 0. ])
"""
return np.where(np.abs(x) < 1, 1 - np.abs(x), 0)
def kernel_sinc(x, a=3):
"""
Returns the *sinc* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *sinc* kernel.
a : int, optional
Size of the *sinc* kernel.
Returns
-------
ndarray
The *sinc* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_sinc(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1.0000000...e+00, 9.7981553...e-01, 9.2072542...e-01,
8.2699334...e-01, 7.0531659...e-01, 5.6425327...e-01,
4.1349667...e-01, 2.6306440...e-01, 1.2247694...e-01,
3.8981718...e-17])
"""
    assert a >= 1, '"a" must be equal to or greater than 1!'
return np.where(np.abs(x) < a, np.sinc(x), 0)
def kernel_lanczos(x, a=3):
"""
Returns the *lanczos* kernel evaluated at given samples.
Parameters
----------
x : array_like
Samples at which to evaluate the *lanczos* kernel.
a : int, optional
Size of the *lanczos* kernel.
Returns
-------
ndarray
The *lanczos* kernel evaluated at given samples.
References
----------
:cite:`Wikipedia2005b`
Examples
--------
>>> kernel_lanczos(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1.0000000...e+00, 9.7760615...e-01, 9.1243770...e-01,
8.1030092...e-01, 6.8012706...e-01, 5.3295773...e-01,
3.8071690...e-01, 2.3492839...e-01, 1.0554054...e-01,
3.2237621...e-17])
"""
    assert a >= 1, '"a" must be equal to or greater than 1!'
return np.where(np.abs(x) < a, np.sinc(x) * np.sinc(x / a), 0)
def kernel_cardinal_spline(x, a=0.5, b=0.0):
"""
Returns the *cardinal spline* kernel evaluated at given samples.
Notable *cardinal spline* :math:`a` and :math:`b` parameterizations:
- *Catmull-Rom*: :math:`(a=0.5, b=0)`
- *Cubic B-Spline*: :math:`(a=0, b=1)`
    - *Mitchell-Netravali*: :math:`(a=\\cfrac{1}{3}, b=\\cfrac{1}{3})`
Parameters
----------
x : array_like
Samples at which to evaluate the *cardinal spline* kernel.
    a : numeric, optional
        :math:`a` control parameter.
    b : numeric, optional
        :math:`b` control parameter.
Returns
-------
ndarray
The *cardinal spline* kernel evaluated at given samples.
References
----------
:cite:`Burger2009b`
Examples
--------
>>> kernel_cardinal_spline(np.linspace(0, 1, 10)) # doctest: +ELLIPSIS
array([ 1. , 0.9711934..., 0.8930041..., \
0.7777777..., 0.6378600...,
0.4855967..., 0.3333333..., 0.1934156..., \
0.0781893..., 0. ])
"""
x_abs = np.abs(x)
y = np.where(
x_abs < 1,
(-6 * a - 9 * b + 12) * x_abs ** 3 + (6 * a + 12 * b - 18) * x_abs ** 2
- 2 * b + 6,
(-6 * a - b) * x_abs ** 3 + (30 * a + 6 * b) * x_abs ** 2 +
(-48 * a - 12 * b) * x_abs + 24 * a + 8 * b,
)
y[x_abs >= 2] = 0
return 1 / 6 * y
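# Illustrative note: combined with the "KernelInterpolator" class defined
# below, a *Catmull-Rom* interpolator can be obtained with
# "kernel=kernel_cardinal_spline, kernel_kwargs={'a': 0.5, 'b': 0.0}" and a
# *Cubic B-Spline* one with "kernel_kwargs={'a': 0.0, 'b': 1.0}".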
class KernelInterpolator:
"""
Kernel based interpolation of a 1-D function.
The reconstruction of a continuous signal can be described as a linear
convolution operation. Interpolation can be expressed as a convolution of
the given discrete function :math:`g(x)` with some continuous interpolation
kernel :math:`k(w)`:
:math:`\\hat{g}(w_0) = [k * g](w_0) = \
\\sum_{x=-\\infty}^{\\infty}k(w_0 - x)\\cdot g(x)`
Parameters
----------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
window : int, optional
Width of the window in samples on each side.
kernel : callable, optional
Kernel to use for interpolation.
kernel_kwargs : dict, optional
Arguments to use when calling the kernel.
padding_kwargs : dict, optional
Arguments to use when padding :math:`y` variable values with the
:func:`np.pad` definition.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.KernelInterpolator.x`
- :attr:`~colour.KernelInterpolator.y`
- :attr:`~colour.KernelInterpolator.window`
- :attr:`~colour.KernelInterpolator.kernel`
- :attr:`~colour.KernelInterpolator.kernel_kwargs`
- :attr:`~colour.KernelInterpolator.padding_kwargs`
Methods
-------
- :meth:`~colour.KernelInterpolator.__init__`
- :meth:`~colour.KernelInterpolator.__call__`
References
----------
:cite:`Burger2009b`, :cite:`Wikipedia2005b`
Examples
--------
Interpolating a single numeric variable:
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = KernelInterpolator(x, y)
>>> f(0.5) # doctest: +ELLIPSIS
6.9411400...
Interpolating an *array_like* variable:
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 6.1806208..., 8.0823848...])
    Using a different kernel, e.g. the *sinc* kernel:
>>> f = KernelInterpolator(x, y, kernel=kernel_sinc)
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 6.5147317..., 8.3965466...])
Using a different window size:
>>> f = KernelInterpolator(
... x,
... y,
... window=16,
... kernel=kernel_lanczos,
... kernel_kwargs={'a': 16})
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 5.3961792..., 5.6521093...])
"""
def __init__(self,
x,
y,
window=3,
kernel=kernel_lanczos,
kernel_kwargs=None,
padding_kwargs=None,
dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._x_p = None
self._y_p = None
self._x = None
self._y = None
self._window = None
self._padding_kwargs = {
'pad_width': (window, window),
'mode': 'reflect'
}
self._dtype = dtype
self.x = x
self.y = y
self.window = window
self.padding_kwargs = padding_kwargs
self._kernel = None
self.kernel = kernel
self._kernel_kwargs = {}
self.kernel_kwargs = kernel_kwargs
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
value_interval = interval(value)
if value_interval.size != 1:
runtime_warning('"x" independent variable is not uniform, '
'unpredictable results may occur!')
self._x = value
if self._window is not None:
self._x_p = np.pad(
self._x, (self._window, self._window),
'linear_ramp',
end_values=(
np.min(self._x) - self._window * value_interval[0],
np.max(self._x) + self._window * value_interval[0]))
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
self._y = value
if self._window is not None:
self._y_p = np.pad(self._y, **self._padding_kwargs)
@property
def window(self):
"""
Getter and setter property for the window.
Parameters
----------
value : int
Value to set the window with.
Returns
-------
int
Window.
"""
return self._window
@window.setter
def window(self, value):
"""
Setter for the **self.window** property.
"""
if value is not None:
assert is_integer(value), '"window" must be an integer!'
            assert value >= 1, (
                '"window" must be equal to or greater than 1!')
self._window = value
# Triggering "self._x_p" update.
if self._x is not None:
self.x = self._x
# Triggering "self._y_p" update.
if self._y is not None:
self.y = self._y
@property
def kernel(self):
"""
Getter and setter property for the kernel callable.
Parameters
----------
value : callable
Value to set the kernel callable.
Returns
-------
callable
Kernel callable.
"""
return self._kernel
@kernel.setter
def kernel(self, value):
"""
Setter for the **self.kernel** property.
"""
if value is not None:
assert hasattr(
value,
'__call__'), ('"{0}" attribute: "{1}" is not callable!'.format(
'kernel', value))
self._kernel = value
@property
def kernel_kwargs(self):
"""
Getter and setter property for the kernel call time arguments.
Parameters
----------
value : dict
Value to call the interpolation kernel with.
Returns
-------
dict
Kernel call time arguments.
"""
return self._kernel_kwargs
@kernel_kwargs.setter
def kernel_kwargs(self, value):
"""
Setter for the **self.kernel_kwargs** property.
"""
if value is not None:
assert isinstance(value, (dict, OrderedDict)), (
'"{0}" attribute: "{1}" type is not "dict" or "OrderedDict"!'
).format('kernel_kwargs', value)
self._kernel_kwargs = value
@property
def padding_kwargs(self):
"""
        Getter and setter property for the padding arguments.
        Parameters
        ----------
        value : dict
            Value to set the padding arguments with, passed to the
            :func:`np.pad` definition when padding the :math:`y` variable
            values.
        Returns
        -------
        dict
            Padding arguments.
"""
return self._padding_kwargs
@padding_kwargs.setter
def padding_kwargs(self, value):
"""
Setter for the **self.padding_kwargs** property.
"""
if value is not None:
assert isinstance(value, Mapping), (
'"{0}" attribute: "{1}" type is not a "Mapping" instance!'
).format('padding_kwargs', value)
self._padding_kwargs = value
# Triggering "self._y_p" update.
if self._y is not None:
self.y = self._y
def __call__(self, x):
"""
Evaluates the interpolator at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
float or ndarray
Interpolated value(s).
"""
x = np.atleast_1d(x).astype(self._dtype)
xi = as_float(self._evaluate(x))
return xi
def _evaluate(self, x):
"""
Performs the interpolator evaluation at given points.
Parameters
----------
x : ndarray
Points to evaluate the interpolant at.
Returns
-------
ndarray
Interpolated points values.
"""
self._validate_dimensions()
self._validate_interpolation_range(x)
x_interval = interval(self._x)[0]
x_f = np.floor(x / x_interval)
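        # For each evaluation point, gather the indexes of the
        # "2 * self._window" neighbouring padded samples that the kernel is
        # convolved with.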
windows = (x_f[:, np.newaxis] + np.arange(-self._window + 1,
self._window + 1))
clip_l = min(self._x_p) / x_interval
clip_h = max(self._x_p) / x_interval
windows = np.clip(windows, clip_l, clip_h) - clip_l
windows = np.around(windows).astype(DEFAULT_INT_DTYPE)
return np.sum(
self._y_p[windows] * self._kernel(
x[:, np.newaxis] / x_interval - windows -
min(self._x_p) / x_interval, **self._kernel_kwargs),
axis=-1)
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
class NearestNeighbourInterpolator(KernelInterpolator):
"""
A nearest-neighbour interpolator.
Other Parameters
----------------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
window : int, optional
Width of the window in samples on each side.
padding_kwargs : dict, optional
Arguments to use when padding :math:`y` variable values with the
:func:`np.pad` definition.
dtype : type
Data type used for internal conversions.
Methods
-------
- :meth:`~colour.NearestNeighbourInterpolator.__init__`
"""
def __init__(self, *args, **kwargs):
kwargs['kernel'] = kernel_nearest_neighbour
if 'kernel_kwargs' in kwargs:
del kwargs['kernel_kwargs']
super(NearestNeighbourInterpolator, self).__init__(*args, **kwargs)
class LinearInterpolator:
"""
Linearly interpolates a 1-D function.
Parameters
----------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.LinearInterpolator.x`
- :attr:`~colour.LinearInterpolator.y`
Methods
-------
- :meth:`~colour.LinearInterpolator.__init__`
- :meth:`~colour.LinearInterpolator.__call__`
Notes
-----
- This class is a wrapper around *numpy.interp* definition.
Examples
--------
Interpolating a single numeric variable:
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = LinearInterpolator(x, y)
>>> f(0.5) # doctest: +ELLIPSIS
7.64...
Interpolating an *array_like* variable:
>>> f([0.25, 0.75])
array([ 6.7825, 8.5075])
"""
def __init__(self, x, y, dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._x = None
self._y = None
self._dtype = dtype
self.x = x
self.y = y
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
self._x = value
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
self._y = value
def __call__(self, x):
"""
Evaluates the interpolating polynomial at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
float or ndarray
Interpolated value(s).
"""
x = np.atleast_1d(x).astype(self._dtype)
xi = as_float(self._evaluate(x))
return xi
def _evaluate(self, x):
"""
Performs the interpolating polynomial evaluation at given points.
Parameters
----------
x : ndarray
Points to evaluate the interpolant at.
Returns
-------
ndarray
Interpolated points values.
"""
self._validate_dimensions()
self._validate_interpolation_range(x)
return np.interp(x, self._x, self._y)
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
class SpragueInterpolator:
"""
    Constructs a fifth-order polynomial that passes through the given
    :math:`y` dependent variable values.
*Sprague (1880)* method is recommended by the *CIE* for interpolating
functions having a uniformly spaced independent variable.
Parameters
----------
x : array_like
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : array_like
Dependent and already known :math:`y` variable values to
interpolate.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.SpragueInterpolator.x`
- :attr:`~colour.SpragueInterpolator.y`
Methods
-------
- :meth:`~colour.SpragueInterpolator.__init__`
- :meth:`~colour.SpragueInterpolator.__call__`
Notes
-----
- The minimum number :math:`k` of data points required along the
interpolation axis is :math:`k=6`.
References
----------
:cite:`CIETC1-382005f`, :cite:`Westland2012h`
Examples
--------
Interpolating a single numeric variable:
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = SpragueInterpolator(x, y)
>>> f(0.5) # doctest: +ELLIPSIS
7.2185025...
Interpolating an *array_like* variable:
>>> f([0.25, 0.75]) # doctest: +ELLIPSIS
array([ 6.7295161..., 7.8140625...])
"""
SPRAGUE_C_COEFFICIENTS = np.array([
[884, -1960, 3033, -2648, 1080, -180],
[508, -540, 488, -367, 144, -24],
[-24, 144, -367, 488, -540, 508],
[-180, 1080, -2648, 3033, -1960, 884],
])
"""
Defines the coefficients used to generate extra points for boundaries
interpolation.
SPRAGUE_C_COEFFICIENTS : array_like, (4, 6)
References
----------
:cite:`CIETC1-382005h`
"""
def __init__(self, x, y, dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._xp = None
self._yp = None
self._x = None
self._y = None
self._dtype = dtype
self.x = x
self.y = y
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
value_interval = interval(value)[0]
xp1 = value[0] - value_interval * 2
xp2 = value[0] - value_interval
xp3 = value[-1] + value_interval
xp4 = value[-1] + value_interval * 2
self._xp = np.concatenate(((xp1, xp2), value, (xp3, xp4)))
self._x = value
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
assert len(value) >= 6, (
'"y" dependent variable values count must be equal to or '
'greater than 6!')
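            # Compute two extrapolated points at each end of the domain using
            # the CIE 167:2005 c-coefficients so that the fifth-order
            # polynomial can also be evaluated near the boundaries.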
yp1 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[0],
np.array(value[0:6]).reshape([6, 1]))) / 209)[0]
yp2 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[1],
np.array(value[0:6]).reshape([6, 1]))) / 209)[0]
yp3 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[2],
np.array(value[-6:]).reshape([6, 1]))) / 209)[0]
yp4 = np.ravel(
(np.dot(self.SPRAGUE_C_COEFFICIENTS[3],
np.array(value[-6:]).reshape([6, 1]))) / 209)[0]
self._yp = np.concatenate(((yp1, yp2), value, (yp3, yp4)))
self._y = value
def __call__(self, x):
"""
Evaluates the interpolating polynomial at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
numeric or ndarray
Interpolated value(s).
"""
return self._evaluate(x)
def _evaluate(self, x):
"""
Performs the interpolating polynomial evaluation at given point.
Parameters
----------
x : numeric
Point to evaluate the interpolant at.
Returns
-------
float
Interpolated point values.
"""
x = as_float_array(x)
self._validate_dimensions()
self._validate_interpolation_range(x)
i = np.searchsorted(self._xp, x) - 1
X = (x - self._xp[i]) / (self._xp[i + 1] - self._xp[i])
r = self._yp
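        # Coefficients of the fifth-order interpolating polynomial, computed
        # from the six samples surrounding the evaluation point as per the
        # *Sprague (1880)* method described in CIE 167:2005.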
a0p = r[i]
a1p = ((2 * r[i - 2] - 16 * r[i - 1] + 16 * r[i + 1] -
2 * r[i + 2]) / 24) # yapf: disable
a2p = ((-r[i - 2] + 16 * r[i - 1] - 30 * r[i] + 16 * r[i + 1] -
r[i + 2]) / 24) # yapf: disable
a3p = ((-9 * r[i - 2] + 39 * r[i - 1] - 70 * r[i] + 66 * r[i + 1] -
33 * r[i + 2] + 7 * r[i + 3]) / 24)
a4p = ((13 * r[i - 2] - 64 * r[i - 1] + 126 * r[i] - 124 * r[i + 1] +
61 * r[i + 2] - 12 * r[i + 3]) / 24)
a5p = ((-5 * r[i - 2] + 25 * r[i - 1] - 50 * r[i] + 50 * r[i + 1] -
25 * r[i + 2] + 5 * r[i + 3]) / 24)
y = (a0p + a1p * X + a2p * X ** 2 + a3p * X ** 3 + a4p * X ** 4 +
a5p * X ** 5)
return y
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
class CubicSplineInterpolator(scipy.interpolate.interp1d):
"""
Interpolates a 1-D function using cubic spline interpolation.
Methods
-------
- :meth:`~colour.CubicSplineInterpolator.__init__`
Notes
-----
- This class is a wrapper around *scipy.interpolate.interp1d* class.
"""
def __init__(self, *args, **kwargs):
super(CubicSplineInterpolator, self).__init__(
kind='cubic', *args, **kwargs)
class PchipInterpolator(scipy.interpolate.PchipInterpolator):
"""
Interpolates a 1-D function using Piecewise Cubic Hermite Interpolating
Polynomial interpolation.
Attributes
----------
- :attr:`~colour.PchipInterpolator.y`
Methods
-------
- :meth:`~colour.PchipInterpolator.__init__`
Notes
-----
- This class is a wrapper around *scipy.interpolate.PchipInterpolator*
class.
"""
def __init__(self, x, y, *args, **kwargs):
super(PchipInterpolator, self).__init__(x, y, *args, **kwargs)
self._y = y
@property
def y(self):
"""
Getter property for the dependent and already known :math:`y`
variable.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
class NullInterpolator:
"""
    Performs 1-D function null interpolation, i.e. a call within the given
    tolerances of a known :math:`x` value returns the corresponding existing
    :math:`y` variable value, and ``default`` otherwise.
Parameters
----------
x : ndarray
Independent :math:`x` variable values corresponding with :math:`y`
variable.
y : ndarray
Dependent and already known :math:`y` variable values to
interpolate.
absolute_tolerance : numeric, optional
Absolute tolerance.
relative_tolerance : numeric, optional
Relative tolerance.
default : numeric, optional
Default value for interpolation outside tolerances.
dtype : type
Data type used for internal conversions.
Attributes
----------
- :attr:`~colour.NullInterpolator.x`
- :attr:`~colour.NullInterpolator.y`
- :attr:`~colour.NullInterpolator.relative_tolerance`
- :attr:`~colour.NullInterpolator.absolute_tolerance`
- :attr:`~colour.NullInterpolator.default`
Methods
-------
- :meth:`~colour.NullInterpolator.__init__`
- :meth:`~colour.NullInterpolator.__call__`
Examples
--------
>>> y = np.array([5.9200, 9.3700, 10.8135, 4.5100,
... 69.5900, 27.8007, 86.0500])
>>> x = np.arange(len(y))
>>> f = NullInterpolator(x, y)
>>> f(0.5)
nan
>>> f(1.0) # doctest: +ELLIPSIS
9.3699999...
>>> f = NullInterpolator(x, y, absolute_tolerance=0.01)
>>> f(1.01) # doctest: +ELLIPSIS
9.3699999...
"""
def __init__(self,
x,
y,
absolute_tolerance=10e-7,
relative_tolerance=10e-7,
default=np.nan,
dtype=None):
if dtype is None:
dtype = DEFAULT_FLOAT_DTYPE
self._x = None
self._y = None
self._absolute_tolerance = None
self._relative_tolerance = None
self._default = None
self._dtype = dtype
self.x = x
self.y = y
self.absolute_tolerance = absolute_tolerance
self.relative_tolerance = relative_tolerance
self.default = default
self._validate_dimensions()
@property
def x(self):
"""
Getter and setter property for the independent :math:`x` variable.
Parameters
----------
value : array_like
Value to set the independent :math:`x` variable with.
Returns
-------
array_like
Independent :math:`x` variable.
"""
return self._x
@x.setter
def x(self, value):
"""
Setter for the **self.x** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"x" independent variable must have exactly one dimension!')
self._x = value
@property
def y(self):
"""
Getter and setter property for the dependent and already known
:math:`y` variable.
Parameters
----------
value : array_like
Value to set the dependent and already known :math:`y` variable
with.
Returns
-------
array_like
Dependent and already known :math:`y` variable.
"""
return self._y
@y.setter
def y(self, value):
"""
Setter for the **self.y** property.
"""
if value is not None:
value = np.atleast_1d(value).astype(self._dtype)
assert value.ndim == 1, (
'"y" dependent variable must have exactly one dimension!')
self._y = value
@property
def relative_tolerance(self):
"""
Getter and setter property for the relative tolerance.
Parameters
----------
value : numeric
Value to set the relative tolerance with.
Returns
-------
numeric
Relative tolerance.
"""
return self._relative_tolerance
@relative_tolerance.setter
def relative_tolerance(self, value):
"""
Setter for the **self.relative_tolerance** property.
"""
if value is not None:
assert is_numeric(value), (
'"relative_tolerance" variable must be a "numeric"!')
self._relative_tolerance = value
@property
def absolute_tolerance(self):
"""
Getter and setter property for the absolute tolerance.
Parameters
----------
value : numeric
Value to set the absolute tolerance with.
Returns
-------
numeric
Absolute tolerance.
"""
return self._absolute_tolerance
@absolute_tolerance.setter
def absolute_tolerance(self, value):
"""
Setter for the **self.absolute_tolerance** property.
"""
if value is not None:
assert is_numeric(value), (
'"absolute_tolerance" variable must be a "numeric"!')
self._absolute_tolerance = value
@property
def default(self):
"""
Getter and setter property for the default value for call outside
tolerances.
Parameters
----------
value : numeric
Value to set the default value with.
Returns
-------
numeric
Default value.
"""
return self._default
@default.setter
def default(self, value):
"""
Setter for the **self.default** property.
"""
if value is not None:
assert is_numeric(value), (
'"default" variable must be a "numeric"!')
self._default = value
def __call__(self, x):
"""
Evaluates the interpolator at given point(s).
Parameters
----------
x : numeric or array_like
Point(s) to evaluate the interpolant at.
Returns
-------
float or ndarray
Interpolated value(s).
"""
x = np.atleast_1d(x).astype(self._dtype)
xi = as_float(self._evaluate(x))
return xi
def _evaluate(self, x):
"""
Performs the interpolator evaluation at given points.
Parameters
----------
x : ndarray
Points to evaluate the interpolant at.
Returns
-------
ndarray
Interpolated points values.
"""
self._validate_dimensions()
self._validate_interpolation_range(x)
indexes = closest_indexes(self._x, x)
values = self._y[indexes]
        # Queries that are not within the given tolerances of a known "x"
        # value are replaced with the default value.
        values[~np.isclose(
            self._x[indexes],
            x,
            rtol=self._relative_tolerance,
            atol=self._absolute_tolerance)] = self._default
return values
def _validate_dimensions(self):
"""
Validates variables dimensions to be the same.
"""
if len(self._x) != len(self._y):
raise ValueError(
('"x" independent and "y" dependent variables have different '
'dimensions: "{0}", "{1}"').format(
len(self._x), len(self._y)))
def _validate_interpolation_range(self, x):
"""
Validates given point to be in interpolation range.
"""
below_interpolation_range = x < self._x[0]
above_interpolation_range = x > self._x[-1]
if below_interpolation_range.any():
raise ValueError('"{0}" is below interpolation range.'.format(x))
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
def lagrange_coefficients(r, n=4):
"""
Computes the *Lagrange Coefficients* at given point :math:`r` for degree
:math:`n`.
Parameters
----------
r : numeric
Point to get the *Lagrange Coefficients* at.
n : int, optional
Degree of the *Lagrange Coefficients* being calculated.
Returns
-------
ndarray
References
----------
:cite:`Fairman1985b`, :cite:`Wikipedia2003a`
Examples
--------
>>> lagrange_coefficients(0.1)
array([ 0.8265, 0.2755, -0.1305, 0.0285])
"""
r_i = np.arange(n)
L_n = []
for j in range(len(r_i)):
basis = [(r - r_i[i]) / (r_i[j] - r_i[i]) for i in range(len(r_i))
if i != j]
L_n.append(reduce(lambda x, y: x * y, basis)) # noqa
return np.array(L_n)
def vertices_and_relative_coordinates(V_xyz, table):
"""
    Computes the vertices coordinates and index-relative :math:`V_{xyzr}`
    coordinates from given :math:`V_{xyz}` values and interpolation table.
Parameters
----------
V_xyz : array_like
        :math:`V_{xyz}` values to transform to index-relative
        :math:`V_{xyzr}` values.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
Returns
-------
tuple
        Vertices coordinates and index-relative :math:`V_{xyzr}` coordinates.
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> vertices, V_xyzr = vertices_and_relative_coordinates(V_xyz, table)
>>> print(vertices)
[[[ 0.833311 0.833311 0.833311]
[ 0.349416 0.657749 0.041083]
[ 0.797894 -0.035412 -0.035412]]
<BLANKLINE>
[[ 0.833311 0.833311 1.249963]
[ 0.340435 0.743769 0.340435]
[ 0.752767 -0.028479 0.362144]]
<BLANKLINE>
[[ 0.707102 1.110435 0.707102]
[ 0.344991 1.050213 -0.007621]
[ 0.633333 0.316667 0. ]]
<BLANKLINE>
[[ 0.519714 0.744729 0.744729]
[ 0.314204 1.120871 0.314204]
[ 0.732278 0.315626 0.315626]]
<BLANKLINE>
[[ 1.06561 0.648957 0.648957]
[ 0.589195 0.589195 0.139164]
[ 1.196841 -0.053117 -0.053117]]
<BLANKLINE>
[[ 1. 0.666667 1. ]
[ 0.594601 0.594601 0.369586]
[ 1.162588 -0.050372 0.353948]]
<BLANKLINE>
[[ 0.894606 0.894606 0.66959 ]
[ 0.663432 0.930188 0.12992 ]
[ 1.038439 0.310899 -0.05287 ]]
<BLANKLINE>
[[ 1.249966 1.249966 1.249966]
[ 0.682749 0.991082 0.374416]
[ 1.131225 0.29792 0.29792 ]]]
>>> print(V_xyzr) # doctest: +ELLIPSIS
[[ 0.9010895... 0.1444479... 0.9288233...]
[ 0.6416967... 0.0931864... 0.0186907...]
[ 0.9180530... 0.6482684... 0.7589470...]]
"""
V_xyz = np.clip(V_xyz, 0, 1)
table = as_float_array(table)
V_xyz = np.reshape(V_xyz, (-1, 3))
# Indexes computations where ``i_m`` is the maximum index value on a given
# table axis, ``i_f`` and ``i_c`` respectively the floor and ceiling
# indexes encompassing a given V_xyz value.
i_m = np.array(table.shape[0:-1]) - 1
i_f = np.floor(V_xyz * i_m).astype(DEFAULT_INT_DTYPE)
i_c = np.clip(i_f + 1, 0, i_m)
# Relative to indexes ``V_xyz`` values.
V_xyzr = i_m * V_xyz - i_f
i_f_c = i_f, i_c
# Vertices computations by indexing ``table`` with the ``i_f`` and ``i_c``
# indexes. 8 encompassing vertices are computed for a given V_xyz value
# forming a cube around it:
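    # "itertools.product" enumerates the corner indexes in (0, 0, 0),
    # (0, 0, 1), ..., (1, 1, 1) order, i.e. V000, V001, ..., V111 as unpacked
    # by the tetrahedral interpolation method.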
vertices = np.array([
table[i_f_c[i[0]][..., 0], i_f_c[i[1]][..., 1], i_f_c[i[2]][..., 2]]
for i in itertools.product(*zip([0, 0, 0], [1, 1, 1]))
])
return vertices, V_xyzr
def table_interpolation_trilinear(V_xyz, table):
"""
Performs trilinear interpolation of given :math:`V_{xyz}` values using
given interpolation table.
Parameters
----------
V_xyz : array_like
:math:`V_{xyz}` values to interpolate.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
Returns
-------
ndarray
Interpolated :math:`V_{xyz}` values.
References
----------
:cite:`Bourkeb`
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> table_interpolation_trilinear(V_xyz, table) # doctest: +ELLIPSIS
array([[ 1.0120664..., 0.7539146..., 1.0228540...],
[ 0.5075794..., 0.6479459..., 0.1066404...],
[ 1.0976519..., 0.1785998..., 0.2299897...]])
"""
V_xyz = as_float_array(V_xyz)
vertices, V_xyzr = vertices_and_relative_coordinates(V_xyz, table)
vertices = np.moveaxis(vertices, 0, 1)
x, y, z = [f[:, np.newaxis] for f in tsplit(V_xyzr)]
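    # The eight weights are the trilinear blending factors of the cube
    # vertices, ordered consistently with the output of
    # "vertices_and_relative_coordinates", and summing to one.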
weights = np.moveaxis(
np.transpose(
[(1 - x) * (1 - y) * (1 - z), (1 - x) * (1 - y) * z,
(1 - x) * y * (1 - z), (1 - x) * y * z, x * (1 - y) * (1 - z),
x * (1 - y) * z, x * y * (1 - z), x * y * z]), 0, -1)
xyz_o = np.reshape(np.sum(vertices * weights, 1), V_xyz.shape)
return xyz_o
def table_interpolation_tetrahedral(V_xyz, table):
"""
Performs tetrahedral interpolation of given :math:`V_{xyz}` values using
given interpolation table.
Parameters
----------
V_xyz : array_like
:math:`V_{xyz}` values to interpolate.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
Returns
-------
ndarray
Interpolated :math:`V_{xyz}` values.
References
----------
:cite:`Kirk2006`
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> table_interpolation_tetrahedral(V_xyz, table) # doctest: +ELLIPSIS
array([[ 1.0196197..., 0.7674062..., 1.0311751...],
[ 0.5105603..., 0.6466722..., 0.1077296...],
[ 1.1178206..., 0.1762039..., 0.2209534...]])
"""
V_xyz = as_float_array(V_xyz)
vertices, V_xyzr = vertices_and_relative_coordinates(V_xyz, table)
vertices = np.moveaxis(vertices, 0, -1)
V000, V001, V010, V011, V100, V101, V110, V111 = tsplit(vertices)
x, y, z = [r[:, np.newaxis] for r in tsplit(V_xyzr)]
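    # The cell surrounding each sample is split into six tetrahedra according
    # to the ordering of the relative coordinates (x, y, z); the matching
    # condition below selects the tetrahedron containing the sample and blends
    # the four vertices spanning it.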
xyz_o = np.select([
np.logical_and(x > y, y > z),
np.logical_and(x > y, x > z),
np.logical_and(x > y, np.logical_and(y <= z, x <= z)),
np.logical_and(x <= y, z > y),
np.logical_and(x <= y, z > x),
np.logical_and(x <= y, np.logical_and(z <= y, z <= x)),
], [
(1 - x) * V000 + (x - y) * V100 + (y - z) * V110 + z * V111,
(1 - x) * V000 + (x - z) * V100 + (z - y) * V101 + y * V111,
(1 - z) * V000 + (z - x) * V001 + (x - y) * V101 + y * V111,
(1 - z) * V000 + (z - y) * V001 + (y - x) * V011 + x * V111,
(1 - y) * V000 + (y - z) * V010 + (z - x) * V011 + x * V111,
(1 - y) * V000 + (y - x) * V010 + (x - z) * V110 + z * V111,
])
xyz_o = np.reshape(xyz_o, V_xyz.shape)
return xyz_o
TABLE_INTERPOLATION_METHODS = CaseInsensitiveMapping({
'Trilinear': table_interpolation_trilinear,
'Tetrahedral': table_interpolation_tetrahedral,
})
TABLE_INTERPOLATION_METHODS.__doc__ = """
Supported table interpolation methods.
References
----------
:cite:`Bourkeb`, :cite:`Kirk2006`
TABLE_INTERPOLATION_METHODS : CaseInsensitiveMapping
**{'Trilinear', 'Tetrahedral'}**
"""
def table_interpolation(V_xyz, table, method='Trilinear'):
"""
Performs interpolation of given :math:`V_{xyz}` values using given
interpolation table.
Parameters
----------
V_xyz : array_like
:math:`V_{xyz}` values to interpolate.
table : array_like
4-Dimensional (NxNxNx3) interpolation table.
method : unicode, optional
**{'Trilinear', 'Tetrahedral'}**,
Interpolation method.
Returns
-------
ndarray
Interpolated :math:`V_{xyz}` values.
References
----------
:cite:`Bourkeb`, :cite:`Kirk2006`
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... os.path.dirname(__file__),'..', 'io', 'luts', 'tests', 'resources',
... 'iridas_cube', 'Colour_Correct.cube')
>>> LUT = colour.read_LUT(path)
>>> table = LUT.table
>>> prng = np.random.RandomState(4)
>>> V_xyz = colour.algebra.random_triplet_generator(3, random_state=prng)
>>> print(V_xyz) # doctest: +ELLIPSIS
[[ 0.9670298... 0.7148159... 0.9762744...]
[ 0.5472322... 0.6977288... 0.0062302...]
[ 0.9726843... 0.2160895... 0.2529823...]]
>>> table_interpolation(V_xyz, table) # doctest: +ELLIPSIS
array([[ 1.0120664..., 0.7539146..., 1.0228540...],
[ 0.5075794..., 0.6479459..., 0.1066404...],
[ 1.0976519..., 0.1785998..., 0.2299897...]])
>>> table_interpolation(V_xyz, table, method='Tetrahedral')
... # doctest: +ELLIPSIS
array([[ 1.0196197..., 0.7674062..., 1.0311751...],
[ 0.5105603..., 0.6466722..., 0.1077296...],
[ 1.1178206..., 0.1762039..., 0.2209534...]])
"""
method = validate_method(method, TABLE_INTERPOLATION_METHODS)
return TABLE_INTERPOLATION_METHODS[method](V_xyz, table)
|
py | 1a3a6aac98d011418212f0e281fffa03404fbe35 | from rest_framework import serializers as rfs
from .models import UserFeedback
__all__ = [
"UserFeedbackSerializer",
]
class UserFeedbackSerializer(rfs.ModelSerializer):
class Meta:
model = UserFeedback
fields = rfs.ALL_FIELDS
user = rfs.HiddenField(default=rfs.CurrentUserDefault())
appname = rfs.HiddenField(default=UserFeedback.AppChoices.CURRENTAPP)
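    # Both hidden fields are populated server-side (requesting user and
    # current application) and are never read from the client payload.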
def create(self, validated_data: dict) -> UserFeedback:
"""
Create :class:`~.models.UserFeedback` model.
"""
feedback = super().create(validated_data)
# below function does not raise any exception
# because we should not respond with error
# if the slack API fails since it's an internal use-case
feedback.send_to_slack()
return feedback
|
py | 1a3a6ae831fd2a21ee2d4f292781b5f8376b9453 | import os
import numpy as np
from shapely import geometry, affinity
from pyquaternion import Quaternion
from shapely.geometry import Point
from nuscenes.eval.detection.utils import category_to_detection_name
from nuscenes.eval.detection.constants import DETECTION_NAMES
from nuscenes.utils.data_classes import LidarPointCloud
import logging
from src.data.utils import transform_polygon, render_polygon, transform
import cv2
import time
CAMERA_NAMES = ['CAM_FRONT']
# CAMERA_NAMES = ['CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT',
# 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', 'CAM_BACK']
NUSCENES_CLASS_NAMES = [
'drivable_area', 'ped_crossing', 'walkway', 'carpark', 'car', 'truck',
'bus', 'trailer', 'construction_vehicle', 'pedestrian', 'motorcycle',
'bicycle', 'traffic_cone', 'barrier'
]
# NUSCENES_CLASS_NAMES = [
# 'drivable_area', 'ped_crossing', 'walkway', 'carpark']
STATIC_CLASSES = ['drivable_area', 'ped_crossing', 'walkway', 'carpark_area']
LOCATIONS = ['boston-seaport', 'singapore-onenorth', 'singapore-queenstown',
'singapore-hollandvillage']
def iterate_samples(nuscenes, start_token):
sample_token = start_token
while sample_token != '':
sample = nuscenes.get('sample', sample_token)
yield sample
sample_token = sample['next']
def get_map_masks(nuscenes, map_data, sample_data, extents, resolution):
# Render each layer sequentially
layers = [get_layer_mask(nuscenes, polys, sample_data, extents,
resolution) for layer, polys in map_data.items()]
return np.stack(layers, axis=0)
def get_layer_mask(nuscenes, polygons, sample_data, extents, resolution):
# Get the 2D affine transform from bev coords to map coords
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
inv_tfm = np.linalg.inv(tfm)
# Create a patch representing the birds-eye-view region in map coordinates
map_patch = geometry.box(*extents)
map_patch = transform_polygon(map_patch, tfm)
# Initialise the map mask
x1, z1, x2, z2 = extents
mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
dtype=np.uint8)
# Find all polygons which intersect with the area of interest
for polygon in polygons.query(map_patch):
polygon = polygon.intersection(map_patch)
# Transform into map coordinates
polygon = transform_polygon(polygon, inv_tfm)
# Render the polygon to the mask
render_shapely_polygon(mask, polygon, extents, resolution)
    return mask.astype(bool)
def get_object_masks(nuscenes, sample_data, extents, resolution):
# Initialize object masks
nclass = len(DETECTION_NAMES) + 1
grid_width = int((extents[2] - extents[0]) / resolution)
grid_height = int((extents[3] - extents[1]) / resolution)
masks = np.zeros((nclass, grid_height, grid_width), dtype=np.uint8)
# Get the 2D affine transform from bev coords to map coords
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
inv_tfm = np.linalg.inv(tfm)
obj_list=[]
for box in nuscenes.get_boxes(sample_data['token']):
# Get the index of the class
det_name = category_to_detection_name(box.name)
if det_name not in DETECTION_NAMES:
class_id = -1
else:
class_id = DETECTION_NAMES.index(det_name)
# Get bounding box coordinates in the grid coordinate frame
bbox = box.bottom_corners()[:2]
local_bbox = np.dot(inv_tfm[:2, :2], bbox).T + inv_tfm[:2, 2]
temp_ar = np.squeeze(np.zeros((9,1),np.float32))
temp_ar[:8] = np.float32(local_bbox).flatten()
temp_ar[-1] = class_id
obj_list.append(np.copy(temp_ar))
# Render the rotated bounding box to the mask
render_polygon(masks[class_id], local_bbox, extents, resolution)
return np.array(obj_list), masks
#
#def get_object_masks(nuscenes, sample_data, extents, resolution):
#
# # Initialize object masks
# nclass = len(DETECTION_NAMES) + 2
# grid_width = int((extents[2] - extents[0]) / resolution)
# grid_height = int((extents[3] - extents[1]) / resolution)
# masks = np.zeros((nclass, grid_height, grid_width), dtype=np.uint8)
#
# # Get the 2D affine transform from bev coords to map coords
# tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
# inv_tfm = np.linalg.inv(tfm)
#
# for box in nuscenes.get_boxes(sample_data['token']):
#
# # Get the index of the class
# det_name = category_to_detection_name(box.name)
# if det_name not in DETECTION_NAMES:
# class_id = -1
# else:
# class_id = DETECTION_NAMES.index(det_name)
#
# # Get bounding box coordinates in the grid coordinate frame
# bbox = box.bottom_corners()[:2]
# local_bbox = np.dot(inv_tfm[:2, :2], bbox).T + inv_tfm[:2, 2]
#
# # Render the rotated bounding box to the mask
# render_polygon(masks[class_id], local_bbox, extents, resolution)
#
# return masks.astype(np.bool)
def get_sensor_transform(nuscenes, sample_data):
# Load sensor transform data
sensor = nuscenes.get(
'calibrated_sensor', sample_data['calibrated_sensor_token'])
sensor_tfm = make_transform_matrix(sensor)
# Load ego pose data
pose = nuscenes.get('ego_pose', sample_data['ego_pose_token'])
pose_tfm = make_transform_matrix(pose)
return np.dot(pose_tfm, sensor_tfm)
def load_point_cloud(nuscenes, sample_data):
# Load point cloud
lidar_path = os.path.join(nuscenes.dataroot, sample_data['filename'])
pcl = LidarPointCloud.from_file(lidar_path)
return pcl.points[:3, :].T
def make_transform_matrix(record):
"""
Create a 4x4 transform matrix from a calibrated_sensor or ego_pose record
"""
my_transform = np.eye(4)
my_transform[:3, :3] = Quaternion(record['rotation']).rotation_matrix
my_transform[:3, 3] = np.array(record['translation'])
return my_transform
def render_shapely_polygon(mask, polygon, extents, resolution):
if polygon.geom_type == 'Polygon':
# Render exteriors
# logging.error('POLYGON ' + str(polygon.exterior.coords))
# time.sleep(1)
render_polygon(mask, polygon.exterior.coords, extents, resolution, 1)
# Render interiors
for hole in polygon.interiors:
render_polygon(mask, hole.coords, extents, resolution, 0)
# Handle the case of compound shapes
else:
for poly in polygon:
render_shapely_polygon(mask, poly, extents, resolution)
def render_point(mask, polygon, extents, resolution, value):
    # Convert the point coordinates from map space to grid indexes and
    # rasterise them into the mask with the given value.
    polygon = (np.array(polygon) - np.array(extents[:2])) / resolution
    polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
    cv2.fillConvexPoly(mask, polygon, value)
#def render_centerlines(map_api,resolution_meters=0.5,
# figsize: Union[None, float, Tuple[float, float]] = None,
# bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]:
# """
# Render the centerlines of all lanes and lane connectors.
# :param resolution_meters: How finely to discretize the lane. Smaller values ensure curved
# lanes are properly represented.
# :param figsize: Size of the figure.
# :param bitmap: Optional BitMap object to render below the other map layers.
# """
# # Discretize all lanes and lane connectors.
# pose_lists = map_api.discretize_centerlines(resolution_meters)
#
#
#
# # Render connectivity lines.
# fig = plt.figure(figsize=self._get_figsize(figsize))
# ax = fig.add_axes([0, 0, 1, 1 / self.canvas_aspect_ratio])
#
# if bitmap is not None:
# bitmap.render(self.map_api.canvas_edge, ax)
#
# for pose_list in pose_lists:
# if len(pose_list) > 0:
# plt.plot(pose_list[:, 0], pose_list[:, 1])
#
# return fig, ax
def view_points(points, view, normalize=True):
"""
This is a helper class that maps 3d points to a 2d plane. It can be used to implement both perspective and
orthographic projections. It first applies the dot product between the points and the view. By convention,
the view should be such that the data is projected onto the first 2 axis. It then optionally applies a
normalization along the third dimension.
For a perspective projection the view should be a 3x3 camera matrix, and normalize=True
For an orthographic projection with translation the view is a 3x4 matrix and normalize=False
For an orthographic projection without translation the view is a 3x3 matrix (optionally 3x4 with last columns
all zeros) and normalize=False
:param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.
:param view: <np.float32: n, n>. Defines an arbitrary projection (n <= 4).
The projection should be such that the corners are projected onto the first 2 axis.
:param normalize: Whether to normalize the remaining coordinate (along the third axis).
:return: <np.float32: 3, n>. Mapped point. If normalize=False, the third coordinate is the height.
"""
assert view.shape[0] <= 4
assert view.shape[1] <= 4
assert points.shape[0] == 3
viewpad = np.eye(4)
viewpad[:view.shape[0], :view.shape[1]] = view
nbr_points = points.shape[1]
# Do operation in homogenous coordinates.
points = np.concatenate((points, np.ones((1, nbr_points))))
points = np.dot(viewpad, points)
points = points[:3, :]
norm_const = points[2:3, :]
if normalize:
points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)
return points,norm_const
# def check_visible(polygon, vis_mask):
def get_centerlines(nuscenes, new_ar, sample_data, extents, resolution, vis_mask, already_found=None):
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
my_thresh = 100
my_x = tfm[0,-1]
my_y = tfm[1,-1]
road_ind_ar = np.arange(len(new_ar))
selecteds = np.abs(new_ar[:,:,0] - my_x) + np.abs(new_ar[:,:,1] - my_y) < my_thresh
selected_lines = np.any(selecteds, axis=-1)
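    # Keep only the centerlines that have at least one point within
    # "my_thresh" (Manhattan distance) of the current ego position.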
logging.error('FOUND ' + str(np.sum(selected_lines)) + ' LINES')
my_road_ar = road_ind_ar[selected_lines]
my_lines = new_ar[selected_lines]
my_sel_points = selecteds[selected_lines]
inv_tfm = np.linalg.inv(tfm)
# Create a patch representing the birds-eye-view region in map coordinates
map_patch = geometry.box(*extents)
map_patch = transform_polygon(map_patch, tfm)
# Initialise the map mask
x1, z1, x2, z2 = extents
mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
dtype=np.uint16)
# Find all polygons which intersect with the area of interest
loc_array = np.zeros((len(new_ar),2,2),np.uint8)
for road_id in range(len(my_lines)):
cons_points = my_lines[road_id][my_sel_points[road_id]]
cur_min = False
cur_last = (None,None)
for p in range(len(cons_points)):
cur = cons_points[p][:2]
cur_point = Point(cur)
cont = map_patch.contains(cur_point)
if cont:
# # Transform into map coordinates
polygon = transform_polygon(cur_point, inv_tfm)
if len(polygon.coords) > 0:
polygon = (polygon.coords[0]- np.array(extents[:2])) / resolution
polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
if ((polygon[0] >= 0) & (polygon[1] >= 0)):
if ((polygon[0] < mask.shape[1]) & (polygon[1] < mask.shape[0])):
mask[polygon[1],polygon[0]] = my_road_ar[road_id] + 1
#
if vis_mask[polygon[1],polygon[0]] > 0.5:
if not cur_min:
#
#
loc_array[my_road_ar[road_id],0,0] = np.int32(polygon[1])
loc_array[my_road_ar[road_id],0,1] = np.int32(polygon[0])
cur_min = True
#
cur_last = (np.int32(polygon[1]),np.int32(polygon[0]))
#
if cur_last[0] != None:
#
loc_array[my_road_ar[road_id],1,0] = np.int32(cur_last[0])
loc_array[my_road_ar[road_id],1,1] = np.int32(cur_last[1])
else:
loc_array[my_road_ar[road_id],1,0] = 255
loc_array[my_road_ar[road_id],1,1] = 255
if not cur_min:
loc_array[my_road_ar[road_id],0,0] = 255
loc_array[my_road_ar[road_id],0,1] = 255
return mask, loc_array
#
#def get_centerlines(nuscenes, centers, sample_data, extents, resolution, vis_mask, already_found=None):
#
# # Get the 2D affine transform from bev coords to map coords
# tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
#
# tfm[1,-1] = tfm[1,-1]
#
#
# inv_tfm = np.linalg.inv(tfm)
#
# # Create a patch representing the birds-eye-view region in map coordinates
# map_patch = geometry.box(*extents)
# map_patch = transform_polygon(map_patch, tfm)
#
# # Initialise the map mask
# x1, z1, x2, z2 = extents
#
#
# mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
# dtype=np.uint16)
#
# # Find all polygons which intersect with the area of interest
#
# loc_array = np.zeros((len(centers),2,2),np.uint8)
#
# for road_id in range(len(centers)):
#
# cur_min = False
# cur_last = (None,None)
#
# for p in range(len(centers[road_id])):
# cur = centers[road_id][p][:2]
# cur_point = Point(cur)
# cont = map_patch.contains(cur_point)
#
# if cont:
#
## # Transform into map coordinates
# polygon = transform_polygon(cur_point, inv_tfm)
# if len(polygon.coords) > 0:
# polygon = (polygon.coords[0]- np.array(extents[:2])) / resolution
# polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
# if ((polygon[0] >= 0) & (polygon[1] >= 0)):
# if ((polygon[0] < mask.shape[1]) & (polygon[1] < mask.shape[0])):
# mask[polygon[1],polygon[0]] = road_id + 1
# #
# if vis_mask[polygon[1],polygon[0]] > 0.5:
#
# if not cur_min:
# #
# #
# loc_array[road_id,0,0] = np.uint8(polygon[1])
# loc_array[road_id,0,1] = np.uint8(polygon[0])
# cur_min = True
# #
# cur_last = (polygon[1],polygon[0])
##
# if cur_last[0] != None:
##
# loc_array[road_id,1,0] = np.uint8(cur_last[0])
# loc_array[road_id,1,1] = np.uint8(cur_last[1])
# else:
# loc_array[road_id,1,0] = 255
# loc_array[road_id,1,1] = 255
#
# if not cur_min:
# loc_array[road_id,0,0] = 255
# loc_array[road_id,0,1] = 255
#
# return mask, loc_array
def get_moved_centerlines(nuscenes, centers, sample_data, extents, resolution, vis_mask, beta, already_found):
start_point_base = 5000
end_point_base = 10000
# Get the 2D affine transform from bev coords to map coords
tfm = get_sensor_transform(nuscenes, sample_data)[[0, 1, 3]][:, [0, 2, 3]]
tfm[1,-1] = tfm[1,-1] - beta
inv_tfm = np.linalg.inv(tfm)
# Create a patch representing the birds-eye-view region in map coordinates
map_patch = geometry.box(*extents)
map_patch = transform_polygon(map_patch, tfm)
# Initialise the map mask
x1, z1, x2, z2 = extents
mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
dtype=np.uint16)
# Find all polygons which intersect with the area of interest
# selected_roads=[]
# found=False
loc_array = np.zeros((len(centers),2,2),np.uint8)
road_ids = list(np.int64(np.unique(already_found)[1:] - 1))
for road_id in road_ids:
# temp_mask = np.zeros((int((z2 - z1) / resolution), int((x2 - x1) / resolution)),
# dtype=np.uint16)
# per_road_check = False
cur_min = False
cur_last = (None,None)
for p in range(len(centers[road_id])):
cur = centers[road_id][p][:2]
cur_point = Point(cur)
cont = map_patch.contains(cur_point)
if cont:
# logging.error('road_id ' + str(road_id))
# logging.error('point ' + str(p))
# found=True
# break
# # Transform into map coordinates
polygon = transform_polygon(cur_point, inv_tfm)
if len(polygon.coords) > 0:
polygon = (polygon.coords[0]- np.array(extents[:2])) / resolution
polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
if ((polygon[0] >= 0) & (polygon[1] >= 0)):
if ((polygon[0] < mask.shape[1]) & (polygon[1] < mask.shape[0])):
mask[polygon[1],polygon[0]] = road_id + 1
#
if vis_mask[polygon[1],polygon[0]] > 0.5:
if not cur_min:
#
# if mask[polygon[1],polygon[0]] > start_point_base:
#
# if mask[polygon[1],polygon[0]] > end_point_base:
#
# rel_id = mask[polygon[1],polygon[0]] - end_point_base - 1
# logging.error('START OF ROAD '+ str(road_id) + ' and END OF '+ str(rel_id))
#
# else:
# rel_id = mask[polygon[1],polygon[0]] - start_point_base - 1
#
# logging.error('START OF ROAD '+ str(road_id) + ' and START OF '+ str(rel_id))
#
loc_array[road_id,0,0] = np.uint8(polygon[1])
loc_array[road_id,0,1] = np.uint8(polygon[0])
cur_min = True
#
cur_last = (polygon[1],polygon[0])
#
# # Render the polygon to the mask
# logging.error('POLYGON ' + str(polygon.coords[1]))
# logging.error('EXTENTS ' + str(np.array(extents[:2])))
# polygon = (polygon - np.array(extents[:2])) / resolution
# polygon = np.ascontiguousarray(polygon).round().astype(np.int32)
# cv2.fillConvexPoly(mask, polygon, road_id)
# render_point(mask, polygon, extents, resolution,road_id)
# if found:
# break
if cur_last[0] != None:
# if mask[cur_last[0],cur_last[1]] > 25000:
# logging.error('ENDPOITNS COLLIDED IN ROAD '+ str(road_id) + ' and '+ str(np.float32(mask[cur_last[0],cur_last[1]])//10))
# mask[cur_last[0],cur_last[1]] = (road_id + 1)*10 + 1
loc_array[road_id,1,0] = np.uint8(cur_last[0])
loc_array[road_id,1,1] = np.uint8(cur_last[1])
else:
loc_array[road_id,1,0] = 255
loc_array[road_id,1,1] = 255
if not cur_min:
loc_array[road_id,0,0] = 255
loc_array[road_id,0,1] = 255
return mask, loc_array
def zoom_augment_grids(image_shape, intrinsics, cs, beta):
image = np.zeros(image_shape)
col_ar2 = np.arange(image.shape[1])
row_ar2 = np.arange(image.shape[0])
mesh2_col, mesh2_row = np.meshgrid(col_ar2, row_ar2)
write_col = np.copy(mesh2_col)
write_row = np.copy(mesh2_row)
col_ar1 = np.arange(image.shape[1])
row_ar1 = np.arange(image.shape[0])
mesh1_col, mesh1_row = np.meshgrid(col_ar1, row_ar1)
x_center = intrinsics[0,-1]
y_center = intrinsics[1,-1]
f = intrinsics[0,0]
Y = -cs[-1]
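    # Backward-mapping grids (interpretation): for every output pixel, the
    # loops below compute the source pixel to sample from when the camera is
    # translated by "beta" along its optical axis, assuming every pixel
    # back-projects onto a flat ground plane at height "Y".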
for m in range(mesh1_row.shape[0]):
for n in range(mesh1_row.shape[1]):
write_col[m,n] = int((mesh2_col[m,n] - x_center)*f*Y/(f*Y - beta*mesh2_row[m,n] + beta*y_center) + x_center)
write_row[m,n] = int(f*Y*(mesh2_row[m,n] - y_center)/(f*Y - beta*mesh2_row[m,n] + beta*y_center) + y_center)
total_mask = np.ones_like(write_col)
total_mask[write_col < 0] = 0
total_mask[write_col > (image.shape[1]-1)] = 0
total_mask[write_row < 0] = 0
total_mask[write_row > (image.shape[0]-1)] = 0
write_col[write_col < 0] = 0
write_col[write_col > (image.shape[1]-1)] = 0
write_row[write_row < 0] = 0
write_row[write_row > (image.shape[0]-1)] = 0
return write_row, write_col, total_mask |
py | 1a3a6c154bad78c5090f072a5e2c6ea9d56baca1 | import ast
import contextlib
import json
import os
import re
import sys
import threading
from datetime import timedelta
import pytest
import retrying
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from dcos import constants
from dcoscli.test.common import (assert_command, assert_lines, exec_command,
popen_tty, update_config)
from dcoscli.test.marathon import (app, list_apps, list_deployments, show_app,
start_app, watch_all_deployments,
watch_deployment)
_ZERO_INSTANCE_APP_ID = 'zero-instance-app'
_ZERO_INSTANCE_APP_INSTANCES = 100
def test_help():
with open('tests/data/marathon/help.txt') as content:
assert_command(['dcos', 'marathon', '--help'],
stdout=content.read().encode('utf-8'))
def test_version():
assert_command(['dcos', 'marathon', '--version'],
stdout=b'dcos-marathon version SNAPSHOT\n')
def test_info():
assert_command(['dcos', 'marathon', '--info'],
stdout=b'Deploy and manage applications to DC/OS\n')
def test_about():
returncode, stdout, stderr = exec_command(['dcos', 'marathon', 'about'])
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['name'] == "marathon"
@pytest.fixture
def env():
r = os.environ.copy()
r.update({constants.PATH_ENV: os.environ[constants.PATH_ENV]})
return r
def test_empty_list():
list_apps()
def test_add_app_through_http():
with _zero_instance_app_through_http():
list_apps('zero-instance-app')
def test_add_app_bad_resource():
stderr = (b'Error: can\'t read from resource: bad_resource. Please check that it exists\n')
assert_command(['dcos', 'marathon', 'app', 'add', 'bad_resource'],
returncode=1,
stderr=stderr)
def test_remove_app():
with _zero_instance_app():
pass
list_apps()
def test_add_bad_json_app():
with open('tests/data/marathon/apps/bad.json') as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add'],
stdin=fd)
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith('Error: error loading JSON: ')
def test_add_existing_app():
with _zero_instance_app():
app_path = 'tests/data/marathon/apps/zero_instance_sleep_v2.json'
with open(app_path) as fd:
stderr = b"Error: Application '/zero-instance-app' already exists\n"
assert_command(['dcos', 'marathon', 'app', 'add'],
returncode=1,
stderr=stderr,
stdin=fd)
def test_show_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
result = show_app('zero-instance-app')
show_app('zero-instance-app', result['version'])
def test_show_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
show_app('zero-instance-app', "-1")
def test_show_missing_relative_app_version():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
# Marathon persists app versions indefinitely by ID, so pick a large
# index here in case the history is long
cmd = ['dcos', 'marathon', 'app', 'show', '--app-version=-200', app_id]
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 1
assert stdout == b''
pattern = ("Error: application 'zero-instance-app' only has "
"[1-9][0-9]* version\\(s\\)\n")
assert re.fullmatch(pattern, stderr.decode('utf-8'), flags=re.DOTALL)
def test_show_missing_absolute_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2000-02-11T20:39:32.972Z', 'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.decode('utf-8').startswith(
"Error: app '/zero-instance-app' does not exist")
def test_show_bad_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'show', '--app-version=20:39:32.972Z',
'zero-instance-app'])
assert returncode == 1
assert stdout == b''
assert stderr.startswith(b'Error: invalid timestamp provided')
def test_show_bad_relative_app_version():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
assert_command(
['dcos', 'marathon', 'app', 'show',
'--app-version=2', 'zero-instance-app'],
returncode=1,
stderr=b"Error: relative versions must be negative: 2\n")
def test_start_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'start', 'missing-id'],
returncode=1,
stderr=b"Error: app '/missing-id' does not exist\n")
def test_start_already_started_app():
with _zero_instance_app():
start_app('zero-instance-app')
stderr = (b"Error: application 'zero-instance-app' already "
b"started: 1 instances\n")
assert_command(
['dcos', 'marathon', 'app', 'start', 'zero-instance-app'],
returncode=1,
stderr=stderr)
def test_stop_missing_app():
assert_command(['dcos', 'marathon', 'app', 'stop', 'missing-id'],
returncode=1,
stderr=b"Error: app '/missing-id' does not exist\n")
def test_stop_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_stop_already_stopped_app():
with _zero_instance_app():
stderr = (b"Error: app '/zero-instance-app' already "
b"stopped: 0 instances\n")
assert_command(
['dcos', 'marathon', 'app', 'stop', 'zero-instance-app'],
returncode=1,
stderr=stderr)
def test_update_missing_app():
assert_command(['dcos', 'marathon', 'app', 'update', 'missing-id'],
stderr=b"Error: App '/missing-id' does not exist\n",
returncode=1)
def test_update_bad_type():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update',
'zero-instance-app', 'cpus="a string"'])
stderr_end = b"""{"message":"Invalid JSON","details":[{"path":"/cpus","errors":["error.expected.jsnumber"]}]}""" # noqa: E501
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_invalid_request():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', '{', 'instances'])
assert returncode == 1
assert stdout == b''
stderr = stderr.decode()
# TODO (tamar): this becomes 'Error: App '/{' does not exist\n"'
# in Marathon 0.11.0
assert stderr.startswith('Error on request')
assert stderr.endswith('HTTP 400: Bad Request\n')
def test_app_add_invalid_request():
path = os.path.join(
'tests', 'data', 'marathon', 'apps', 'app_add_400.json')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'add', path])
stderr_end = b"Invalid JSON (path: '/container/docker/network' errors: error.unknown.enum.literal)" # noqa: E501
assert returncode == 1
assert stderr_end in stderr
assert stdout == b''
def test_update_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
'cpus=1', 'mem=20', "cmd='sleep 100'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_json():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', 'zero-instance-app',
"env='{\"key\":\"/value\"}'"])
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def test_update_app_from_stdin():
with _zero_instance_app():
_update_app(
'zero-instance-app',
'tests/data/marathon/apps/update_zero_instance_sleep.json')
def test_restarting_stopped_app():
with _zero_instance_app():
stderr = (b"Error: unable to perform rolling restart of application '"
b"/zero-instance-app' because it has no running tasks\n")
assert_command(
['dcos', 'marathon', 'app', 'restart', 'zero-instance-app'],
returncode=1,
stderr=stderr)
def test_restarting_missing_app():
assert_command(['dcos', 'marathon', 'app', 'restart', 'missing-id'],
returncode=1,
stderr=b"Error: app '/missing-id' does not exist\n")
def test_killing_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 0
assert stderr == b''
out = stdout.decode()
assert out.startswith('Killed tasks: ')
        # str.strip() removes characters, not a prefix; slice the prefix off instead
        out = out[len('Killed tasks: '):]
dictout = ast.literal_eval(out)
assert len(dictout) == 3
def test_killing_scaling_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
command = ['dcos', 'marathon', 'app', 'kill', '--scale',
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert returncode == 0
assert stdout.decode().startswith('Started deployment: ')
assert stdout.decode().find('version') > -1
assert stdout.decode().find('deploymentId') > -1
assert stderr == b''
watch_all_deployments()
_list_tasks(0)
def test_killing_with_host_app():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
existing_tasks = _list_tasks(3, 'zero-instance-app')
task_hosts = set([task['host'] for task in existing_tasks])
if len(task_hosts) <= 1:
pytest.skip('test needs 2 or more agents to succeed, '
'only {} agents available'.format(len(task_hosts)))
assert len(task_hosts) > 1
kill_host = list(task_hosts)[0]
expected_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] == kill_host])
not_to_be_killed = set([task['id']
for task in existing_tasks
if task['host'] != kill_host])
assert len(not_to_be_killed) > 0
assert len(expected_to_be_killed) > 0
command = ['dcos', 'marathon', 'app', 'kill', '--host', kill_host,
'zero-instance-app']
returncode, stdout, stderr = exec_command(command)
assert stdout.decode().startswith('Killed tasks: ')
assert stderr == b''
new_tasks = set([task['id'] for task in _list_tasks()])
assert not_to_be_killed.intersection(new_tasks) == not_to_be_killed
assert len(expected_to_be_killed.intersection(new_tasks)) == 0
def test_kill_stopped_app():
with _zero_instance_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'zero-instance-app'])
assert returncode == 0
assert stdout.decode().startswith('Killed tasks: []')
def test_kill_missing_app():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'kill', 'app'])
assert returncode == 1
assert stdout.decode() == ''
stderr_expected = "Error: app '/app' does not exist"
assert stderr.decode().strip() == stderr_expected
def test_list_version_missing_app():
assert_command(
['dcos', 'marathon', 'app', 'version', 'list', 'missing-id'],
returncode=1,
stderr=b"Error: Marathon API error: App '/missing-id' does not exist\n")
def test_list_version_negative_max_count():
assert_command(['dcos', 'marathon', 'app', 'version', 'list',
'missing-id', '--max-count=-1'],
returncode=1,
stderr=b'Error: maximum count must be a positive number\n')
def test_list_version_app():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_list_versions(app_id, 1)
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 2)
def test_list_version_max_count():
app_id = _ZERO_INSTANCE_APP_ID
with _zero_instance_app():
_update_app(
app_id,
'tests/data/marathon/apps/update_zero_instance_sleep.json')
_list_versions(app_id, 1, 1)
_list_versions(app_id, 2, 2)
_list_versions(app_id, 2, 3)
def test_list_empty_deployment():
list_deployments(0)
def test_list_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1)
def test_list_deployment_table():
"""Simple sanity check for listing deployments with a table output.
The more specific testing is done in unit tests.
"""
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
assert_lines(['dcos', 'marathon', 'deployment', 'list'], 2)
def test_list_deployment_missing_app():
with _zero_instance_app():
start_app('zero-instance-app')
list_deployments(0, 'missing-id')
def test_list_deployment_app():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
list_deployments(1, 'zero-instance-app')
def test_rollback_missing_deployment():
assert_command(
['dcos', 'marathon', 'deployment', 'rollback', 'missing-deployment'],
returncode=1,
stderr=b'Error: DeploymentPlan missing-deployment does not exist\n')
def test_rollback_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'deployment', 'rollback', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert 'deploymentId' in result
assert 'version' in result
assert stderr == b''
watch_all_deployments()
list_deployments(0)
def test_stop_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0)
def test_watching_missing_deployment():
watch_deployment('missing-deployment', 1)
def test_watching_deployment():
with _zero_instance_app():
start_app('zero-instance-app', _ZERO_INSTANCE_APP_INSTANCES)
result = list_deployments(1, 'zero-instance-app')
watch_deployment(result[0]['id'], 60)
assert_command(
['dcos', 'marathon', 'deployment', 'stop', result[0]['id']])
list_deployments(0, 'zero-instance-app')
def test_list_empty_task():
_list_tasks(0)
def test_list_empty_task_not_running_app():
with _zero_instance_app():
_list_tasks(0)
def test_list_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3)
def test_list_tasks_table():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
assert_lines(['dcos', 'marathon', 'task', 'list'], 4)
def test_list_app_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(3, 'zero-instance-app')
def test_list_missing_app_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
_list_tasks(0, 'missing-id')
def test_show_missing_task():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', 'missing-id'])
stderr = stderr.decode('utf-8')
assert returncode == 1
assert stdout == b''
assert stderr.startswith("Task '")
assert stderr.endswith("' does not exist\n")
def test_show_task():
with _zero_instance_app():
start_app('zero-instance-app', 3)
watch_all_deployments()
result = _list_tasks(3, 'zero-instance-app')
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'task', 'show', result[0]['id']])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert result['appId'] == '/zero-instance-app'
assert stderr == b''
def test_stop_task():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id)
def test_stop_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = task_list[0]['id']
_stop_task(task_id, '--wipe')
def test_kill_one_task():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id)
def test_kill_two_tasks():
with _zero_instance_app():
start_app('zero-instance-app', 2)
watch_all_deployments()
task_list = _list_tasks(2, 'zero-instance-app')
task_ids = [task['id'] for task in task_list]
_kill_task(task_ids)
def test_kill_and_scale_task():
with _zero_instance_app():
start_app('zero-instance-app', 2)
watch_all_deployments()
task_list = _list_tasks(2, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id, scale=True)
task_list = _list_tasks(1, 'zero-instance-app')
def test_kill_unknown_task():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = ['unknown-task-id']
_kill_task(task_id, expect_success=False)
def test_kill_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app', 1)
watch_all_deployments()
task_list = _list_tasks(1, 'zero-instance-app')
task_id = [task_list[0]['id']]
_kill_task(task_id, wipe=True)
def test_stop_unknown_task():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, expect_success=False)
def test_stop_unknown_task_wipe():
with _zero_instance_app():
start_app('zero-instance-app')
watch_all_deployments()
task_id = 'unknown-task-id'
_stop_task(task_id, '--wipe', expect_success=False)
def test_bad_configuration(env):
with update_config('marathon.url', 'http://localhost:88888', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'about'], env=env)
assert returncode == 1
def test_app_locked_error():
with app('tests/data/marathon/apps/sleep_many_instances.json',
'/sleep-many-instances',
wait=False):
stderr = (b'Error: changes blocked: deployment '
b'already in progress for app\n')
assert_command(
['dcos', 'marathon', 'app', 'stop', 'sleep-many-instances'],
returncode=1,
stderr=stderr)
def test_ping():
assert_command(['dcos', 'marathon', 'ping'],
stdout=b'Marathon ping response[1x]: "pong"\n')
def test_leader_show():
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'leader', 'show', '--json'])
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert stderr == b''
assert result['host'] == "marathon.mesos."
assert 'ip' in result
def ignore_exception(exc):
return isinstance(exc, Exception)
@pytest.fixture
def marathon_up():
yield
check_marathon_up()
@retrying.retry(stop_max_delay=timedelta(minutes=5).total_seconds() * 1000,
retry_on_exception=ignore_exception, wait_fixed=1000)
def check_marathon_up():
# testing to see if marathon is up and can talk through the gateway
# ignore the exception until we have a successful reponse.
returncode, _, _ = exec_command(['dcos', 'marathon', 'app', 'list'])
assert returncode == 0
@retrying.retry(stop_max_delay=timedelta(minutes=5).total_seconds() * 1000,
retry_on_exception=ignore_exception)
def wait_marathon_down():
returncode, _, _ = exec_command(['dcos', 'marathon', 'app', 'list'])
assert returncode != 0
def test_leader_delete(marathon_up):
assert_command(['dcos', 'marathon', 'leader', 'delete'],
stdout=b'Leadership abdicated\n')
# There might be a slight delay until marathon shows itself as down,
# so marathon_up() might succeed directly and the next tests would
# run with an unhealthy marathon. Explicitly wait for marathon to
# go down before waiting for it to become healthy again.
wait_marathon_down()
check_marathon_up()
def _update_app(app_id, file_path):
with open(file_path) as fd:
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'update', app_id],
stdin=fd)
assert returncode == 0
assert stdout.decode().startswith('Created deployment ')
assert stderr == b''
def _list_versions(app_id, expected_min_count, max_count=None):
cmd = ['dcos', 'marathon', 'app', 'version', 'list', app_id]
if max_count is not None:
cmd.append('--max-count={}'.format(max_count))
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
assert isinstance(result, list)
assert stderr == b''
# Marathon persists app versions indefinitely by ID, so there may be extras
assert len(result) >= expected_min_count
if max_count is not None:
assert len(result) <= max_count
def _list_tasks(expected_count=None, app_id=None):
cmd = ['dcos', 'marathon', 'task', 'list', '--json']
if app_id is not None:
cmd.append(app_id)
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
if expected_count:
assert len(result) == expected_count
assert stderr == b''
return result
def _stop_task(task_id, wipe=None, expect_success=True):
cmd = ['dcos', 'marathon', 'task', 'stop', task_id]
if wipe is not None:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert result['id'] == task_id
else:
assert returncode == 1
def _kill_task(task_ids, scale=None, wipe=None, expect_success=True):
cmd = ['dcos', 'marathon', 'task', 'kill', '--json'] + task_ids
if scale:
cmd.append('--scale')
if wipe:
cmd.append('--wipe')
returncode, stdout, stderr = exec_command(cmd)
if expect_success:
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
if scale:
assert 'deploymentId' in result
else:
assert sorted(
[task['id'] for task in result['tasks']]) == sorted(task_ids)
else:
assert returncode == 1
@contextlib.contextmanager
def _zero_instance_app():
with app('tests/data/marathon/apps/zero_instance_sleep.json',
'zero-instance-app'):
yield
@contextlib.contextmanager
def _zero_instance_app_through_http():
class JSONRequestHandler (BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(open(
'tests/data/marathon/apps/zero_instance_sleep.json',
'rb').read())
host = 'localhost'
port = 12345
server = HTTPServer((host, port), JSONRequestHandler)
thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
thread.start()
with app('http://{}:{}'.format(host, port), 'zero-instance-app'):
try:
yield
finally:
server.shutdown()
|
py | 1a3a6d6e78b14560d8db54df647c376239a0ab17 | import unittest, traceback
from subprocess import TimeoutExpired
class TestcaseError(BaseException):
pass
class TestResult(unittest.TextTestResult):
def __init__(self, stream=None, descriptions=None, verbosity=0):
super(TestResult, self).__init__(stream, descriptions, verbosity)
self.success_count = 0
self.failures_count = 0
self.errors_count = 0
self.result = []
def addError(self, test, error):
self.errors_count += 1
self.saveTestCaseResult(test, "errored", error)
return super(TestResult, self).addError(test, error)
def addFailure(self, test, error):
self.failures_count += 1
self.saveTestCaseResult(test, "failed", error)
return super(TestResult, self).addFailure(test, error)
def addSuccess(self, test):
self.success_count += 1
self.saveTestCaseResult(test, "passed")
return super(TestResult, self).addSuccess(test)
def saveTestCaseResult(self, test, status, error=None):
result = {
"uid": test.testcase["uid"],
"stdin": repr(test.testcase["stdin"]),
"stdout": repr(test.stdout),
"stderr": test.response.stderr,
"generated_stdout": repr(test.response.stdout),
"status": status,
}
if error:
error = "".join(traceback.format_exception_only(error[0], error[1])).strip()
result["error"] = error
self.result.append(result)
class TestCase(unittest.TestCase):
def __init__(self, module, testcase, timeout=None):
unittest.TestCase.__init__(self)
self.module = module
self.testcase = testcase
self.timeout = timeout
def runTest(self):
self.stdout = str(self.testcase["expected_stdout"]).strip()
self.response = self.module.runTest(
stdin=self.testcase["stdin"], timeout=self.timeout
)
if self.response.returncode:
raise TestcaseError(self.response.stderr)
self.generated_stdout = self.response.stdout.strip()
if self.generated_stdout != self.stdout:
raise AssertionError(
"{} != {}".format(repr(self.generated_stdout), repr(self.stdout))
)
class Judger:
def __init__(self):
self.testresult = TestResult()
self.testsuite = unittest.TestSuite()
def _create_testsuite(self, module, testcases, timeout):
for testcase in testcases:
obj = TestCase(module=module, testcase=testcase, timeout=timeout)
self.testsuite.addTest(obj)
def judge(self, module, sourcefile, testcases, timeout=10):
self.result = {"tests": [], "compiler": {}, "summary": {}}
if hasattr(module, "compile"):
compiler = module.compile(sourcefile, timeout=timeout)
self.result["compiler"] = {
"returncode": compiler.returncode,
"error": compiler.stderr,
}
if compiler.returncode:
self.result["summary"]["status"] = "Compiler Error"
return
self._create_testsuite(module=module, testcases=testcases, timeout=timeout)
self.testsuite.run(self.testresult)
status = (
"Failed"
if (self.testresult.failures_count or self.testresult.errors_count)
else "Passed"
)
self.result.update(
{
"tests": self.testresult.result,
"summary": {
"success": self.testresult.success_count,
"failures": self.testresult.failures_count,
"errors": self.testresult.errors_count,
"status": status,
},
}
)
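
# Illustrative usage sketch -- the `python_runner` module name, file name and
# testcase values below are assumptions, based only on how judge() and
# TestCase.runTest consume their arguments:
#
#   judger = Judger()
#   judger.judge(module=python_runner, sourcefile="solution.py",
#                testcases=[{"uid": 1, "stdin": "1 2\n", "expected_stdout": "3"}],
#                timeout=5)
#   print(judger.result["summary"])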
|
py | 1a3a6e8e12d902278bdccab35238910538566638 | from __future__ import print_function, division
import scipy
import torch.nn as nn
import torch.nn.functional as F
import torch
import functools
import datetime
import matplotlib.pyplot as plt
import sys
from data_loader import InMemoryDataLoader
import numpy as np
import pandas as pd
import os
import random
import argparse
import os
import time
import torch
import torchvision
import tqdm
import warnings
import argparse
from sklearn.metrics import accuracy_score
from models_gan_pytorch_2_bottlenec5x5 import *
from utils import *
# reproducibility
torch.manual_seed(777)
np.random.seed(777)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class C_CC_GAN():
def __init__(self, root_data_path, train_size=-1,
img_rows = 112,img_cols = 112,channels = 3,
AU_num=35,
lambda_cl=1,lambda_cyc=1,
loss_type='loss_nonsaturating',
adam_lr=0.0002,adam_beta_1=0.5,adam_beta_2=0.999):
# paths
self.root_data_path = root_data_path
# Input shape
self.img_rows = img_rows
self.img_cols = img_cols
self.channels = channels
self.img_shape = (self.channels,self.img_rows, self.img_cols)
self.AU_num = AU_num
# Loss weights
self.lambda_cl = lambda_cl
self.lambda_cyc = lambda_cyc
# loss type
self.loss_type = loss_type
# optmizer params
self.adam_lr = adam_lr
self.adam_beta_1 = adam_beta_1
self.adam_beta_2 = adam_beta_2
# Configure data loader
self.data_loader = InMemoryDataLoader(dataset_name='EmotioNet',
img_res=(self.img_rows, self.img_cols,self.channels),
root_data_path=self.root_data_path,
normalize=True,
csv_columns = ['frame', "AU01_c" , "AU02_c" , "AU04_c",
"AU05_c", "AU06_c", "AU07_c", "AU09_c",
"AU10_c", "AU12_c", "AU14_c", "AU15_c",
"AU17_c" , "AU20_c" , "AU23_c", "AU25_c",
"AU26_c" , "AU45_c"],
max_images=train_size)
#optimizer = Adam(self.adam_lr, self.adam_beta_1, self.adam_beta_2)
# Build and compile the discriminators
self.d = Discriminator(img_shape=self.img_shape,df=64,AU_num=self.AU_num).to(device)
#self.d.init_weights()
print("******** Discriminator/Classifier ********")
print(self.d)
# Build the generators
self.g = Generator(img_shape=(3,112,112),gf=64,AU_num=self.AU_num).to(device)
#xself.g.init_weights()
print("******** Generator ********")
print(self.g)
##
self.g_optimizer = torch.optim.Adam(self.g.parameters(), self.adam_lr, betas=(self.adam_beta_1, self.adam_beta_2))
self.d_optimizer = torch.optim.Adam(self.d.parameters(), self.adam_lr, betas=(self.adam_beta_1, self.adam_beta_2))
def train(self, epochs, batch_size=1, sample_interval=50 , d_g_ratio=5):
start_time = datetime.datetime.now()
# logs
epoch_history, batch_i_history, = [] , []
d_gan_loss_history, d_au_loss_history = [], [],
g_gan_loss_history, g_au_loss_history = [] , []
reconstr_history = []
##
self.g.train()
self.d.train()
for epoch in range(epochs):
for batch_i, (labels0 , imgs) in enumerate(self.data_loader.load_batch(batch_size=batch_size)):
imgs = np.transpose(imgs,(0,3,1,2))
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
labels0, imgs = torch.tensor(labels0).to(device).type(dtype), torch.tensor(imgs).to(device).type(dtype)
if self.loss_type == 'loss_nonsaturating':
d_loss , d_loss_dict , g_loss, g_loss_dict = loss_nonsaturating(self.g, self.d,
imgs, labels0,
self.lambda_cl, self.lambda_cyc,
self.data_loader,
device,
train_generator=(batch_i % d_g_ratio == 0))
## opt. discr.
self.d_optimizer.zero_grad()
d_loss.backward(retain_graph=True)
self.d_optimizer.step()
## opt. gen.
if g_loss is not None:
self.g_optimizer.zero_grad()
g_loss.backward()
self.g_optimizer.step()
elif self.loss_type == 'loss_wasserstein_gp':
# train critic
d_loss_dict = train_D_wasserstein_gp(self.g, self.d, imgs, labels0,
self.lambda_cl, self.lambda_cyc,
self.data_loader,
device,self.d_optimizer)
# train generator
if batch_i % d_g_ratio == 0:
g_loss_dict = train_G_wasserstein_gp(self.g, self.d, imgs, labels0,
self.lambda_cl, self.lambda_cyc,
self.data_loader,
device,self.g_optimizer)
else:
raise Exception("Unknown loss type::"+str(self.loss_type))
torch.cuda.empty_cache()
elapsed_time = datetime.datetime.now() - start_time
try:
if batch_i % d_g_ratio == 0:
print ("[Epoch %d/%d] [Batch %d/%d] [D_gan loss: %f, D_AU_loss: %f] [G_gan loss: %05f, G_AU_loss: %05f, recon: %05f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
d_loss_dict['d_adv_loss'], d_loss_dict['d_cl_loss'],
g_loss_dict['g_adv_loss'],g_loss_dict['g_cl_loss'], g_loss_dict['rec_loss'],
elapsed_time))
else:
print ("[Epoch %d/%d] [Batch %d/%d] [D_gan loss: %f, D_AU_loss: %f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
d_loss_dict['d_adv_loss'], d_loss_dict['d_cl_loss'],
elapsed_time))
except:
print("*** problem to log ***")
# log
if batch_i % d_g_ratio == 0:
epoch_history.append(epoch)
batch_i_history.append(batch_i)
d_gan_loss_history.append(d_loss_dict['d_adv_loss'].cpu().detach().numpy())
d_au_loss_history.append(d_loss_dict['d_cl_loss'].cpu().detach().numpy())
g_gan_loss_history.append(g_loss_dict['g_adv_loss'].cpu().detach().numpy())
g_au_loss_history.append(g_loss_dict['g_cl_loss'].cpu().detach().numpy())
reconstr_history.append(g_loss_dict['rec_loss'].cpu().detach().numpy())
# If at save interval => save generated image samples
if batch_i % sample_interval == 0:
with torch.no_grad():
self.g.eval()
self.sample_images(epoch, batch_i)
#self.sample_images(epoch, batch_i,use_leo=True)
self.g.train()
train_history = pd.DataFrame({
'epoch': epoch_history,
'batch': batch_i_history,
'd_gan_loss': d_gan_loss_history,
'd_AU_loss': d_au_loss_history,
'g_gan_loss': g_gan_loss_history,
'g_AU_loss': g_au_loss_history,
'reconstr_loss': reconstr_history
})
train_history.to_csv(str(sys.argv[0]).split('.')[0]+'_train_log.csv',index=False)
def sample_images(self, epoch, batch_i):
for labels0 , imgs in self.data_loader.load_batch(batch_size=1):
## disc
imgs_d = np.transpose(imgs,(0,3,1,2))
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
labels0_d, imgs_d = torch.tensor(labels0).to(device).type(dtype), torch.tensor(imgs_d).to(device).type(dtype)
#gan_pred_prob,au_prob = self.d(imgs_d)
#des_au_1 = torch.tensor(self.data_loader.gen_rand_cond(batch_size=1)).to(device).type(dtype)
des_au_1 = torch.tensor(self.data_loader.gen_rand_cond_for_binary_au(labels0)).to(device).type(dtype)[0]
# Translate images
zs = self.g.encode(imgs_d)
# Reconstruct image
reconstr_ = self.g.translate_decode(zs,labels0_d)
# Transl. image
transl_ = self.g.translate_decode(zs,des_au_1)
## save reconstraction
if not os.path.exists('log_images'):
os.makedirs('log_images')
#plot reconstr_
reconstr_ = reconstr_.cpu()
reconstr_ = np.transpose(reconstr_.detach().numpy(),(0,2,3,1))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_grid(np.concatenate([imgs, reconstr_]),
row_titles=None,
col_titles=["Orig.[ep:%d]" % (epoch),'Reconstr.'],
nrow = 1,ncol = 2,
save_filename="log_images/reconstr_%d_%d.png" % (epoch, batch_i))
#plot transl_
transl_ = transl_.cpu()
transl_ = np.transpose(transl_.detach().numpy(),(0,2,3,1))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_grid(np.concatenate([imgs, transl_]),
row_titles=None,
col_titles=["Orig.[ep:%d]" % (epoch),'Transl.'],
nrow = 1,ncol = 2,
save_filename="log_images/translat_%d_%d.png" % (epoch, batch_i))
####
n_row = 4 # alpha
n_col = 9 # AUs
col_names = ['AU1_r','AU2_r','AU4_r','AU5_r','AU10_r',
'AU12_r','AU15_r','AU25_r','AU45_r']
col_idx = [0,1,2,3,7,8,10,14,16]
assert len(col_names) == len(col_idx)
alphas = [0,.33,.66,1]
au_grid = np.repeat(labels0,n_row*n_col,axis=0)
img_tens = np.repeat(imgs,n_row*n_col,axis=0)
n = 0
for r in range(n_row):
for c in range(n_col):
au_n = au_grid[[n],:]
au_n[0,col_idx[c]] = alphas[r]
au_n = torch.tensor(au_n).to(device).type(dtype)
#
act_au = self.g.translate_decode(zs,au_n)
act_au = act_au.cpu()
act_au = np.transpose(act_au.detach().numpy(),(0,2,3,1))
img_tens[n,:] = act_au
n += 1
#plot
col_names_plot = ['AU1','AU2','AU4','AU5','AU10','AU12','AU15','AU25','AU45']
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_grid(img_tens,
row_titles=alphas,
col_titles=col_names_plot,
nrow = n_row,ncol = n_col,
save_filename="log_images/au_edition_%d_%d.png" % (epoch, batch_i))
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train')
parser.add_argument('-lambda_cl', help='loss weight for cond. regress. loss', dest='lambda_cl', type=float, default=100)
parser.add_argument('-lambda_cyc', help='reconstr. loss weight', dest='lambda_cyc', type=float, default=10)
parser.add_argument('-loss_type', help='loss type [loss_nonsaturating] ', dest='loss_type', type=str, default='loss_wasserstein_gp')
parser.add_argument('-d_g_ratio', help='# train iterations of critic per each train iteration of generator', dest='d_g_ratio', type=int, default=1)
parser.add_argument('-adam_lr', help='Adam l.r.', dest='adam_lr', type=float, default=0.0002)
parser.add_argument('-adam_beta_1', help='Adam beta-1', dest='adam_beta_1', type=float, default=0.5)
parser.add_argument('-adam_beta_2', help='Adam beta-2', dest='adam_beta_2', type=float, default=0.999)
parser.add_argument('-epochs', help='N. epochs', dest='epochs', type=int, default=170)
parser.add_argument('-batch_size', help='batch size', dest='batch_size', type=int, default=32)
parser.add_argument('-sample_interval', help='sample interval', dest='sample_interval', type=int, default=1000)
parser.add_argument('-root_data_path', help='base file path', dest='root_data_path', type=str, default='datasets')
parser.add_argument('-train_size', help='train size [-1 for all train data]', dest='train_size', type=int, default=-1)
args = parser.parse_args()
# print parameters
print('-' * 30)
print('Parameters .')
print('-' * 30)
for key, value in vars(args).items():
print('{:<20} := {}'.format(key, value))
print('-' * 30)
# GAN
root_data_path = args.root_data_path
gan = C_CC_GAN(
root_data_path = root_data_path,
train_size = args.train_size,
AU_num=17,
lambda_cl=args.lambda_cl,lambda_cyc=args.lambda_cyc,
loss_type=args.loss_type,
adam_lr=args.adam_lr,adam_beta_1=args.adam_beta_1,adam_beta_2=args.adam_beta_2)
gan.train(epochs=args.epochs,
batch_size=args.batch_size,
sample_interval=args.sample_interval,
d_g_ratio=args.d_g_ratio)
|
py | 1a3a6edbe6eb7fb5915743d94a3c516cface923c | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaauser_aaagroup_binding(base_resource) :
""" Binding class showing the aaagroup that can be bound to aaauser.
"""
def __init__(self) :
self._groupname = ""
self._username = ""
self.___count = 0
@property
def username(self) :
ur"""User account to which to bind the policy.<br/>Minimum length = 1.
"""
try :
return self._username
except Exception as e:
raise e
@username.setter
def username(self, username) :
ur"""User account to which to bind the policy.<br/>Minimum length = 1
"""
try :
self._username = username
except Exception as e:
raise e
@property
def groupname(self) :
ur"""The group name.
"""
try :
return self._groupname
except Exception as e:
raise e
@groupname.setter
def groupname(self, groupname) :
ur"""The group name.
"""
try :
self._groupname = groupname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(aaauser_aaagroup_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.aaauser_aaagroup_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.username is not None :
return str(self.username)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, username) :
ur""" Use this API to fetch aaauser_aaagroup_binding resources.
"""
try :
obj = aaauser_aaagroup_binding()
obj.username = username
response = obj.get_resources(service)
return response
except Exception as e:
raise e
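	# Illustrative call only -- `client` is assumed to be an authenticated nitro
	# service session; it is not defined in this module:
	#   bindings = aaauser_aaagroup_binding.get(client, "user1")
	#   groups = [b.groupname for b in bindings]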
@classmethod
def get_filtered(cls, service, username, filter_) :
ur""" Use this API to fetch filtered set of aaauser_aaagroup_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaauser_aaagroup_binding()
obj.username = username
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, username) :
ur""" Use this API to count aaauser_aaagroup_binding resources configued on NetScaler.
"""
try :
obj = aaauser_aaagroup_binding()
obj.username = username
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, username, filter_) :
ur""" Use this API to count the filtered set of aaauser_aaagroup_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaauser_aaagroup_binding()
obj.username = username
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class aaauser_aaagroup_binding_response(base_response) :
def __init__(self, length=1) :
self.aaauser_aaagroup_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.aaauser_aaagroup_binding = [aaauser_aaagroup_binding() for _ in range(length)]
|
py | 1a3a6fb61cb84f79739786401d6aa8e7ec7c4637 | # ----------------------------------------------------------------------------
# Copyright (c) 2020, Franck Lejzerowicz.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import re, ast
from setuptools import find_packages, setup
classes = """
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3 :: Only
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = (
""
)
with open("README.md") as f:
long_description = f.read()
_version_re = re.compile(r"__version__\s+=\s+(.*)")
with open("Xmmvec/__init__.py", "rb") as f:
hit = _version_re.search(f.read().decode("utf-8")).group(1)
version = str(ast.literal_eval(hit))
standalone = ['Xmmvec=Xmmvec.scripts._standalone_xmmvec:standalone_xmmvec']
setup(
name="Xmmvec",
version=version,
license="BSD",
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
author="Franck Lejzerowicz",
author_email="[email protected]",
maintainer="Franck Lejzerowicz",
maintainer_email="[email protected]",
url="https://github.com/FranckLejzerowicz/Xmmvec",
packages=find_packages(),
install_requires=[
"click >= 6.7",
'pandas >= 0.19.0',
'numpy >= 1.12.1',
'altair >= 4.1.0',
],
classifiers=classifiers,
entry_points={'console_scripts': standalone},
package_data={},
python_requires='>=3.5',
)
|
py | 1a3a6ff22082713da6e4223cad184d129987297f | from typing import Dict
from dbnd import parameter
from dbnd._core.settings import EngineConfig
from dbnd_docker.docker_ctrl import DockerRunCtrl
class AwsBatchConfig(EngineConfig):
"""Amazon Web Services Batch"""
_conf__task_family = "aws_batch"
job_definition = parameter(description="the job definition name on AWS Batch").none[
str
]
overrides = parameter(
empty_default=True,
description="the same parameter that boto3 will receive on containerOverrides (templated)"
" http://boto3.readthedocs.io/en/latest/reference/services/batch.html#submit_job",
)[Dict[str, str]]
job_queue = parameter(description="the queue name on AWS Batch")[str]
max_retries = parameter(
description="exponential backoff retries while waiter is not merged, 4200 = 48 hours"
)[int]
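    # Illustrative configuration values only (the section name comes from
    # _conf__task_family above; the concrete names are assumptions, not defaults):
    #   [aws_batch]
    #   job_definition = my-job-definition
    #   job_queue = my-job-queue
    #   max_retries = 4200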
def get_docker_ctrl(self, task_run):
return AWSBatchCtrl(task_run=task_run)
class AWSBatchCtrl(DockerRunCtrl):
"""
Execute a job on AWS Batch Service
"""
def __init__(self, **kwargs):
super(AWSBatchCtrl, self).__init__(**kwargs)
self.runner_op = None
@property
def aws_batch_config(self):
# type: (AWSBatchCtrl) -> AwsBatchConfig
return self.task.docker_engine
def docker_run(self):
dc = self.aws_batch_config
if dc.job_definition is None:
raise Exception("Please define aws batch definition first")
from airflow.contrib.operators.awsbatch_operator import AWSBatchOperator
cloud_config = self.task.task_env
self.runner_op = AWSBatchOperator(
task_id=self.task_id,
job_name=self.job.job_id,
# per task settings
job_definition=dc.job_definition,
overrides=dc.overrides,
# more global
job_queue=dc.job_queue,
max_retries=dc.max_retries,
aws_conn_id=cloud_config.conn_id,
region_name=cloud_config.region_name,
)
self.runner_op.execute(context=None)
def on_kill(self):
if self.runner_op is not None:
self.runner_op.on_kill()
|
py | 1a3a7081f4d12c0f493f6685da55bbe4a02aa92e | import os
import sys
import numpy as np
import importlib
from dataclasses import dataclass
from loguru import logger
from tqdm import tqdm
import psutil
__all__ = [
'sanitize_filename',
'get_tqdm',
'show_docstring',
'Results',
]
def _is_ipython_notebook(): # pragma: no cover
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
if shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def config_logger(fmt, loglevel): # pragma: no cover
r"""
Configures loguru logger with the given format and log level.
Parameters
----------
fmt : str
loguru-compatible format used to format logger messages.
loglevel : str
Determines what messages to get printed in console. Options are:
"TRACE", "DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"
Returns
-------
None.
"""
logger.remove()
logger.add(lambda msg: tqdm.write(msg, end=""),
level=loglevel,
format=fmt,
colorize=True)
@dataclass
class Settings: # pragma: no cover
r"""
A dataclass for use at the module level to store settings. This class
is defined as a Singleton so now matter how or where it gets
instantiated the same object is returned, containing all existing
settings.
Parameters
----------
notebook : boolean
Is automatically determined upon initialization of PoreSpy, and is
``True`` if running within a Jupyter notebook and ``False``
otherwise. This is used by the ``porespy.tools.get_tqdm`` function
to determine whether a standard or a notebook version of the
progress bar should be used.
tqdm : dict
This dictionary is passed directly to the the ``tqdm`` function
throughout PoreSpy (``for i in tqdm(range(N), **settings.tqdm)``).
To see a list of available options visit the tqdm website.
Probably the most important is ``'disable'`` which when set to
``True`` will silence the progress bars. It's also possible to
adjust the formatting such as ``'colour'`` and ``'ncols'``, which
controls width.
logger_fmt : str
luguru-compatible format used to format the logger messages.
loglevel : str, or int
Determines what messages to get printed in console. Options are:
"TRACE" (5), "DEBUG" (10), "INFO" (20), "SUCCESS" (25), "WARNING" (30),
"ERROR" (40), "CRITICAL" (50)
"""
__instance__ = None
# Might need to add 'file': sys.stdout to tqdm dict
tqdm = {'disable': False,
'colour': None,
'ncols': None,
'leave': False,
'file': sys.stdout}
_logger_fmt = '<green>{time:YYYY-MM-DD HH:mm:ss}</green> | ' \
'<level>{level: <8}</level> | ' \
'<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan>' \
'\n--> <level>{message}</level>'
_loglevel = "ERROR" if _is_ipython_notebook() else "WARNING"
config_logger(_logger_fmt, _loglevel)
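    # Example (illustrative): the class is a Singleton, so the same object is
    # returned wherever it is instantiated.
    #   settings = Settings()
    #   settings.tqdm['disable'] = True   # silence all progress bars
    #   settings.loglevel = "INFO"        # or an int such as 20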
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._notebook = None
self._ncores = psutil.cpu_count()
@property
def logger_fmt(self):
return self._logger_fmt
@property
def loglevel(self):
return self._loglevel
@logger_fmt.setter
def logger_fmt(self, value):
self._logger_fmt = value
config_logger(fmt=value, loglevel=self.loglevel)
@loglevel.setter
def loglevel(self, value):
if isinstance(value, int):
options = {5: "TRACE",
10: "DEBUG",
20: "INFO",
                       25: "SUCCESS",
30: "WARNING",
40: "ERROR",
50: "CRITICAL"}
value = options[value]
self._loglevel = value
os.environ["LOGURU_LEVEL"] = value
config_logger(fmt=self.logger_fmt, loglevel=value)
def __new__(cls):
if Settings.__instance__ is None:
Settings.__instance__ = super().__new__(cls)
return Settings.__instance__
def __repr__(self):
indent = 0
for item in self.__dir__():
if not item.startswith('_'):
indent = max(indent, len(item) + 1)
s = ''
for item in self.__dir__():
if not item.startswith('_'):
s += ''.join((item, ':', ' '*(indent-len(item))))
attr = getattr(self, item)
temp = ''.join((attr.__repr__(), '\n'))
if isinstance(attr, dict):
temp = temp.replace(',', '\n' + ' '*(indent + 1))
s += temp
return s
def _get_ncores(self):
if self._ncores is None:
self._ncores = psutil.cpu_count()
return self._ncores
def _set_ncores(self, val):
if val is None:
val = psutil.cpu_count()
elif val > psutil.cpu_count():
logger.error('Value is more than the available number of cores')
val = psutil.cpu_count()
self._ncores = val
ncores = property(fget=_get_ncores, fset=_set_ncores)
def _get_notebook(self):
if self._notebook is None:
self._notebook = _is_ipython_notebook()
return self._notebook
def _set_notebook(self, val):
logger.error('This value is determined automatically at runtime')
notebook = property(fget=_get_notebook, fset=_set_notebook)
def get_tqdm(): # pragma: no cover
r"""
Fetches a version of ``tqdm`` function that depends on the environment.
Either text-based for the IPython console or gui-based for Jupyter
notebooks.
Returns
-------
tqdm : function handle
The function to use when wrapping an iterator (i.e. tqdm(range(n)))
"""
if Settings().notebook is True:
tqdm = importlib.import_module('tqdm.notebook')
else:
tqdm = importlib.import_module('tqdm')
return tqdm.tqdm
def show_docstring(func): # pragma: no cover
r"""
Fetches the docstring for a function and returns it in markdown format.
Useful for printing in a Jupyter notebook.
Parameters
----------
func : object
Function handle to function whose docstring is desired
Returns
-------
md : str
A string with the markdown syntax included, suitable for printing
in a Jupyter notebook using the ``IPython.display.Markdown``
function.
"""
title = f'---\n ## Documentation for ``{func.__name__}``\n ---\n'
try:
from npdoc_to_md import render_md_from_obj_docstring
txt = render_md_from_obj_docstring(obj=func, obj_namespace=func.__name__)
except ModuleNotFoundError:
txt = func.__doc__
return title + txt + '\n---'
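# Example (illustrative, inside a Jupyter notebook):
#   from IPython.display import Markdown
#   Markdown(show_docstring(sanitize_filename))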
def sanitize_filename(filename, ext, exclude_ext=False):
r"""
Returns a sanitized string in the form of name.extension
Parameters
----------
filename : str
Unsanitized filename, could be 'test.vtk' or just 'test'
ext : str
Extension of the file, could be 'vtk'
exclude_ext : bool
If True, the returned string doesn't have the extension
Returns
-------
sanitized : str
Sanitized filename in form of name.extension
"""
    ext = ext.strip(".")
if filename.endswith(f".{ext}"):
name = ".".join(filename.split(".")[:-1])
else:
name = filename
filename_formatted = f"{name}" if exclude_ext else f"{name}.{ext}"
return filename_formatted
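# For example (derived from the function above):
#   sanitize_filename('test.vtk', 'vtk')                    -> 'test.vtk'
#   sanitize_filename('test', 'vtk')                        -> 'test.vtk'
#   sanitize_filename('test.vtk', 'vtk', exclude_ext=True)  -> 'test'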
class Results:
r"""
A minimal class for use when returning multiple values from a function
This class supports dict-like assignment and retrieval
(``obj['im'] = im``), namedtuple-like attribute look-ups (``obj.im``),
and generic class-like object assignment (``obj.im = im``)
"""
_value = "Description"
_key = "Item"
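    # Example (illustrative):
    #   r = Results()
    #   r['im'] = image          # dict-style assignment
    #   r.im                     # attribute-style retrieval of the same object
    #   print(r)                 # tabulated summary of all stored items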
def __iter__(self):
for item in self.__dict__.values():
yield item
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __str__(self):
header = "―" * 78
lines = [header, "{0:<25s} {1}".format(self._key, self._value), header]
for item in list(self.__dict__.keys()):
if item.startswith('_'):
continue
if (isinstance(self[item], np.ndarray)):
s = np.shape(self[item])
if (self[item].ndim > 1):
lines.append("{0:<25s} Image of size {1}".format(item, s))
else:
lines.append("{0:<25s} Array of size {1}".format(item, s))
else:
lines.append("{0:<25s} {1}".format(item, self[item]))
lines.append(header)
return "\n".join(lines)
|
py | 1a3a7270d455160c3b44677768c668322cd98288 | import ezdxf
import random # needed for random placing points
def get_random_point():
"""Creates random x, y coordinates."""
x = random.randint(-100, 100)
y = random.randint(-100, 100)
return x, y
# Create a new drawing in the DXF format of AutoCAD 2010
dwg = ezdxf.new('ac1024')
# Create a block with the name 'FLAG'
flag = dwg.blocks.new(name='FLAG')
# Add DXF entities to the block 'FLAG'.
# The default base point (= insertion point) of the block is (0, 0).
flag.add_polyline2d([(0, 0), (0, 5), (4, 3), (0, 3)]) # the flag as 2D polyline
flag.add_circle((0, 0), .4, dxfattribs={'color': 2}) # mark the base point with a circle
flag.add_linear_dim((1, 3), (0, 3), (4, 3), dxfattribs={'color': 2})
# Get the modelspace of the drawing.
modelspace = dwg.modelspace()
# Get 50 random placing points.
placing_points = [get_random_point() for _ in range(50)]
for point in placing_points:
# Every flag has a different scaling and a rotation of -15 deg.
random_scale = 0.5 + random.random() * 2.0
# Add a block reference to the block named 'FLAG' at the coordinates 'point'.
modelspace.add_blockref('FLAG', point, dxfattribs={
'xscale': random_scale,
'yscale': random_scale,
'rotation': -15
})
# Save the drawing.
dwg.saveas("/home/ebi/blockref_tutorial.dxf")
|
py | 1a3a73750111bcde6b06e7923a0f3aefc92f49d6 | from django.contrib import admin
from .models import Category, Blog, Comment
# Register your models here.
admin.site.register(Category)
admin.site.register(Blog)
admin.site.register(Comment) |
py | 1a3a73758ca0077df45ae00c0eac6025572ed1d0 | class Solution(object):
def decodeString(self, s):
"""
        Decode strings of the form k[encoded], e.g. '3[a2[c]]' -> 'accaccacc'.
        :type s: str
        :rtype: str
        """
        if len(s) < 1:
            return ''
        pointer = 0
        nums = '0123456789'
        result = ''
        times = 1
        while pointer < len(s):
            if s[pointer] in nums:
                # parse a (possibly multi-digit) repeat count
                times = int(s[pointer])
                start = pointer
                pointer += 1
                while pointer < len(s) and s[pointer] in nums:
                    times = int(s[start:pointer+1])
                    pointer += 1
            else:
                if s[pointer] == '[':
                    # walk to the matching ']' and recursively decode the bracketed substring
                    stack = [s[pointer]]
                    start = pointer + 1
                    pointer += 1
                    while len(stack) > 0:
                        if s[pointer] == '[':
                            stack.append(s[pointer])
                        if s[pointer] == ']':
                            stack.pop()
                        pointer += 1
                    result = result + self.decodeString(s[start:pointer-1]) * times
                    times = 1
                else:
                    # ordinary character outside brackets: copy it through
                    result = result + s[pointer]
                    pointer += 1
return result |
py | 1a3a744b712db7fd26e043e9dc5ebcf9e56e16a5 | from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.distributions import Categorical, Normal
class BasePolicy(nn.Module):
"""
Basic implementation of a general Policy
:param state_dim: State dimensions of the environment
:param action_dim: Action dimensions of the environment
:param hidden: Sizes of hidden layers
:param discrete: True if action space is discrete, else False
:type state_dim: int
:type action_dim: int
:type hidden: tuple or list
:type discrete: bool
"""
def __init__(
self, state_dim: int, action_dim: int, hidden: Tuple, discrete: bool, **kwargs
):
super(BasePolicy, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.hidden = hidden
self.discrete = discrete
self.action_lim = kwargs["action_lim"] if "action_lim" in kwargs else 1.0
self.action_var = kwargs["action_var"] if "action_var" in kwargs else 0.1
self.sac = kwargs["sac"] if "sac" in kwargs else False
if self.sac:
self.fc_mean = nn.Linear(self.hidden[-1], self.action_dim)
self.fc_std = nn.Linear(self.hidden[-1], self.action_dim)
self.model = None
def forward(
self, state: torch.Tensor
) -> (Tuple[torch.Tensor, Optional[torch.Tensor]]):
"""
Defines the computation performed at every call.
:param state: The state being passed as input to the policy
:type state: Tensor
"""
state = self.model.forward(state)
if self.sac:
state = nn.ReLU()(state)
mean = self.fc_mean(state)
log_std = self.fc_std(state)
log_std = torch.clamp(log_std, min=-20.0, max=2.0)
return mean, log_std
return state
def get_action(
self, state: torch.Tensor, deterministic: bool = False
) -> torch.Tensor:
"""
Get action from policy based on input
:param state: The state being passed as input to the policy
:param deterministic: (True if the action space is deterministic,
else False)
:type state: Tensor
:type deterministic: boolean
:returns: action
"""
action_probs = self.forward(state)
if self.discrete:
action_probs = nn.Softmax(dim=-1)(action_probs)
if deterministic:
action = (torch.argmax(action_probs, dim=-1), None)
else:
distribution = Categorical(probs=action_probs)
action = (distribution.sample(), distribution)
else:
action_probs = nn.Tanh()(action_probs) * self.action_lim
if deterministic:
action = (action_probs, None)
else:
distribution = Normal(action_probs, self.action_var)
action = (distribution.sample(), distribution)
return action
class BaseValue(nn.Module):
"""
Basic implementation of a general Value function
"""
def __init__(self, state_dim: int, action_dim: int):
super(BaseValue, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.model = None
def forward(self, state: torch.Tensor) -> torch.Tensor:
"""
Defines the computation performed at every call.
:param state: Input to value function
:type state: Tensor
"""
return self.model.forward(state)
def get_value(self, state: torch.Tensor) -> torch.Tensor:
"""
Get value from value function based on input
:param state: Input to value function
:type state: Tensor
:returns: Value
"""
return self.forward(state).squeeze(-1)
class BaseActorCritic(nn.Module):
"""
Basic implementation of a general Actor Critic
"""
def __init__(self):
super(BaseActorCritic, self).__init__()
self.actor = None
self.critic = None
def get_action(
self, state: torch.Tensor, deterministic: bool = False
) -> torch.Tensor:
"""
Get action from the Actor based on input
:param state: The state being passed as input to the Actor
:param deterministic: (True if the action space is deterministic,
else False)
:type state: Tensor
:type deterministic: boolean
:returns: action
"""
state = torch.as_tensor(state).float()
return self.actor.get_action(state, deterministic=deterministic)
def get_value(self, state: torch.Tensor) -> torch.Tensor:
"""
Get value from the Critic based on input
:param state: Input to the Critic
:type state: Tensor
:returns: value
"""
state = torch.as_tensor(state).float()
return self.critic.get_value(state)
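
# Minimal illustrative subclass (an assumption -- not part of this module):
# it only wires up `self.model`, which the base classes expect to be set.
#
#   class MLPPolicy(BasePolicy):
#       def __init__(self, state_dim, action_dim, hidden=(32,), discrete=True):
#           super().__init__(state_dim, action_dim, hidden, discrete)
#           self.model = nn.Sequential(
#               nn.Linear(state_dim, hidden[0]), nn.ReLU(),
#               nn.Linear(hidden[0], action_dim),
#           )
#
#   policy = MLPPolicy(4, 2)
#   action, dist = policy.get_action(torch.rand(4))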
|
py | 1a3a7761b108402d4f6ae803d2f9dbcd297fd754 | #parses response from gateway
from ..importmanager import Imports
imports = Imports(
{
'StartParse': 'fossbotpy.gateway.start.parse',
'GuildParse': 'fossbotpy.gateway.guild.parse',
'UserParse': 'fossbotpy.gateway.user.parse',
'MessageParse': 'fossbotpy.gateway.messages.parse',
'ChannelParse': 'fossbotpy.gateway.channels.parse',
}
)
import copy
#function names are just lowercase types, so for type GUILD_MEMBER_LIST_UPDATE, the function is guild_member_list_update
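#e.g. Parse({'t': 'MESSAGE_CREATE', 'd': {...}}).auto() dispatches to Parse.message_create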
class Parse(object):
__slots__ = ['response']
def __init__(self, response):
self.response = copy.deepcopy(response)
def auto(self): #auto parse, does not allow for custom inputs
resptype = str(self.response.get('t')).lower()
if hasattr(self, resptype):
return getattr(self, resptype)()
return self.response.get('d') #just return the value of d if there's no parse function for it yet
def ready(self):
return imports.StartParse().ready(self.response)
def guild_member_list_update(self):
return imports.GuildParse().guild_member_list_update(self.response)
def guild_create(self, my_user_id='0'): #personal user id needed to update personal roles for that guild
return imports.GuildParse().guild_create(self.response, my_user_id)
def guild_members_chunk(self):
return imports.GuildParse().guild_members_chunk(self.response)
def message_create(self):
return imports.MessageParse().message_create(self.response)
def sessions_replace(self, session_id='0'):
return imports.UserParse().sessions_replace(self.response, session_id)
def channel_create(self):
return imports.ChannelParse().channel_create(self.response)
def channel_delete(self):
return imports.ChannelParse().channel_delete(self.response)
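
# Illustrative helper (added for clarity; not part of the original module).
# Parse.auto() lowercases the event type in 't' and dispatches to the method
# of the same name (e.g. MESSAGE_CREATE -> message_create); when no such
# method exists it simply returns the raw 'd' payload. The payload below is
# fabricated purely for illustration.
def _example_dispatch():
    typing_event = {'t': 'TYPING_START', 'd': {'channel_id': '0'}}
    # Parse has no typing_start method, so auto() returns the 'd' dict as-is.
    return Parse(typing_event).auto()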
|
py | 1a3a77cef72be4f539e3ee5c122366c2af66d8d6 | from diff_prof.diffusion_profiles import DiffusionProfiles
from msi.msi import MSI
import os
import numpy as np
def test_diffusion_profiles(ref_dp_file_path, calculated_dp_file_path):
    # Saved: load the reference (pre-computed) diffusion profiles
dp_saved = DiffusionProfiles(alpha=None, max_iter=None, tol=None,
weights=None, num_cores=None, save_load_file_path=ref_dp_file_path)
msi_saved = MSI()
msi_saved.load()
msi_saved.load_saved_node_idx_mapping_and_nodelist(
dp_saved.save_load_file_path)
dp_saved.load_diffusion_profiles(
msi_saved.drugs_in_graph + msi_saved.indications_in_graph)
# Calculated
dp_calculated = DiffusionProfiles(alpha=None, max_iter=None, tol=None,
weights=None, num_cores=None, save_load_file_path=calculated_dp_file_path)
msi_calculated = MSI()
msi_calculated.load()
msi_calculated.load_saved_node_idx_mapping_and_nodelist(
dp_calculated.save_load_file_path)
dp_calculated.load_diffusion_profiles(
msi_calculated.drugs_in_graph + msi_calculated.indications_in_graph)
# Compare
# Make sure have diffusion profiles for the same drugs and indications
assert(set(dp_saved.drug_or_indication2diffusion_profile.keys()) ==
set(dp_calculated.drug_or_indication2diffusion_profile.keys()))
# Reorder calculated diffusion profile for consistency with saved diffusion profile
calculated_reorder_idxs = [msi_calculated.node2idx[node]
for node in msi_saved.nodelist]
for drug_or_indication, saved_diffusion_profile in dp_saved.drug_or_indication2diffusion_profile.items():
calculated_diffusion_profile = dp_calculated.drug_or_indication2diffusion_profile[
drug_or_indication]
# Reorder calculated diffusion_profile according to saved
calculated_diffusion_profile = calculated_diffusion_profile[calculated_reorder_idxs]
# Ensure close enough
assert(np.allclose(saved_diffusion_profile, calculated_diffusion_profile))
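
# Minimal sketch (added for clarity; not part of the original test) of the
# reordering step above: a profile stored in the "calculated" node order is
# permuted into the "saved" node order before comparison. The toy node names
# and values are made up for illustration only.
def _reorder_example():
    saved_nodelist = ['drug_A', 'protein_B', 'disease_C']
    calculated_node2idx = {'protein_B': 0, 'disease_C': 1, 'drug_A': 2}
    calculated_profile = np.array([0.2, 0.1, 0.7])  # values in calculated order
    reorder_idxs = [calculated_node2idx[node] for node in saved_nodelist]
    return calculated_profile[reorder_idxs]  # -> array([0.7, 0.2, 0.1])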
|
py | 1a3a782e9a3045167e8d9b265230d8d1ca4a48ed | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
import codecs
import numpy as np
import hashlib
import random
import preprocess
class Preparation(object):
    '''Convert datasets from different text matching tasks into a unified format used as the input of deep matching modules. Users provide datasets containing pairs of texts along with their labels, and the module produces the following files:
    * Word Dictionary: this file records the mapping from each word to a unique identifier.
    * Corpus File: this file records the mapping from each text to a unique identifier, along with the sequence of word identifiers contained in that text.
* Relation File: this file records the relationship between two texts, each line containing the label and a pair of ids.
'''
def __init__(self):
pass
def get_text_id(self, hashid, text, idtag='T'):
hash_obj = hashlib.sha1(text.encode('utf8')) # if the text are the same, then the hash_code are also the same
hex_dig = hash_obj.hexdigest()
if hex_dig in hashid:
return hashid[hex_dig]
else:
tid = idtag + str(len(hashid)) # start from 0, 1, 2, ...
hashid[hex_dig] = tid
return tid
def parse_line(self, line, delimiter='\t'):
subs = line.split(delimiter)
# print('subs: ', len(subs))
if 3 != len(subs):
            raise ValueError('format of data file wrong, should be \'label<delim>text1<delim>text2\' (delimiter defaults to a tab).')
else:
return subs[0], subs[1], subs[2]
def parse_line_for_quora(self, line, delimiter='","'):
subs = line.split(delimiter)
#print('subs: ', len(subs))
# if subs[1]=="qid1":
# return
if 6 != len(subs):
# print( "line__not satisfied",line)
# raise ValueError('format of data file wrong, should be \'label,text1,text2\'.')
return 0, 0, 0, 0, 0
else:
return subs[1], subs[2], subs[3], subs[4], subs[5][0]
def run_with_one_corpus_for_quora(self, file_path):
# hashid = {}
corpus = {}
rels = []
f = codecs.open(file_path, 'r', encoding='utf8')
next(f)
for line in f:
# print("", i)
# print("", i)
# line = line.decode('utf8')
line = line.strip()
qid1, qid2, q1, q2, label = self.parse_line_for_quora(line, "\t")
if q1 != 0:
corpus[qid1] = q1
corpus[qid2] = q2
rels.append((label, qid1, qid2))
f.close()
return corpus, rels
def run_with_one_corpus(self, file_path):
hashid = {}
corpus = {}
rels = []
f = codecs.open(file_path, 'r', encoding='utf8')
for line in f:
            line = line.strip()
label, t1, t2 = self.parse_line(line)
id1 = self.get_text_id(hashid, t1, 'T')
id2 = self.get_text_id(hashid, t2, 'T')
corpus[id1] = t1
corpus[id2] = t2
rels.append((label, id1, id2))
f.close()
return corpus, rels
def run_with_two_corpus(self, file_path):
hashid_q = {}
hashid_d = {}
corpus_q = {}
corpus_d = {}
rels = []
f = codecs.open(file_path, 'r', encoding='utf8')
for line in f:
            line = line.strip()
label, t1, t2 = self.parse_line(line)
id1 = self.get_text_id(hashid_q, t1, 'Q')
id2 = self.get_text_id(hashid_d, t2, 'D')
corpus_q[id1] = t1
corpus_d[id2] = t2
rels.append((label, id1, id2))
f.close()
return corpus_q, corpus_d, rels
def run_with_train_valid_test_corpus(self, train_file, valid_file, test_file):
'''
Run with pre-splited train_file, valid_file, test_file
The input format should be label \t text1 \t text2
The query ids can't be duplicated. For the same query
id, the document ids can't be duplicated.
        Note that if we give every query a unique id (with a fixed 10 candidates per query), it is
        possible for multiple queries to have different query ids but the same text (in rare cases)
:param train_file: train file
:param valid_file: valid file
:param test_file: test file
:return: corpus, rels_train, rels_valid, rels_test
'''
hashid = {}
corpus = {}
rels = []
rels_train = []
rels_valid = []
rels_test = []
# merge corpus files, but return rels for train/valid/test seperately
curQ = 'init'
curQid = 0
for file_path in list([train_file, valid_file, test_file]):
if file_path == train_file:
rels = rels_train
elif file_path == valid_file:
rels = rels_valid
            elif file_path == test_file:
rels = rels_test
f = codecs.open(file_path, 'r', encoding='utf8')
for line in f:
                line = line.strip()
label, t1, t2 = self.parse_line(line)
id2 = self.get_text_id(hashid, t2, 'D')
# generate unique query ids
if t1 == curQ:
# same query
id1 = 'Q' + str(curQid)
else:
# new query
curQid += 1
id1 = 'Q' + str(curQid)
curQ = t1
corpus[id1] = t1
corpus[id2] = t2
rels.append((label, id1, id2))
f.close()
return corpus, rels_train, rels_valid, rels_test
@staticmethod
def save_corpus(file_path, corpus):
f = codecs.open(file_path, 'w', encoding='utf8')
for qid, text in corpus.items():
f.write('%s %s\n' % (qid, text))
f.close()
@staticmethod
def merge_corpus(train_corpus, valid_corpus, test_corpus):
# cat train valid test > corpus.txt
# cat corpus_train.txt corpus_valid.txt corpus_test.txt > corpus.txt
os.system('cat ' + train_corpus + ' ' + valid_corpus + ' ' + test_corpus + ' > corpus.txt')
@staticmethod
def save_relation(file_path, relations):
f = open(file_path, 'w')
for rel in relations:
f.write('%s %s %s\n' % (rel))
f.close()
@staticmethod
def check_filter_query_with_dup_doc(input_file):
""" Filter queries with duplicated doc ids in the relation files
:param input_file: input file, which could be the relation file for train/valid/test data
The format is "label qid did"
:return:
"""
with open(input_file) as f_in, open(input_file + '.fd', 'w') as f_out:
cur_qid = 'init'
cache_did_set = set()
cache_q_lines = []
found_dup_doc = False
for l in f_in:
tokens = l.split()
if tokens[1] == cur_qid:
# same qid
cache_q_lines.append(l)
if tokens[2] in cache_did_set:
found_dup_doc = True
else:
cache_did_set.add(tokens[2])
else:
# new qid
if not found_dup_doc:
f_out.write(''.join(cache_q_lines))
else:
print('found qid with duplicated doc id/text: ', ''.join(cache_q_lines))
print('filtered... continue')
cache_q_lines = []
cache_q_lines.append(l)
found_dup_doc = False
cache_did_set.clear()
cur_qid = tokens[1]
cache_did_set.add(tokens[2])
# the last query
# print len(cache_q_lines), len(cache_did_set)
if len(cache_q_lines) != 0 and len(cache_q_lines) == len(cache_did_set):
f_out.write(''.join(cache_q_lines))
print('write the last query... done: ', ''.join(cache_q_lines))
@staticmethod
def split_train_valid_test(relations, ratio=(0.8, 0.1, 0.1)):
random.shuffle(relations)
total_rel = len(relations)
num_train = int(total_rel * ratio[0])
num_valid = int(total_rel * ratio[1])
valid_end = num_train + num_valid
rel_train = relations[: num_train]
rel_valid = relations[num_train: valid_end]
rel_test = relations[valid_end:]
return rel_train, rel_valid, rel_test
@staticmethod
def split_train_valid_test_for_ranking(relations, ratio=(0.8, 0.1, 0.1)):
qid_group = set()
for r, q, d in relations:
qid_group.add(q)
qid_group = list(qid_group)
random.shuffle(qid_group)
total_rel = len(qid_group)
num_train = int(total_rel * ratio[0])
num_valid = int(total_rel * ratio[1])
valid_end = num_train + num_valid
qid_train = qid_group[: num_train]
qid_valid = qid_group[num_train: valid_end]
qid_test = qid_group[valid_end:]
def select_rel_by_qids(qids):
rels = []
qids = set(qids)
for r, q, d in relations:
if q in qids:
rels.append((r, q, d))
return rels
rel_train = select_rel_by_qids(qid_train)
rel_valid = select_rel_by_qids(qid_valid)
rel_test = select_rel_by_qids(qid_test)
return rel_train, rel_valid, rel_test
if __name__ == '__main__':
prepare = Preparation()
basedir = '/home/wtt/Code/MatchZoo/data/InsuranceQA/'#'../../data/example/ranking/'
corpus, rels = prepare.run_with_one_corpus(basedir + 'sample.txt')
print('total corpus : %d ...' % (len(corpus)))
print('total relations : %d ...' % (len(rels)))
prepare.save_corpus(basedir + 'corpus.txt', corpus)
# rel_train, rel_valid, rel_test = prepare.split_train_valid_test(rels, (0.8, 0.1, 0.1))
corpus, rel_train = prepare.run_with_one_corpus(basedir + 'trainsample.txt')
corpus, rel_valid = prepare.run_with_one_corpus(basedir + 'validsample.txt')
corpus, rel_test = prepare.run_with_one_corpus(basedir + 'testsample.txt')
prepare.save_relation(basedir + 'relation_train.txt', rel_train)
prepare.save_relation(basedir + 'relation_valid.txt', rel_valid)
prepare.save_relation(basedir + 'relation_test.txt', rel_test)
print('Done ...')
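    # Illustration (added for clarity) of the file formats produced above,
    # based on save_corpus()/save_relation(); the ids and texts are made up:
    #   corpus.txt          -> "T0 how do i learn python"
    #   relation_train.txt  -> "1 T0 T1"   (label, id of text1, id of text2)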
|
py | 1a3a78bb5bead8c140743a053b41db850aaad0c1 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stateless random ops which take seed as a tensor input.
DEPRECATED: Use `tf.random.stateless_uniform` rather than
`tf.contrib.stateless.stateless_random_uniform`, and similarly for the other
routines.
Instead of taking `seed` as an attr which initializes a mutable state within
the op, these random ops take `seed` as an input, and the random numbers are
a deterministic function of `shape` and `seed`.
WARNING: These ops are in contrib, and are not stable. They should be
consistent across multiple runs on the same hardware, but only for the same
version of the code.
@@stateless_multinomial
@@stateless_random_uniform
@@stateless_random_normal
@@stateless_truncated_normal
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.stateless_random_ops import stateless_random_uniform
from tensorflow.python.ops.stateless_random_ops import stateless_random_normal
from tensorflow.python.ops.stateless_random_ops import stateless_truncated_normal
from tensorflow.python.ops.stateless_random_ops import stateless_multinomial
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
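
# Illustrative usage (added for clarity; not part of the original module).
# Stateless ops take the seed as a regular shape-[2] integer tensor input, so
# the same (shape, seed) pair always produces the same values. Sketched below
# under the assumption of a TF1-style graph/session workflow:
#
#   import tensorflow as tf
#   from tensorflow.contrib import stateless
#
#   seed = tf.constant([11, 42], dtype=tf.int64)  # shape-[2] seed tensor
#   sample = stateless.stateless_random_uniform(shape=[3], seed=seed)
#   with tf.Session() as sess:
#       print(sess.run(sample))  # identical output on every run of the graph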
|
py | 1a3a78eaa48f8705a93b8cb08a0a30725c9f015a | import copy
import logging
import random
from typing import Any, Callable, Dict, List, Optional, Tuple
from generator import (
DefinitionDataset,
InteractiveGoal,
ObjectDefinition,
RetrievalGoal,
SceneException,
base_objects,
containers,
geometry,
materials,
specific_objects,
structures,
tags,
util,
)
from generator.separating_axis_theorem import sat_entry
from .hypercubes import (
Hypercube,
HypercubeFactory,
update_floor_and_walls,
update_scene_objects,
)
from .interactive_plans import (
InteractivePlan,
ObjectLocationPlan,
ObjectPlan,
create_container_hypercube_plan_list,
create_eval_4_container_hypercube_plan_list,
create_obstacle_hypercube_plan_list,
create_occluder_hypercube_plan_list,
)
from .object_data import (
ObjectData,
ReceptacleData,
TargetData,
identify_larger_definition,
)
ROOM_DIMENSIONS = geometry.DEFAULT_ROOM_DIMENSIONS
# Add or subtract the performer width to ensure it can move behind any object.
ROOM_X_MIN = -(ROOM_DIMENSIONS['x'] / 2.0) + util.PERFORMER_WIDTH
ROOM_Z_MIN = -(ROOM_DIMENSIONS['z'] / 2.0) + util.PERFORMER_WIDTH
ROOM_X_MAX = (ROOM_DIMENSIONS['x'] / 2.0) - util.PERFORMER_WIDTH
ROOM_Z_MAX = (ROOM_DIMENSIONS['z'] / 2.0) - util.PERFORMER_WIDTH
LAST_STEP = 2500
SMALL_CONTEXT_OBJECT_CHOICES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SMALL_CONTEXT_OBJECT_WEIGHTS = [5, 5, 10, 10, 12.5, 15, 12.5, 10, 10, 5, 5]
WALL_CHOICES = [0, 1, 2, 3]
WALL_WEIGHTS = [40, 30, 20, 10]
WALL_MAX_WIDTH = 4
WALL_MIN_WIDTH = 1
WALL_HEIGHT = 3
WALL_DEPTH = 0.1
WALL_SEPARATION = 1
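# Note (added for clarity): the ROOM_*_MIN/MAX bounds above shrink the room by
# the performer's width on every side so that randomly generated positions
# always leave space for the performer to move behind an object. The
# *_CHOICES / *_WEIGHTS pairs are sampled later via
# random.choices(choices, weights=..., k=1)[0] to decide how many interior
# walls and small context objects each hypercube gets.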
def retrieve_template_list(object_data: ObjectData) -> List[Dict[str, Any]]:
return [object_data.trained_template, object_data.untrained_template]
class InteractiveHypercube(Hypercube):
"""A hypercube of interactive scenes that each have the same goals,
targets, distractors, walls, materials, and performer starts, except for
specific differences detailed in its plan."""
def __init__(
self,
body_template: Dict[str, Any],
goal: InteractiveGoal,
role_to_type: Dict[str, str],
plan_name: str,
plan_list: List[InteractivePlan],
training=False
) -> None:
self._goal = goal
self._plan_list = plan_list
self._role_to_type = role_to_type
self._initialize_object_data()
self._validate_object_plan()
super().__init__(
goal.get_name() + ((' ' + plan_name) if plan_name else ''),
body_template,
goal.get_goal_template(),
training=training
)
def _initialize_object_data(self) -> None:
# Save each possible object's plans across all scenes.
self._data = {
'target': [TargetData(self._plan_list[0].target_plan, 0)],
'confusor': [
ObjectData(tags.ROLES.CONFUSOR, object_plan) for object_plan
in self._plan_list[0].confusor_plan_list
],
'large_container': [
ReceptacleData(tags.ROLES.CONTAINER, object_plan)
for object_plan in self._plan_list[0].large_container_plan_list
],
'obstacle': [
ReceptacleData(tags.ROLES.OBSTACLE, object_plan)
for object_plan in self._plan_list[0].obstacle_plan_list
],
'occluder': [
ReceptacleData(tags.ROLES.OCCLUDER, object_plan)
for object_plan in self._plan_list[0].occluder_plan_list
],
'small_container': [
ReceptacleData(tags.ROLES.CONTAINER, object_plan)
for object_plan in self._plan_list[0].small_container_plan_list
]
}
# Assume that each object has a plan in each scene. An object that does
# not appear in a scene should be given a NONE location plan.
for scene_plan in self._plan_list[1:]:
for role, object_plan_list in scene_plan.object_plans().items():
for index, object_plan in enumerate(object_plan_list):
self._data[role][index].append_object_plan(object_plan)
# Assume only one target plan, and always use the index 0 target.
self._target_data = self._data['target'][0]
# Assume only zero or one confusor plan.
self._confusor_data = (
self._data['confusor'][0] if len(self._data['confusor']) > 0
else None
)
def _validate_object_plan(self) -> None:
if any([
scene_plan.target_plan.definition !=
self._target_data.original_definition
for scene_plan in self._plan_list
]):
raise SceneException(
'Interactive hypercubes cannot currently handle a target with '
'different definitions across scenes')
if any(self._target_data.untrained_plan_list):
raise SceneException(
'Interactive hypercubes cannot currently handle a target with '
'a randomly chosen (not pre-defined) untrained shape')
# Update _assign_each_object_location to handle new location plans.
for object_data in self._data['target']:
if (
object_data.is_between() or object_data.is_far()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'target location plans: BETWEEN, FAR')
for object_data in self._data['confusor']:
if (
object_data.is_between() or object_data.is_random()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'confusor location plans: BETWEEN, RANDOM')
for object_data in (
self._data['large_container'] + self._data['small_container']
):
if (
object_data.is_back() or object_data.is_between() or
object_data.is_close() or object_data.is_far() or
object_data.is_front() or object_data.is_inside()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'container location plans: BACK, BETWEEN, CLOSE, FAR, '
'FRONT, INSIDE')
for object_data in (self._data['obstacle'] + self._data['occluder']):
if (
object_data.is_back() or object_data.is_far() or
object_data.is_front() or object_data.is_inside()
):
raise SceneException(
'Interactive hypercubes cannot currently handle the '
'obstacle or occluder location plans: BACK, FAR, FRONT, '
'INSIDE')
# Override
def _create_scenes(
self,
body_template: Dict[str, Any],
goal_template: Dict[str, Any]
) -> List[Dict[str, Any]]:
tries = 0
while True:
tries += 1
try:
logging.debug(
f'\n\n{self.get_name()} initialize scenes try {tries}\n')
# Reset the half-finished scenes, all of their objects, and
# their other properties on each try.
scenes = [
copy.deepcopy(body_template) for _
in range(len(self._plan_list))
]
for object_data_list in self._data.values():
for object_data in object_data_list:
object_data.reset_all_properties()
# Save the bounds of each object in each of its possible
# locations across all the scenes to detect collisions with
# any subsequently positioned objects.
self._bounds_list = []
# Save the targets used in the hypercube that are not defined
# by the plan, if the goal has multiple targets.
self._common_target_list = []
# Save the interior walls used in the hypercube.
self._interior_wall_list = []
# Save the performer's start location in the hypercube.
self._performer_start = self._generate_performer_start()
# Save the small context objects used in the hypercube.
self._small_context_object_list = []
# Initialize all of the objects in all of the scenes.
self._initialize_each_hypercube_object()
# Update each scene's template with its corresponding objects,
# goal, tags, and other specific properties.
for index, scene in enumerate(scenes):
self._update_scene_at_index(scene, index, goal_template)
logging.debug(
f'\n\n{self.get_name()} initialize scenes is done\n ')
scenes = update_floor_and_walls(
body_template,
self._data,
retrieve_template_list,
scenes
)
break
except SceneException:
logging.exception(
f'{self.get_name()} _initialize_each_hypercube_object')
if tries >= util.MAX_TRIES:
raise SceneException(
f'{self.get_name()} cannot successfully initialize scenes '
f'-- please redo.')
return scenes
# Override
def _get_training_scenes(self) -> List[Dict[str, Any]]:
return [
scene for scene in self._scenes
if not scene['debug']['evaluationOnly']
]
def _assign_confusor_obstacle_occluder_location(
self,
target_data: TargetData,
target_or_receptacle_definition: ObjectDefinition,
confusor_data: Optional[ObjectData],
obstacle_occluder_data_list: List[ObjectData],
large_container_data_list: List[ReceptacleData],
goal: InteractiveGoal,
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
plans_to_locations: Dict[ObjectLocationPlan, List[Dict[str, Any]]]
) -> None:
"""Generate and assign locations to the given confusor, obstacle, and
occluder objects, if needed. Will update the given bounds_list."""
# Objects positioned relative to the target (confusors, obstacles, and
# occluders) must each choose new locations for each of the target's
# distinct locations (or its receptacle's locations) across scenes.
target_locations_with_indexes = (
target_data.locations_with_indexes(large_container_data_list)
)
# Next, choose a location for an obstacle/occluder either between the
# performer's start location and the target or behind the target (if
# needed). Assume only one obstacle or occluder is ever "in between"
# OR "close" in a single scene.
for target_location_plan, indexes in target_locations_with_indexes:
for object_data in obstacle_occluder_data_list:
is_obstacle = (object_data.role == tags.ROLES.OBSTACLE)
if object_data.is_between():
# Use the same location for the object across scenes in
# which the target is in this specific location.
self._assign_single_obstacle_occluder_location(
object_data,
target_or_receptacle_definition,
plans_to_locations[target_location_plan],
performer_start,
bounds_list,
'between',
object_data.assign_location_between,
indexes,
obstruct=(not is_obstacle),
unreachable=is_obstacle
)
if object_data.is_close():
# Use the same location for the object across scenes in
# which the target is in this specific location.
self._assign_single_obstacle_occluder_location(
object_data,
target_or_receptacle_definition,
plans_to_locations[target_location_plan],
performer_start,
bounds_list,
'behind',
object_data.assign_location_close,
indexes,
behind=True
)
if object_data.is_random():
# Use the same location for the object across scenes in
# which the target is in this specific location.
location = self._generate_random_location(
object_data.trained_definition,
goal,
performer_start,
bounds_list,
target_location=(
plans_to_locations[target_location_plan]
),
second_definition=object_data.untrained_definition
)
logging.debug(
f'{self.get_name()} obstacle/occluder location '
f'randomly chosen but not obstructing target: '
f'{location}')
bounds = object_data.assign_location_random(location)
bounds_list.extend(bounds)
# Next, choose a location for the confusor, close to or far from the
# target (if needed).
if confusor_data:
for target_location_plan, indexes in target_locations_with_indexes:
if confusor_data.is_close():
# Use the same location for the object across scenes in
# which the target is in this specific location.
location = self._generate_close_to(
confusor_data.larger_definition(),
target_or_receptacle_definition,
plans_to_locations[target_location_plan],
performer_start,
bounds_list,
adjacent=True
)
logging.debug(
f'{self.get_name()} confusor location close to: '
f'{location}')
bounds = confusor_data.assign_location_close(
location,
indexes
)
bounds_list.extend(bounds)
if confusor_data.is_far():
# Use the same location for the object across scenes in
# which the target is in this specific location.
location = self._generate_far_from(
confusor_data.larger_definition(),
plans_to_locations[target_location_plan],
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} confusor location far from: '
f'{location}')
bounds = confusor_data.assign_location_far(
location,
indexes
)
bounds_list.extend(bounds)
def _assign_container_location(
self,
container_data_list: List[ReceptacleData],
goal: InteractiveGoal,
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> None:
"""Generate and assign locations to the given container receptacle
objects, if needed. Will update the given bounds_list."""
# Next, choose the locations for the remaining containers (if needed).
for container_data in container_data_list:
if container_data.is_random():
# Use the same location for the object across scenes in which
# the object is randomly positioned.
location = self._generate_random_location(
container_data.larger_definition(),
goal,
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} container location randomly chosen: '
f'{location}')
bounds = container_data.assign_location_random(location)
bounds_list.extend(bounds)
def _assign_front_and_back_location(
self,
target_data: TargetData,
target_or_receptacle_definition: ObjectDefinition,
confusor_data_list: List[ObjectData],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[ObjectLocationPlan, List[Dict[str, Any]]]:
"""Generate and assign front and back locations to the given target and
confusor objects, if needed. Will update the given bounds_list. Return
the target's location corresponding to each unique location plan."""
# Save the target's location corresponding to each location plan.
plans_to_locations = {}
front_and_back_object_data_list = [target_data] + confusor_data_list
if any([
(object_data.is_front() or object_data.is_back()) for object_data
in front_and_back_object_data_list
]):
# Assume only one object is ever "in front" and only one object
# is ever "in back" in a single scene, so use the same front and
# back locations on each relevant object.
location_front, location_back = self._generate_front_and_back(
target_or_receptacle_definition,
target_data.choice
)
logging.debug(
f'{self.get_name()} location in front of performer start:'
f'{location_front}')
logging.debug(
f'{self.get_name()} location in back of performer start:'
f'{location_back}')
for object_data in front_and_back_object_data_list:
bounds = object_data.assign_location_front(location_front)
bounds_list.extend(bounds)
bounds = object_data.assign_location_back(location_back)
bounds_list.extend(bounds)
plans_to_locations[ObjectLocationPlan.FRONT] = location_front
plans_to_locations[ObjectLocationPlan.BACK] = location_back
# We assume the performer_start won't be modified past here.
logging.debug(
f'{self.get_name()} performer start: {self._performer_start}')
return plans_to_locations
def _assign_object_location_inside_container(
self,
target_data: TargetData,
confusor_data: Optional[ObjectData],
large_container_data_list: List[ReceptacleData]
) -> None:
"""Generate and assign locations to the given target and confusor
objects inside the given container objects, if needed. Will update the
given bounds_list."""
target_contained_indexes = target_data.contained_indexes(
large_container_data_list,
confusor_data
)
# Finally, position the target and confusor inside containers.
for index, container_data, confusor_data in target_contained_indexes:
# Create a new instance of each object to use in this scene.
target_instance = copy.deepcopy(target_data.trained_template)
containment = (
container_data.untrained_containment
if container_data.untrained_plan_list[index]
else container_data.trained_containment
)
# If confusor_data is None, put just the target in the container.
if not confusor_data:
containers.put_object_in_container(
target_instance,
container_data.instance_list[index],
containment.area_index,
containment.target_angle
)
# Else, put both the target and confusor together in the container.
else:
confusor_instance = copy.deepcopy(
confusor_data.untrained_template
if confusor_data.untrained_plan_list[index]
else confusor_data.trained_template
)
containers.put_objects_in_container(
target_instance,
confusor_instance,
container_data.instance_list[index],
containment.area_index,
containment.orientation,
containment.target_angle,
containment.confusor_angle
)
# Save the confusor instance in the hypercube data.
confusor_data.instance_list[index] = confusor_instance
# Save the target instance in the hypercube data.
target_data.instance_list[index] = target_instance
confusor_contained_indexes = confusor_data.contained_indexes(
large_container_data_list,
target_data
) if confusor_data else []
for index, container_data, target_data in confusor_contained_indexes:
# Create a new instance of each object to use in this scene.
confusor_instance = copy.deepcopy(
confusor_data.untrained_template
if confusor_data.untrained_plan_list[index]
else confusor_data.trained_template
)
# If target_data is None, put just the confusor in the container.
if not target_data:
containers.put_object_in_container(
confusor_instance,
container_data.instance_list[index],
container_data.area_index,
container_data.confusor_angle
)
# Save the confusor instance in the hypercube data.
confusor_data.instance_list[index] = confusor_instance
# Else, we already put both objects together in a container, above.
def _assign_single_obstacle_occluder_location(
self,
obstacle_occluder_data: ObjectData,
target_or_receptacle_definition: ObjectDefinition,
target_location: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
debug_label: str,
location_function: Callable,
indexes: List[float],
behind: bool = False,
obstruct: bool = False,
unreachable: bool = False
) -> None:
"""Generate and assign new locations to a single given obstacle or
occluder using the given function either obstructing or behind the
target. Find separate locations for both the trained and the untrained
definitions because each must be able to obstruct the target."""
trained_location = self._generate_close_to(
obstacle_occluder_data.trained_definition,
target_or_receptacle_definition,
target_location,
performer_start,
bounds_list,
behind=behind,
obstruct=obstruct,
unreachable=unreachable
)
logging.debug(
f'{self.get_name()} trained obstacle/occluder location '
f'{debug_label} target and performer start: {trained_location}')
untrained_location = self._generate_close_to(
obstacle_occluder_data.untrained_definition,
target_or_receptacle_definition,
target_location,
performer_start,
bounds_list,
behind=behind,
obstruct=obstruct,
unreachable=unreachable
)
logging.debug(
f'{self.get_name()} untrained obstacle/occluder location '
f'{debug_label} target and performer start: {untrained_location}')
bounds_trained = location_function(trained_location, [
index for index in indexes
if not obstacle_occluder_data.untrained_plan_list[index]
])
bounds_list.extend(bounds_trained)
bounds_untrained = location_function(untrained_location, [
index for index in indexes
if obstacle_occluder_data.untrained_plan_list[index]
])
bounds_list.extend(bounds_untrained)
def _assign_target_location(
self,
target_data: TargetData,
target_or_receptacle_definition: ObjectDefinition,
container_data: Optional[ReceptacleData],
confusor_data_list: List[ObjectData],
goal: InteractiveGoal,
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[ObjectLocationPlan, List[Dict[str, Any]]]:
"""Generate and assign locations to the given target, as well as the
given target's receptacle and confusor objects if needed. Will update
the given bounds_list. Return the target's location corresponding to
each unique location plan."""
# First, choose the locations for the objects positioned relative to
# the performer's start location (if needed), both in front of it and
# in back of it. Do FIRST because it may change performer_start.
plans_to_locations = self._assign_front_and_back_location(
target_data,
target_or_receptacle_definition,
confusor_data_list,
bounds_list
)
# Next, choose the locations for the target's container (if needed).
target_container_location = None
if container_data and container_data.is_random():
# Use the same location for the object across scenes in which
# the object is randomly positioned.
target_container_location = self._generate_random_location(
container_data.larger_definition(),
goal,
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} container location randomly chosen: '
f'{target_container_location}')
bounds = container_data.assign_location_random(
target_container_location
)
bounds_list.extend(bounds)
# Next, choose a location close to the target's container (if any).
# Assume a "close" target is always close to its container.
if target_data.is_close():
target_definition = target_data.larger_definition()
# If the target was turned sideways, revert it for the location
# close to the target's container.
if target_definition.notSideways:
target_definition = copy.deepcopy(target_definition)
target_definition.dimensions = (
target_definition.notSideways['dimensions']
)
target_definition.offset = (
target_definition.notSideways['offset']
)
target_definition.positionY = (
target_definition.notSideways['positionY']
)
target_definition.rotation = (
target_definition.notSideways['rotation']
)
location = self._generate_close_to(
target_definition,
container_data.larger_definition(),
target_container_location,
performer_start,
bounds_list
)
logging.debug(
f'{self.get_name()} target location close to the first '
f'large container: {location}')
bounds = target_data.assign_location_close(
location,
None
)
bounds_list.extend(bounds)
plans_to_locations[ObjectLocationPlan.CLOSE] = location
# Next, handle the remaining cases for choosing the target's location.
if target_data.is_random():
# Use the same location for the target across scenes in which the
# target is positioned randomly.
location = self._generate_random_location(
target_or_receptacle_definition,
goal,
performer_start,
bounds_list,
target_choice=target_data.choice
)
logging.debug(
f'{self.get_name()} target location randomly chosen: '
f'{location}')
bounds = target_data.assign_location_random(location)
bounds_list.extend(bounds)
plans_to_locations[ObjectLocationPlan.RANDOM] = location
return plans_to_locations
def _assign_each_object_location(self) -> None:
"""Assign each object's final location in all of the scenes by creating
separate instances of them to use in each individual scene."""
# Use the larger definition of the target or its receptacle in any
# scene to save a big enough area for all objects.
larger_target_definition = self._target_data.larger_definition_of(
self._data['large_container'],
self._confusor_data
)
logging.debug(
f'{self.get_name()} larger definition of trained/untrained '
f'target/confusor/container: {larger_target_definition}')
# Save the target's location corresponding to each location plan.
target_location_plans_to_locations = self._assign_target_location(
self._target_data,
larger_target_definition,
# Assume the 1st large container may have the target inside of it.
self._data['large_container'][0]
if len(self._data['large_container']) > 0 else None,
self._data['confusor'],
self._goal,
self._performer_start,
self._bounds_list
)
self._assign_confusor_obstacle_occluder_location(
self._target_data,
larger_target_definition,
self._confusor_data,
self._data['obstacle'] + self._data['occluder'],
self._data['large_container'],
self._goal,
self._performer_start,
self._bounds_list,
target_location_plans_to_locations
)
self._assign_container_location(
# Assume the 1st large container may have the target inside of it,
# and thus it will have been positioned previously, but the other
# containers will not have any objects inside of them.
self._data['large_container'][1:] + self._data['small_container'],
self._goal,
self._performer_start,
self._bounds_list
)
self._assign_object_location_inside_container(
self._target_data,
self._confusor_data,
self._data['large_container']
)
def _assign_confusor_definition(
self,
confusor_data: Optional[ObjectData],
target_definition: ObjectDefinition
) -> None:
"""Update the given confusor data with its object definition using the
given target data."""
if not confusor_data:
return
dataset = specific_objects.get_interactable_definition_dataset()
trained_dataset = dataset.filter_on_trained()
untrained_dataset = dataset.filter_on_untrained(
tags.SCENE.UNTRAINED_SHAPE
)
if not confusor_data.trained_definition:
confusor_data.trained_definition = util.get_similar_definition(
target_definition,
trained_dataset
)
if not confusor_data.trained_definition:
raise SceneException(
f'{self.get_name()} cannot find trained confusor '
f'size={trained_dataset.size()} '
f'target={target_definition}')
if not confusor_data.untrained_definition:
confusor_data.untrained_definition = util.get_similar_definition(
target_definition,
untrained_dataset
)
if not confusor_data.untrained_definition:
raise SceneException(
f'{self.get_name()} cannot find untrained confusor '
f'size={untrained_dataset.size()} '
f'target={target_definition}')
logging.debug(
f'{self.get_name()} confusor definition: '
            f'trained={confusor_data.trained_definition} '
f'untrained={confusor_data.untrained_definition}')
def _choose_small_context_definition(
self,
target_confusor_data_list: List[ObjectData]
) -> Dict[str, Any]:
"""Choose and return a small context object definition for the given
target and confusor objects from the given definition list."""
return util.choose_distractor_definition([
object_data.trained_definition.shape for object_data
in target_confusor_data_list
] + [
object_data.untrained_definition.shape for object_data
in target_confusor_data_list if object_data.untrained_definition
])
def _assign_obstacle_or_occluder_definition(
self,
object_data: ObjectData,
target_definition: ObjectDefinition,
is_occluder: bool
) -> None:
"""Update the given obstacle or occluder data with its object
definition using the given target data."""
dataset = (
specific_objects.get_occluder_definition_dataset() if is_occluder
else specific_objects.get_obstacle_definition_dataset()
)
trained_dataset = dataset.filter_on_trained()
untrained_dataset = dataset.filter_on_untrained(
tags.SCENE.UNTRAINED_SHAPE
)
if not object_data.trained_definition:
object_data.trained_definition = (
self._choose_obstacle_or_occluder_definition(
target_definition,
trained_dataset,
is_occluder
)
)
if not object_data.untrained_definition:
object_data.untrained_definition = (
self._choose_obstacle_or_occluder_definition(
target_definition,
untrained_dataset,
is_occluder
)
)
logging.debug(
f'{self.get_name()} {"occluder" if is_occluder else "obstacle"} '
f'definition: trained={object_data.trained_definition} '
f'untrained={object_data.untrained_definition}')
def _choose_obstacle_or_occluder_definition(
self,
target_definition: ObjectDefinition,
definition_dataset: DefinitionDataset,
is_occluder: bool
) -> Dict[str, Any]:
"""Choose and return an obstacle or occluder definition for the given
target object from the given definition list."""
obstacle_occluder_definition_list = (
geometry.retrieve_obstacle_occluder_definition_list(
target_definition,
definition_dataset,
is_occluder
)
)
if not obstacle_occluder_definition_list:
raise SceneException(
f'{self.get_name()} cannot find '
f'{"occluder" if is_occluder else "obstacle"} '
f'size={definition_dataset.size()} '
f'target={target_definition}')
definition, angle = random.choice(obstacle_occluder_definition_list)
# Note that this rotation must be also modified with the final
# performer start Y.
definition.rotation.y += angle
return definition
def _assign_container_definition(
self,
container_data: ReceptacleData,
target_data: TargetData,
confusor_data: Optional[ObjectData],
find_invalid_container: bool = False
) -> None:
"""Update the given container data with its object definition using the
given target and confusor data and whether it should be a valid or an
invalid size to fit either or both of the objects inside of it."""
dataset = specific_objects.get_container_definition_dataset()
trained_dataset = dataset.filter_on_trained()
untrained_dataset = dataset.filter_on_untrained(
tags.SCENE.UNTRAINED_SHAPE
)
if not container_data.trained_definition:
(
definition,
area_index,
orientation,
target_angle,
confusor_angle
) = self._choose_container_definition(
target_data,
confusor_data,
confusor_data.trained_definition if confusor_data else None,
trained_dataset,
find_invalid_container
)
container_data.trained_definition = definition
container_data.trained_containment.area_index = area_index
container_data.trained_containment.orientation = orientation
container_data.trained_containment.target_angle = target_angle
container_data.trained_containment.confusor_angle = confusor_angle
if not container_data.untrained_definition:
(
definition,
area_index,
orientation,
target_angle,
confusor_angle
) = self._choose_container_definition(
target_data,
confusor_data,
confusor_data.untrained_definition if confusor_data else None,
untrained_dataset,
find_invalid_container
)
container_data.untrained_definition = definition
container_data.untrained_containment.area_index = area_index
container_data.untrained_containment.orientation = orientation
container_data.untrained_containment.target_angle = target_angle
container_data.untrained_containment.confusor_angle = (
confusor_angle
)
logging.debug(
f'{self.get_name()} container definition: '
f'trained={container_data.trained_definition} '
f'untrained={container_data.untrained_definition}')
def _choose_container_definition(
self,
target_data: TargetData,
confusor_data: Optional[ObjectData],
confusor_definition: Optional[ObjectDefinition],
definition_dataset: DefinitionDataset,
find_invalid_container: bool = False,
) -> Tuple[Dict[str, Any], int, containers.Orientation, float, float]:
"""Choose and return a valid or an invalid container definition for the
given target and confusor objects from the given definition list."""
container_definition = None
area_index = None
orientation = None
target_angle = None
confusor_angle = None
target_definition_list = [target_data.trained_definition]
# Also try the target definition's sideways option if it exists.
if target_data.trained_definition.sideways:
sideways_definition = copy.deepcopy(target_data.trained_definition)
# Save the original properties.
sideways_definition.notSideways = {
'dimensions': sideways_definition.dimensions,
'offset': sideways_definition.offset,
'positionY': sideways_definition.positionY,
'rotation': sideways_definition.rotation
}
# Override the original properties with the sideways properties.
sideways_definition.dimensions = (
sideways_definition.sideways['dimensions']
)
sideways_definition.offset = (
sideways_definition.sideways['offset']
)
sideways_definition.positionY = (
sideways_definition.sideways['positionY']
)
sideways_definition.rotation = (
sideways_definition.sideways['rotation']
)
sideways_definition.sideways = None
target_definition_list.append(sideways_definition)
# If needed, find an enclosable container that can hold both the
# target and the confusor together.
if target_data.containerize_with(confusor_data):
for definition in definition_dataset.definitions():
for target_definition in target_definition_list:
valid_containment = containers.can_contain_both(
definition,
target_definition,
confusor_definition
)
if valid_containment and not find_invalid_container:
target_data.trained_definition = target_definition
container_definition = definition
area_index, angles, orientation = valid_containment
target_angle = angles[0]
confusor_angle = angles[1]
break
elif not valid_containment and find_invalid_container:
target_data.trained_definition = target_definition
container_definition = definition
break
# Else, find an enclosable container that can hold either the target
# or confusor individually.
else:
confusor_definition_or_none = (
confusor_definition if confusor_data and
confusor_data.is_inside() else None
)
if not target_data.is_inside():
target_definition_list = [None]
for definition in definition_dataset.definitions():
for target_definition in target_definition_list:
valid_containment = containers.can_contain(
definition,
target_definition,
confusor_definition_or_none
)
if valid_containment and not find_invalid_container:
if target_definition:
target_data.trained_definition = (
target_definition
)
container_definition = definition
area_index, angles = valid_containment
target_angle = angles[0]
confusor_angle = angles[1]
break
elif not valid_containment and find_invalid_container:
if target_definition:
target_data.trained_definition = (
target_definition
)
container_definition = definition
break
if not container_definition:
raise SceneException(
f'{self.get_name()} cannot create '
f'{"small" if find_invalid_container else "large"} '
f'container size={definition_dataset.size()} '
f'target={target_data.trained_definition}\n'
f'confusor={confusor_definition}')
return (
container_definition, area_index, orientation, target_angle,
confusor_angle
)
def _assign_target_definition(
self,
target_data: TargetData,
goal: InteractiveGoal
) -> None:
"""Update the given target data with its object definition using the
given interactive goal."""
if not target_data.trained_definition:
target_data.trained_definition = goal.choose_target_definition(
target_data.choice
)
logging.debug(
f'{self.get_name()} target definition: '
f'{target_data.trained_definition}')
def _create_interior_wall(
self,
wall_material: str,
wall_colors: List[str],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
keep_unobstructed_list: List[Dict[str, Any]] = None
) -> Optional[Dict[str, Any]]:
"""Create and return a randomly positioned interior wall with the
given material and colors. If keep_unobstructed_list is not None, the
wall won't obstruct the line between the performer_start and the
objects in keep_unobstructed_list."""
tries = 0
performer_rect = geometry.find_performer_rect(
performer_start['position']
)
performer_poly = geometry.rect_to_poly(performer_rect)
while tries < util.MAX_TRIES:
rotation = random.choice((0, 90, 180, 270))
x_position = geometry.random_position_x(ROOM_DIMENSIONS)
z_position = geometry.random_position_z(ROOM_DIMENSIONS)
x_width = round(
random.uniform(WALL_MIN_WIDTH, WALL_MAX_WIDTH),
geometry.POSITION_DIGITS
)
# Ensure the wall is not too close to the room's parallel walls.
if (
(rotation == 0 or rotation == 180) and
(
z_position < (ROOM_Z_MIN + WALL_SEPARATION) or
z_position > (ROOM_Z_MAX - WALL_SEPARATION)
)
) or (
(rotation == 90 or rotation == 270) and
(
x_position < (ROOM_X_MIN + WALL_SEPARATION) or
x_position > (ROOM_X_MAX - WALL_SEPARATION)
)
):
continue
wall_rect = geometry.calc_obj_coords(
x_position,
z_position,
x_width / 2.0,
WALL_DEPTH / 2.0,
0,
0,
rotation
)
wall_poly = geometry.rect_to_poly(wall_rect)
# Ensure parallel walls are not too close one another.
boundary_rect = geometry.calc_obj_coords(
x_position,
z_position,
(x_width + WALL_SEPARATION) / 2.0,
(WALL_DEPTH + WALL_SEPARATION) / 2.0,
0,
0,
rotation
)
is_too_close = any(
sat_entry(boundary_rect, bounds) for bounds in bounds_list
)
is_ok = (
not wall_poly.intersects(performer_poly) and
geometry.rect_within_room(wall_rect, ROOM_DIMENSIONS) and
not is_too_close
)
if is_ok and keep_unobstructed_list:
for instance in keep_unobstructed_list:
if (
'locationParent' not in instance and
geometry.does_fully_obstruct_target(
performer_start['position'],
instance,
wall_poly
)
):
is_ok = False
break
if is_ok:
break
tries += 1
if tries < util.MAX_TRIES:
interior_wall = structures.create_interior_wall(
x_position,
z_position,
rotation,
x_width,
WALL_HEIGHT,
materials.MaterialTuple(wall_material, wall_colors),
thickness=WALL_DEPTH,
bounding_rect=wall_rect
)
return interior_wall
return None
def _create_target_list(
self,
goal: InteractiveGoal,
performer_start: Dict[str, float],
existing_bounds_list: List[List[Dict[str, float]]],
target_validation_list: List[Dict[str, float]],
start_index: int = None,
end_index: int = None
) -> Tuple[List[Dict[str, Any]], List[List[Dict[str, float]]]]:
"""Create and return each of the goal's targets between the start_index
and the end_index. Used if the goal needs more targets than are defined
by the hypercube's plan. Changes the bounds_list."""
valid_start_index = 0 if start_index is None else start_index
# Only create targets up to the given index, or create each of the
# targets if no end_index was given. Keep each existing target.
valid_end_index = (
goal.get_target_count() if end_index is None else end_index
)
if valid_start_index >= valid_end_index:
return [], existing_bounds_list
target_list = []
bounds_list = existing_bounds_list
for i in range(valid_start_index, valid_end_index):
definition = goal.choose_target_definition(i)
for _ in range(util.MAX_TRIES):
location, possible_bounds_list = goal.choose_location(
definition,
performer_start,
existing_bounds_list,
is_target=True,
room_dimensions=ROOM_DIMENSIONS
)
if goal.validate_target_location(
i,
location,
target_validation_list,
performer_start
):
break
location = None
if not location:
raise SceneException(
f'{self.get_name()} cannot find suitable location '
f'target={definition}')
bounds_list = possible_bounds_list
instance = util.instantiate_object(definition, location)
target_list.append(instance)
return target_list, bounds_list
def _generate_front_and_back(
self,
definition: ObjectDefinition,
target_choice: int = None
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Generate a location in front of and (if needed) in back of the
performer's start location. May change the global performer_start if
it's needed to generate the two locations. Return the front and back
locations."""
location_front = None
location_back = None
for _ in range(util.MAX_TRIES):
location_front = self._identify_front(
self._goal,
self._performer_start,
definition,
target_choice
)
if location_front:
location_back = self._identify_back(
self._goal,
self._performer_start,
definition,
target_choice
)
if location_back:
break
location_front = None
location_back = None
self._performer_start = self._generate_performer_start()
if not location_front or not location_back:
raise SceneException(
f'{self.get_name()} cannot position performer start in '
f'front of and in back of object={definition}')
return location_front, location_back
def _generate_close_to(
self,
object_definition: ObjectDefinition,
existing_definition: ObjectDefinition,
existing_location: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]],
adjacent: bool = False,
behind: bool = False,
obstruct: bool = False,
unreachable: bool = False
) -> Dict[str, Any]:
"""Generate and return a new location for the given object very close
to the given previously-positioned object and its given location."""
location_close = geometry.generate_location_in_line_with_object(
object_definition,
existing_definition,
existing_location,
performer_start,
bounds_list,
adjacent=adjacent,
behind=behind,
obstruct=obstruct,
unreachable=unreachable,
room_dimensions=ROOM_DIMENSIONS
)
if not location_close:
if adjacent:
raise SceneException(
f'{self.get_name()} cannot position object adjacent to '
f'existing:\nperformer_start={performer_start}\n'
f'object={object_definition}\n'
f'existing={existing_definition}\n'
f'location={existing_location}\nbounds={bounds_list}')
elif behind:
raise SceneException(
f'{self.get_name()} cannot position object directly in '
f'back of existing:\nperformer_start={performer_start}\n'
f'object={object_definition}\n'
f'existing={existing_definition}\n'
f'location={existing_location}\nbounds={bounds_list}')
raise SceneException(
f'{self.get_name()} cannot position object directly in '
f'front of existing:\nperformer_start={performer_start}\n'
f'object={object_definition}\n'
f'existing={existing_definition}\n'
f'location={existing_location}\nbounds={bounds_list}')
return location_close
def _generate_far_from(
self,
object_definition: ObjectDefinition,
existing_location: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[str, Any]:
"""Generate and return a new location for the given object far away
from the given location."""
for _ in range(util.MAX_TRIES):
bounds_list_copy = copy.deepcopy(bounds_list)
location_far = geometry.calc_obj_pos(
performer_start['position'],
bounds_list_copy,
object_definition,
room_dimensions=ROOM_DIMENSIONS
)
if not geometry.are_adjacent(
existing_location,
location_far,
distance=geometry.MIN_OBJECTS_SEPARATION_DISTANCE
):
break
location_far = None
if not location_far:
raise SceneException(
f'{self.get_name()} cannot position object far from existing: '
f'object={object_definition}\nexisting={existing_location}')
return location_far
def _generate_performer_start(self) -> Dict[str, Dict[str, float]]:
"""Generate and return the performer's start location dict."""
return {
'position': {
'x': round(
random.uniform(ROOM_X_MIN, ROOM_X_MAX),
geometry.POSITION_DIGITS
),
'y': 0,
'z': round(
random.uniform(ROOM_Z_MIN, ROOM_Z_MAX),
geometry.POSITION_DIGITS
)
},
'rotation': {
'x': 0,
'y': geometry.random_rotation(),
'z': 0
}
}
def _generate_random_location(
self,
definition: ObjectDefinition,
goal: InteractiveGoal,
performer_start: Dict[str, float],
bounds_list: List[List[Dict[str, float]]],
target_choice: int = None,
target_location: Dict[str, Any] = None,
second_definition: ObjectDefinition = None
) -> Dict[str, Any]:
"""Generate a random location and return it twice."""
for _ in range(util.MAX_TRIES):
location_random, _ = goal.choose_location(
identify_larger_definition(definition, second_definition)
if second_definition else definition,
performer_start,
bounds_list,
is_target=(target_choice is not None),
room_dimensions=ROOM_DIMENSIONS
)
if location_random:
# If generating a location for the target object...
                if target_choice is not None:
if goal.validate_target_location(
target_choice,
location_random,
bounds_list,
performer_start
):
# Successful
break
# If generating a location that must ensure the visibility of
# this object, the target object, and other critical objects to
# the performer's start location...
elif target_location:
# Assume that all of the bounds that have been set by now
# will only be for critical objects (specifically targets,
# confusors, containers, obstacles, occluders).
for bounds in bounds_list:
bounds_poly = geometry.get_bounding_polygon({
'boundingBox': bounds
})
# Also validate the second object definition, if given.
second_rect = geometry.generate_object_bounds(
vars(second_definition.dimensions),
vars(second_definition.offset),
location_random['position'],
location_random['rotation']
)
# This location should not completely obstruct or be
# obstructed by any critical object's location.
if geometry.does_fully_obstruct_target(
performer_start['position'],
location_random,
bounds_poly
) or geometry.does_fully_obstruct_target(
performer_start['position'],
{'boundingBox': bounds},
geometry.get_bounding_polygon(location_random)
) or geometry.does_fully_obstruct_target(
performer_start['position'],
{'boundingBox': second_rect},
bounds_poly
) or geometry.does_fully_obstruct_target(
performer_start['position'],
{'boundingBox': bounds},
geometry.get_bounding_polygon({
'boundingBox': second_rect
})
):
# Failed
location_random = None
break
if location_random:
# This location should not partly obstruct the target
# object's location.
if not geometry.does_partly_obstruct_target(
self._performer_start['position'],
target_location,
geometry.get_bounding_polygon(location_random)
):
# Successful
break
# Otherwise...
else:
# Successful
break
# Failed
location_random = None
if not location_random:
raise SceneException(
f'{self.get_name()} cannot randomly position '
f'target={definition}')
return location_random
def _identify_front(
self,
goal: InteractiveGoal,
performer_start: Dict[str, float],
definition: ObjectDefinition,
target_choice: int = None
) -> Dict[str, Any]:
"""Find and return a location in front of the given performer_start."""
def rotation_func():
return performer_start['rotation']['y']
for _ in range(util.MAX_TRIES):
location_front = geometry.get_location_in_front_of_performer(
performer_start,
definition,
rotation_func=rotation_func,
room_dimensions=ROOM_DIMENSIONS
)
# If we've found a valid location...
if location_front:
# If this is a target location, ensure it's valid for the goal.
if target_choice is None or goal.validate_target_location(
target_choice,
location_front,
[],
performer_start
):
break
# Else, find a new location.
location_front = None
return location_front
def _identify_back(
self,
goal: InteractiveGoal,
performer_start: Dict[str, float],
definition: ObjectDefinition,
target_choice: int = None
) -> Dict[str, Any]:
"""Find and return a location in back of the given performer_start."""
def rotation_func():
return performer_start['rotation']['y']
for _ in range(util.MAX_TRIES):
location_back = geometry.get_location_in_back_of_performer(
performer_start,
definition,
rotation_func,
room_dimensions=ROOM_DIMENSIONS
)
# If we've found a valid location...
if location_back:
# If this is a target location, ensure it's valid for the goal.
if target_choice is None or goal.validate_target_location(
target_choice,
location_back,
[],
performer_start
):
break
# Else, find a new location.
location_back = None
return location_back
def _initialize_context_objects(self) -> None:
"""Create this hypercube's small context objects."""
critical_object_data_list = (
self._data['target'] + self._data['confusor'] +
self._data['obstacle'] + self._data['occluder']
)
context_count = random.choices(
SMALL_CONTEXT_OBJECT_CHOICES,
weights=SMALL_CONTEXT_OBJECT_WEIGHTS,
k=1
)[0]
for _ in range(context_count):
definition = self._choose_small_context_definition(
critical_object_data_list
)
for _ in range(util.MAX_TRIES):
location, bounds_list = self._goal.choose_location(
definition,
self._performer_start,
self._bounds_list,
room_dimensions=ROOM_DIMENSIONS
)
successful = True
if successful:
for object_data in critical_object_data_list:
for instance in object_data.instance_list:
if not instance:
continue
if geometry.does_fully_obstruct_target(
self._performer_start['position'],
instance,
geometry.get_bounding_polygon(location)
):
successful = False
break
if not successful:
break
if successful:
break
location = False
if not location:
raise SceneException(
                    f'{self.get_name()} cannot find a suitable location for '
                    f'small context object {definition}')
self._bounds_list = bounds_list
instance = util.instantiate_object(definition, location)
self._small_context_object_list.append(instance)
def _initialize_interior_walls(self) -> None:
"""Create this hypercube's interior walls. Changes the
interior_wall_list and the bounds_list."""
# All scenes will have the same room wall material/colors.
room_wall_material_name = self._scene_1['wallMaterial']
room_wall_colors = self._scene_1['debug']['wallColors']
keep_unobstructed_list = [self._target_data.trained_definition]
if self._confusor_data:
keep_unobstructed_list.extend([
self._confusor_data.trained_definition,
self._confusor_data.untrained_definition
])
number = random.choices(WALL_CHOICES, weights=WALL_WEIGHTS, k=1)[0]
logging.debug(f'{self.get_name()} {number} interior walls')
for _ in range(number + 1):
wall = self._create_interior_wall(
room_wall_material_name,
room_wall_colors,
self._performer_start,
self._bounds_list,
keep_unobstructed_list
)
if wall:
self._interior_wall_list.append(wall)
self._bounds_list.append(wall['shows'][0]['boundingBox'])
def _choose_each_object_definition(self) -> None:
"""Choose each object's definition to use across scenes."""
# Create all targets in the hypercube that the goal must make before
# the target chosen by the plan, if the goal has multiple targets.
self._common_target_list, self._bounds_list = self._create_target_list(
self._goal,
self._performer_start,
self._bounds_list,
[],
end_index=self._target_data.choice
)
self._assign_target_definition(self._target_data, self._goal)
self._assign_confusor_definition(
self._confusor_data,
self._target_data.trained_definition
)
for container in self._data['large_container']:
self._assign_container_definition(
container,
self._target_data,
self._confusor_data
)
for container in self._data['small_container']:
self._assign_container_definition(
container,
self._target_data,
self._confusor_data,
find_invalid_container=True
)
larger_target_definition = self._target_data.larger_definition_of(
self._data['large_container'],
self._confusor_data
)
for obstacle in self._data['obstacle']:
self._assign_obstacle_or_occluder_definition(
obstacle,
larger_target_definition,
is_occluder=False
)
for occluder in self._data['occluder']:
self._assign_obstacle_or_occluder_definition(
occluder,
larger_target_definition,
is_occluder=True
)
def _create_each_object_template(self) -> None:
"""Create each object's template at a base location, since later we'll
move them to their final locations in all of the scenes."""
for object_data_list in self._data.values():
for object_data in object_data_list:
object_data.recreate_both_templates()
# Reset object's half-finished instances in all scenes.
object_data.reset_all_instances()
def _initialize_each_hypercube_object(self) -> None:
"""
Initialize this hypercube's objects:
- 1. Create objects that may change in each scene (like targets).
- 2. Containerize objects as needed by this hypercube's plan.
- 3. Move objects into locations specific to each scene.
- 4. Save objects specific to each scene.
- 5. Create all other objects shared by both scenes (like distractors).
"""
self._choose_each_object_definition()
tries = 0
while True:
tries += 1
# Reset the bounds_list on each new try.
self._bounds_list = []
self._create_each_object_template()
try:
self._assign_each_object_location()
for i, instance in enumerate(self._target_data.instance_list):
if not instance:
raise SceneException(
f'{self.get_name()} did not successfully create a '
f'target instance in scene {i} (uh-oh)! '
f'target_location_plan='
f'{self._target_data.location_plan_list[i]}')
break
except SceneException:
logging.exception(
f'{self.get_name()} _assign_each_object_location')
if tries >= util.MAX_TRIES:
raise SceneException(
f'{self.get_name()} cannot successfully assign each '
f'object to a location -- please redo.')
for object_data_list in self._data.values():
for object_data in object_data_list:
self._log_debug_object_data(object_data)
# Create other targets in the hypercube that the goal must make after
# the target chosen by the plan, if the goal has multiple targets.
common_target_list, self._bounds_list = self._create_target_list(
self._goal,
self._performer_start,
self._bounds_list,
self._common_target_list + [
instance for instance in self._target_data.instance_list
if instance
],
start_index=(len(self._common_target_list) + 1)
)
self._common_target_list.extend(common_target_list)
self._initialize_context_objects()
# Add the canContainTarget tag to each container in each scene.
for container_data in self._data['large_container']:
for instance in container_data.instance_list:
if instance:
instance['canContainTarget'] = True
for container_data in self._data['small_container']:
for instance in container_data.instance_list:
if instance:
instance['canContainTarget'] = False
def _log_debug_object_data(self, object_data: ObjectData) -> None:
"""Log debug info for the given object data."""
for scene_index, instance in enumerate(object_data.instance_list):
if instance:
logging.info(
f'{self.get_name()} '
f'{object_data.role}_{scene_index} '
f'{instance["type"]} {instance["id"]} '
f'parent={instance.get("locationParent", None)}')
else:
logging.info(
f'{self.get_name()} '
f'{object_data.role}_{scene_index} None')
def _move_distractor_into_receptacle(
self,
object_instance: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[str, Any]:
"""Create and return a receptacle object, moving the given object into
the new receptacle. Changes the bounds_list."""
# Only a pickupable object can be positioned inside a receptacle.
if not object_instance.get('pickupable', False):
return None
# Please note that an enclosable receptacle (that can have objects
# positioned inside of it) may also be called a "container".
dataset = specific_objects.get_container_definition_dataset()
for receptacle_definition in dataset.definitions():
valid_containment = containers.can_contain(
receptacle_definition,
object_instance
)
if valid_containment:
location = geometry.calc_obj_pos(
performer_start['position'],
bounds_list,
receptacle_definition,
room_dimensions=ROOM_DIMENSIONS
)
if location:
receptacle_instance = util.instantiate_object(
receptacle_definition,
location
)
area, angles = valid_containment
containers.put_object_in_container(
object_instance,
receptacle_instance,
area,
angles[0]
)
return receptacle_instance
return None
def _move_distractor_onto_receptacle(
self,
object_instance: Dict[str, Any],
performer_start: Dict[str, Dict[str, float]],
bounds_list: List[List[Dict[str, float]]]
) -> Dict[str, Any]:
"""Create and return a receptacle object, moving the given object onto
the new receptacle. Changes the bounds_list."""
# TODO MCS-146 Position objects on top of receptacles.
return None
def _update_scene_at_index(
self,
scene: Dict[str, Any],
scene_index: int,
goal_template: Dict[str, Any]
) -> None:
"""Update the given scene with its metadata like all of its objects."""
scene_plan = self._plan_list[scene_index]
scene['performerStart'] = self._performer_start
scene['debug']['evaluationOnly'] = any([
object_plan.untrained
for object_plan_list in scene_plan.object_plans().values()
for object_plan in object_plan_list
])
scene['goal'] = copy.deepcopy(goal_template)
scene['goal'] = self._goal.update_goal_template(
scene['goal'],
[self._target_data.instance_list[scene_index]]
)
scene['goal']['last_step'] = LAST_STEP
role_to_object_list = {}
role_to_object_list[tags.ROLES.TARGET] = [
object_data.instance_list[scene_index] for object_data in
self._data['target'] if object_data.instance_list[scene_index]
] + self._common_target_list
role_to_object_list[tags.ROLES.CONFUSOR] = [
object_data.instance_list[scene_index] for object_data in
self._data['confusor'] if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.CONTAINER] = [
object_data.instance_list[scene_index] for object_data in
(self._data['large_container'] + self._data['small_container'])
if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.CONTEXT] = (
self._small_context_object_list
)
role_to_object_list[tags.ROLES.OBSTACLE] = [
object_data.instance_list[scene_index] for object_data in
self._data['obstacle'] if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.OCCLUDER] = [
object_data.instance_list[scene_index] for object_data in
self._data['occluder'] if object_data.instance_list[scene_index]
]
role_to_object_list[tags.ROLES.WALL] = self._interior_wall_list
update_scene_objects(scene, role_to_object_list)
scene['goal']['sceneInfo'][tags.SCENE.ID] = [
scene_plan.scene_id.upper()
]
scene['goal']['sceneInfo'][tags.SCENE.SLICES] = []
for tag, value in scene_plan.slice_tags.items():
scene['goal']['sceneInfo'][tag] = value
scene['goal']['sceneInfo'][tags.SCENE.SLICES].append(
tags.tag_to_label(tag) + ' ' + str(value)
)
class InteractiveSingleSceneFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
goal.get_name().replace(' ', '').capitalize(),
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
target_object_plan = ObjectPlan(
ObjectLocationPlan.RANDOM,
definition=base_objects.create_soccer_ball()
)
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'',
[InteractivePlan('', {}, target_object_plan)],
training=self.training
)
class InteractiveContainerTrainingHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Container' + goal.get_name().replace(' ', '').capitalize() +
'Training',
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'container',
create_container_hypercube_plan_list(),
training=self.training
)
class InteractiveObstacleTrainingHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Obstacle' + goal.get_name().replace(' ', '').capitalize() +
'Training',
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'obstacle',
create_obstacle_hypercube_plan_list(),
training=self.training
)
class InteractiveOccluderTrainingHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Occluder' + goal.get_name().replace(' ', '').capitalize() +
'Training',
training=True
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'occluder',
create_occluder_hypercube_plan_list(),
training=self.training
)
class InteractiveContainerEvaluationHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Container' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'container',
create_container_hypercube_plan_list(),
training=self.training
)
class InteractiveContainerEvaluation4HypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Container' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation4',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'container',
create_eval_4_container_hypercube_plan_list(),
training=self.training
)
class InteractiveObstacleEvaluationHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Obstacle' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'obstacle',
create_obstacle_hypercube_plan_list(),
training=self.training
)
class InteractiveOccluderEvaluationHypercubeFactory(HypercubeFactory):
def __init__(self, goal: InteractiveGoal) -> None:
super().__init__(
'Occluder' + goal.get_name().replace(' ', '').capitalize() +
'Evaluation',
training=False
)
self.goal = goal
def _build(
self,
body_template: Dict[str, Any],
role_to_type: Dict[str, str]
) -> Hypercube:
return InteractiveHypercube(
body_template,
self.goal,
role_to_type,
'occluder',
create_occluder_hypercube_plan_list(),
training=self.training
)
INTERACTIVE_TRAINING_HYPERCUBE_LIST = [
InteractiveSingleSceneFactory(RetrievalGoal('retrieval')),
InteractiveContainerTrainingHypercubeFactory(RetrievalGoal('container')),
InteractiveObstacleTrainingHypercubeFactory(RetrievalGoal('obstacle')),
InteractiveOccluderTrainingHypercubeFactory(RetrievalGoal('occluder'))
]
INTERACTIVE_EVALUATION_HYPERCUBE_LIST = [
InteractiveContainerEvaluationHypercubeFactory(RetrievalGoal('container')),
InteractiveObstacleEvaluationHypercubeFactory(RetrievalGoal('obstacle')),
InteractiveOccluderEvaluationHypercubeFactory(RetrievalGoal('occluder')),
InteractiveContainerEvaluation4HypercubeFactory(RetrievalGoal('container'))
]
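# Usage sketch (assumption, not part of the original module): a scene-generation
# driver presumably picks a factory from one of the lists above and calls its
# _build() method with a scene body template and a role-to-type mapping. The
# empty dicts below are placeholder values for illustration only.
#
# factory = INTERACTIVE_TRAINING_HYPERCUBE_LIST[0]
# hypercube = factory._build(body_template={}, role_to_type={})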
|
py | 1a3a7911f4a9a03c4cd2b5a546af14fa926f50be | example code |
py | 1a3a7a257b7ec4635f2c5cd2d70a12ef860c481a | # Solution
def tower_of_Hanoi_soln(num_disks, source, auxiliary, destination):
if num_disks == 0:
return
if num_disks == 1:
print("{} {}".format(source, destination))
return
tower_of_Hanoi_soln(num_disks - 1, source, destination, auxiliary)
print("{} {}".format(source, destination))
tower_of_Hanoi_soln(num_disks - 1, auxiliary, source, destination)
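# Example (hypothetical call): tower_of_Hanoi_soln(3, 'S', 'A', 'D') prints the
# 2**3 - 1 = 7 moves, one "source destination" pair per line.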
def tower_of_Hanoi(num_disks):
tower_of_Hanoi_soln(num_disks, 'S', 'A', 'D') |
py | 1a3a7a619b20d65b14e25c4b5e14c45da7b9561f | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl
from kfp_tekton.compiler import TektonCompiler
from kfp_tekton import TektonClient
from os import path
from tempfile import gettempdir
############################################################
# Define the pipeline method
############################################################
@dsl.pipeline(
name='${name}',
description='${description}'
)
def dataset_pipeline(${pipeline_method_args}):
from kfp.components import load_component_from_url
dax_dataset_metadata_url = '${dataset_template_url}'
dax_to_dlf_component_url = '${dax_to_dlf_component_url}'
dlf_to_pvc_component_url = '${dlf_to_pvc_component_url}'
dax_to_dlf_op = load_component_from_url(dax_to_dlf_component_url)
dlf_to_pvc_op = load_component_from_url(dlf_to_pvc_component_url)
create_dlf_yaml = dax_to_dlf_op(dax_yaml=dax_dataset_metadata_url)\
.set_image_pull_policy('Always')\
.set_display_name("generate dataset metadata")
mount_pvc = dlf_to_pvc_op(action='create',
namespace='${namespace}',
dataset_yaml=create_dlf_yaml.outputs['dlf_yaml'])\
.set_image_pull_policy('Always')\
.set_display_name("create persistent volume")
############################################################
# Compile the pipeline
############################################################
pipeline_function = dataset_pipeline
pipeline_filename = path.join(gettempdir(), pipeline_function.__name__ + '.pipeline.tar.gz')
TektonCompiler().compile(pipeline_function, pipeline_filename)
############################################################
# Run the pipeline
############################################################
# TODO: specify pipeline argument values
arguments = ${parameter_dict}
client = TektonClient(${pipeline_server})
# Get or create an experiment and submit a pipeline run
experiment = client.create_experiment('DATASET_RUNS')
# Submit the experiment to run in a pipeline
run_name = '${run_name}'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
|
py | 1a3a7aa216aaaea3d5a3b5227daaa23e0695c7ba | import collections
from nltk import NaiveBayesClassifier, DecisionTreeClassifier
from nltk.metrics import precision, recall, f_measure
from nltk.classify import apply_features, accuracy
from nltk.classify.scikitlearn import SklearnClassifier
from prueba_paquete.utils import clean_html_tags, shuffled, tokenize_and_stem
from prueba_paquete.concept_extraction import ConceptExtractor
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
class DocumentClassifier():
'''
Train a classifier with labeled documents and classify new documents
    into one of the labeled classes.
    We call the document set provided for training the classifier the
    'dev docs'. These 'dev docs' are split into two subsets: 'train docs'
    and 'test docs', which are used to train and test the machine learning
    model respectively.
Parameters
----------
train_p : float, 0.8 by default
        The proportion of the 'dev docs' used as 'train docs'.
        Use values greater than 0 and lower than 1.
        The remaining docs will be used as 'test docs'
eq_label_num : boolean, True by default
If true, 'train docs' will have equal number of documents for each
class. This number will be the lowest label count.
complete_p : boolean, True by default
Used when eq_label_num is True, but the lowest label count is not
enough for getting the train_p proportion of 'train docs'. If this
attribute is True, more documents from 'test docs' will be moved
to 'train docs' until we get train_p
n_folds : integer, 10 by default
Number of folds to be used in k-fold cross validation technique for
choosing different sets as 'train docs'
vocab_size : integer, 500 by default
This is the size of the vocabulary set that will be used for extracting
features out of the docs
t_classifier : string, 'NB' by default
This is the type of classifier model used. Available types are 'NB'
(Naive Bayes), 'DT' (decision tree), 'RF' (Random Forest), and 'SVM'
(Support Vector Machine)
    language : string, 'english' by default
        Language in which the documents are written
'''
def __init__(self, train_p=0.8, eq_label_num=True,
complete_p=True, n_folds=10,
vocab_size=250,
t_classifier="NB", language="english",
stem=False):
self.train_p = train_p
self.eq_label_num = eq_label_num
self.complete_p = complete_p
self.n_folds = n_folds
self.vocab_size = vocab_size
self.t_classifier = t_classifier
self.language = language
self.stem = stem
self._vocab = []
self._classified_docs = []
self._classifier = None
self._accuracy = 0
self._precision = {}
self._recall = {}
self._f_measure = {}
self._train_docs = []
self._test_docs = []
def split_train_and_test(self, docs):
'''
Split the 'dev docs' set into the 'train docs' and 'test docs' subsets
Parameters
----------
docs: iterable
An iterable which yields a list of strings
'''
categories_count = self.count_categories(docs)
label_limit = min([c for (k,c) in categories_count.items()])
labeled_docs = {}
train_docs = []
test_docs = []
# Split docs by label
for (cat,count) in categories_count.items():
labeled_docs[cat] = shuffled([t for (t,k) in docs if k == cat])
if self.eq_label_num:
# Select the same number of doc for all labels
for cat, cat_docs in labeled_docs.items():
cat_limit = label_limit
cat_train_docs = cat_docs[:cat_limit]
cat_test_docs = cat_docs[cat_limit:]
train_docs += [(doc, cat) for doc in cat_train_docs]
test_docs += [(doc, cat) for doc in cat_test_docs]
l_train = len(train_docs)
l_docs = len(docs)
l_test = len(test_docs)
actual_p = l_train / l_docs
            # If the training proportion has not been reached, move extra
            # docs from 'test docs' to 'train docs' until it is.
if self.complete_p == True and actual_p < self.train_p:
shuffled_extra = shuffled(test_docs)
extra_i = 0
while(actual_p < self.train_p and extra_i < l_test):
aux_l_train = l_train + extra_i
actual_p = aux_l_train / l_docs
extra_i += 1
train_docs += shuffled_extra[:extra_i]
test_docs = shuffled_extra[extra_i:]
else:
label_limit = int(self.train_p * len(docs))
shuffled_docs = shuffled(docs)
train_docs = shuffled_docs[:label_limit]
test_docs = shuffled_docs[label_limit:]
self._train_docs = train_docs
self._test_docs = test_docs
def cross_validation_train(self, dev_docs):
'''
        Applies the k-fold cross-validation technique to split the docs into
        different pairs of training and testing sets. For each pair, it trains
        and evaluates a classifier, keeping the one with the best accuracy.
Parameters
----------
dev_docs: iterable
An iterable which yields a list of strings
'''
dev_docs = shuffled(dev_docs)
accuracies = []
best_accuracy = 0
subset_size = int(len(dev_docs)/self.n_folds)
for i in range(self.n_folds):
classifier_list = []
train_docs = (dev_docs[(i + 1) * subset_size:] + \
dev_docs[:i * subset_size])
test_docs = dev_docs[i * subset_size:(i + 1) * subset_size]
train_set = apply_features(self.get_doc_features, train_docs)
if self.t_classifier == "NB":
classifier = NaiveBayesClassifier.train(train_set)
elif self.t_classifier == "DT":
classifier = DecisionTreeClassifier.train(train_set)
elif self.t_classifier == "RF":
classifier = SklearnClassifier(RandomForestClassifier())\
.train(train_set)
elif self.t_classifier == "SVM":
classifier = SklearnClassifier(LinearSVC(), sparse=False)\
.train(train_set)
classifier_list.append(classifier)
test_set = apply_features(self.get_doc_features, test_docs, True)
accuracies.append((accuracy(classifier, test_set)) * 100)
if accuracies[-1] > best_accuracy:
best_accuracy = accuracies[-1]
self._classifier = classifier
self._train_docs = train_docs
self._test_docs = test_docs
def equitative_class_train(self, dev_docs):
categories_count = self.count_categories(dev_docs)
labeled_docs = {}
for (cat,count) in categories_count.items():
labeled_docs[cat] = shuffled([t for (t,k) in dev_docs if k == cat])
train_docs = []
test_docs = []
for cat, l in labeled_docs.items():
cat_limit = int(self.train_p * len(l))
train_docs += [(t, cat) for t in l[:cat_limit]]
test_docs += [(t, cat) for t in l[cat_limit:]]
self._train_docs = train_docs
self._test_docs = test_docs
# print("len dev docs", len(dev_docs))
# print("categories count", categories_count)
# print("count train", self.count_categories(train_docs))
# print("count test", self.count_categories(test_docs))
# split dev docs and create traning and test set
# self.split_train_and_test(dev_docs)
train_set = apply_features(self.get_doc_features, self._train_docs)
# create and train the classification model according to t_classifier
if self.t_classifier == "NB":
self._classifier = NaiveBayesClassifier.train(train_set)
elif self.t_classifier == "DT":
self._classifier = DecisionTreeClassifier.train(train_set)
elif self.t_classifier == "RF":
self._classifier = SklearnClassifier(RandomForestClassifier())\
.train(train_set)
elif self.t_classifier == "SVM":
self._classifier = SklearnClassifier(LinearSVC(), sparse=False)\
.train(train_set)
def count_categories(self, docs):
'''
Count how many documents of each class are in the 'dev docs' set
Parameters
----------
docs: iterable
An iterable which yields a list of strings
Returns
-------
counters: dictionary
            A dictionary where each item is the number of docs for a class
'''
categories = set([c for (t,c) in docs])
counters = {}
for cat in categories:
counters[cat] = 0
for (text, cat) in docs:
counters[cat] += 1
self._categories = sorted(categories)
return counters
def get_doc_features(self, doc):
'''
Extract features of a document, checking the presence of the words
in the vocabulary
Parameters
----------
doc: string
The doc from which features will be extracted
Returns
-------
features: dictionary
A dictionary where each item indicates the presence of a
word from the vocabulary in the input doc
'''
features = {}
for word in self._vocab:
features['contains({})'.format(word)] = (word in doc)
return features
def train_classifier(self, dev_docs):
'''
Create the features vocabulary from 'dev docs',
Split 'dev docs', train the classifier with 'train docs',
Evaluate accuracy with 'test docs'
Parameters
----------
dev_docs: iterable
An iterable which yields a list of strings
'''
# create vocabulary for feature extraction
ce = ConceptExtractor(num_concepts=self.vocab_size,
language=self.language)
ce.extract_concepts([t for (t,c) in dev_docs])
self._vocab = sorted([c for (c,f) in ce.common_concepts], key=str.lower)
if (self.stem):
self._vocab = [tokenize_and_stem(w, language=self.language)[0] \
for w in self._vocab]
# self.cross_validation_train(dev_docs)
self.equitative_class_train(dev_docs)
def eval_classifier(self):
'''
Test the model and calculates the metrics of accuracy, precision,
recall and f-measure
'''
test_set = apply_features(self.get_doc_features, self._test_docs, True)
self._accuracy = accuracy(self._classifier, test_set)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(test_set):
refsets[label].add(i)
observed = self._classifier.classify(feats)
testsets[observed].add(i)
self.count_categories(self._train_docs)
for cat in self._categories:
self._precision[cat] = precision(refsets[cat], testsets[cat])
self._recall[cat] = recall(refsets[cat], testsets[cat])
self._f_measure[cat] = f_measure(refsets[cat], testsets[cat])
def classify_docs(self, docs):
'''
First train the classifier with the labeled data.
Then classifies the unlabeled data.
Parameters
----------
docs: iterable
An iterable which yields a list of strings
'''
dev_docs = [(t, c) for (t, c) in docs if c!=""]
unlabeled_docs = [t for (t, c) in docs if c==""]
self.train_classifier(dev_docs)
self.eval_classifier()
results = []
for doc in unlabeled_docs:
doc_feats = self.get_doc_features(doc)
result = self._classifier.classify(doc_feats)
results.append((doc, result))
self._classified_docs = results
self._final_cat_count = self.count_categories(dev_docs+results)
@property
def classified_docs(self):
return self._classified_docs
@property
def accuracy(self):
return self._accuracy
@property
def precision(self):
return self._precision
@property
def recall(self):
return self._recall
@property
def f_measure(self):
return self._f_measure
@property
def category_count(self):
return self._final_cat_count
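# Usage sketch (hypothetical data): train on the labeled docs and classify the
# unlabeled ones (those with an empty label). The texts and labels below are
# invented placeholders, and a realistically sized corpus is needed for the
# train/test split and metrics to be meaningful.
#
# docs = [
#     ("the team won the match", "sports"),
#     ("the senate passed the bill", "politics"),
#     ("a late goal decided the final", ""),
# ]
# clf = DocumentClassifier(t_classifier="NB", vocab_size=50)
# clf.classify_docs(docs)
# print(clf.classified_docs, clf.accuracy)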
|
py | 1a3a7be5ae10a745f95e19e94aaa71a29e0c70fa | import time
import math
import pdb
def count_down(t, f = 'm', md = 'v'):
"""
    Counts down from the given time. Takes 3 arguments:
    t, f, and md.
- t = time
- f = format (set to 'm' by default)
- md = mode (sets to 'v' by default)
MODE
This determines whether the countdown timer will
be shown or hidden.
Mode can be 's' or 'v'
- 's' = stealth
- 'v' = verbose
FORMATS
You can specify the format(f) of t by passing:
- 'm' for minute(s)
- 's' for second(s)
- 'h' for hour(s)
"""
# validates input
if f == 'm':
t = Timing().ms(t)
elif f == 'h':
t = Timing().hs(t)
elif f == 's':
t = t
# raise error when an invalid format is entered
else:
raise ValueError("Invalid Input! \nf can only be 'm, s or h'.")
    # Check that time (t) is an integer: if it is not, the arithmetic below
    # raises a TypeError, which is caught and re-raised with a clearer message.
try:
if md == 'v':
while t >= 1:
print(t)
time.sleep(1)
t -= 1
elif md == 's':
while t >= 1:
time.sleep(1)
t -= 1
else: raise ValueError("mode is either 's' or 'v'!")
except TypeError:
#print('time (t) must be an integer!')
raise Exception('time (t) must be an integer!')
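# Example calls (illustrative): count down 2 minutes verbosely, or wait 10
# seconds silently. Both block the calling thread while they run.
#
# count_down(2, 'm', 'v')
# count_down(10, 's', 's')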
class Timing:
"""
Handles several calculations involving time
"""
def ms(self, t):
""" convert minute to secs -- t is time """
return t * 60
def mh(self, m, r = 'f'):
"""
Converts minutes to hours and return the answer.
When r is set to 't', returns a tuple containing
the quotient (in hours) and the remainder (mins)
of the convertion
example: (1, 30) - this result means 1 (hour) and
30 (mins)
"""
if r == 'f' or r == 'F':
return m / 60
elif r == 't' or r == 'T':
return divmod(m, 60)
else:
raise ValueError("remainder_mode should be either 'f' or 't'")
def hs(self, h):
""" converts hour to secs -- h is hour """
return self.ms(self.hm(h))
def hm(self, t):
"""
converts hr(s) to min(s) -- t is time in hour(s)
"""
return t * 60
def sm(self, t, r = 'f'):
""" converts second(s) to minute(s)
Takes 2 arguments
t --- the time in sec(s)
r --- remainder_mode (either 'f' or 't')
When r is set to 't', returns a tuple containing
the quotient (mins) and the remainder (secs) of
the convertion
"""
if r == 'f' or r == 'F':
return t / 60
elif r == 't' or r == 'T':
return divmod(t, 60)
else:
raise ValueError("remainder_mode should be either 'f' or 't'")
def sh(self, t, r = 'f'):
""" converts second(s) to hour(s)
Takes 2 arguments
t --- the time in sec(s)
r --- remainder_mode (either 'f' or 't')
When r is set to 't', returns a tuple containing
the quotient (in hours) and the remainder (mins)
of the convertion
"""
if r == 'f' or r == 'F':
return t / 3600
elif r == 't' or r == 'T':
return divmod(t, 3600)
else:
raise ValueError("remainder_mode should be either 'f' or 't'")
def dy_2_hr(self, d):
""" converts days to hours """
if int(d):
return d * 24
raise ValueError('day must be an integer')
def str_to_sec(self, timestring):
"""
Accepts timestring in the format 'hh:mm' and
converts it to secs
"""
a = timestring.split(':')
h = int(a[0])
m = int(a[1])
return (self.hs(h) + (self.ms(m)))
def time_diff(self, t1, t2):
"""
return the difference between two times
"""
if(self.str_to_sec(t1) > self.str_to_sec(t2)):
return self.str_to_sec(t1) - self.str_to_sec(t2)
return self.str_to_sec(t2) - self.str_to_sec(t1)
def sec_to_str(self, t):
# ctime and time are methods in the time module
#
# ctime() return string from current time in
# seconds from epoch
# time() returns current time in seconds since epoch
return time.ctime((time.time() + t))
class Appointment:
'''
class for appointments and tasks
'''
def __init__(self, t, g, l):
self._goal = g
self._dtime = t # due time
self._location = l
class Reminder(Appointment):
"""
"""
def __init__(self, g, t, l = None):
self.__goal = g
self.__dtime = t # due time
self.__location = l
# ----------- setters ---------
def set_time(self, nt):
""" nt is new time """
self.__dtime = nt
def set_goal(self, ng):
""" ng is new goal """
self.__goal = ng
def set_loc(self, nl):
""" nl is new location """
self.__location = nl
# ----------- getters ------------
def get_time(self):
return self.__dtime
def get_goal(self):
return self.__goal
def get_loc(self):
return self.__location
#----------------------- Tests --------------------
if __name__ == '__main__' :
#print('ms =',Timing().ms(1))
#print('mh =',Timing().mh(150, 'f'))
#print('hs =',Timing().hs(1))
#print('hm =',Timing().hm(1))
#print('sh =',Timing().sh(1))
#print('sm =',Timing().sm(1))
#print('dy_2_hr =',Timing().dy_2_hr(1))
#print(Timing().sh(6000))
#print(Timing().str_to_sec('2:30'))
#print('time in one hour', Timing().sec_to_str(3600))
#count_down(10, 's')
a= Appointment(10, 'test a kit', 'home') |
py | 1a3a7cf670bf871c6e1fbadb66ec348b13804906 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import threading
from PyQt6 import QtCore
class Socket_server():
"""
    A class for creating and listening to the socket server that connects error messages from device modules
    to a dedicated text box in the main window of the program.
"""
def start_messenger_server(self, helper):
"""
A function to create a socket server.
        This function should be run in another thread in order not to
        block the execution of the main program.
"""
sock = socket.socket()
sock.bind(('', 9091))
sock.listen(2)
while True:
client, addr = sock.accept()
client_handler = threading.Thread(target=self.message, args=(helper, client), daemon = True).start()
def message(self, helper, client):
"""
A function to read a message from clients and emit a special signal with the message
        to a helper class and finally to a dedicated text box in the main window of the program.
        This function should be run in another thread in order not to
        block the execution of the main program.
"""
data = client.recv(1024).decode()
helper.changedSignal.emit(data)
#print(data)
class Helper(QtCore.QObject):
"""
A helper class to connect an event in another thread to a function in the main thread.
"""
changedSignal = QtCore.pyqtSignal(str)
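# The __main__ block below calls main(), which the original file never defines.
# A minimal sketch (assumption) of what it could look like: start a Qt
# application, run the socket server in a background thread, and print incoming
# messages (a real GUI would connect the signal to a text box instead).
def main():
    import sys
    from PyQt6 import QtWidgets
    app = QtWidgets.QApplication(sys.argv)
    helper = Helper()
    # Print every message emitted by the server thread via the helper signal.
    helper.changedSignal.connect(print)
    threading.Thread(
        target=Socket_server().start_messenger_server,
        args=(helper,),
        daemon=True
    ).start()
    sys.exit(app.exec())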
if __name__ == "__main__":
main() |
py | 1a3a7ee2554b3d17fff48041859745207f4b9b34 | #!/usr/bin/env python3
"""
Functions for finding and killing processes
"""
import os
import psutil
import signal
import time
def process_exists(pid):
"""
Determine if process 'pid' exists.
"""
try:
os.kill(pid, 0)
except ProcessLookupError:
return False # Doesn't exist
except PermissionError:
pass # Exists but not ours
return True
def kill_processes(first_args,
orphaned_only=False,
send_signal=signal.SIGINT,
wait=2.0,
retry=0.25
):
"""
Find all processes whose arguments begin with the list 'first_args'
and kill them off, first with 'send_signal' for up to 'wait' seconds
and retrying every 'retry' seconds. If, after that time, any
processes are left, send them a SIGKILL. If 'orphaned_only' is True,
only kill processes with a PPID of 1.
"""
if not isinstance(first_args,list):
raise ValueError("Program arguments must be in a list")
len_first_args = len(first_args)
if len_first_args < 1:
raise ValueError("Must have at least one argument to match.")
pids = []
for process_item in psutil.process_iter():
process = process_item.as_dict()
if (process["cmdline"][0:len_first_args] == first_args) \
and ((not orphaned_only) or (process["ppid"] == 1)):
pids.append(process["pid"])
times = (wait / retry)
while times:
existing = 0
# Do the kills in parallel to make things more predictable
# time-wise.
for pid in pids:
if process_exists(pid):
existing += 1
os.kill(pid, send_signal)
if existing == 0:
return # Awww. Ran out of PIDdies.
time.sleep(retry)
times -= 1
# Last resort
for pid in pids:
if process_exists(pid):
os.kill(pid, signal.SIGKILL)
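# Example (hypothetical command line): politely stop any orphaned
# "my-daemon --serve" processes, escalating to SIGKILL after ~2 seconds.
#
# kill_processes(["my-daemon", "--serve"], orphaned_only=True)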
|
py | 1a3a7fe06a101d15c1299a73075a13a073fa790d | from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from glue.core import Data
from glue.tests.helpers import requires_h5py
from ..hdf5 import hdf5_writer
DTYPES = [np.int16, np.int32, np.int64, np.float32, np.float64]
@requires_h5py
@pytest.mark.parametrize('dtype', DTYPES)
def test_hdf5_writer_data(tmpdir, dtype):
filename = tmpdir.join('test1.hdf5').strpath
data = Data(x=np.arange(6).reshape(2, 3).astype(dtype),
y=(np.arange(6) * 2).reshape(2, 3).astype(dtype))
hdf5_writer(filename, data)
from h5py import File
f = File(filename)
assert len(f) == 2
np.testing.assert_equal(f['x'][()], data['x'])
np.testing.assert_equal(f['y'][()], data['y'])
assert f['x'][()].dtype == dtype
assert f['y'][()].dtype == dtype
f.close()
# Only write out some components
filename = tmpdir.join('test2.hdf5').strpath
hdf5_writer(filename, data, components=[data.id['x']])
f = File(filename)
assert len(f) == 1
np.testing.assert_equal(f['x'][()], data['x'])
f.close()
@requires_h5py
@pytest.mark.parametrize('dtype', DTYPES)
def test_hdf5_writer_subset(tmpdir, dtype):
filename = tmpdir.join('test').strpath
data = Data(x=np.arange(6).reshape(2, 3).astype(dtype),
y=(np.arange(6) * 2).reshape(2, 3).astype(dtype))
subset = data.new_subset()
subset.subset_state = data.id['x'] > 2
hdf5_writer(filename, subset)
from h5py import File
f = File(filename)
if np.dtype(dtype).kind == 'f':
assert np.all(np.isnan(f['x'][0]))
assert np.all(np.isnan(f['y'][0]))
else:
np.testing.assert_equal(f['x'][0], 0)
np.testing.assert_equal(f['y'][0], 0)
np.testing.assert_equal(f['x'][1], data['x'][1])
np.testing.assert_equal(f['y'][1], data['y'][1])
assert f['x'][()].dtype == dtype
assert f['y'][()].dtype == dtype
f.close()
|
py | 1a3a808bc59c4e8dbac1b53d0c3c8f3e970a324f | import torch
import torch.nn as nn
from torchvision import models
class BaseModel_scratch(nn.Module):
def __init__(self, model_name, eps=3, num_classes=200, init_weights=True):
super().__init__()
if model_name == 'vgg16bn':
backbone = nn.Sequential(*list(models.vgg16_bn(pretrained=False).features.children())[:-4])
last_conv = nn.Sequential(
nn.Conv2d(512, num_classes * eps, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(num_classes * eps),
nn.ReLU(True),
nn.AvgPool2d(kernel_size=1, stride=1, padding=0)
)
else:
backbone = None
last_conv = None
self.backbone = backbone
self.last_conv = last_conv
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
if init_weights:
self._initialize_weights()
def forward(self, x):
feat = self.backbone(x)
feat = self.last_conv(feat)
out = self.maxpool(feat)
out = out.view(out.size(0), -1)
return feat, out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
if __name__ == '__main__':
model = BaseModel_scratch('vgg16bn')
print(model)
inp = torch.randn((3, 3, 224, 224))
a, b = model(inp)
print(a.size())
print(b.size())
|
py | 1a3a80be5bf173391d85813f1f62d6aff54c1ae3 | # -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2013-2016
# - Cedric Serfon <[email protected]>, 2014-2019
# - Thomas Beermann <[email protected]>, 2014
# - Mario Lassnig <[email protected]>, 2017-2019
# - Hannes Hansen <[email protected]>, 2018-2019
# - Martin Barisits <[email protected]>, 2019-2021
# - Andrew Lister <[email protected]>, 2019
# - Ilija Vukotic <[email protected]>, 2020
# - Luc Goossens <[email protected]>, 2020
# - Patrick Austin <[email protected]>, 2020
# - Eric Vaandering <[email protected]>, 2020
# - Benedikt Ziemons <[email protected]>, 2020
# - James Perry <[email protected]>, 2020
# - Radu Carpa <[email protected]>, 2021
from rucio.api import permission
from rucio.db.sqla.constants import BadFilesStatus
from rucio.core import replica
from rucio.core.rse import get_rse_id, get_rse_name
from rucio.common import exception
from rucio.common.schema import validate_schema
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import api_update_return_dict
def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, vo='def'):
"""
List the bad file replicas summary. Method used by the rucio-ui.
:param rse_expression: The RSE expression.
:param from_date: The start date.
:param to_date: The end date.
:param vo: the VO to act on.
"""
replicas = replica.get_bad_replicas_summary(rse_expression=rse_expression, from_date=from_date, to_date=to_date, filter_={'vo': vo})
return [api_update_return_dict(r) for r in replicas]
def list_bad_replicas_status(state=BadFilesStatus.BAD, rse=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def'):
"""
List the bad file replicas history states. Method used by the rucio-ui.
:param state: The state of the file (SUSPICIOUS or BAD).
:param rse: The RSE name.
:param younger_than: datetime object to select bad replicas younger than this date.
:param older_than: datetime object to select bad replicas older than this date.
:param limit: The maximum number of replicas returned.
:param vo: The VO to act on.
"""
rse_id = None
if rse is not None:
rse_id = get_rse_id(rse=rse, vo=vo)
replicas = replica.list_bad_replicas_status(state=state, rse_id=rse_id, younger_than=younger_than,
older_than=older_than, limit=limit, list_pfns=list_pfns, vo=vo)
return [api_update_return_dict(r) for r in replicas]
def declare_bad_file_replicas(pfns, reason, issuer, vo='def'):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
kwargs = {}
if not permission.has_permission(issuer=issuer, vo=vo, action='declare_bad_file_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare bad replicas' % (issuer))
issuer = InternalAccount(issuer, vo=vo)
replicas = replica.declare_bad_file_replicas(pfns=pfns, reason=reason, issuer=issuer, status=BadFilesStatus.BAD)
for k in list(replicas):
try:
rse = get_rse_name(rse_id=k)
replicas[rse] = replicas.pop(k)
except exception.RSENotFound:
pass
return replicas
def declare_suspicious_file_replicas(pfns, reason, issuer, vo='def'):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
kwargs = {}
if not permission.has_permission(issuer=issuer, vo=vo, action='declare_suspicious_file_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare suspicious replicas' % (issuer))
issuer = InternalAccount(issuer, vo=vo)
replicas = replica.declare_bad_file_replicas(pfns=pfns, reason=reason, issuer=issuer, status=BadFilesStatus.SUSPICIOUS)
for k in list(replicas):
try:
rse = get_rse_name(rse_id=k)
replicas[rse] = replicas.pop(k)
except exception.RSENotFound:
pass
return replicas
def get_did_from_pfns(pfns, rse, vo='def'):
"""
Get the DIDs associated to a PFN on one given RSE
:param pfns: The list of PFNs.
:param rse: The RSE name.
:param vo: The VO to act on.
:returns: A dictionary {pfn: {'scope': scope, 'name': name}}
"""
rse_id = get_rse_id(rse=rse, vo=vo)
replicas = replica.get_did_from_pfns(pfns=pfns, rse_id=rse_id, vo=vo)
for r in replicas:
for k in r.keys():
r[k]['scope'] = r[k]['scope'].external
yield r
def list_replicas(dids, schemes=None, unavailable=False, request_id=None,
ignore_availability=True, all_states=False, rse_expression=None,
client_location=None, domain=None, signature_lifetime=None,
resolve_archives=True, resolve_parents=False,
nrandom=None, updated_after=None,
issuer=None, vo='def'):
"""
List file replicas for a list of data identifiers.
:param dids: The list of data identifiers (DIDs).
:param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
:param unavailable: (deprecated) Also include unavailable replicas in the list.
:param request_id: ID associated with the request for debugging.
:param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:param rse_expression: The RSE expression to restrict replicas on a set of RSEs.
:param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}
:param domain: The network domain for the call, either None, 'wan' or 'lan'. Compatibility fallback: None falls back to 'wan'.
:param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
:param resolve_archives: When set to True, find archives which contain the replicas.
:param resolve_parents: When set to True, find all parent datasets which contain the replicas.
:param updated_after: datetime object (UTC time), only return replicas updated after this time
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
validate_schema(name='r_dids', obj=dids, vo=vo)
# Allow selected authenticated users to retrieve signed URLs.
# Unauthenticated users, or permission-less users will get the raw URL without the signature.
sign_urls = False
if permission.has_permission(issuer=issuer, vo=vo, action='get_signed_url', kwargs={}):
sign_urls = True
for d in dids:
d['scope'] = InternalScope(d['scope'], vo=vo)
replicas = replica.list_replicas(dids=dids, schemes=schemes, unavailable=unavailable,
request_id=request_id,
ignore_availability=ignore_availability,
all_states=all_states, rse_expression=rse_expression,
client_location=client_location, domain=domain,
sign_urls=sign_urls, signature_lifetime=signature_lifetime,
resolve_archives=resolve_archives, resolve_parents=resolve_parents,
nrandom=nrandom, updated_after=updated_after)
for rep in replicas:
# 'rses' and 'states' use rse_id as the key. This needs updating to be rse.
keys = ['rses', 'states']
for k in keys:
old_dict = rep.get(k, None)
if old_dict is not None:
new_dict = {}
for rse_id in old_dict:
rse = get_rse_name(rse_id=rse_id) if rse_id is not None else None
new_dict[rse] = old_dict[rse_id]
rep[k] = new_dict
rep['scope'] = rep['scope'].external
if 'parents' in rep:
new_parents = []
for p in rep['parents']:
scope, name = p.split(':')
scope = InternalScope(scope, fromExternal=False).external
new_parents.append('{}:{}'.format(scope, name))
rep['parents'] = new_parents
yield rep
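# Illustrative call (hypothetical scope/name values): iterate over the replicas
# of a single file, restricted to the root protocol, and print the RSEs holding
# each replica.
#
# for rep in list_replicas(dids=[{'scope': 'user.jdoe', 'name': 'file_1'}],
#                          schemes=['root'], issuer='root'):
#     print(rep['rses'])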
def add_replicas(rse, files, issuer, ignore_availability=False, vo='def'):
"""
Bulk add file replicas.
:param rse: The RSE name.
:param files: The list of files.
:param issuer: The issuer account.
:param ignore_availability: Ignore blocked RSEs.
:param vo: The VO to act on.
:returns: True is successful, False otherwise
"""
for v_file in files:
v_file.update({"type": "FILE"}) # Make sure DIDs are identified as files for checking
validate_schema(name='dids', obj=files, vo=vo)
rse_id = get_rse_id(rse=rse, vo=vo)
kwargs = {'rse': rse, 'rse_id': rse_id}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not add file replicas on %s' % (issuer, rse))
if not permission.has_permission(issuer=issuer, vo=vo, action='skip_availability_check', kwargs=kwargs):
ignore_availability = False
issuer = InternalAccount(issuer, vo=vo)
for f in files:
f['scope'] = InternalScope(f['scope'], vo=vo)
if 'account' in f:
f['account'] = InternalAccount(f['account'], vo=vo)
replica.add_replicas(rse_id=rse_id, files=files, account=issuer, ignore_availability=ignore_availability)
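# Illustrative call (hypothetical values): register one file replica on an RSE.
# The file dict shape (scope/name/bytes/adler32) is an assumption based on the
# usual Rucio DID schema.
#
# add_replicas(rse='SITE_DISK',
#              files=[{'scope': 'user.jdoe', 'name': 'file_1',
#                      'bytes': 1024, 'adler32': '0cc737eb'}],
#              issuer='root')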
def delete_replicas(rse, files, issuer, ignore_availability=False, vo='def'):
"""
Bulk delete file replicas.
:param rse: The RSE name.
:param files: The list of files.
:param issuer: The issuer account.
:param ignore_availability: Ignore blocked RSEs.
:param vo: The VO to act on.
:returns: True is successful, False otherwise
"""
validate_schema(name='r_dids', obj=files, vo=vo)
rse_id = get_rse_id(rse=rse, vo=vo)
kwargs = {'rse': rse, 'rse_id': rse_id}
if not permission.has_permission(issuer=issuer, vo=vo, action='delete_replicas', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not delete file replicas on %s' % (issuer, rse))
if not permission.has_permission(issuer=issuer, vo=vo, action='skip_availability_check', kwargs=kwargs):
ignore_availability = False
for f in files:
f['scope'] = InternalScope(f['scope'], vo=vo)
replica.delete_replicas(rse_id=rse_id, files=files, ignore_availability=ignore_availability)
def update_replicas_states(rse, files, issuer, vo='def'):
"""
Update File replica information and state.
:param rse: The RSE name.
:param files: The list of files.
:param issuer: The issuer account.
:param vo: The VO to act on.
"""
for v_file in files:
v_file.update({"type": "FILE"}) # Make sure DIDs are identified as files for checking
validate_schema(name='dids', obj=files, vo=vo)
rse_id = get_rse_id(rse=rse, vo=vo)
kwargs = {'rse': rse, 'rse_id': rse_id}
if not permission.has_permission(issuer=issuer, vo=vo, action='update_replicas_states', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not update file replicas state on %s' % (issuer, rse))
replicas = []
for file in files:
rep = file
rep['rse_id'] = rse_id
rep['scope'] = InternalScope(rep['scope'], vo=vo)
replicas.append(rep)
replica.update_replicas_states(replicas=replicas)
def list_dataset_replicas(scope, name, deep=False, vo='def'):
"""
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param vo: The VO to act on.
:returns: A list of dict dataset replicas
"""
scope = InternalScope(scope, vo=vo)
replicas = replica.list_dataset_replicas(scope=scope, name=name, deep=deep)
for r in replicas:
r['scope'] = r['scope'].external
yield r
def list_dataset_replicas_bulk(dids, vo='def'):
"""
:param dids: The list of did dictionaries with scope and name.
:param vo: The VO to act on.
:returns: A list of dict dataset replicas
"""
validate_schema(name='r_dids', obj=dids, vo=vo)
names_by_scope = dict()
for d in dids:
if d['scope'] in names_by_scope:
names_by_scope[d['scope']].append(d['name'])
else:
names_by_scope[d['scope']] = [d['name'], ]
names_by_intscope = dict()
for scope in names_by_scope:
internal_scope = InternalScope(scope, vo=vo)
names_by_intscope[internal_scope] = names_by_scope[scope]
replicas = replica.list_dataset_replicas_bulk(names_by_intscope)
for r in replicas:
yield api_update_return_dict(r)
def list_dataset_replicas_vp(scope, name, deep=False, vo='def'):
"""
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param vo: The vo to act on.
:returns: If VP exists a list of dicts of sites, otherwise nothing
NOTICE: This is an RnD function and might change or go away at any time.
"""
scope = InternalScope(scope, vo=vo)
for r in replica.list_dataset_replicas_vp(scope=scope, name=name, deep=deep):
yield api_update_return_dict(r)
def list_datasets_per_rse(rse, filters={}, limit=None, vo='def'):
"""
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param filters: dictionary of attributes by which the results should be filtered.
:param limit: limit number.
:param session: Database session to use.
:param vo: The VO to act on.
:returns: A list of dict dataset replicas
"""
rse_id = get_rse_id(rse=rse, vo=vo)
if 'scope' in filters:
filters['scope'] = InternalScope(filters['scope'], vo=vo)
for r in replica.list_datasets_per_rse(rse_id, filters=filters, limit=limit):
yield api_update_return_dict(r)
def add_bad_pfns(pfns, issuer, state, reason=None, expires_at=None, vo='def'):
"""
Add bad PFNs.
:param pfns: the list of new files.
:param issuer: The issuer account.
:param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.
:param reason: A string describing the reason of the loss.
:param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.
:param vo: The VO to act on.
:param session: The database session in use.
:returns: True is successful.
"""
kwargs = {'state': state}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_bad_pfns', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare bad PFNs' % (issuer))
issuer = InternalAccount(issuer, vo=vo)
return replica.add_bad_pfns(pfns=pfns, account=issuer, state=state, reason=reason, expires_at=expires_at)
def add_bad_dids(dids, rse, issuer, state, reason=None, expires_at=None, vo='def'):
"""
Add bad replica entries for DIDs.
:param dids: the list of dids with bad replicas at rse.
:param rse: the rse with the bad replicas.
:param issuer: The issuer account.
:param state: One of the possible states : BAD
:param reason: A string describing the reason of the loss.
:param expires_at: None
:param vo: The VO to act on.
:returns: The list of replicas not declared bad
"""
kwargs = {'state': state}
if not permission.has_permission(issuer=issuer, vo=vo, action='add_bad_pfns', kwargs=kwargs):
raise exception.AccessDenied('Account %s can not declare bad PFN or DIDs' % issuer)
issuer = InternalAccount(issuer, vo=vo)
rse_id = get_rse_id(rse=rse)
return replica.add_bad_dids(dids=dids, rse_id=rse_id, reason=reason, issuer=issuer, state=state)
def get_suspicious_files(rse_expression, younger_than=None, nattempts=None, vo='def'):
"""
List the list of suspicious files on a list of RSEs
:param rse_expression: The RSE expression where the suspicious files are located
:param younger_than: datetime object to select the suspicious replicas younger than this date.
:param nattempts: The number of time the replicas have been declared suspicious
:param vo: The VO to act on.
"""
replicas = replica.get_suspicious_files(rse_expression=rse_expression, younger_than=younger_than, nattempts=nattempts, filter_={'vo': vo})
return [api_update_return_dict(r) for r in replicas]
def set_tombstone(rse, scope, name, issuer, vo='def'):
"""
Sets a tombstone on one replica.
:param rse: name of the RSE.
:param scope: scope of the replica DID.
:param name: name of the replica DID.
:param issuer: The issuer account
:param vo: The VO to act on.
"""
rse_id = get_rse_id(rse, vo=vo)
if not permission.has_permission(issuer=issuer, vo=vo, action='set_tombstone', kwargs={}):
raise exception.AccessDenied('Account %s can not set tombstones' % (issuer))
scope = InternalScope(scope, vo=vo)
replica.set_tombstone(rse_id, scope, name)
|
py | 1a3a815a532c5b60a723d5065e55e54358b7e7a7 | # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test Access Control roles propagation."""
from ggrc.models import all_models, get_model
from integration.ggrc import TestCase
from integration.ggrc.models import factories
from integration.ggrc_basic_permissions.models \
import factories as rbac_factories
class TestACLPropagation(TestCase):
"""TestACLPropagation base class."""
GLOBAL_ROLES = ["Creator", "Reader", "Editor", "Administrator"]
SUCCESS = 200
SUCCESS_CREATED = 201
FORBIDDEN = 403
ACCESS_ERROR = ("Response for current operation has wrong status. {} "
"expected, {} received.")
ACCESS_QUERY_API_ERROR = ("Current operation has wrong result. {} "
"expected, {} received. ({} count={})")
CAN_NOT_READ_ERROR = ("{} objects weren't read. Non-zero object count "
"expected.")
CAN_READ_ERROR = "Some {} objects were read. No objects expected."
READ_COLLECTION_OPERATIONS = ["read_revisions", "get_latest_version"]
QUERY_API_OPERATIONS = ["read_comments", "read_document_comments"]
def setup_people(self):
"""Setup people with global roles."""
# pylint: disable=attribute-defined-outside-init
roles_query = all_models.Role.query.filter(
all_models.Role.name.in_(self.GLOBAL_ROLES)
)
global_roles = {role.name: role for role in roles_query}
self.people = {}
with factories.single_commit():
for role_name in self.GLOBAL_ROLES:
user = factories.PersonFactory()
self.people[role_name] = user
rbac_factories.UserRoleFactory(
role=global_roles[role_name],
person=user
)
def assert_read_collection(self, response, expected_res, model):
"""Check collection read operation.
Args:
response(TestResponse): Received operation response.
expected_res: Boolean flag showing if objects should be read or not.
model: Model name.
"""
self.assertStatus(response, self.SUCCESS)
table_plural = get_model(model)._inflector.table_plural
response_data = response.json.get("{}_collection".format(table_plural), {})
assert_raises = False
if isinstance(expected_res, tuple):
expected_res, assert_raises = expected_res
if expected_res:
err = self.CAN_NOT_READ_ERROR.format(model)
else:
err = self.CAN_READ_ERROR.format(model)
if assert_raises == "unimplemented":
with self.assertRaises(AssertionError):
self.assertEqual(
bool(response_data.get(table_plural)),
expected_res,
err,
)
else:
self.assertEqual(
bool(response_data.get(table_plural)),
expected_res,
err,
)
def assert_status(self, response, expected_res):
"""Check correctness of response status.
Args:
response: Response instance.
expected_res: Boolean flag. If True 200/201 status expected, if False
403 status expected.
"""
assert_raises = False
if isinstance(expected_res, tuple):
expected_res, assert_raises = expected_res
success_statuses = [self.SUCCESS, self.SUCCESS_CREATED]
exp_statuses = success_statuses if expected_res else [self.FORBIDDEN]
if assert_raises:
with self.assertRaises(AssertionError):
self.assertIn(
response.status_code,
exp_statuses,
self.ACCESS_ERROR.format(exp_statuses[0], response.status_code)
)
else:
self.assertIn(
response.status_code,
exp_statuses,
self.ACCESS_ERROR.format(exp_statuses[0], response.status_code)
)
def assert_query_api_response(self, response, expected_res):
"""Check correctness of query API response.
Args:
response: query api result of action execution.
expected_res: Boolean flag.
"""
for resp_item in response.json:
for obj, resp in resp_item.iteritems():
res = bool(resp['count'])
self.assertEqual(res, expected_res,
self.ACCESS_QUERY_API_ERROR.format(expected_res,
res, obj,
resp['count']))
def assert_result(self, response, expected_res, operation, model):
"""Check correctness of response result.
Args:
response: Response instance.
      expected_res: Boolean flag that shows whether the response should succeed.
operation: Action name.
model: Model name.
"""
    # Snapshot is a special case. All operations on it
    # are done through Revisions.
model = "Revision" if model == "Snapshot" else model
    # Some operations are based on several requests and responses,
    # so we need to verify all of these responses.
responses = response if isinstance(response, list) else [response]
for res in responses:
if operation in self.READ_COLLECTION_OPERATIONS:
self.assert_read_collection(res, expected_res, model)
elif operation in self.QUERY_API_OPERATIONS:
self.assert_query_api_response(res, expected_res)
else:
self.assert_status(res, expected_res)
def runtest(self, role, model, action_name, expected_result, **kwargs):
"""Run integration RBAC test.
Args:
role: Global user role (Creator/Reader/Editor).
model: Model that should be tested (Audit/Assessment/...).
action_name: Action that should be tested (read/update/delete/...).
expected_result: Boolean expected result of action.
"""
model_name, parent = model, None
if " " in model:
model_name, parent = model.split(" ")
rbac_factory = self.init_factory(role, model_name, parent, **kwargs)
if not rbac_factory:
raise Exception("There is no factory for model '{}'".format(model_name))
action = getattr(rbac_factory, action_name, None)
if not action:
raise NotImplementedError(
"Action {} is not implemented for this test.".format(action_name)
)
response = action()
self.assert_result(response, expected_result, action_name, model_name)
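# Illustrative sketch (not part of the original test suite): concrete subclasses
# implement ``init_factory`` and then parametrize ``runtest`` with a global role,
# a model name and an action. "Audit" and the expected booleans below are example
# values only; the methods are deliberately not named test_* so the runner never
# collects them.
class _ExampleAuditACLSketch(TestACLPropagation):
  """Usage sketch for the runtest() helper."""
  def example_reader_can_read_audit(self):
    # A Reader with access to the Audit is expected to read it successfully.
    self.runtest("Reader", "Audit", "read", True)
  def example_creator_cannot_delete_audit(self):
    # A plain Creator is expected to be denied the delete action.
    self.runtest("Creator", "Audit", "delete", False)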
|
py | 1a3a81aef42a2a0e380e416a145a4a6a2fb807a0 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import os
import tensorflow as tf
from object_detection import eval_util
from object_detection import exporter as exporter_lib
from object_detection import inputs
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# A map of names to methods that help build the model.
MODEL_BUILD_UTIL_MAP = {
'get_configs_from_pipeline_file':
config_util.get_configs_from_pipeline_file,
'create_pipeline_proto_from_configs':
config_util.create_pipeline_proto_from_configs,
'merge_external_params_with_configs':
config_util.merge_external_params_with_configs,
'create_train_input_fn':
inputs.create_train_input_fn,
'create_eval_input_fn':
inputs.create_eval_input_fn,
'create_predict_input_fn':
inputs.create_predict_input_fn,
'detection_model_fn_base': model_builder.build,
}
def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
max_number_of_boxes):
"""Extracts groundtruth data from detection_model and prepares it for eval.
Args:
detection_model: A `DetectionModel` object.
class_agnostic: Whether the detections are class_agnostic.
max_number_of_boxes: Max number of groundtruth boxes.
Returns:
A tuple of:
groundtruth: Dictionary with the following fields:
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,
in normalized coordinates.
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes.
'groundtruth_masks': 4D float32 tensor of instance masks (if provided in
groundtruth)
'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating
is_crowd annotations (if provided in groundtruth).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
        of groundtruth boxes per image.
class_agnostic: Boolean indicating whether detections are class agnostic.
"""
input_data_fields = fields.InputDataFields()
groundtruth_boxes = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.boxes))
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
if class_agnostic:
groundtruth_classes_one_hot = tf.ones(
[groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])
else:
groundtruth_classes_one_hot = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.classes))
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes
}
if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.masks))
if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):
groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))
groundtruth[input_data_fields.num_groundtruth_boxes] = (
tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))
return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
"""Unstacks all tensors in `tensor_dict` along 0th dimension.
Unstacks tensor from the tensor dict along 0th dimension and returns a
tensor_dict containing values that are lists of unstacked, unpadded tensors.
Tensors in the `tensor_dict` are expected to be of one of the three shapes:
1. [batch_size]
2. [batch_size, height, width, channels]
3. [batch_size, num_boxes, d1, d2, ... dn]
When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
above are sliced along the `num_boxes` dimension using the value in tensor
field.InputDataFields.num_groundtruth_boxes.
Note that this function has a static list of input data fields and has to be
kept in sync with the InputDataFields defined in core/standard_fields.py
Args:
tensor_dict: A dictionary of batched groundtruth tensors.
unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
dimension of the groundtruth tensors.
Returns:
A dictionary where the keys are from fields.InputDataFields and values are
a list of unstacked (optionally unpadded) tensors.
Raises:
ValueError: If unpad_tensors is True and `tensor_dict` does not contain
`num_groundtruth_boxes` tensor.
"""
unbatched_tensor_dict = {
key: tf.unstack(tensor) for key, tensor in tensor_dict.items()
}
if unpad_groundtruth_tensors:
if (fields.InputDataFields.num_groundtruth_boxes not in
unbatched_tensor_dict):
raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
'Keys available: {}'.format(
unbatched_tensor_dict.keys()))
unbatched_unpadded_tensor_dict = {}
unpad_keys = set([
# List of input data fields that are padded along the num_boxes
# dimension. This list has to be kept in sync with InputDataFields in
# standard_fields.py.
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_weights
]).intersection(set(unbatched_tensor_dict.keys()))
for key in unpad_keys:
unpadded_tensor_list = []
for num_gt, padded_tensor in zip(
unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
unbatched_tensor_dict[key]):
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
padded_tensor)
slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
slice_size = tf.stack(
[num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
unpadded_tensor_list.append(unpadded_tensor)
unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
return unbatched_tensor_dict
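# Illustrative sketch (not part of the original library): build a tiny padded
# groundtruth batch and unpad it with unstack_batch(). Shapes and values are made
# up; evaluating the resulting tensors still requires a tf.Session in graph mode.
def _example_unstack_batch():
  """Unpads a batch of 2 images whose groundtruth was padded to 3 boxes each."""
  tensor_dict = {
      fields.InputDataFields.num_groundtruth_boxes: tf.constant([1, 2]),
      fields.InputDataFields.groundtruth_boxes: tf.constant(
          [[[0., 0., .5, .5], [0.] * 4, [0.] * 4],
           [[0., 0., 1., 1.], [.2, .2, .8, .8], [0.] * 4]]),
      fields.InputDataFields.groundtruth_classes: tf.constant(
          [[[1.], [0.], [0.]], [[1.], [1.], [0.]]]),
  }
  unbatched = unstack_batch(tensor_dict, unpad_groundtruth_tensors=True)
  # unbatched[fields.InputDataFields.groundtruth_boxes] is a list of two tensors
  # with shapes [1, 4] and [2, 4]: the all-zero padding rows have been sliced away.
  return unbatched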
def provide_groundtruth(model, labels):
"""Provides the labels to a model as groundtruth.
This helper function extracts the corresponding boxes, classes,
keypoints, weights, masks, etc. from the labels, and provides it
as groundtruth to the models.
Args:
model: The detection model to provide groundtruth to.
labels: The labels for the training or evaluation inputs.
"""
gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
gt_masks_list = None
if fields.InputDataFields.groundtruth_instance_masks in labels:
gt_masks_list = labels[
fields.InputDataFields.groundtruth_instance_masks]
gt_keypoints_list = None
if fields.InputDataFields.groundtruth_keypoints in labels:
gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
gt_weights_list = None
if fields.InputDataFields.groundtruth_weights in labels:
gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]
gt_confidences_list = None
if fields.InputDataFields.groundtruth_confidences in labels:
gt_confidences_list = labels[
fields.InputDataFields.groundtruth_confidences]
gt_is_crowd_list = None
if fields.InputDataFields.groundtruth_is_crowd in labels:
gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]
model.provide_groundtruth(
groundtruth_boxes_list=gt_boxes_list,
groundtruth_classes_list=gt_classes_list,
groundtruth_confidences_list=gt_confidences_list,
groundtruth_masks_list=gt_masks_list,
groundtruth_keypoints_list=gt_keypoints_list,
groundtruth_weights_list=gt_weights_list,
groundtruth_is_crowd_list=gt_is_crowd_list)
def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False,
postprocess_on_cpu=False):
"""Creates a model function for `Estimator`.
Args:
detection_model_fn: Function that returns a `DetectionModel` instance.
configs: Dictionary of pipeline config objects.
hparams: `HParams` object.
use_tpu: Boolean indicating whether model should be constructed for
use on TPU.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess
is scheduled on the host cpu.
Returns:
`model_fn` for `Estimator`.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
def model_fn(features, labels, mode, params=None):
"""Constructs the object detection model.
Args:
features: Dictionary of feature tensors, returned from `input_fn`.
labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
otherwise None.
mode: Mode key from tf.estimator.ModeKeys.
params: Parameter dictionary passed from the estimator.
Returns:
An `EstimatorSpec` that encapsulates the model and its serving
configurations.
"""
params = params or {}
total_loss, train_op, detections, export_outputs = None, None, None, None
is_training = mode == tf.estimator.ModeKeys.TRAIN
# Make sure to set the Keras learning phase. True during training,
# False for inference.
tf.keras.backend.set_learning_phase(is_training)
detection_model = detection_model_fn(
is_training=is_training, add_summaries=(not use_tpu))
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
labels = unstack_batch(
labels,
unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
elif mode == tf.estimator.ModeKeys.EVAL:
      # For evaluating on train data, it is necessary to check whether
      # groundtruth must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape()
.as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
labels = unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
provide_groundtruth(detection_model, labels)
preprocessed_images = features[fields.InputDataFields.image]
if use_tpu and train_config.use_bfloat16:
with tf.contrib.tpu.bfloat16_scope():
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)
else:
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
def postprocess_wrapper(args):
return detection_model.postprocess(args[0], args[1])
if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):
if use_tpu and postprocess_on_cpu:
detections = tf.contrib.tpu.outside_compilation(
postprocess_wrapper,
(prediction_dict,
features[fields.InputDataFields.true_image_shape]))
else:
detections = postprocess_wrapper((
prediction_dict,
features[fields.InputDataFields.true_image_shape]))
if mode == tf.estimator.ModeKeys.TRAIN:
if train_config.fine_tune_checkpoint and hparams.load_pretrained:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
asg_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map,
train_config.fine_tune_checkpoint,
include_global_step=False))
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
losses_dict = detection_model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if train_config.add_regularization_loss:
regularization_losses = detection_model.regularization_losses()
if use_tpu and train_config.use_bfloat16:
regularization_losses = ops.bfloat16_to_float32_nested(
regularization_losses)
if regularization_losses:
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=is_training)
graph_rewriter_fn()
# TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
# can write learning rate summaries on TPU without host calls.
global_step = tf.train.get_or_create_global_step()
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
if mode == tf.estimator.ModeKeys.TRAIN:
if use_tpu:
training_optimizer = tf.contrib.tpu.CrossShardOptimizer(
training_optimizer)
# Optionally freeze some layers by setting their gradients to be zero.
trainable_variables = None
include_variables = (
train_config.update_trainable_variables
if train_config.update_trainable_variables else None)
exclude_variables = (
train_config.freeze_variables
if train_config.freeze_variables else None)
trainable_variables = tf.contrib.framework.filter_variables(
tf.trainable_variables(),
include_patterns=include_variables,
exclude_patterns=exclude_variables)
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
if not use_tpu:
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
summaries = [] if use_tpu else None
if train_config.summarize_gradients:
summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']
train_op = tf.contrib.layers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=training_optimizer,
update_ops=detection_model.updates(),
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
if mode == tf.estimator.ModeKeys.PREDICT:
exported_output = exporter_lib.add_output_tensor_nodes(detections)
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf.estimator.export.PredictOutput(exported_output)
}
eval_metric_ops = None
scaffold = None
if mode == tf.estimator.ModeKeys.EVAL:
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
groundtruth = _prepare_groundtruth_for_eval(
detection_model, class_agnostic,
eval_input_config.max_number_of_boxes)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = tf.slice(
features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
original_image_spatial_shapes = features[fields.InputDataFields
.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
if class_agnostic:
category_index = label_map_util.create_class_agnostic_category_index()
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
vis_metric_ops = None
if not use_tpu and use_original_images:
eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=eval_config.num_visualizations,
max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
min_score_thresh=eval_config.min_score_threshold,
use_normalized_coordinates=False)
vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(
eval_dict)
# Eval metrics on a single example.
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, list(category_index.values()), eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
for var in optimizer_summary_vars:
eval_metric_ops[var.op.name] = (var, tf.no_op())
if vis_metric_ops is not None:
eval_metric_ops.update(vis_metric_ops)
eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
variables_to_restore,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
scaffold = tf.train.Scaffold(saver=saver)
# EVAL executes on CPU, so use regular non-TPU EstimatorSpec.
if use_tpu and mode != tf.estimator.ModeKeys.EVAL:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
scaffold_fn=scaffold_fn,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metric_ops,
export_outputs=export_outputs)
else:
if scaffold is None:
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
sharded=True,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
scaffold = tf.train.Scaffold(saver=saver)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
scaffold=scaffold)
return model_fn
def create_estimator_and_inputs(run_config,
hparams,
pipeline_config_path,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=None,
sample_1_of_n_eval_on_train_examples=1,
model_fn_creator=create_model_fn,
use_tpu_estimator=False,
use_tpu=False,
num_shards=1,
params=None,
override_eval_num_epochs=True,
save_final_config=False,
postprocess_on_cpu=False,
export_to_tpu=None,
**kwargs):
"""Creates `Estimator`, input functions, and steps.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
an `Estimator` will be returned.
use_tpu: Boolean, whether training and evaluation should run on TPU. Only
used if `use_tpu_estimator` is True.
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
is True.
params: Parameter dictionary passed from the estimator. Only used if
`use_tpu_estimator` is True.
override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for
eval_input.
save_final_config: Whether to save final config (obtained after applying
overrides) to `estimator.model_dir`.
postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,
postprocess is scheduled on the host cpu.
export_to_tpu: When use_tpu and export_to_tpu are true,
`export_savedmodel()` exports a metagraph for serving on TPU besides the
one on CPU.
**kwargs: Additional keyword arguments for configuration override.
Returns:
A dictionary with the following fields:
'estimator': An `Estimator` or `TPUEstimator`.
'train_input_fn': A training input function.
'eval_input_fns': A list of all evaluation input functions.
'eval_input_names': A list of names for each evaluation input.
'eval_on_train_input_fn': An evaluation-on-train input function.
'predict_input_fn': A prediction input function.
'train_steps': Number of training steps. Either directly from input or from
configuration.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
if sample_1_of_n_eval_examples >= 1:
kwargs.update({
'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples
})
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, hparams, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
# update train_steps from config but only when non-zero value is provided
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
detection_model_fn = functools.partial(
detection_model_fn_base, model_config=model_config)
# Create the input functions for TRAIN/EVAL/PREDICT.
train_input_fn = create_train_input_fn(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config)
eval_input_fns = [
create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config) for eval_input_config in eval_input_configs
]
eval_input_names = [
eval_input_config.name for eval_input_config in eval_input_configs
]
eval_on_train_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_on_train_input_config,
model_config=model_config)
predict_input_fn = create_predict_input_fn(
model_config=model_config, predict_input_config=eval_input_configs[0])
# Read export_to_tpu from hparams if not passed.
if export_to_tpu is None:
export_to_tpu = hparams.get('export_to_tpu', False)
tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',
use_tpu, export_to_tpu)
model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu,
postprocess_on_cpu)
if use_tpu_estimator:
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
train_batch_size=train_config.batch_size,
# For each core, only batch size 1 is supported for eval.
eval_batch_size=num_shards * 1 if use_tpu else 1,
use_tpu=use_tpu,
config=run_config,
export_to_tpu=export_to_tpu,
eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU
params=params if params else {})
else:
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
# Write the as-run pipeline config to disk.
if run_config.is_chief and save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
eval_interval_secs = eval_config.eval_interval_secs
return dict(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fns=eval_input_fns,
eval_input_names=eval_input_names,
eval_on_train_input_fn=eval_on_train_input_fn,
predict_input_fn=predict_input_fn,
train_steps=train_steps,
      eval_interval_secs=eval_interval_secs)
def create_train_and_eval_specs(train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data=False,
eval_interval_secs=300,
final_exporter_name='Servo',
eval_spec_names=None):
"""Creates a `TrainSpec` and `EvalSpec`s.
Args:
train_input_fn: Function that produces features and labels on train data.
eval_input_fns: A list of functions that produce features and labels on eval
data.
eval_on_train_input_fn: Function that produces features and labels for
evaluation on train data.
predict_input_fn: Function that produces features for inference.
train_steps: Number of training steps.
eval_on_train_data: Whether to evaluate model on training data. Default is
False.
    eval_interval_secs: Minimum number of seconds between evaluations; used as
      the `throttle_secs` value for each `EvalSpec`. Defaults to 300.
    final_exporter_name: String name given to `FinalExporter`.
eval_spec_names: A list of string names for each `EvalSpec`.
Returns:
Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is
    True, the last `EvalSpec` in the list will correspond to training data. The
    remaining `EvalSpec`s in the list correspond to the evaluation datasets.
"""
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=train_steps)
if eval_spec_names is None:
eval_spec_names = [str(i) for i in range(len(eval_input_fns))]
eval_specs = []
for index, (eval_spec_name, eval_input_fn) in enumerate(
zip(eval_spec_names, eval_input_fns)):
# Uses final_exporter_name as exporter_name for the first eval spec for
# backward compatibility.
if index == 0:
exporter_name = final_exporter_name
else:
exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)
exporter = tf.estimator.FinalExporter(
name=exporter_name, serving_input_receiver_fn=predict_input_fn)
eval_specs.append(
tf.estimator.EvalSpec(
name=eval_spec_name,
input_fn=eval_input_fn,
steps=None,
exporters=exporter,
throttle_secs=eval_interval_secs))
if eval_on_train_data:
eval_specs.append(
tf.estimator.EvalSpec(
name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))
return train_spec, eval_specs
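# Illustrative sketch (not part of this module): how the helpers above are wired
# together, roughly mirroring model_main.py. The model_dir, hparams and pipeline
# config path are placeholders supplied by the caller.
def _example_train_and_evaluate(model_dir, hparams, pipeline_config_path):
  run_config = tf.estimator.RunConfig(model_dir=model_dir)
  train_and_eval_dict = create_estimator_and_inputs(
      run_config, hparams, pipeline_config_path,
      sample_1_of_n_eval_examples=1, save_final_config=True)
  train_spec, eval_specs = create_train_and_eval_specs(
      train_and_eval_dict['train_input_fn'],
      train_and_eval_dict['eval_input_fns'],
      train_and_eval_dict['eval_on_train_input_fn'],
      train_and_eval_dict['predict_input_fn'],
      train_and_eval_dict['train_steps'],
      eval_on_train_data=False,
      eval_interval_secs=train_and_eval_dict['eval_interval_secs'])
  # Only the first EvalSpec is used here, as in model_main.py.
  tf.estimator.train_and_evaluate(
      train_and_eval_dict['estimator'], train_spec, eval_specs[0])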
def continuous_eval(estimator, model_dir, input_fn, train_steps, name):
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
"""
def terminate_eval():
tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
return True
for ckpt in tf.contrib.training.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
tf.logging.info('Starting Evaluation.')
try:
eval_results = estimator.evaluate(
input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
def populate_experiment(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
**kwargs):
"""Populates an `Experiment` object.
EXPERIMENT CLASS IS DEPRECATED. Please switch to
tf.estimator.train_and_evaluate. As an example, see model_main.py.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
An `Experiment` that defines all aspects of training, evaluation, and
export.
"""
tf.logging.warning('Experiment is being deprecated. Please use '
'tf.estimator.train_and_evaluate(). See model_main.py for '
'an example.')
train_and_eval_dict = create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
eval_steps=eval_steps,
model_fn_creator=model_fn_creator,
save_final_config=True,
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
export_strategies = [
tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
serving_input_fn=predict_input_fn)
]
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fns[0],
train_steps=train_steps,
eval_steps=None,
export_strategies=export_strategies,
eval_delay_secs=120,
)
|
py | 1a3a820afad2fde00ee1737a44bdc7e88b72b6da | # coding=utf-8
from django.conf import settings
from django.http import HttpResponse, HttpRequest
from django.db.models import Model
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import authenticate
from django.shortcuts import redirect, resolve_url
from django.template import TemplateDoesNotExist, loader, engines
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.translation import get_language
from social_core.strategy import BaseStrategy, BaseTemplateStrategy
from .compat import get_request_port
def render_template_string(request, html, context=None):
"""Take a template in the form of a string and render it for the
given context"""
template = engines['django'].from_string(html)
return template.render(context=context, request=request)
class DjangoTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
template = loader.get_template(tpl)
return template.render(context=context, request=self.strategy.request)
def render_string(self, html, context):
return render_template_string(self.strategy.request, html, context)
class DjangoStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = DjangoTemplateStrategy
def __init__(self, storage, request=None, tpl=None):
self.request = request
self.session = request.session if request else {}
super(DjangoStrategy, self).__init__(storage, tpl)
def get_setting(self, name):
value = getattr(settings, name)
# Force text on URL named settings that are instance of Promise
if name.endswith('_URL'):
if isinstance(value, Promise):
value = force_text(value)
value = resolve_url(value)
return value
def request_data(self, merge=True):
if not self.request:
return {}
if merge:
data = self.request.GET.copy()
data.update(self.request.POST)
elif self.request.method == 'POST':
data = self.request.POST
else:
data = self.request.GET
return data
def request_host(self):
if self.request:
return self.request.get_host()
def request_is_secure(self):
"""Is the request using HTTPS?"""
return self.request.is_secure()
def request_path(self):
"""path of the current request"""
return self.request.path
def request_port(self):
"""Port in use for this request"""
return get_request_port(request=self.request)
def request_get(self):
"""Request GET data"""
return self.request.GET.copy()
def request_post(self):
"""Request POST data"""
return self.request.POST.copy()
def redirect(self, url):
return redirect(url)
def html(self, content):
return HttpResponse(content, content_type='text/html;charset=UTF-8')
def render_html(self, tpl=None, html=None, context=None):
if not tpl and not html:
raise ValueError('Missing template or html parameters')
context = context or {}
try:
template = loader.get_template(tpl)
return template.render(context=context, request=self.request)
except TemplateDoesNotExist:
return render_template_string(self.request, html, context)
def authenticate(self, backend, *args, **kwargs):
kwargs['strategy'] = self
kwargs['storage'] = self.storage
kwargs['backend'] = backend
return authenticate(*args, **kwargs)
def clean_authenticate_args(self, *args, **kwargs):
"""Cleanup request argument if present, which is passed to
authenticate as for Django 1.11"""
if len(args) > 0 and isinstance(args[0], HttpRequest):
kwargs['request'], args = args[0], args[1:]
return args, kwargs
def session_get(self, name, default=None):
return self.session.get(name, default)
def session_set(self, name, value):
self.session[name] = value
if hasattr(self.session, 'modified'):
self.session.modified = True
def session_pop(self, name):
return self.session.pop(name, None)
def session_setdefault(self, name, value):
return self.session.setdefault(name, value)
def build_absolute_uri(self, path=None):
if self.request:
return self.request.build_absolute_uri(path)
else:
return path
def random_string(self, length=12, chars=BaseStrategy.ALLOWED_CHARS):
return get_random_string(length, chars)
def to_session_value(self, val):
"""Converts values that are instance of Model to a dictionary
with enough information to retrieve the instance back later."""
if isinstance(val, Model):
val = {
'pk': val.pk,
'ctype': ContentType.objects.get_for_model(val).pk
}
return val
def from_session_value(self, val):
"""Converts back the instance saved by self._ctype function."""
if isinstance(val, dict) and 'pk' in val and 'ctype' in val:
ctype = ContentType.objects.get_for_id(val['ctype'])
ModelClass = ctype.model_class()
val = ModelClass.objects.get(pk=val['pk'])
return val
def get_language(self):
"""Return current language"""
return get_language()
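# Illustrative sketch (not part of python-social-auth): how a Django model
# instance survives a round trip through the session helpers above. ``strategy``
# and ``user`` are placeholders, and a configured Django project with a database
# is assumed, since ContentType lookups hit the ORM.
def _example_session_round_trip(strategy, user):
    stored = strategy.to_session_value(user)
    # stored == {'pk': user.pk, 'ctype': <ContentType id of the user model>}
    strategy.session_set('partial_user', stored)
    restored = strategy.from_session_value(strategy.session_get('partial_user'))
    assert restored.pk == user.pk
    return restored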
|
py | 1a3a82a3e422093e0f0cc2c5cfca0ed709a88da4 | import datetime
from casexml.apps.case.models import CommCareCaseAction
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.api.es import ReportCaseES
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.basic import BasicTabularReport, Column
from corehq.apps.reports.standard import (DatespanMixin,
ProjectReportParametersMixin, CustomProjectReport)
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.pillows.base import restore_property_dict
from hsph.reports import HSPHSiteDataMixin
from hsph.fields import NameOfCATIField, AllocatedToFilter
from corehq.apps.reports.filters.users import UserTypeFilter
from corehq.apps.reports.filters.dates import DatespanFilter
from couchdbkit_aggregate.fn import mean, unique_count
from casexml.apps.case import const
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.couch.database import get_db
def short_date_format(date):
return date.strftime('%d-%b')
def username(key, report):
return report.usernames[key[0]]
def datestring_minus_days(datestring, days):
date = datetime.datetime.strptime(datestring[:10], '%Y-%m-%d')
return (date - datetime.timedelta(days=days)).isoformat()
def date_minus_11_days(couchkey):
return couchkey + [datestring_minus_days(couchkey[0], 11)]
def date_minus_14_days(couchkey):
return couchkey + [datestring_minus_days(couchkey[0], 14)]
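# Illustrative sketch (not part of the original report module): what the couch
# key helpers above produce. The dates are arbitrary example values.
def _example_couch_keys():
    # datestring_minus_days('2013-05-20', 11) -> '2013-05-09T00:00:00'
    shifted = datestring_minus_days('2013-05-20', 11)
    # date_minus_11_days appends the shifted date to an existing couch key:
    # ['2013-05-20'] -> ['2013-05-20', '2013-05-09T00:00:00']
    key = date_minus_11_days(['2013-05-20'])
    return shifted, key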
class CATIPerformanceReport(CustomProjectReport, ProjectReportParametersMixin,
DatespanMixin, BasicTabularReport):
name = "CATI Performance Report"
slug = "cati_performance"
field_classes = (UserTypeFilter, DatespanFilter, NameOfCATIField)
filter_group_name = "CATI"
couch_view = "hsph/cati_performance_report_old"
default_column_order = (
'catiName',
'followedUp',
'noFollowUpAfter4Days',
'transferredToManager',
'transferredToField',
'notClosedOrTransferredAfter13Days',
'workingDaysUniqueCount',
'followUpTime',
'followUpTimeMean'
)
catiName = Column(
"Name of CATI", calculate_fn=username)
followedUp = Column(
"No. of Births Followed Up", key='followedUp')
noFollowUpAfter4Days = Column(
"No. of Cases with No Follow Up for 4 Days",
key='noFollowUpAfter4Days',
endkey_fn=date_minus_11_days)
transferredToManager = Column(
"Transferred to Call Center Manager", key='transferredToManager')
transferredToField = Column(
"Transferred to Field", key='transferredToField')
notClosedOrTransferredAfter13Days = Column(
"CATI Timed Out", key='notClosedOrTransferredAfter13Days',
endkey_fn=date_minus_14_days)
workingDaysUniqueCount = Column(
"No. of Working Days", key='workingDays', reduce_fn=unique_count)
followUpTime = Column(
"Total Follow Up Time", key='followUpTime')
followUpTimeMean = Column(
"Average Follow Up Time", key='followUpTime', reduce_fn=mean)
@property
def start_and_end_keys(self):
return ([self.datespan.startdate_param_utc],
[self.datespan.enddate_param_utc])
@property
def keys(self):
for user in self.users:
yield [user['user_id']]
class HSPHCaseDisplay(CaseDisplay):
@property
def region(self):
try:
return self.report.get_region_name(self.case['region_id'])
except AttributeError:
return ""
@property
def district(self):
try:
return self.report.get_district_name(
self.case['region_id'], self.case['district_id'])
except AttributeError:
return ""
@property
def site(self):
try:
return self.report.get_site_name(
self.case['region_id'], self.case['district_id'],
self.case['site_number'])
except AttributeError:
return ""
@property
def patient_id(self):
try:
return self.case.patient_id
except AttributeError:
return ""
@property
def status(self):
return "Closed" if self.case['closed'] else "Open"
@property
def mother_name(self):
return self.case.get('name_mother', '')
@property
def filter_date(self):
return self.case.get('filter_date', '')
@property
def address(self):
return self.case.get('house_address', '')
@property
@memoized
def allocated_to(self):
if self.status == "Closed":
close_action = [CommCareCaseAction.wrap(a) for a in self.case['actions'] if a['action_type'] ==
const.CASE_ACTION_CLOSE][0]
CATI_FOLLOW_UP_FORMS = (
"http://openrosa.org/formdesigner/A5B08D8F-139D-46C6-9FDF-B1AD176EAE1F",
)
if close_action.xform.xmlns in CATI_FOLLOW_UP_FORMS:
return 'CATI'
else:
return 'Field'
else:
follow_up_type = self.case.get('follow_up_type', '')
house_number = self.case.get('phone_house_number', '')
husband_number = self.case.get('phone_husband_number', '')
mother_number = self.case.get('phone_mother_number', '')
asha_number = self.case.get('phone_asha_number', '')
if follow_up_type != 'field_follow_up' and (house_number or
husband_number or mother_number or asha_number):
return 'CATI'
else:
return 'Field'
@property
def allocated_start(self):
try:
delta = datetime.timedelta(days=8 if self.allocated_to == 'CATI' else 13)
return short_date_format(self.parse_date(self.case['filter_date']) + delta)
except AttributeError:
return ""
@property
def allocated_end(self):
try:
delta = datetime.timedelta(days=13 if self.allocated_to == 'CATI' else 23)
return short_date_format(self.parse_date(self.case['filter_date']) + delta)
except AttributeError:
return ""
@property
def outside_allocated_period(self):
if not ('filter_date' in self.case and
isinstance(self.parse_date(self.case['filter_date']), datetime.datetime)):
return ""
if self.case['closed_on']:
compare_date = self.parse_date(self.case['closed_on']).date()
else:
compare_date = datetime.date.today()
return 'Yes' if (compare_date - self.parse_date(self.case['filter_date']).date()).days > 23 else 'No'
class CaseReport(CaseListReport, CustomProjectReport, HSPHSiteDataMixin,
DatespanMixin):
name = 'Case Report'
slug = 'case_report'
fields = (
'corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.SiteField',
#'hsph.fields.AllocatedToFilter',
'corehq.apps.reports.filters.select.SelectOpenCloseFilter',
)
default_case_type = 'birth'
@property
@memoized
def case_es(self):
return ReportCaseES(self.domain)
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn("Region"),
DataTablesColumn("District"),
DataTablesColumn("Site"),
DataTablesColumn("Patient ID"),
DataTablesColumn("Status"),
DataTablesColumn("Mother Name"),
DataTablesColumn("Date of Delivery or Admission"),
DataTablesColumn("Address of Patient"),
DataTablesColumn("Allocated To"),
DataTablesColumn("Allocated Start"),
DataTablesColumn("Allocated End"),
DataTablesColumn("Outside Allocated Period")
)
headers.no_sort = True
return headers
@property
def case_filter(self):
#allocated_to = self.request_params.get(AllocatedToFilter.slug, '')
region_id = self.request_params.get('hsph_region', '')
district_id = self.request_params.get('hsph_district', '')
site_num = str(self.request_params.get('hsph_site', ''))
filters = [{
'range': {
'opened_on': {
"from": self.datespan.startdate_param_utc,
"to": self.datespan.enddate_param_utc
}
}
}]
#if allocated_to:
#filters.append({'term': {'allocated_to': allocated_to}})
if site_num:
filters.append({'term': {'site_number.#value': site_num.lower()}})
if district_id:
filters.append({'term': {'district_id.#value': district_id.lower()}})
if region_id:
filters.append({'term': {'region_id.#value': region_id.lower()}})
return {'and': filters} if filters else {}
#def allocated_to(self):
#if self.status == "Closed":
#close_action = [a for a in self.case.actions if a.action_type ==
#const.CASE_ACTION_CLOSE][0]
#CATI_FOLLOW_UP_FORMS = (
#"http://openrosa.org/formdesigner/A5B08D8F-139D-46C6-9FDF-B1AD176EAE1F",
#)
#if close_action.xform.xmlns in CATI_FOLLOW_UP_FORMS:
#return 'CATI'
#else:
#return 'Field'
#else:
#follow_up_type = getattr(self.case, 'follow_up_type', '')
#house_number = getattr(self.case, 'phone_house_number', '')
#husband_number = getattr(self.case, 'phone_husband_number', '')
#mother_number = getattr(self.case, 'phone_mother_number', '')
#asha_number = getattr(self.case, 'phone_asha_number', '')
#if follow_up_type != 'field_follow_up' and (house_number or
#husband_number or mother_number or asha_number):
#return 'CATI'
#else:
#return 'Field'
@property
def shared_pagination_GET_params(self):
params = super(CaseReport, self).shared_pagination_GET_params
slugs = [
AllocatedToFilter.slug,
'hsph_region',
'hsph_district',
'hsph_site',
'startdate',
'enddate'
]
for slug in slugs:
params.append({
'name': slug,
'value': self.request_params.get(slug, '')
})
return params
@property
def rows(self):
case_displays = (HSPHCaseDisplay(self, restore_property_dict(self.get_case(case)))
for case in self.es_results['hits'].get('hits', []))
for disp in case_displays:
yield [
disp.region,
disp.district,
disp.site,
disp.patient_id,
disp.status,
disp.case_link,
disp.filter_date,
disp.address,
disp.allocated_to,
disp.allocated_start,
disp.allocated_end,
disp.outside_allocated_period
]
class CallCenterFollowUpSummaryReport(GenericTabularReport,
CustomProjectReport, ProjectReportParametersMixin, DatespanMixin,
HSPHSiteDataMixin):
name = "Call Center Follow Up Summary"
slug = "hsph_dcc_followup_summary"
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'hsph.fields.SiteField']
@property
def headers(self):
return DataTablesHeader(DataTablesColumn("Region"),
DataTablesColumn("District"),
DataTablesColumn("Site"),
DataTablesColumn("Total Number of Birth events with contact details"),
DataTablesColumn("Total number of births followed up"),
DataTablesColumn("Number of cases followed up at day 8th"),
DataTablesColumn("Number of cases followed up between day 9th to 13th"),
DataTablesColumn("Number of cases with contact details open at day 14th"),
DataTablesColumn("Number of cases with contact details transferred to Field management for home Visits"),
DataTablesColumn("Number of cases where no out comes could be recorded"))
@property
def rows(self):
db = get_db()
rows = []
if not self.selected_site_map:
self._selected_site_map = self.site_map
keys = self.generate_keys()
for key in keys:
data = db.view("hsph/dcc_followup_summary_old",
reduce=True,
startkey=key+[self.datespan.startdate_param_utc],
endkey=key+[self.datespan.enddate_param_utc]
).all()
for item in data:
item = item.get('value')
if item:
region, district, site = self.get_site_table_values(key)
now = self.datespan.enddate
day14 = now-datetime.timedelta(days=14)
day14 = day14.strftime("%Y-%m-%d")
day14_data = db.view("hsph/cases_by_birth_date_old",
reduce=True,
startkey=key,
endkey=key+[day14]
).first()
still_open_at_day14 = day14_data.get('value', 0) if day14_data else 0
rows.append([
region,
district,
site,
item.get('totalBirthsWithContact', 0),
item.get('totalBirths', 0),
item.get('numCasesFollowedUpByDay8', 0),
item.get('numCasesFollowedUpBetweenDays9and13', 0),
still_open_at_day14,
item.get('numCasesWithContactTransferredToField', 0),
item.get('numCasesWithNoOutcomes', 0)
])
return rows
|
py | 1a3a83880b713de21e508d59bbc67adc29ca52e4 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class IPConfiguration(SubResource):
"""IPConfiguration.
:param id: Resource ID.
:type id: str
:param private_ip_address: The private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: The private IP allocation method.
Possible values are 'Static' and 'Dynamic'. Possible values include:
'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2016_09_01.models.IPAllocationMethod
:param subnet: The reference of the subnet resource.
:type subnet: ~azure.mgmt.network.v2016_09_01.models.Subnet
:param public_ip_address: The reference of the public IP resource.
:type public_ip_address:
~azure.mgmt.network.v2016_09_01.models.PublicIPAddress
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, private_ip_address=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state=None, name=None, etag=None):
super(IPConfiguration, self).__init__(id=id)
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
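# Illustrative sketch (not part of the generated SDK): constructing an
# IPConfiguration by hand. In practice these objects are usually returned by the
# NetworkManagementClient rather than built directly, and the subnet/public IP
# references (omitted here) would point at existing Azure resource IDs.
def _example_ip_configuration():
    return IPConfiguration(
        name='ipconfig1',
        private_ip_allocation_method='Dynamic')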
|
py | 1a3a850b2fc72f574daeef66e488b20931cbee0e | """
ViP training and evaluating script
This script is modified from pytorch-image-models by Ross Wightman (https://github.com/rwightman/pytorch-image-models/)
It was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import models
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import load_checkpoint, create_model, resume_checkpoint, convert_splitbn_model
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
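# Illustrative sketch (not part of the original script): how the two-stage parse
# described above is typically wired. Defaults loaded from the YAML file are
# pushed into the main parser (defined below) before the remaining CLI arguments
# are parsed. The function name and return shape are assumptions, not the
# script's own API.
def _example_parse_args_with_config():
    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        with open(args_config.config, 'r') as f:
            cfg = yaml.safe_load(f)
            parser.set_defaults(**cfg)
    args = parser.parse_args(remaining)
    # Cache the resolved arguments as YAML text so the run can be reproduced.
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text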
parser = argparse.ArgumentParser(description='ViP Training and Evaluating')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--model', default='vip_s7', type=str, metavar='MODEL',
                    help='Name of model to train (default: "vip_s7")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--eval_checkpoint', default='', type=str, metavar='PATH',
help='path to eval checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
help='number of label classes (default: 1000)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=224, metavar='N',
                    help='Image patch size (default: 224)')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
help='ratio of validation batch size to training batch size (default: 1)')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
                    help='weight decay (default: 0.05)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                    help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                    help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
                    help='number of epochs to train (default: 300)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=10, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy, e.g. "v0" or "original" (default: "rand-m9-mstd0.5-inc1")')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.8,
                    help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
                    help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.0)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                    help='Drop path rate (default: 0.1)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.99996,
                    help='decay factor for model weights moving average (default: 0.99996)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=8, metavar='N',
                    help='how many training processes to use (default: 8)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
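# Hedged example (added): the YAML file given via -c/--config only pre-sets parser defaults,
# so flags passed explicitly on the command line still take precedence. The keys below are
# illustrative values for arguments defined above (argparse dest names, not flag spellings):
#
#   model: vip_s7
#   batch_size: 128
#   lr: 5.0e-4
#   epochs: 300
#   mixup: 0.8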
def main():
setup_default_logging()
args, args_text = _parse_args()
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed and args.num_gpu > 1:
_logger.warning(
            'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.')
args.num_gpu = 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.num_gpu = 1
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
assert args.rank >= 0
if args.distributed:
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
_logger.info('Training with a single process on %d GPUs.' % args.num_gpu)
torch.manual_seed(args.seed + args.rank)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
checkpoint_path=args.initial_checkpoint,
img_size=args.img_size)
if args.local_rank == 0:
_logger.info('Model %s created, param count: %d' %
(args.model, sum([m.numel() for m in model.parameters()])))
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
use_amp = None
if args.amp:
# for backwards compat, `--amp` arg tries apex before native amp
if has_apex:
args.apex_amp = True
elif has_native_amp:
args.native_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
_logger.warning("Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6")
if args.num_gpu > 1:
if use_amp == 'apex':
_logger.warning(
'Apex AMP does not work well with nn.DataParallel, disabling. Use DDP or Torch AMP.')
use_amp = None
model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
assert not args.channels_last, "Channels last not supported with DP, use DDP."
else:
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
optimizer = create_optimizer(args, model)
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume=args.resume)
if args.distributed:
if args.sync_bn:
assert not args.split_bn
try:
if has_apex and use_amp != 'native':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
            except Exception as e:
                _logger.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1 (%s)', e)
if has_apex and use_amp != 'native':
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
dataset_train = create_dataset(
args.dataset, root=args.data, split=args.train_split, is_training=True, batch_size=args.batch_size)
dataset_eval = create_dataset(
args.dataset, root=args.data, split=args.val_split, is_training=False, batch_size=args.batch_size)
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
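    # Note (added): with the prefetcher enabled, mixup/cutmix happens inside the collate
    # function (FastCollateMixup) on the CPU batch; otherwise Mixup is applied per batch in
    # train_epoch right after the tensors are moved to the GPU (see the mixup_fn call there).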
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader
)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
if args.eval_checkpoint: # evaluate the model
load_checkpoint(model, args.eval_checkpoint, args.model_ema)
val_metrics = validate(model, loader_eval, validate_loss_fn, args)
print(f"Top-1 accuracy of the model is: {val_metrics['top1']:.1f}%")
return
saver = None
output_dir = ''
if args.local_rank == 0:
output_base = args.output if args.output else './output'
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
args.model,
str(data_config['input_size'][-1])
])
output_dir = get_outdir(output_base, 'train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try: # train the model
for epoch in range(start_epoch, num_epochs):
if args.distributed:
loader_train.sampler.set_epoch(epoch)
train_metrics = train_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.ema, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
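    # Note (added): top1_m / top5_m are allocated here but never updated during training;
    # only the loss meter contributes to the metrics returned by this function.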
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
loss = loss_fn(output, target)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters(), create_graph=second_order)
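        # Note (added): ApexScaler/NativeScaler wrap the scaled backward pass, optional
        # gradient clipping and optimizer.step(), so the manual branch below only runs
        # in plain float32 training.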
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.local_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
if __name__ == '__main__':
main()
|
py | 1a3a85e6379c55b537820b542fdb73d6dfbc77c6 | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
from apricot import TestWithServers
from daos_perf_utils import DaosPerfCommand
from command_utils_base import CommandFailure
class DaosPerfBase(TestWithServers):
"""Base test cases for the daos_perf command.
Test Class Description:
Tests daos_perf with different configurations.
:avocado: recursive
"""
def run_daos_perf(self):
"""Run the daos_perf command."""
# Create pool
self.add_pool()
# Create container
self.add_container(self.pool)
# Obtain the number of processes listed with the daos_perf options
processes = self.params.get("processes", "/run/daos_perf/*")
# Use the dmg_control yaml
dmg_config_file = self.get_dmg_command().yaml.filename
# Create the daos_perf command from the test yaml file
daos_perf = DaosPerfCommand(self.bin)
daos_perf.get_params(self)
daos_perf.pool_uuid.update(self.pool.uuid)
daos_perf.cont_uuid.update(self.container.uuid)
daos_perf.dmg_config_file.update(dmg_config_file)
self.log.info("daos_perf command: %s", str(daos_perf))
daos_perf_env = daos_perf.get_environment(self.server_managers[0])
# Create the orterun command
self.job_manager.assign_hosts(self.hostlist_clients, self.workdir, None)
self.job_manager.assign_processes(processes)
self.job_manager.assign_environment(daos_perf_env)
self.job_manager.job = daos_perf
self.log.info("orterun command: %s", str(self.job_manager))
# Run the daos_perf command and check for errors
try:
return self.job_manager.run()
except CommandFailure as error:
self.log.error("DAOS PERF Failed: %s", str(error))
self.fail("Test was expected to pass but it failed.\n")
|
py | 1a3a85f7b2c94d4bb9d6008412cddf286ce8bbc9 | import numpy as np
from random import shuffle
from scipy.sparse import csr_matrix
class SVM:
def __init__(self, learning_rate=1, regularization_loss_tradeoff=1):
self.learning_rate = learning_rate
self.regularization_loss_tradeoff = regularization_loss_tradeoff
def train(self, train, labels, epochs):
w = csr_matrix((1, train[0].shape[1]), dtype=np.float128)
for _ in range(epochs):
[w] = self.train_one_epoch(train, labels, w)
return w
def train_one_epoch(self, train, labels, w):
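        # Note (added): one pass of stochastic sub-gradient descent for the hinge loss.
        # If y * (w . x) <= 1 the example lies on or inside the margin, so the update combines
        # the shrinkage (1 - lr) * w with the loss term lr * tradeoff * y * x; otherwise only
        # the shrinkage w <- (1 - lr) * w is applied.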
lr = self.learning_rate
tradeoff = self.regularization_loss_tradeoff
order = [i for i in range(train.shape[0])]
shuffle(order)
w_transpose = w.transpose()
for i in order:
x = train[i]
y = labels.toarray()[0][i]
if (x.dot(w_transpose) * y)[0, 0] <= 1:
w = w * (1 - lr) + x * (lr * tradeoff * y)
else:
w = w * (1 - lr)
w_transpose = w.transpose()
return [w]
class SVMPredictor:
def __init__(self, w):
self.w = w.transpose()
def predict(self, x):
if x.dot(self.w)[0, 0] < 0:
return -1
else:
return 1
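# Hedged usage sketch (added): assumes binary labels in {-1, +1} stored as a 1 x n_samples
# sparse row vector and features as a CSR matrix, on a platform where NumPy exposes float128
# (required by SVM.train above). All values are illustrative.
if __name__ == '__main__':
    X = csr_matrix(np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]))
    y = csr_matrix(np.array([[1.0, -1.0, 1.0]]))
    clf = SVM(learning_rate=0.01, regularization_loss_tradeoff=1.0)
    w = clf.train(X, y, epochs=10)
    predictor = SVMPredictor(w)
    print(predictor.predict(X[0]))  # prints -1 or +1 for the first training example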
|
py | 1a3a8696d354d93e1bbeb17c844a93ba07a05127 | x = 0
n = 3
b = True
if type(n) == int:
if type(b) == bool:
if type(type(n)) == type:
x = 1
else:
x = 10000
else:
x = 10000
else:
x = 100000
assert x == 1
|
py | 1a3a87e6da975a14037cfd70b471495325f337ef | from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-negativeInteger-maxLength-2-NS"
@dataclass
class NistschemaSvIvListNegativeIntegerMaxLength2:
class Meta:
name = "NISTSchema-SV-IV-list-negativeInteger-maxLength-2"
namespace = "NISTSchema-SV-IV-list-negativeInteger-maxLength-2-NS"
value: List[int] = field(
default_factory=list,
metadata={
"max_length": 6,
"tokens": True,
}
)
|
py | 1a3a889558be7fc396e8e44aef032d9e22f28896 | h3_version = 'v3.4.1'
|
py | 1a3a89152af49311a98abe41745ff4a5089ed1f4 | from flask import Flask
app = Flask(__name__)
from flask_platform import views
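# Note (added): importing the views module after creating `app` is the usual Flask pattern for
# avoiding a circular import; flask_platform/views.py is assumed to do
# `from flask_platform import app` and register its routes on this instance.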
|
py | 1a3a8988c729d38789d18198a866d3a245fb18f9 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import route_target
import route_distinguisher
import duplicate_mac_timer
import vni
class evpn_instance(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/evpn-instance. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: EVPN instance config
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__instance_name','__route_target','__route_distinguisher','__df_delay_timer','__duplicate_mac_timer','__vni',)
_yang_name = 'evpn-instance'
_rest_name = 'evpn-instance'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__route_target = YANGDynClass(base=route_target.route_target, is_container='container', presence=False, yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
self.__df_delay_timer = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..10']}), is_leaf=True, yang_name="df-delay-timer", rest_name="df-delay-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DF delay timer'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='delay-timer', is_config=True)
self.__vni = YANGDynClass(base=vni.vni, is_container='container', presence=False, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VNI configuration', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
self.__instance_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="instance-name", rest_name="instance-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='evpn-type', is_config=True)
self.__route_distinguisher = YANGDynClass(base=route_distinguisher.route_distinguisher, is_container='container', presence=False, yang_name="route-distinguisher", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route distinguisher', u'cli-compact-syntax': None, u'alt-name': u'rd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
self.__duplicate_mac_timer = YANGDynClass(base=duplicate_mac_timer.duplicate_mac_timer, is_container='container', presence=False, yang_name="duplicate-mac-timer", rest_name="duplicate-mac-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Duplicate mac timer', u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'evpn-instance']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'evpn-instance']
def _get_instance_name(self):
"""
Getter method for instance_name, mapped from YANG variable /rbridge_id/evpn_instance/instance_name (evpn-type)
"""
return self.__instance_name
def _set_instance_name(self, v, load=False):
"""
Setter method for instance_name, mapped from YANG variable /rbridge_id/evpn_instance/instance_name (evpn-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_instance_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_instance_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="instance-name", rest_name="instance-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='evpn-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """instance_name must be of a type compatible with evpn-type""",
'defined-type': "brocade-bgp:evpn-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="instance-name", rest_name="instance-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='evpn-type', is_config=True)""",
})
self.__instance_name = t
if hasattr(self, '_set'):
self._set()
def _unset_instance_name(self):
self.__instance_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="instance-name", rest_name="instance-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='evpn-type', is_config=True)
def _get_route_target(self):
"""
Getter method for route_target, mapped from YANG variable /rbridge_id/evpn_instance/route_target (container)
"""
return self.__route_target
def _set_route_target(self, v, load=False):
"""
Setter method for route_target, mapped from YANG variable /rbridge_id/evpn_instance/route_target (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_target is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_target() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route_target.route_target, is_container='container', presence=False, yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_target must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route_target.route_target, is_container='container', presence=False, yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__route_target = t
if hasattr(self, '_set'):
self._set()
def _unset_route_target(self):
self.__route_target = YANGDynClass(base=route_target.route_target, is_container='container', presence=False, yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
def _get_route_distinguisher(self):
"""
Getter method for route_distinguisher, mapped from YANG variable /rbridge_id/evpn_instance/route_distinguisher (container)
"""
return self.__route_distinguisher
def _set_route_distinguisher(self, v, load=False):
"""
Setter method for route_distinguisher, mapped from YANG variable /rbridge_id/evpn_instance/route_distinguisher (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_distinguisher is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_distinguisher() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route_distinguisher.route_distinguisher, is_container='container', presence=False, yang_name="route-distinguisher", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route distinguisher', u'cli-compact-syntax': None, u'alt-name': u'rd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_distinguisher must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route_distinguisher.route_distinguisher, is_container='container', presence=False, yang_name="route-distinguisher", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route distinguisher', u'cli-compact-syntax': None, u'alt-name': u'rd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__route_distinguisher = t
if hasattr(self, '_set'):
self._set()
def _unset_route_distinguisher(self):
self.__route_distinguisher = YANGDynClass(base=route_distinguisher.route_distinguisher, is_container='container', presence=False, yang_name="route-distinguisher", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route distinguisher', u'cli-compact-syntax': None, u'alt-name': u'rd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
def _get_df_delay_timer(self):
"""
Getter method for df_delay_timer, mapped from YANG variable /rbridge_id/evpn_instance/df_delay_timer (delay-timer)
YANG Description: DF delay timer
"""
return self.__df_delay_timer
def _set_df_delay_timer(self, v, load=False):
"""
Setter method for df_delay_timer, mapped from YANG variable /rbridge_id/evpn_instance/df_delay_timer (delay-timer)
If this variable is read-only (config: false) in the
source YANG file, then _set_df_delay_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_df_delay_timer() directly.
YANG Description: DF delay timer
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..10']}), is_leaf=True, yang_name="df-delay-timer", rest_name="df-delay-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DF delay timer'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='delay-timer', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """df_delay_timer must be of a type compatible with delay-timer""",
'defined-type': "brocade-bgp:delay-timer",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..10']}), is_leaf=True, yang_name="df-delay-timer", rest_name="df-delay-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DF delay timer'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='delay-timer', is_config=True)""",
})
self.__df_delay_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_df_delay_timer(self):
self.__df_delay_timer = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..10']}), is_leaf=True, yang_name="df-delay-timer", rest_name="df-delay-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DF delay timer'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='delay-timer', is_config=True)
def _get_duplicate_mac_timer(self):
"""
Getter method for duplicate_mac_timer, mapped from YANG variable /rbridge_id/evpn_instance/duplicate_mac_timer (container)
YANG Description: Duplicate mac timer
"""
return self.__duplicate_mac_timer
def _set_duplicate_mac_timer(self, v, load=False):
"""
Setter method for duplicate_mac_timer, mapped from YANG variable /rbridge_id/evpn_instance/duplicate_mac_timer (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_duplicate_mac_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_duplicate_mac_timer() directly.
YANG Description: Duplicate mac timer
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=duplicate_mac_timer.duplicate_mac_timer, is_container='container', presence=False, yang_name="duplicate-mac-timer", rest_name="duplicate-mac-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Duplicate mac timer', u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """duplicate_mac_timer must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=duplicate_mac_timer.duplicate_mac_timer, is_container='container', presence=False, yang_name="duplicate-mac-timer", rest_name="duplicate-mac-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Duplicate mac timer', u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__duplicate_mac_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_duplicate_mac_timer(self):
self.__duplicate_mac_timer = YANGDynClass(base=duplicate_mac_timer.duplicate_mac_timer, is_container='container', presence=False, yang_name="duplicate-mac-timer", rest_name="duplicate-mac-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Duplicate mac timer', u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
def _get_vni(self):
"""
Getter method for vni, mapped from YANG variable /rbridge_id/evpn_instance/vni (container)
"""
return self.__vni
def _set_vni(self, v, load=False):
"""
Setter method for vni, mapped from YANG variable /rbridge_id/evpn_instance/vni (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vni is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vni() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vni.vni, is_container='container', presence=False, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VNI configuration', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vni must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vni.vni, is_container='container', presence=False, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VNI configuration', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__vni = t
if hasattr(self, '_set'):
self._set()
def _unset_vni(self):
self.__vni = YANGDynClass(base=vni.vni, is_container='container', presence=False, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VNI configuration', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
instance_name = __builtin__.property(_get_instance_name, _set_instance_name)
route_target = __builtin__.property(_get_route_target, _set_route_target)
route_distinguisher = __builtin__.property(_get_route_distinguisher, _set_route_distinguisher)
df_delay_timer = __builtin__.property(_get_df_delay_timer, _set_df_delay_timer)
duplicate_mac_timer = __builtin__.property(_get_duplicate_mac_timer, _set_duplicate_mac_timer)
vni = __builtin__.property(_get_vni, _set_vni)
_pyangbind_elements = {'instance_name': instance_name, 'route_target': route_target, 'route_distinguisher': route_distinguisher, 'df_delay_timer': df_delay_timer, 'duplicate_mac_timer': duplicate_mac_timer, 'vni': vni, }
|
py | 1a3a89cf9a55cde3c8db22aeee541c303a110859 | import pygame
import random
import os
pygame.init()
win = pygame.display.set_mode((700, 700))
pygame.display.set_caption("Falling Blocks")
script_dir = os.path.dirname('Obstacles')
rel_path = r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Obstacles"
abs_file_path = os.path.join(script_dir, rel_path)
# loading objects
current_file = r"/finalboulder.png"
boulderimg = pygame.image.load(abs_file_path + current_file)
current_file = r"/betterball.png"
snowballimg = pygame.image.load(abs_file_path + current_file)
current_file = r"/bestrock.png"
rockimg = pygame.image.load(abs_file_path + current_file)
rel_path = r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Powerups"
script_dir = os.path.dirname('Powerups')
abs_file_path = os.path.join(script_dir, rel_path)
# loading powerups
current_file = r"/resizedheart.png"
heartimg = pygame.image.load(abs_file_path + current_file)
current_file = r"/bestgun.png"
gunimg = pygame.image.load(abs_file_path + current_file)
current_file = r'/better_small.png'
side_gun = pygame.image.load(abs_file_path + current_file)
#loading background
abs_file_path = os.path.join('Backgrounds',r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Backgrounds")
bg = pygame.image.load(abs_file_path + r"/background.jpg")
rel_path = r"/Users/alecdewulf/Desktop/Falling-Blocks/Images/Player"
script_dir = os.path.dirname('Player')
abs_file_path = os.path.join(script_dir, rel_path)
# character
char = pygame.image.load(abs_file_path + r'/standing.png')
walkRight = [pygame.image.load(abs_file_path + r'/R1.png'), pygame.image.load(abs_file_path + r'/R2.png'), pygame.image.load(abs_file_path + r'/R3.png'), pygame.image.load(abs_file_path + r'/R4.png'),\
pygame.image.load(abs_file_path + r'/R5.png'), pygame.image.load(abs_file_path + r'/R6.png'), pygame.image.load(abs_file_path + r'/R7.png'), pygame.image.load(abs_file_path + r'/R8.png'), pygame.image.load(abs_file_path + r'/R9.png')]
walkLeft = [pygame.image.load(abs_file_path + r'/L1.png'), pygame.image.load(abs_file_path + r'/L2.png'), pygame.image.load(abs_file_path + r'/L3.png'), pygame.image.load(abs_file_path + r'/L4.png'),\
pygame.image.load(abs_file_path + r'/L5.png'), pygame.image.load(abs_file_path + r'/L6.png'), pygame.image.load(abs_file_path + r'/L7.png'), pygame.image.load(abs_file_path + r'/L8.png'), pygame.image.load(abs_file_path + r'/L9.png')]
clock = pygame.time.Clock()
class Character(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.v = 5
self.left, self.right = False, False
self.standing = True
self.walkCount = 0
self.health = 10
self.hitbox = (self.x + 17, self.y + 11, 29, 52)
self.alive = True
self.shooting = False
def draw(self, win):
if self.alive:
# reset image cycle
if self.walkCount + 1 >= 9:
self.walkCount = 0
# moving
if not(self.standing):
# drawing left walking images
if self.left:
win.blit(walkLeft[self.walkCount], (self.x, self.y))
self.walkCount += 1
elif self.right:
win.blit(walkRight[self.walkCount], (self.x, self.y))
self.walkCount += 1
# not moving
else:
if self.right:
win.blit(walkRight[0], (self.x, self.y))
else:
win.blit(walkLeft[0], (self.x, self.y))
#hitbox
# hitbox[0], hitbox[1] are the coords of the top left of the hitbox
self.hitbox = (self.x + 17, self.y + 11, 29, 52)
#pygame.draw.rect(win, (255, 0, 0), self.hitbox, 2)
#health bar
pygame.draw.rect(win, (255,0,0), (self.hitbox[0], self.hitbox[1] - 20, 50, 10))
pygame.draw.rect(win, (0,128,0), (self.hitbox[0], self.hitbox[1] - 20, 50 - ( 5* (10-self.health)), 10))
# gun
if self.shooting:
win.blit(side_gun, (self.x + 20, self.y + 40))
# circle at start of rect
# pygame.draw.circle(win, (255,0,0), (self.hitbox[0], self.hitbox[1]), 20)
else:
#game over
font = pygame.font.SysFont('comicsans', 30, True)
over = font.render('GAME OVER', 1, (0,0,0))
win.blit(over, (290, 350))
# abstract block class
class Block(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.v = 10
self.falling = True
def draw(self, win):
# not off the screen
if self.y < 700:
win.blit(self.image, (self.x, self.y))
self.y += self.v
else:
self.falling = False
class Rock(Block):
def __init__(self, x, y):
Block.__init__(self, x, y)
self.v = 10
self.image = rockimg
self.hitbox = (self.x, self.y + 10, 90, 60)
self.id = "rock"
def draw(self, win):
Block.draw(self,win)
# hitbox
self.hitbox = (self.x, self.y + 10, 90, 60)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Snowball(Block):
def __init__(self, x, y):
Block.__init__(self, x, y)
self.v = 20
self.image = snowballimg
self.hitbox = (self.x, self.y - 10, 15, 15)
self.id = "snowball"
def draw(self,win):
Block.draw(self, win)
self.hitbox = (self.x, self.y - 10, 30, 30)
# pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Boulder(Block):
def __init__(self, x, y):
Block.__init__(self, x, y)
self.v = 5
self.image = boulderimg
self.hitbox = (self.x, self.y - 20, 200 ,200)
self.id = "boulder"
def draw(self, win):
Block.draw(self, win)
self.hitbox = (self.x, self.y - 5, 135, 135)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Powerup(object):
def __init__(self, x):
self.x = x
self.y = 635
self.image = None
self.time = 270
self.appear = True
def draw(self, win):
if self.appear:
if self.time > 0:
win.blit(self.image, (self.x, self.y))
else:
self.appear = False
self.time -= 1
class Heart(Powerup):
def __init__(self, x):
Powerup.__init__(self, x)
self.increase = 1
self.image = heartimg
self.id = "heart"
self.hitbox = (self.x, self.y, 30, 30)
def draw(self, win):
Powerup.draw(self, win)
self.hitbox = (self.x, self.y, 30, 30)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Gun(Powerup):
def __init__(self, x):
Powerup.__init__(self, x)
self.image = gunimg
self.id = "gun"
self.hitbox = (self.x, self.y, 30, 30)
def draw(self, win):
Powerup.draw(self, win)
self.hitbox = (self.x, self.y, 30, 30)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
class Bullet(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.hitbox = (self.x, self.y, 30, 30)
self.appear = True
self.v = 8
def draw(self, win):
if self.appear:
pygame.draw.circle(win, (0,0,0), (self.x, self.y), 7)
self.hitbox = (self.x - 10, self.y - 10, 20, 20)
#pygame.draw.rect(win, (255,0,0), self.hitbox, 2)
def drawWindow():
win.blit(bg, (0,0))
man.draw(win)
# drawing falling blocks
if man.alive:
for o in objects:
o.draw(win)
for p in powerups:
p.draw(win)
for b in bullets:
b.draw(win)
# displaying score
font = pygame.font.SysFont('comicsans', 30, True)
text = font.render('Score: ' + str(score), 1, (0,0,0))
win.blit(text, (550, 10))
else:
# draw the score
font = pygame.font.SysFont('comicsans', 30, True)
text = font.render('Score: ' + str(score), 1, (0,0,0))
win.blit(text, (290, 400))
# update the display
pygame.display.update()
man = Character(300, 600, 64, 64)
gun = Gun(400)
print(type(man))
objects, powerups, bullets = [], [gun] , []
run, hit = True, False
max_length, rounds, score, cooldown, interval = 0, 0, 0, 0, 27
shoot_cooldown, shoot_time = 0, 0
while run and man.alive:
# set fps
clock.tick(27)
# find the center of the man
center_x = (man.hitbox[0] + (man.hitbox[0] + man.hitbox[2]))//2
center_y = (man.hitbox[1] + (man.hitbox[1] + man.hitbox[3]))//2
# screen being closed
for e in pygame.event.get():
if e.type == pygame.QUIT:
run = False
# keys list
keys = pygame.key.get_pressed()
# moving left
if keys[pygame.K_LEFT] and man.x > 0:
man.x -= man.v
man.left, man.right = True, False
man.standing = False
# moving right
elif keys[pygame.K_RIGHT] and man.x < 700 - man.width:
man.x += man.v
man.right, man.left = True, False
man.standing = False
# standing
else:
man.standing = True
man.walkCount = 0
#shooting controls
if man.shooting and keys[pygame.K_SPACE] and len(bullets) <= 5 and shoot_cooldown >= 10:
shoot_cooldown = 0
new_bullet = Bullet(man.x + 30, man.y)
bullets.append(new_bullet)
# change bullet position or delete them
for bullet in bullets:
if bullet.y > 0:
bullet.y -= bullet.v
else:
bullets.pop(bullets.index(bullet))
# check for bullet collisions
for bullet in bullets:
for o in objects:
if bullet.x >= o.hitbox[0] and bullet.x <= o.hitbox[0] + o.hitbox[2]:
# check the y
if bullet.y >= o.hitbox[1] and bullet.y <= o.hitbox[1] + o.hitbox[3]:
objects.pop(objects.index(o))
bullets.pop(bullets.index(bullet))
# stop checking this bullet once it has been removed, so a later
# bullets.index() call cannot fail on a bullet that is already gone
break
#check rocks
for o in objects:
if o.falling == False:
objects.pop(objects.index(o))
score += 1
# check powerups
for p in powerups:
if p.appear == False:
powerups.pop(powerups.index(p))
#check for a collision
for r in objects:
# check the x
if center_x >= r.hitbox[0] and center_x <= r.hitbox[0] + r.hitbox[2]:
# check the y
if center_y >= r.hitbox[1] and center_y <= r.hitbox[1] + r.hitbox[3]:
if r.id == "boulder":
if man.health - 2 <= 0:
man.alive = False
else:
print("HIT")
r.falling = False
man.health -= 2
# not a boulder
elif man.health - 1 == 0:
man.alive = False
else:
print('hit')
r.falling = False
man.health -= 1
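# Note: the check above is a simple point-in-rectangle test -- the centre of
# the player's hitbox (center_x, center_y) is compared against each object's
# hitbox (left, top, width, height). For example, with a rock hitbox of
# (100, 200, 90, 60), a hit is registered when 100 <= center_x <= 190 and
# 200 <= center_y <= 260.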
# generate new objects
x = random.randint(1,10)
if x >= 5 and len(objects) < 5 + max_length and cooldown >= 20:
x = random.randint(1, 21)
xpos = random.randint(0, 700)
if x == 10 or x == 15:
new_snowball = Snowball(xpos, 0)
objects.append(new_snowball)
elif x == 20:
new_boulder = Boulder(xpos, 0)
objects.append(new_boulder)
else:
newrock = Rock(xpos, 0)
objects.append(newrock)
cooldown = 0
# generate new powerups
x = random.randint(1, 100)
if score > 50 and x == 25 and len(powerups) == 0:
choice = random.randint(1, 100)
xpos = random.randint(0,700)
if choice >= 50:
newheart = Heart(xpos)
powerups.append(newheart)
else:
newgun = Gun(xpos)
powerups.append(newgun)
# check for picking up powerup
for p in powerups:
if center_x >= p.hitbox[0] and center_x <= p.hitbox[0] + p.hitbox[2]:
# check the y
if center_y >= p.hitbox[1] and center_y <= p.hitbox[1] + p.hitbox[3]:
if p.id == "heart":
if man.health < 10:
man.health += 1
elif p.id == "gun":
man.shooting = True
# reset the shoot time
shoot_time = 135
# picked up a powerup
p.appear = False
# check for the gun being used up
if shoot_time == 0:
man.shooting = False
# draw the scene
drawWindow()
# increment the cooldown by a tenth of the score after 10 objects
# so that difficulty increases over time
if score < 10:
cooldown += 1
else:
cooldown += int(score * 0.1)
# add to the amount of allowed objects as time goes on
if rounds == 100 and max_length <= 10:
max_length += 1
rounds = 0
# increment variables
interval += 1
shoot_cooldown += 1
shoot_time -= 1
rounds += 1
rel_path = r"C:\Users\Owner\Desktop\Falling-Blocks\Textfiles"
script_dir = os.path.dirname("Textfiles")
abs_file_path = os.path.join(script_dir, rel_path)
# print highscores
highscores = open(abs_file_path + r'\highscores.txt', 'r')
top = int(highscores.read())
print("Current highscore is ", top)
highscores.close()
hs = open(abs_file_path + r'\highscores.txt', 'w')
if score > top:
print("Congratulations! You have the new highscore")
hs.write(str(score))
else:
# keep the old highscore, since opening in 'w' mode truncates the file
hs.write(str(top))
hs.close()
run = True
# game over screen
while run and not(man.alive):
# screen being closed
for e in pygame.event.get():
if e.type == pygame.QUIT:
run = False
drawWindow()
pygame.quit()
|
py | 1a3a89f5ddce514cbfc341c81b193b3cc874209d | """Handle presentation exchange information interface with non-secrets storage."""
from marshmallow import fields
from ....messaging.models.base_record import BaseRecord, BaseRecordSchema
class PresentationExchange(BaseRecord):
"""Represents a presentation exchange."""
class Meta:
"""PresentationExchange metadata."""
schema_class = "PresentationExchangeSchema"
RECORD_TYPE = "presentation_exchange"
RECORD_ID_NAME = "presentation_exchange_id"
WEBHOOK_TOPIC = "presentations"
LOG_STATE_FLAG = "debug.presentations"
TAG_NAMES = {"thread_id"}
INITIATOR_SELF = "self"
INITIATOR_EXTERNAL = "external"
STATE_REQUEST_SENT = "request_sent"
STATE_REQUEST_RECEIVED = "request_received"
STATE_PRESENTATION_SENT = "presentation_sent"
STATE_PRESENTATION_RECEIVED = "presentation_received"
STATE_VERIFIED = "verified"
def __init__(
self,
*,
presentation_exchange_id: str = None,
connection_id: str = None,
thread_id: str = None,
initiator: str = None,
state: str = None,
presentation_request: dict = None,
presentation: dict = None,
verified: str = None,
error_msg: str = None,
**kwargs
):
"""Initialize a new PresentationExchange."""
super().__init__(presentation_exchange_id, state, **kwargs)
self.connection_id = connection_id
self.thread_id = thread_id
self.initiator = initiator
self.state = state
self.presentation_request = presentation_request
self.presentation = presentation
self.verified = verified
self.error_msg = error_msg
@property
def presentation_exchange_id(self) -> str:
"""Accessor for the ID associated with this exchange."""
return self._id
@property
def record_value(self) -> dict:
"""Accessor for JSON record value generated for this presentation exchange."""
return {
prop: getattr(self, prop)
for prop in (
"connection_id",
"initiator",
"presentation_request",
"presentation",
"error_msg",
"verified",
"state",
)
}
class PresentationExchangeSchema(BaseRecordSchema):
"""Schema for serialization/deserialization of presentation exchange records."""
class Meta:
"""PresentationExchangeSchema metadata."""
model_class = PresentationExchange
presentation_exchange_id = fields.Str(required=False)
connection_id = fields.Str(required=False)
thread_id = fields.Str(required=False)
initiator = fields.Str(required=False)
state = fields.Str(required=False)
presentation_request = fields.Dict(required=False)
presentation = fields.Dict(required=False)
verified = fields.Str(required=False)
error_msg = fields.Str(required=False)
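# A minimal usage sketch (not part of the original module): constructing a
# record for a received presentation request. `serialize()` is assumed to be
# provided by the BaseRecord/BaseRecordSchema machinery imported above.
#
# record = PresentationExchange(
#     connection_id="conn-123",
#     thread_id="thread-456",
#     initiator=PresentationExchange.INITIATOR_EXTERNAL,
#     state=PresentationExchange.STATE_REQUEST_RECEIVED,
#     presentation_request={"name": "proof-request"},
# )
# print(record.serialize())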
|
py | 1a3a8a4883d8beb84181609740d2b836f548bc2c | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid import core, unique_name
from functools import reduce
from paddle.distributed.fleet.meta_optimizers.common import is_loss_grad_op, is_backward_op, is_optimizer_op
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
import re
import os
def check_broadcast(block):
"""
If a var is broadcast, there should be a sync_comm op before
the var is used; if not, raise an error.
If the broadcast var has a fill_constant op, the fill_constant
op should come before the broadcast op, and before a
sync_calc op. Otherwise, raise an error.
Broadcast ops belonging to inner parallelism (e.g. Megatron) are ignored and skipped.
"""
broadcast_vars = {}
for idx, op in enumerate(block.ops):
if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name:
if var_name in broadcast_vars:
raise ValueError("var_name areadly exist: {}"
"the old pos is {}, the new pos is {}".
format(var_name, broadcast_vars[
var_name]["broadcast_pos"], idx))
broadcast_vars[var_name] = {
"fill_constant_pos": -1,
"broadcast_pos": idx,
}
for idx, op in enumerate(block.ops):
if op.type == "fill_constant":
var_name = op.desc.output_arg_names()[0]
if var_name in broadcast_vars:
broadcast_vars[var_name]["fill_constant_pos"] = idx
continue
last_sync_comm_op_idx = -1
last_sync_calc_op_idx = -1
for idx, op in enumerate(block.ops):
if op.type == "c_sync_comm_stream":
last_sync_comm_op_idx = idx
continue
if op.type == "c_sync_calc_stream":
last_sync_calc_op_idx = idx
continue
if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name:
if broadcast_vars[var_name]["fill_constant_pos"] != -1:
assert (last_sync_calc_op_idx != -1)
assert (broadcast_vars[var_name]["fill_constant_pos"] <
last_sync_calc_op_idx)
assert (last_sync_calc_op_idx < idx)
continue
for input_name in op.desc.input_arg_names():
if input_name in broadcast_vars:
assert (broadcast_vars[input_name]["broadcast_pos"] != -1)
assert (broadcast_vars[input_name]["broadcast_pos"] <
last_sync_comm_op_idx)
assert (last_sync_comm_op_idx < idx)
return
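# Illustrative ordering (not part of the original file) that check_broadcast
# accepts for a var named "w@BroadCast":
#   fill_constant(w@BroadCast) -> c_sync_calc_stream -> c_broadcast(w@BroadCast)
#   -> c_sync_comm_stream -> any op that reads w@BroadCast
# Any other ordering trips one of the assertions above.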
def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1):
"""
the op order should be:
grad:
- 0: op that generates the Var
- 1: sync_calc
- 2: reduce_sum_sharding (allreduce --> reduce)
- 3: sync_comm
- 4: allreduce_sum_dp (dp_grads)
- 5: sync_comm (dp_grads)
- 6: op that uses the Var (dp_grads & sum)
allreduce ops belonging to inner parallelism (e.g. Megatron) are ignored and skipped.
"""
vars_status = {}
dp_grads_status = {}
idx_last_grad_allreduce = -1
idx_amp_allreduce = -1
idx_gradient_clip_allreduce = -1
for idx, op in enumerate(block.ops):
# sharding use both allreduce and reduce to sync grad
if op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False:
ring_id = op.desc.attr("ring_id")
var_name = op.desc.input_arg_names()[0]
param = var_name.split("@")[0]
assert 'sum' in var_name or ("@GRAD" in var_name)
if 'sum' in var_name or (not shard.has_param(param)):
vars_status[var_name] = -1
else:
dp_grads_status[var_name] = -1
if ring_id != sharding_ring_id:
assert shard.has_param(param)
assert ring_id == dp_ring_id
if "sum" in var_name:
idx_amp_allreduce = idx
elif "@GRAD":
idx_last_grad_allreduce = idx
if op.type == "c_allreduce_max":
idx_gradient_clip_allreduce = idx
for op in block.ops:
if op.type == "c_sync_calc_stream":
for var_name in vars_status:
if var_name in vars_status and vars_status[var_name] == 0:
vars_status[var_name] = 1
for var_name in dp_grads_status:
if var_name in dp_grads_status and dp_grads_status[
var_name] == 0:
dp_grads_status[var_name] = 1
# check sharding allreduce and reduce but skip megatron allreduce
elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False:
var_name = op.desc.input_arg_names()[0]
ring_id = op.desc.attr("ring_id")
if ring_id == sharding_ring_id:
assert op.type == "c_reduce_sum", "Grad in Sharding group should be reduce rather than allreduce"
if var_name in vars_status:
_status = vars_status[var_name]
else:
_status = dp_grads_status[var_name]
if _status == -1:
raise ValueError("{} is not generated, but you are"
"trying to all-reduce it".format(
var_name))
if _status == 0:
raise ValueError("There should be a sync_calc op "
"after generate Var: {} and before the"
"c_allreduce_sum op".format(var_name))
assert (_status == 1)
if var_name in vars_status:
vars_status[var_name] = 2
else:
dp_grads_status[var_name] = 2
else:
assert ring_id == dp_ring_id
param = var_name.split("@")[0]
assert shard.has_param(param)
assert dp_grads_status[var_name] == 3
dp_grads_status[var_name] = 4
elif op.type == "c_sync_comm_stream":
var_name = op.desc.input_arg_names()[0]
ring_id = op.desc.attr("ring_id")
if ring_id == sharding_ring_id:
for var_name in op.desc.input_arg_names():
if var_name in vars_status:
assert vars_status[var_name] == 2
vars_status[var_name] = 3
elif var_name in dp_grads_status:
assert dp_grads_status[var_name] == 2
dp_grads_status[var_name] = 3
else:
for var_name in op.desc.input_arg_names():
param = var_name.split("@")[0]
assert ring_id == dp_ring_id
assert shard.has_param(param)
assert dp_grads_status[var_name] == 4
dp_grads_status[var_name] = 5
else:
for input_name in op.desc.input_arg_names():
if input_name in vars_status:
if vars_status[input_name] != 3:
raise ValueError("There should be a sync_comm op "
"after allreduce the Var: {}".format(
input_name))
raise ValueError(
"The reduce output grad [{}] should NOT be be used in Non-root rank.".
format(input_name))
if input_name in dp_grads_status:
if dp_ring_id == -1:
if dp_grads_status[input_name] != 3:
raise ValueError("There should be a sync_comm op "
"after allreduce the Var: {}".
format(input_name))
else:
if dp_grads_status[input_name] != 5:
raise ValueError(
"The grad in shard should be all-reduced and synced "
"twice before usage: {}".format(input_name))
for output_name in op.desc.output_arg_names():
if output_name in vars_status and \
vars_status[output_name] == -1:
vars_status[output_name] = 0
if output_name in dp_grads_status and \
dp_grads_status[output_name] == -1:
dp_grads_status[output_name] = 0
# check sharding with amp
if idx_amp_allreduce != -1:
assert idx_amp_allreduce > idx_last_grad_allreduce
# check sharding with gradient_clip_by_global_norm
if idx_gradient_clip_allreduce != -1:
assert idx_gradient_clip_allreduce > idx_last_grad_allreduce
return
def get_valid_op_role(block, insert_idx):
"""
return OpRole.Forward or OpRole.Backward
"""
op_role = block.ops[insert_idx].attr('op_role')
if (insert_idx >= len(block.ops)) or (
op_role in [int(OpRole.Backward), int(OpRole.Optimize)]):
return OpRole.Backward
if op_role in [int(OpRole.Forward), int(OpRole.Loss)]:
return OpRole.Forward
return get_valid_op_role(block, insert_idx + 1)
def insert_sync_calc_op(block, insert_idx, calc_dep_vars):
"""
_insert_sync_calc_op
"""
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_calc_stream',
inputs={'X': calc_dep_vars},
outputs={'Out': calc_dep_vars},
attrs={OP_ROLE_KEY: op_role})
return
def insert_sync_comm_op(block, insert_idx, ring_id, comm_dep_vars):
"""
insert sync_comm_op for single var
"""
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_comm_stream',
inputs={'X': comm_dep_vars},
outputs={'Out': comm_dep_vars},
attrs={'ring_id': ring_id,
OP_ROLE_KEY: op_role})
return 1
def insert_sync_comm_ops(block, insert_idx, ring_id, comm_dep_vars):
"""
insert sync_comm_op for vars
"""
# NOTE (JZ-LIANG) to be checked, may result in an undefined case
if len(comm_dep_vars) == 0:
return 0
op_role = get_valid_op_role(block, insert_idx)
block._insert_op_without_sync(
insert_idx,
type='c_sync_comm_stream',
inputs={'X': comm_dep_vars},
outputs={'Out': comm_dep_vars},
attrs={'ring_id': int(ring_id),
OP_ROLE_KEY: op_role})
return 1
def insert_fill_constant_ops(block, insert_idx, fill_constant_vars):
"""
_add_fill_constant_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for broadcast_name in fill_constant_vars:
broadcast_var = block.var(broadcast_name)
block._insert_op_without_sync(
insert_idx,
type="fill_constant",
outputs={"Out": broadcast_var.name},
attrs={
"shape": broadcast_var.shape,
"dtype": broadcast_var.dtype,
"value": 0.0,
OP_ROLE_KEY: op_role
})
return
def insert_cast_ops(block, insert_idx, cast_ops):
"""
_add_cast_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for fp16_name, fp32_name in cast_ops.items():
block._insert_op_without_sync(
insert_idx,
type="cast",
inputs={"X": fp32_name},
outputs={"Out": fp16_name},
attrs={
"in_dtype": core.VarDesc.VarType.FP32,
"out_dtype": core.VarDesc.VarType.FP16,
OP_ROLE_KEY: op_role
})
return
def insert_allreduce_ops(block,
insert_idx,
ring_id,
allreduce_vars,
op_role=OpRole.Backward,
use_calc_stream=False,
user_defined_strategy=None):
"""
_add_allreduce_ops
"""
if len(allreduce_vars) == 0:
return
if user_defined_strategy and \
user_defined_strategy.fuse_all_reduce_ops and \
not user_defined_strategy.fuse_grad_merge:
# If fuse_grad_merge is enabled, the grad vars have already been fused during
# the gradient merge pass, therefore those vars do not need to be fused here
insert_fused_allreduce_ops(block, insert_idx, ring_id, allreduce_vars,
op_role, use_calc_stream,
user_defined_strategy.fuse_grad_size_in_MB)
else:
for var in allreduce_vars:
block._insert_op_without_sync(
insert_idx,
type='c_allreduce_sum',
inputs={'X': var},
outputs={'Out': var},
attrs={
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return
class FuseHelper(object):
@staticmethod
def sort_vars_by_dtype(block, vars_name):
fp32_vars = []
fp16_vars = []
other_vars = []
for var in vars_name:
dtype = block.var(var).dtype
if dtype == paddle.float32:
fp32_vars.append(var)
elif dtype == paddle.float16:
fp16_vars.append(var)
else:
other_vars.append(var)
assert len(other_vars) == 0, "only support fp32/fp16 vars for fuse"
fp32_vars.extend(fp16_vars)
return fp32_vars
@staticmethod
def get_fused_groups(block, vars_name, fuse_size=32.):
""" coalesce tensor, get fused group """
groups = []
cur_size = 0.
last_dtype = None
for var_name in vars_name:
real_var = block.var(var_name)
var_size = get_var_size(real_var)
if cur_size + var_size > fuse_size \
or len(groups) == 0 \
or real_var.dtype != last_dtype:
groups.append([real_var])
cur_size = var_size
last_dtype = real_var.dtype
else:
groups[-1].append(real_var)
cur_size += var_size
return groups
@staticmethod
def insert_coalesce_tensor(block,
index,
groups,
op_role=OpRole.Backward,
prefix="Output"):
fused_vars = []
insert_num = 0
for group in groups:
assert len(group) >= 1
if len(group) == 1:
# no need fuse
fused_vars.append(group[0])
continue
fused_var = block.create_var(
name=unique_name.generate('Fused{}_{}'.format(prefix, group[0]
.name)),
dtype=group[0].dtype,
persistable=False,
stop_gradient=True)
fused_vars.append(fused_var)
block._insert_op_without_sync(
index,
type="coalesce_tensor",
inputs={"Input": group},
outputs={"Output": group,
"FusedOutput": fused_var},
attrs={
"copy_data": True,
"use_align": True,
"dtype": group[0].dtype,
OP_ROLE_KEY: op_role
})
insert_num += 1
return fused_vars, insert_num
def insert_fused_allreduce_ops(block,
insert_idx,
ring_id,
allreduce_vars,
op_role=OpRole.Backward,
use_calc_stream=False,
fuse_grad_size_in_MB=32):
groups = FuseHelper.get_fused_groups(block, allreduce_vars,
fuse_grad_size_in_MB)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Grad")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_allreduce_sum',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
if not use_calc_stream:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_sync_calc_stream',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={OP_ROLE_KEY: op_role})
def insert_fused_reduce_ops(block,
insert_idx,
ring_id,
reduce_vars,
shard,
op_role=OpRole.Backward,
use_calc_stream=False,
rank=None,
fuse_grad_size=32):
nranks = shard.worker_num
device_to_vars = [[] for _ in range(nranks)]
for var in reduce_vars:
root_id = get_grad_device(var, shard)
assert 0 <= root_id < nranks, "root_id should be >= 0 and < nranks, " \
"but now nranks={}, the root_id of var={} is {}"\
.format(nranks, var, root_id)
device_to_vars[root_id].append(var)
for root_id, vars_name in enumerate(device_to_vars):
groups = FuseHelper.get_fused_groups(block, vars_name, fuse_grad_size)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Grad")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_reduce_sum',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'root_id': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
if not use_calc_stream:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_sync_calc_stream',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={OP_ROLE_KEY: op_role})
return [] if rank is None else device_to_vars[rank]
def insert_reduce_ops(block,
insert_idx,
ring_id,
reduce_vars,
shard,
op_role=OpRole.Backward,
use_calc_stream=False,
rank=None,
strategy=None):
"""
_add_reduce_ops
"""
if strategy and strategy.fuse_all_reduce_ops and \
not strategy.fuse_grad_merge:
return insert_fused_reduce_ops(block, insert_idx, ring_id, reduce_vars,
shard, op_role, use_calc_stream, rank,
strategy.fuse_grad_size_in_MB)
grad_in_this_device = []
for var in reduce_vars:
grad_var = var
if strategy and strategy.fuse_all_reduce_ops and \
strategy.fuse_grad_merge:
# TODO(wangxi): if support fp16_allreduce, need be
# 'FusedMergedGrad.cast_fp16._'
grad_var = var.replace('FusedMergedGrad_', '')
root_id = get_grad_device(grad_var, shard)
assert root_id >= 0, "root id should be a non-negative int, but now root id is {}".format(
root_id)
if rank is not None and rank == root_id:
grad_in_this_device.append(var)
block._insert_op_without_sync(
insert_idx,
type='c_reduce_sum',
inputs={'X': var},
outputs={'Out': var},
attrs={
'ring_id': ring_id,
'root_id': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return grad_in_this_device
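# A hedged usage sketch (not from the original file): inside a sharding pass,
# the reduce ops for the backward grads might be inserted roughly like this,
# where `block`, `shard`, `grad_names`, `ring_id` and `rank` are placeholders
# supplied by the pass itself.
#
# grads_here = insert_reduce_ops(block, first_opt_idx, ring_id, grad_names,
#                                shard, op_role=OpRole.Backward, rank=rank)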
def insert_fused_broadcast_param_ops(block,
insert_idx,
ring_id,
params,
shard,
op_role=OpRole.Optimize,
use_calc_stream=False,
rank=None,
fuse_size=32):
nranks = shard.worker_num
device_to_vars = [[] for _ in range(nranks)]
for var in params:
root_id = shard.device(var)
assert 0 <= root_id < nranks, "root_id should be >= 0 and < nranks, " \
"but now nranks={}, the root_id of var={} is {}"\
.format(nranks, var, root_id)
device_to_vars[root_id].append(var)
for root_id, vars_name in enumerate(device_to_vars):
groups = FuseHelper.get_fused_groups(block, vars_name, fuse_size)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Param")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_broadcast',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'root': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
if not use_calc_stream:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_sync_calc_stream',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={OP_ROLE_KEY: op_role})
return [] if rank is None else device_to_vars[rank]
def insert_broadcast_param_ops(block,
insert_idx,
ring_id,
params,
shard,
op_role=OpRole.Optimize,
use_calc_stream=False,
rank=None,
strategy=None):
"""
add broadcast param ops
"""
if strategy and strategy.fuse_all_reduce_ops:
# TODO(wangxi): put fused var in startup_program, only need exec once
return insert_fused_broadcast_param_ops(
block, insert_idx, ring_id, params, shard, op_role, use_calc_stream,
rank, strategy.fuse_grad_size_in_MB)
param_in_this_device = []
for param in params:
root_id = shard.device(param)
assert root_id >= 0, "root id should be a non-negative int, but now root id is {}".format(
root_id)
if rank is not None and rank == root_id:
param_in_this_device.append(param)
block._insert_op_without_sync(
insert_idx,
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': ring_id,
'root': root_id,
'use_calc_stream': use_calc_stream,
OP_ROLE_KEY: op_role
})
return param_in_this_device
def fuse_opt_broadcast_param_ops(block,
ring_id,
shard,
op_role=OpRole.Optimize,
strategy=None):
"""
fuse optimizer sharding broadcast param ops
"""
if strategy is None or not strategy.fuse_all_reduce_ops:
return
fuse_size = strategy.fuse_grad_size_in_MB
nranks = shard.worker_num
device_to_vars = [[] for _ in range(nranks)]
for idx, op in reversed(list(enumerate(block.ops))):
if not is_optimizer_op(op) or op.type != 'c_broadcast':
break
var = op.input_arg_names[0]
root_id = op.attr('root')
device_to_vars[root_id].insert(0, var)
block._remove_op(idx, sync=False)
insert_idx = idx + 1
for root_id, vars_name in enumerate(device_to_vars):
vars_name = FuseHelper.sort_vars_by_dtype(block, vars_name)
groups = FuseHelper.get_fused_groups(block, vars_name, fuse_size)
fused_vars, insert_num = FuseHelper.insert_coalesce_tensor(
block, insert_idx, groups, op_role, prefix="Param")
for fused_var in fused_vars:
block._insert_op_without_sync(
insert_idx + insert_num,
type='c_broadcast',
inputs={'X': fused_var},
outputs={'Out': fused_var},
attrs={
'ring_id': ring_id,
'root': root_id,
'use_calc_stream': True,
OP_ROLE_KEY: op_role
})
block._sync_with_cpp()
def get_grad_device(grad_name, shard):
assert "@GRAD" in grad_name, "[{}] should be a grad variable.".format(
grad_name)
base_name = None
# NOTE: mind the traversal order
possible_suffixes = [
# sharding gm
'.cast_fp16@GRAD@MERGED',
'.cast_fp16@GRAD',
# pipeline
'@GRAD@MERGED@FP16',
'@GRAD@MERGED',
'@GRAD',
]
for suffix in possible_suffixes:
if suffix in grad_name:
base_name = re.sub(suffix, '', grad_name)
break
assert base_name in shard.global_param2device, "[{}] should be a param variable.".format(
base_name)
return shard.global_param2device[base_name]
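# Worked example (illustrative): for grad_name "fc_0.w_0.cast_fp16@GRAD", the
# suffix ".cast_fp16@GRAD" is stripped first, giving base_name "fc_0.w_0",
# which is then looked up in shard.global_param2device.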
def get_first_check_finite_and_unscale_op_idx(block, raise_error=True):
for idx, op in enumerate(block.ops):
if op.type == "check_finite_and_unscale":
return idx
if raise_error:
raise ValueError(
"amp is turned on but check_finite_and_unscale op does not exist in main block"
)
return -1
def get_first_optimize_op_idx(block):
first_opt_op_idx = None
for index, op in reversed(tuple(enumerate(block.ops))):
if is_backward_op(op) and first_opt_op_idx is None:
first_opt_op_idx = index + 1
break
return first_opt_op_idx
def insert_broadcast_ops(block, insert_idx, ring_id, broadcast2root):
"""
_add_broadcast_ops
"""
op_role = get_valid_op_role(block, insert_idx)
for broadcast_name, root_device in broadcast2root:
block._insert_op_without_sync(
insert_idx,
type='c_broadcast',
inputs={'X': broadcast_name},
outputs={'Out': broadcast_name},
attrs={
'ring_id': ring_id,
'root': root_device,
OP_ROLE_KEY: op_role
})
return
DtypeToSize = {
core.VarDesc.VarType.FP16: 2,
core.VarDesc.VarType.FP32: 4,
core.VarDesc.VarType.FP64: 8,
core.VarDesc.VarType.INT16: 2,
core.VarDesc.VarType.INT32: 4,
core.VarDesc.VarType.INT64: 8,
core.VarDesc.VarType.BOOL: 1,
core.VarDesc.VarType.UINT8: 1,
}
def get_var_size(param):
"""
input:
- param: var
return:
var size in MB
"""
assert -1 not in param.shape
return reduce(lambda x, y: x * y,
param.shape) * DtypeToSize[param.dtype] / 1024.0 / 1024.0
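# Worked example: a float32 parameter of shape (1024, 1024) occupies
# 1024 * 1024 * 4 bytes = 4194304 bytes, so get_var_size returns
# 4194304 / 1024 / 1024 = 4.0 (MB).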
def insert_scale_loss_grad_ops(block, scale=1.0):
'''
In order to keep the learning rate consistent in different numbers of
training workers, we scale the loss grad by the number of workers
'''
for idx, op in reversed(list(enumerate(block.ops))):
if is_loss_grad_op(op):
assert op.type == 'fill_constant', \
"loss_grad_op must be fill_constant op, " \
"but this op is {}".format(op.type)
assert op.has_attr('value')
loss_scale = float(op.attr('value'))
loss_scale = loss_scale / scale
op._set_attr('value', loss_scale)
break
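# Worked example: with scale=8 (e.g. 8 data-parallel workers) and the usual
# fill_constant loss-grad value of 1.0, the op's 'value' attribute is
# rewritten to 1.0 / 8 = 0.125.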
def comm_analyse(main_program):
"""
Analyse the parameter sizes that need to be broadcast/all-reduced during sharding training
"""
reduce_vars = {}
broadcast_vars = {}
block = main_program.global_block()
for op in block.ops:
if op.type == "c_broadcast":
var_name = op.desc.input_arg_names()[0]
# convert MB to KB
broadcast_vars[var_name] = get_var_size(block.var(
var_name)) * 1024.0
elif op.type == "c_allreduce_sum":
var_name = op.desc.input_arg_names()[0]
reduce_vars[var_name] = get_var_size(block.var(var_name)) * 1024.0
varsize_count = {}
gap = 1
for k, v in broadcast_vars.items():
print("broadcast: {}: {} KB".format(k, v))
if (int(v / gap) in varsize_count):
varsize_count[int(v / gap)] += 1
else:
varsize_count[int(v / gap)] = 1
for k, v in reduce_vars.items():
print("allreduce: {}: {} KB".format(k, v))
if (int(v / gap) in varsize_count):
varsize_count[int(v / gap)] += 1
else:
varsize_count[int(v / gap)] = 1
with open("nccl_size.txt", 'w') as f:
sorted_varsize = sorted(varsize_count.items(), key=lambda x: x[0])
for varsize, count in sorted_varsize:
print("NCCL size {}~{} KB: {}".format(varsize, varsize + 1, count))
f.write("NCCL size {}~{} KB: {}\n".format(varsize, varsize + 1,
count))
def add_sync_comm(program, sharding_ring_id):
"""
When cloning a test program from the sharding main program,
some of the sync_comm ops may be pruned by mistake; this function
adds the missing sync_comm ops back for the test program.
"""
#NOTE (liangjianzhong): only one comm stream is supported for now; using more
# than one comm stream will cause errors. This should be revised in future.
assert sharding_ring_id >= 0, "sharding_ring_id should be non-negative"
block = program.global_block()
not_sync_vars = set([])
for op in block.ops:
if op.type in ["c_broadcast", "c_allreduce"]:
for input_name in op.desc.input_arg_names():
not_sync_vars.add(input_name)
if op.type == "c_sync_comm_stream":
for input_name in op.desc.input_arg_names():
not_sync_vars.remove(input_name)
if not_sync_vars:
block.append_op(
type='c_sync_comm_stream',
inputs={'X': list(not_sync_vars)},
outputs={'Out': list(not_sync_vars)},
attrs={
'ring_id': sharding_ring_id,
'op_role': core.op_proto_and_checker_maker.OpRole.Forward
})
return
def save_persistables(exe, dirname, main_program, filename=None):
"""
When sharding is used, some persistable vars are unique and partitioned across different ranks,
while others are duplicated and exist in all the ranks with different values.
This function handles the model saving for sharding training.
"""
# TODO (JZ-LIANG) revise this for uniform mixed parallelism
if main_program._pipeline_opt:
main_program = main_program._pipeline_opt['section_program']
def is_opt_vars(var):
# NOTE(JZ-LIANG): The checks should be updated when adding new compatible optimizers
# now only Momentum and Adam are compatible with sharding,
# support EMA optimizer with '_ema_0',
# support offload with '@offload_0' and '.cast_fp16'
checks = [
"_moment1_0", "_moment2_0", "_beta1_pow_acc_0", "_beta2_pow_acc_0",
"_velocity_0", "_ema_0", "@offload_0", ".cast_fp16"
]
for check in checks:
if var.name.endswith(check) and var.persistable:
return True
return False
def is_gradient_merge_vars(var):
# NOTE(JZ-LIANG): to revise save/load logic in the framework instead of writing this naive rule
return var.name.endswith("@GradiantMerge")
def is_trainable(var):
return isinstance(var,
paddle.fluid.framework.Parameter) and var.trainable
def sharding_predicate(var):
return is_trainable(var) or is_opt_vars(var) or is_gradient_merge_vars(
var)
if int(os.environ.get('PADDLE_TRAINER_ID', 0)) == 0:
paddle.fluid.io.save_persistables(
exe, dirname, main_program=main_program, filename=None)
else:
paddle.fluid.io.save_vars(
exe,
dirname,
main_program=main_program,
predicate=sharding_predicate,
filename=None)
return
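# A hedged usage sketch (not part of the original file): after training with
# sharding, each rank calls save_persistables with its own executor and the
# (possibly pipeline-wrapped) main program; the path is a placeholder.
#
# save_persistables(exe, "./checkpoint", main_program=train_prog)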
def append_naive_sync(block, sync_var, ring_id):
# NOTE (JZ-LIANG) update this to use barrier sync for more elegant logic
# sync within global
block.append_op(
type="fill_constant",
outputs={"Out": sync_var},
attrs={
"shape": sync_var.shape,
"dtype": sync_var.dtype,
"value": int(1),
})
block.append_op(
type='c_allreduce_sum',
inputs={'X': sync_var},
outputs={'Out': sync_var},
attrs={
'ring_id': ring_id,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Forward
})
block.append_op(
type='c_sync_calc_stream',
inputs={'X': [sync_var]},
outputs={'Out': [sync_var]},
attrs={OP_ROLE_KEY: OpRole.Forward})
|
py | 1a3a8b3edfb59be79df6e959bab8a19c0d93320d | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['RegistryArgs', 'Registry']
@pulumi.input_type
class RegistryArgs:
def __init__(__self__, *,
registry_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Registry resource.
:param pulumi.Input[str] registry_name: The Name of the registry.
:param pulumi.Input[str] description: A description of the registry.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
pulumi.set(__self__, "registry_name", registry_name)
if description is not None:
pulumi.set(__self__, "description", description)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="registryName")
def registry_name(self) -> pulumi.Input[str]:
"""
The Name of the registry.
"""
return pulumi.get(self, "registry_name")
@registry_name.setter
def registry_name(self, value: pulumi.Input[str]):
pulumi.set(self, "registry_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the registry.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _RegistryState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Registry resources.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of Glue Registry.
:param pulumi.Input[str] description: A description of the registry.
:param pulumi.Input[str] registry_name: The Name of the registry.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if description is not None:
pulumi.set(__self__, "description", description)
if registry_name is not None:
pulumi.set(__self__, "registry_name", registry_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of Glue Registry.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the registry.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="registryName")
def registry_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the registry.
"""
return pulumi.get(self, "registry_name")
@registry_name.setter
def registry_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "registry_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class Registry(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides a Glue Registry resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.Registry("example", registry_name="example")
```
## Import
Glue Registries can be imported using `arn`, e.g.
```sh
$ pulumi import aws:glue/registry:Registry example arn:aws:glue:us-west-2:123456789012:registry/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description of the registry.
:param pulumi.Input[str] registry_name: The Name of the registry.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegistryArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Glue Registry resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.glue.Registry("example", registry_name="example")
```
## Import
Glue Registries can be imported using `arn`, e.g.
```sh
$ pulumi import aws:glue/registry:Registry example arn:aws:glue:us-west-2:123456789012:registry/example
```
:param str resource_name: The name of the resource.
:param RegistryArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegistryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegistryArgs.__new__(RegistryArgs)
__props__.__dict__["description"] = description
if registry_name is None and not opts.urn:
raise TypeError("Missing required property 'registry_name'")
__props__.__dict__["registry_name"] = registry_name
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["tags_all"] = None
super(Registry, __self__).__init__(
'aws:glue/registry:Registry',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Registry':
"""
Get an existing Registry resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of Glue Registry.
:param pulumi.Input[str] description: A description of the registry.
:param pulumi.Input[str] registry_name: The Name of the registry.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RegistryState.__new__(_RegistryState)
__props__.__dict__["arn"] = arn
__props__.__dict__["description"] = description
__props__.__dict__["registry_name"] = registry_name
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return Registry(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of Glue Registry.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description of the registry.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="registryName")
def registry_name(self) -> pulumi.Output[str]:
"""
The Name of the registry.
"""
return pulumi.get(self, "registry_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
return pulumi.get(self, "tags_all")
|
py | 1a3a8b9f0fbf255a041e365738e4a3412b47fcd1 | """Module to define main fnet model wrapper class."""
from pathlib import Path
from typing import Callable, Iterator, List, Optional, Sequence, Tuple, Union
import logging
import math
import os
from scipy.ndimage import zoom
import numpy as np
import tifffile
import torch
from fnet.metrics import corr_coef
from fnet.predict_piecewise import predict_piecewise as _predict_piecewise_fn
from fnet.transforms import flip_y, flip_x, norm_around_center
from fnet.utils.general_utils import get_args, retry_if_oserror, str_to_object
from fnet.utils.model_utils import move_optim
logger = logging.getLogger(__name__)
def _weights_init(m):
classname = m.__class__.__name__
if classname.startswith("Conv"):
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_per_param_options(module, wd):
"""Returns list of per parameter group options.
Applies the specified weight decay (wd) to parameters except parameters
within batch norm layers and bias parameters.
"""
if wd == 0:
return module.parameters()
with_decay = list()
without_decay = list()
for idx_m, (name_m, module_sub) in enumerate(module.named_modules()):
if list(module_sub.named_children()):
continue # Skip "container" modules
if isinstance(module_sub, torch.nn.modules.batchnorm._BatchNorm):
for param in module_sub.parameters():
without_decay.append(param)
continue
for name_param, param in module_sub.named_parameters():
if "weight" in name_param:
with_decay.append(param)
elif "bias" in name_param:
without_decay.append(param)
# Check that no parameters were missed or duplicated
n_param_module = len(list(module.parameters()))
n_param_lists = len(with_decay) + len(without_decay)
n_elem_module = sum([p.numel() for p in module.parameters()])
n_elem_lists = sum([p.numel() for p in with_decay + without_decay])
assert n_param_module == n_param_lists
assert n_elem_module == n_elem_lists
per_param_options = [
{"params": with_decay, "weight_decay": wd},
{"params": without_decay, "weight_decay": 0.0},
]
return per_param_options
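# A hedged usage sketch (not part of the original file): the per-parameter
# groups are meant to be handed straight to a torch optimizer, e.g.
#
# optimizer = torch.optim.Adam(get_per_param_options(net, wd=0.01), lr=1e-3)
#
# so that batch-norm and bias parameters are excluded from weight decay.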
class Model:
"""Class that encompasses a pytorch network and its optimizer.
"""
def __init__(
self,
betas=(0.5, 0.999),
criterion_class="fnet.losses.WeightedMSE",
init_weights=True,
lr=0.001,
nn_class="fnet.nn_modules.fnet_nn_3d.Net",
nn_kwargs={},
scheduler=None,
weight_decay=0,
gpu_ids=-1,
):
self.betas = betas
self.criterion = str_to_object(criterion_class)()
self.gpu_ids = [gpu_ids] if isinstance(gpu_ids, int) else gpu_ids
self.init_weights = init_weights
self.lr = lr
self.nn_class = nn_class
self.nn_kwargs = nn_kwargs
self.scheduler = scheduler
self.weight_decay = weight_decay
self.count_iter = 0
self.device = (
torch.device("cuda", self.gpu_ids[0])
if self.gpu_ids[0] >= 0
else torch.device("cpu")
)
self.optimizer = None
self._init_model()
self.fnet_model_kwargs, self.fnet_model_posargs = get_args()
self.fnet_model_kwargs.pop("self")
def _init_model(self):
self.net = str_to_object(self.nn_class)(**self.nn_kwargs)
if self.init_weights:
self.net.apply(_weights_init)
self.net.to(self.device)
self.optimizer = torch.optim.Adam(
get_per_param_options(self.net, wd=self.weight_decay),
lr=self.lr,
betas=self.betas,
)
if self.scheduler is not None:
if self.scheduler[0] == "snapshot":
period = self.scheduler[1]
self.scheduler = torch.optim.lr_scheduler.LambdaLR(
self.optimizer,
lambda x: (
0.01
+ (1 - 0.01)
* (0.5 + 0.5 * math.cos(math.pi * (x % period) / period))
),
)
elif self.scheduler[0] == "step":
step_size = self.scheduler[1]
self.scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer, step_size
)
else:
raise NotImplementedError
def __str__(self):
out_str = [
f"*** {self.__class__.__name__} ***",
f"{self.nn_class}(**{self.nn_kwargs})",
f"iter: {self.count_iter}",
f"gpu: {self.gpu_ids}",
]
return os.linesep.join(out_str)
def get_state(self):
return {
"fnet_model_class": (self.__module__ + "." + self.__class__.__qualname__),
"fnet_model_kwargs": self.fnet_model_kwargs,
"fnet_model_posargs": self.fnet_model_posargs,
"nn_state": self.net.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"count_iter": self.count_iter,
}
def to_gpu(self, gpu_ids: Union[int, List[int]]) -> None:
"""Move network to specified GPU(s).
Parameters
----------
gpu_ids
GPU(s) on which to perform training or prediction.
"""
if isinstance(gpu_ids, int):
gpu_ids = [gpu_ids]
self.gpu_ids = gpu_ids
self.device = (
torch.device("cuda", self.gpu_ids[0])
if self.gpu_ids[0] >= 0
else torch.device("cpu")
)
self.net.to(self.device)
if self.optimizer is not None:
move_optim(self.optimizer, self.device)
def save(self, path_save: str):
"""Saves model to disk.
Parameters
----------
path_save
Filename to which model is saved.
"""
dirname = os.path.dirname(path_save)
if not os.path.exists(dirname):
os.makedirs(dirname)
logger.info(f"Created: {dirname}")
curr_gpu_ids = self.gpu_ids
self.to_gpu(-1)
retry_if_oserror(torch.save)(self.get_state(), path_save)
self.to_gpu(curr_gpu_ids)
def load_state(self, state: dict, no_optim: bool = False):
self.count_iter = state["count_iter"]
self.net.load_state_dict(state["nn_state"])
if no_optim:
self.optimizer = None
return
self.optimizer.load_state_dict(state["optimizer_state"])
def train_on_batch(
self,
x_batch: torch.Tensor,
y_batch: torch.Tensor,
weight_map_batch: Optional[torch.Tensor] = None,
) -> float:
"""Update model using a batch of inputs and targets.
Parameters
----------
x_batch
Batched input.
y_batch
Batched target.
weight_map_batch
Optional batched weight map.
Returns
-------
float
Loss as determined by self.criterion.
"""
if self.scheduler is not None:
self.scheduler.step()
self.net.train()
x_batch = x_batch.to(dtype=torch.float32, device=self.device)
y_batch = y_batch.to(dtype=torch.float32, device=self.device)
if len(self.gpu_ids) > 1:
module = torch.nn.DataParallel(self.net, device_ids=self.gpu_ids)
else:
module = self.net
self.optimizer.zero_grad()
y_hat_batch = module(x_batch)
args = [y_hat_batch, y_batch]
if weight_map_batch is not None:
args.append(weight_map_batch)
loss = self.criterion(*args)
loss.backward()
self.optimizer.step()
self.count_iter += 1
return loss.item()
def _predict_on_batch_tta(self, x_batch: torch.Tensor) -> torch.Tensor:
"""Performs model prediction using test-time augmentation."""
augs = [None, [flip_y], [flip_x], [flip_y, flip_x]]
x_batch = x_batch.numpy()
y_hat_batch_mean = None
for aug in augs:
x_batch_aug = x_batch.copy()
if aug is not None:
for trans in aug:
x_batch_aug = trans(x_batch_aug)
y_hat_batch = self.predict_on_batch(x_batch_aug.copy()).numpy()
if aug is not None:
for trans in aug:
y_hat_batch = trans(y_hat_batch)
if y_hat_batch_mean is None:
y_hat_batch_mean = np.zeros(y_hat_batch.shape, dtype=np.float32)
y_hat_batch_mean += y_hat_batch
y_hat_batch_mean /= len(augs)
return torch.tensor(
y_hat_batch_mean, dtype=torch.float32, device=torch.device("cpu")
)
def predict_on_batch(self, x_batch: torch.Tensor) -> torch.Tensor:
"""Performs model prediction on a batch of data.
Parameters
----------
x_batch
Batch of input data.
Returns
-------
torch.Tensor
Batch of model predictions.
"""
x_batch = torch.tensor(x_batch, dtype=torch.float32, device=self.device)
if len(self.gpu_ids) > 1:
network = torch.nn.DataParallel(self.net, device_ids=self.gpu_ids)
else:
network = self.net
network.eval()
with torch.no_grad():
y_hat_batch = network(x_batch).cpu()
network.train()
return y_hat_batch
def predict(
self, x: Union[torch.Tensor, np.ndarray], tta: bool = False
) -> torch.Tensor:
"""Performs model prediction on a single example.
Parameters
----------
x
Input data.
tta
Set to use test-time augmentation.
Returns
-------
torch.Tensor
Model prediction.
"""
x_batch = torch.unsqueeze(torch.tensor(x), 0)
if tta:
return self._predict_on_batch_tta(x_batch).squeeze(0)
return self.predict_on_batch(x_batch).squeeze(0)
def predict_piecewise(
self, x: Union[torch.Tensor, np.ndarray], **predict_kwargs
) -> torch.Tensor:
"""Performs model prediction piecewise on a single example.
Predicts on patches of the input and stitches together the predictions.
Parameters
----------
x
Input data.
**predict_kwargs
Kwargs to pass to predict method.
Returns
-------
torch.Tensor
Model prediction.
"""
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
if len(x.size()) == 4:
dims_max = [None, 32, 512, 512]
elif len(x.size()) == 3:
dims_max = [None, 1024, 1024]
y_hat = _predict_piecewise_fn(
self, x, dims_max=dims_max, overlaps=16, **predict_kwargs
)
return y_hat
def test_on_batch(
self,
x_batch: torch.Tensor,
y_batch: torch.Tensor,
weight_map_batch: Optional[torch.Tensor] = None,
) -> float:
"""Test model on a batch of inputs and targets.
Parameters
----------
x_batch
Batched input.
y_batch
Batched target.
weight_map_batch
Optional batched weight map.
Returns
-------
float
Loss as evaluated by self.criterion.
"""
y_hat_batch = self.predict_on_batch(x_batch)
args = [y_hat_batch, y_batch]
if weight_map_batch is not None:
args.append(weight_map_batch)
loss = self.criterion(*args)
return loss.item()
def test_on_iterator(self, iterator: Iterator, **kwargs: dict) -> float:
"""Test model on iterator which has items to be passed to
test_on_batch.
Parameters
----------
iterator
Iterator that generates items to be passed to test_on_batch.
kwargs
Additional keyword arguments to be passed to test_on_batch.
Returns
-------
float
Mean loss for items in iterable.
"""
loss_sum = 0
for item in iterator:
loss_sum += self.test_on_batch(*item, **kwargs)
return loss_sum / len(iterator)
def evaluate(
self,
x: torch.Tensor,
y: torch.Tensor,
metric: Optional = None,
piecewise: bool = False,
**kwargs,
) -> Tuple[float, torch.Tensor]:
"""Evaluates model output using a metric function.
Parameters
----------
x
Input data.
y
Target data.
metric
Metric function. If None, uses fnet.metrics.corr_coef.
piecewise
Set to perform predictions piecewise.
**kwargs
Additional kwargs to be passed to predict() method.
Returns
-------
float
Evaluation as determined by metric function.
torch.Tensor
Model prediction.
"""
if metric is None:
metric = corr_coef
if piecewise:
y_hat = self.predict_piecewise(x, **kwargs)
else:
y_hat = self.predict(x, **kwargs)
if y is None:
return None, y_hat
evaluation = metric(y, y_hat)
return evaluation, y_hat
def apply_on_single_zstack(
self,
input_img: Optional[np.ndarray] = None,
filename: Optional[Union[Path, str]] = None,
inputCh: Optional[int] = None,
normalization: Optional[Callable] = None,
already_normalized: bool = False,
ResizeRatio: Optional[Sequence[float]] = None,
cutoff: Optional[float] = None,
) -> np.ndarray:
"""Applies model to a single z-stack input.
This assumes the loaded network architecture can receive 3d grayscale
images as input.
Parameters
----------
input_img
3d or 4d image with shape (Z, Y, X) or (C, Z, Y, X) respectively.
filename
Path to input image. Ignored if input_img is supplied.
inputCh
Selected channel if filename is a path to a 4d image.
normalization
Input image normalization function.
already_normalized
Set to skip input normalization.
ResizeRatio
Resizes each dimension of the the input image by the specified
factor if specified.
cutoff
If specified, converts the output to a binary image with cutoff as
threshold value.
Returns
-------
np.ndarray
Predicted image with shape (Z, Y, X). If cutoff is set, dtype will
be numpy.uint8. Otherwise, dtype will be numpy.float.
Raises
------
ValueError
If parameters are invalid.
FileNotFoundError
If specified file does not exist.
IndexError
If inputCh is invalid.
"""
if input_img is None:
if filename is None:
raise ValueError("input_img or filename must be specified")
input_img = tifffile.imread(str(filename))
if inputCh is not None:
if input_img.ndim != 4:
raise ValueError("input_img must be 4d if inputCh specified")
input_img = input_img[inputCh,]
if input_img.ndim != 3:
raise ValueError("input_img must be 3d")
normalization = normalization or norm_around_center
if not already_normalized:
input_img = normalization(input_img)
if ResizeRatio is not None:
if len(ResizeRatio) != 3:
raise ValueError("ResizeRatio must be length 3")
input_img = zoom(input_img, zoom=ResizeRatio, mode="nearest")
yhat = (
self.predict_piecewise(input_img[np.newaxis,], tta=True)
.squeeze(dim=0)
.numpy()
)
if cutoff is not None:
yhat = (yhat >= cutoff).astype(np.uint8) * 255
return yhat
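# A hedged usage sketch (not part of the original module): predicting on a
# single 3D z-stack with a previously saved model. The checkpoint path,
# input file and channel index below are placeholders.
#
# model = Model()
# model.load_state(torch.load("path/to/model.p"), no_optim=True)
# prediction = model.apply_on_single_zstack(
#     filename="input.tiff", inputCh=0, cutoff=0.5)
# tifffile.imsave("prediction.tiff", prediction)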
|
py | 1a3a8ba8d513ed8aa65ecb7d97e9cdfcd4c46a01 | from .brditem import BrdItem
'''
Classes based on BrdItem for parsing and rendering $ZONE and $CZONE_OUTLINE
inside a .brd file.
'''
class Zone(BrdItem):
keyword = '$ZONE'
_by_keyword = {}
def __init__(self, tokens=[], lineiterator=None):
self.items = []
super(Zone,self).__init__(tokens, lineiterator)
def render(self, linelist):
linelist.append(self.keyword)
linelist.extend(x.linetext for x in self.items)
self.EndZone.render(self, linelist)
class EndZone(Zone):
keyword = '$EndZONE'
@classmethod
def subparse(cls, zone, tokens, lineiterator):
cls.checkstate(zone, -1, 0)
@classmethod
def render(cls, zone, linelist):
linelist.append(cls.keyword)
class CZoneOutline(BrdItem):
keyword = '$CZONE_OUTLINE'
_by_keyword = {}
def __init__(self, tokens=[], lineiterator=None):
self.items = []
self.corners = []
super(CZoneOutline,self).__init__(tokens, lineiterator)
def render(self, linelist):
linelist.append(self.keyword)
linelist.extend(x.linetext for x in self.items)
self.EndCZoneOutline.render(self, linelist)
class CZoneInner(CZoneOutline):
@classmethod
def subparse(cls, zone, tokens, lineiterator):
cls.checkstate(zone, -1, -1)
zone.items.append(tokens)
class ZInfo(CZoneInner): pass
class ZLayer(CZoneInner):
@classmethod
def subparse(cls, zone, tokens, lineiterator):
zone.items.append(tokens)
zone.layer = tokens[1]
class ZAux(CZoneInner): pass
class ZClearance(CZoneInner): pass
class ZMinThickness(CZoneInner): pass
class ZOptions(CZoneInner): pass
class ZSmoothing(CZoneInner): pass
class ZCorner(CZoneInner): pass
class EndCZoneOutline(CZoneOutline):
keyword = '$EndCZONE_OUTLINE'
@classmethod
def subparse(cls, czone, tokens, lineiterator):
cls.checkstate(czone, -1, 0)
@classmethod
def render(cls, zone, linelist):
linelist.append(cls.keyword)
class PolysCorners(CZoneOutline):
tokenlength = 1, 1
keyword = '$POLYSCORNERS'
@classmethod
def subparse(cls, czone, tokens, lineiterator):
cls.checkstate(czone, -1, -1)
text = [tokens]
for tokens in lineiterator:
text.append(tokens)
if tokens[0] == '$endPOLYSCORNERS':
break
czone.corners.append([float(x) for x in tokens[0:2]])
czone.items.extend(text)
|
py | 1a3a8c2fe6e2df1ac4170178b720919b75c2c0a0 | #!/usr/bin/env python
"""UI client report handling classes."""
from __future__ import division
import time
from future.utils import iteritems
from grr_response_core.lib import rdfvalue
from grr_response_server import aff4
from grr_response_server.aff4_objects import stats as aff4_stats
from grr_response_server.gui.api_plugins.report_plugins import rdf_report_plugins
from grr_response_server.gui.api_plugins.report_plugins import report_plugin_base
TYPE = rdf_report_plugins.ApiReportDescriptor.ReportType.CLIENT
class GRRVersion1ReportPlugin(report_plugin_base.ReportPluginBase):
"""Display a histogram of last actives based on GRR Version."""
TYPE = TYPE
TITLE = "Active Clients - 1 Day Active"
SUMMARY = ("This shows the number of clients active in the given timerange "
"based on the GRR version.")
ACTIVE_DAY = 1
def _ProcessGraphSeries(self, graph_series, categories):
for graph in graph_series:
# Find the correct graph and merge the OS categories together
if "%d day" % self.__class__.ACTIVE_DAY in graph.title:
for sample in graph:
timestamp = graph_series.age.AsMicrosecondsSinceEpoch() // 1000
categories.setdefault(sample.label, []).append((timestamp,
sample.y_value))
break
def GetReportData(self, get_report_args, token):
"""Show how the last active breakdown evolved over time."""
ret = rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.RepresentationType.
LINE_CHART)
try:
# now
end_time = int(time.time() * 1e6)
# half a year ago
start_time = end_time - (60 * 60 * 24 * 1000000 * 180)
fd = aff4.FACTORY.Open(
rdfvalue.RDFURN("aff4:/stats/ClientFleetStats").Add(
get_report_args.client_label),
token=token,
age=(start_time, end_time))
categories = {}
for graph_series in fd.GetValuesForAttribute(
aff4_stats.ClientFleetStats.SchemaCls.GRRVERSION_HISTOGRAM):
self._ProcessGraphSeries(graph_series, categories)
graphs = []
for k, v in iteritems(categories):
graph = dict(label=k, data=v)
graphs.append(graph)
ret.line_chart.data = sorted(
(rdf_report_plugins.ApiReportDataSeries2D(
label=label,
points=(rdf_report_plugins.ApiReportDataPoint2D(x=x, y=y)
for x, y in points))
for label, points in iteritems(categories)),
key=lambda series: series.label)
except IOError:
pass
return ret
class GRRVersion7ReportPlugin(GRRVersion1ReportPlugin):
"""Display a histogram of last actives based on GRR Version."""
TITLE = "Active Clients - 7 Days Active"
ACTIVE_DAY = 7
class GRRVersion30ReportPlugin(GRRVersion1ReportPlugin):
"""Display a histogram of last actives based on GRR Version."""
TITLE = "Active Clients - 30 Days Active"
ACTIVE_DAY = 30
class LastActiveReportPlugin(report_plugin_base.ReportPluginBase):
"""Displays a histogram of last client activities."""
TYPE = TYPE
TITLE = "Last Active"
SUMMARY = ("Breakdown of Client Count Based on Last Activity of the Client. "
"This plot shows the number of clients active in the last day and "
"how that number evolved over time.")
ACTIVE_DAYS_DISPLAY = [1, 3, 7, 30, 60]
def _ProcessGraphSeries(self, graph_series, categories):
for sample in graph_series:
# Provide the time in js timestamps (milliseconds since the epoch).
days = sample.x_value // 1000000 // 24 // 60 // 60
if days in self.__class__.ACTIVE_DAYS_DISPLAY:
label = "%s day active" % days
timestamp = graph_series.age.AsMicrosecondsSinceEpoch() // 1000
categories.setdefault(label, []).append((timestamp, sample.y_value))
def GetReportData(self, get_report_args, token):
"""Show how the last active breakdown evolved over time."""
ret = rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.RepresentationType.
LINE_CHART)
try:
# now
end_time = int(time.time() * 1e6)
# half a year ago
start_time = end_time - (60 * 60 * 24 * 1000000 * 180)
fd = aff4.FACTORY.Open(
rdfvalue.RDFURN("aff4:/stats/ClientFleetStats").Add(
get_report_args.client_label),
token=token,
age=(start_time, end_time))
categories = {}
for graph_series in fd.GetValuesForAttribute(
aff4_stats.ClientFleetStats.SchemaCls.LAST_CONTACTED_HISTOGRAM):
self._ProcessGraphSeries(graph_series, categories)
graphs = []
for k, v in iteritems(categories):
graph = dict(label=k, data=v)
graphs.append(graph)
ret.line_chart.data = sorted(
(rdf_report_plugins.ApiReportDataSeries2D(
label=label,
points=(rdf_report_plugins.ApiReportDataPoint2D(x=x, y=y)
for x, y in points))
for label, points in iteritems(categories)),
key=lambda series: int(series.label.split()[0]),
reverse=True)
except IOError:
pass
return ret
class OSBreakdown1ReportPlugin(report_plugin_base.ReportPluginBase):
"""Displays a histogram of last client activities."""
TYPE = TYPE
TITLE = "OS Breakdown - 1 Day Active"
SUMMARY = ("Operating system break down. OS breakdown for clients that were "
"active in the given timerange.")
ACTIVE_DAYS = 1
def GetReportData(self, get_report_args, token):
"""Extract only the operating system type from the active histogram."""
ret = rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.RepresentationType.
PIE_CHART)
try:
fd = aff4.FACTORY.Open(
rdfvalue.RDFURN("aff4:/stats/ClientFleetStats").Add(
get_report_args.client_label),
token=token)
for graph in fd.Get(aff4_stats.ClientFleetStats.SchemaCls.OS_HISTOGRAM):
# Find the correct graph and merge the OS categories together
if "%s day" % self.__class__.ACTIVE_DAYS in graph.title:
for sample in graph:
ret.pie_chart.data.Append(
rdf_report_plugins.ApiReportDataPoint1D(
label=sample.label, x=sample.y_value))
break
except (IOError, TypeError):
pass
ret.pie_chart.data = sorted(
ret.pie_chart.data, key=lambda point: point.label)
return ret
class OSBreakdown7ReportPlugin(OSBreakdown1ReportPlugin):
"""Displays a histogram of last client activities."""
TITLE = "OS Breakdown - 7 Days Active"
ACTIVE_DAYS = 7
class OSBreakdown14ReportPlugin(OSBreakdown1ReportPlugin):
"""Displays a histogram of last client activities."""
TITLE = "OS Breakdown - 14 Days Active"
ACTIVE_DAYS = 14
class OSBreakdown30ReportPlugin(OSBreakdown1ReportPlugin):
"""Displays a histogram of last client activities."""
TITLE = "OS Breakdown - 30 Days Active"
ACTIVE_DAYS = 30
class OSReleaseBreakdown1ReportPlugin(report_plugin_base.ReportPluginBase):
"""Displays a histogram of last client activities."""
TYPE = TYPE
TITLE = "OS Release Breakdown - 1 Day Active"
SUMMARY = ("Operating system version break down. What OS Version clients were"
" active within the given timerange.")
ACTIVE_DAYS = 1
def GetReportData(self, get_report_args, token):
"""Extract only the operating system type from the active histogram."""
ret = rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.RepresentationType.
PIE_CHART)
try:
fd = aff4.FACTORY.Open(
rdfvalue.RDFURN("aff4:/stats/ClientFleetStats").Add(
get_report_args.client_label),
token=token)
for graph in fd.Get(
aff4_stats.ClientFleetStats.SchemaCls.RELEASE_HISTOGRAM):
# Find the correct graph and merge the OS categories together
if "%s day" % self.__class__.ACTIVE_DAYS in graph.title:
for sample in graph:
ret.pie_chart.data.Append(
rdf_report_plugins.ApiReportDataPoint1D(
label=sample.label, x=sample.y_value))
break
except (IOError, TypeError):
pass
ret.pie_chart.data = sorted(
ret.pie_chart.data, key=lambda point: point.label)
return ret
class OSReleaseBreakdown7ReportPlugin(OSReleaseBreakdown1ReportPlugin):
"""Displays a histogram of last client activities."""
TITLE = "OS Release Breakdown - 7 Days Active"
ACTIVE_DAYS = 7
class OSReleaseBreakdown14ReportPlugin(OSReleaseBreakdown1ReportPlugin):
"""Displays a histogram of last client activities."""
TITLE = "OS Release Breakdown - 14 Days Active"
ACTIVE_DAYS = 14
class OSReleaseBreakdown30ReportPlugin(OSReleaseBreakdown1ReportPlugin):
"""Displays a histogram of last client activities."""
TITLE = "OS Release Breakdown - 30 Days Active"
ACTIVE_DAYS = 30
|
py | 1a3a8c59b57b910631ea2a1b472fd1b5093b673b | import time
import board
import busio
import adafruit_ahtx0
# Create the sensor object, communicating over I2C on pins GP9 (SCL) and GP8 (SDA)
i2c = busio.I2C(board.GP9, board.GP8)
sensor = adafruit_ahtx0.AHTx0(i2c)
while True:
print("\nTemperature: %0.1f C" % sensor.temperature)
print("Humidity: %0.1f %%" % sensor.relative_humidity)
time.sleep(2)
|
py | 1a3a8c751be1ceda7e0bc3061b2a34d4ecd19792 | import os
import pandas as pd
from pandas_profiling.model.summarizer import PandasProfilingSummarizer, format_summary
from pandas_profiling.model.typeset import (
URL,
Boolean,
Categorical,
DateTime,
File,
Image,
Numeric,
Path,
ProfilingTypeSet,
Unsupported,
)
base_path = os.path.abspath(os.path.dirname(__file__))
def test_summarizer():
pps = PandasProfilingSummarizer(typeset=ProfilingTypeSet())
_ = format_summary(pps.summarize(pd.Series([1, 2, 3, 4, 5]), Unsupported))
_ = format_summary(pps.summarize(pd.Series([1, 2, 3, 4, 5]), Numeric))
_ = format_summary(
pps.summarize(
pd.Series(pd.date_range(start="1/1/2018", end="1/08/2018")), DateTime
)
)
_ = format_summary(pps.summarize(pd.Series(["abc", "abc", "abba"]), Categorical))
_ = format_summary(pps.summarize(pd.Series(["https://www.example.com"]), URL))
_ = format_summary(
pps.summarize(
pd.Series(
[
os.path.abspath(
base_path
+ r"../../../src/pandas_profiling/model/typeset_does_not_exist.py"
)
]
),
Path,
)
)
_ = format_summary(
pps.summarize(
pd.Series(
[
os.path.abspath(
base_path + r"../../../src/pandas_profiling/model/typeset.py"
)
]
),
File,
)
)
_ = format_summary(
pps.summarize(
pd.Series(
[os.path.abspath(base_path + r"../../../docsrc/assets/lambda-labs.png")]
),
Image,
)
)
_ = format_summary(
pps.summarize(pd.Series([True, False, True, False, False]), Boolean)
)
|
py | 1a3a8c98fd57477a9099feed23b1d8aa16acc3d6 | # Copyright (c) 2012-2017 Snowflake Computing Inc. All rights reserved.
"""
test_tokens.py - This defines a series of tests to ascertain that we are
capable of renewing JWT tokens
"""
from snowflake.ingest.utils import SecurityManager
from snowflake.ingest.error import IngestClientError
from snowflake.ingest.errorcode import ERR_INVALID_PRIVATE_KEY
from datetime import timedelta
from time import sleep
import os
import pytest
def test_same_token(test_util):
"""
Tests that we aren't immediately regenerating the key after each request
"""
private_key, _ = test_util.generate_key_pair()
sec_man = SecurityManager("testaccount", "snowman", private_key,
renewal_delay=timedelta(seconds=3))
assert sec_man.get_token() == sec_man.get_token()
def test_regenerate_token(test_util):
"""
Tests that the security manager generates new tokens after we
cross the set renewal threshold
"""
private_key, _ = test_util.generate_key_pair()
sec_man = SecurityManager("testaccount", "snowman", private_key,
renewal_delay=timedelta(seconds=3))
old_token = sec_man.get_token()
sleep(5)
assert old_token != sec_man.get_token()
def test_calculate_public_key_fingerprint(test_util):
with open(os.path.join(test_util.get_data_dir(), 'test_rsa_key'), 'r') as key_file:
private_key = key_file.read()
sec_man = SecurityManager("testaccount", "snowman", private_key,
renewal_delay=timedelta(minutes=3))
public_key_fingerprint = sec_man.calculate_public_key_fingerprint(private_key)
assert public_key_fingerprint == 'SHA256:QKX8hnXHVAVXp7mLdCAF+vjU2A8RBuRSpgdRjPHhVWY='
def test_invalid_private_key():
sec_man = SecurityManager("testaccount", "snowman", 'invalid_private_key',
renewal_delay=timedelta(minutes=3))
with pytest.raises(IngestClientError) as client_error:
sec_man.get_token()
assert client_error.value.code == ERR_INVALID_PRIVATE_KEY
|
py | 1a3a8e5cca6c7842d3945d5f310322ba681820cf | """
Ethereum Virtual Machine (EVM) Keccak Instructions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Implementations of the EVM keccak instructions.
"""
from ethereum.base_types import U256, Uint
from ethereum.crypto.hash import keccak256
from ethereum.utils.numeric import ceil32
from ethereum.utils.safe_arithmetic import u256_safe_add, u256_safe_multiply
from ...vm.error import OutOfGasError
from .. import Evm
from ..gas import (
GAS_KECCAK256,
GAS_KECCAK256_WORD,
calculate_gas_extend_memory,
subtract_gas,
)
from ..memory import extend_memory, memory_read_bytes
from ..stack import pop, push
def keccak(evm: Evm) -> None:
"""
Pushes to the stack the Keccak-256 hash of a region of memory.
This also expands the memory, in case the memory is insufficient to
access the data's memory location.
Parameters
----------
evm :
The current EVM frame.
Raises
------
:py:class:`~ethereum.dao_fork.vm.error.StackUnderflowError`
If `len(stack)` is less than `2`.
"""
# Converting memory_start_index to Uint as memory_end_index can
# overflow U256.
memory_start_index = Uint(pop(evm.stack))
size = pop(evm.stack)
words = ceil32(Uint(size)) // 32
word_gas_cost = u256_safe_multiply(
GAS_KECCAK256_WORD,
words,
exception_type=OutOfGasError,
)
memory_extend_gas_cost = calculate_gas_extend_memory(
evm.memory, memory_start_index, size
)
total_gas_cost = u256_safe_add(
GAS_KECCAK256,
word_gas_cost,
memory_extend_gas_cost,
exception_type=OutOfGasError,
)
evm.gas_left = subtract_gas(evm.gas_left, total_gas_cost)
extend_memory(evm.memory, memory_start_index, size)
data = memory_read_bytes(evm.memory, memory_start_index, size)
hash = keccak256(data)
push(evm.stack, U256.from_be_bytes(hash))
evm.pc += 1
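# Worked example (illustrative note, not part of the original module): hashing a
# 64-byte region spans ceil32(64) // 32 = 2 words, so the charge is
# GAS_KECCAK256 + 2 * GAS_KECCAK256_WORD (30 + 2 * 6 = 42 gas under the mainnet
# schedule), plus whatever calculate_gas_extend_memory adds if the region
# extends past the currently allocated memory.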
|
py | 1a3a90aec473fd9351d07afde7eb90753e67a860 | from typing import Any, Dict, Optional, Union
from uuid import uuid4
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.crud.crud_user import user
from app.models.login_link import LoginLink
from app.schemas.login_link import LoginLinkCreate, LoginLinkUpdate
class CRUDLoginLink(CRUDBase[LoginLink, LoginLinkCreate, LoginLinkUpdate]):
def get_by_code(self, db: Session, *, code: str) -> Optional[LoginLink]:
return db.query(LoginLink).filter(LoginLink.code == code).first()
def create(self, db: Session, *, obj_in: LoginLinkCreate) -> LoginLink:
        # Generate the code in its stored (uppercased) form so the uniqueness check
        # below compares against what is actually persisted.
        code = str(uuid4())[:8].upper()
        link = self.get_by_code(db, code=code)
        while link:
            code = str(uuid4())[:8].upper()
            link = self.get_by_code(db, code=code)
        found_user = user.get(db, id=obj_in.user.id)
        assert found_user is not None
        db_obj = LoginLink(code=code, user_id=found_user.id)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj
def update(
self,
db: Session,
*,
db_obj: LoginLink,
obj_in: Union[LoginLinkUpdate, Dict[str, Any]]
) -> LoginLink:
if isinstance(obj_in, dict):
update_data = obj_in
else:
update_data = obj_in.dict(exclude_unset=True)
return super().update(db, db_obj=db_obj, obj_in=update_data)
def is_active(self, db_obj: LoginLink) -> bool:
return db_obj.active
def disable(self, db: Session, *, db_obj: LoginLink) -> bool:
setattr(db_obj, "active", False)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj.active
login_link = CRUDLoginLink(LoginLink)
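# Illustrative usage (hypothetical; assumes an open SQLAlchemy `db` session and a
# LoginLinkCreate payload that nests an existing user, per the schema used above):
#
#     link = login_link.create(db, obj_in=LoginLinkCreate(user=some_user))
#     assert login_link.get_by_code(db, code=link.code) is not None
#     login_link.disable(db, db_obj=link)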
|
py | 1a3a91e8df8dc7d1b5fc2e5c9fe11a6c638e9bc5 | import launch
import launch_ros.actions
def generate_launch_description():
param0 = launch.substitutions.LaunchConfiguration('param0', default="0")
param1 = launch.substitutions.LaunchConfiguration('param1', default="0")
param2 = launch.substitutions.LaunchConfiguration('param2', default="0")
param3 = launch.substitutions.LaunchConfiguration('param3', default="0")
argment0 = launch.actions.DeclareLaunchArgument("param0", default_value="1")
argment1 = launch.actions.DeclareLaunchArgument("param1", default_value="2")
argment2 = launch.actions.DeclareLaunchArgument("param2", default_value="3")
argment3 = launch.actions.DeclareLaunchArgument("param3", default_value="4")
param_node = launch_ros.actions.Node(
package="launch_launch", executable="param", output="screen",
parameters=[{
"param0": param0,
"param1": param1,
"param2": param2,
"param3": param3,
}]
)
return launch.LaunchDescription([
argment0,
argment1,
# argment2,
# argment3,
param_node,
]) |
py | 1a3a9227ae037a29fe1f0e2c600e1c69fee51e6d | import random
import torch
import sys
import torch.nn as nn
import torchaudio
import bz2
import pickle
import torchvision.transforms as transforms
import cv2
import math
import os
import numpy as np
from torch.utils.data import Dataset, DataLoader
from logging import Logger
from torchvision.transforms.transforms import Lambda
try:
from datasets import MelSpectrogram, align_and_crop_face
except:
sys.path.extend(['..'])
from spectograms import MelSpectrogram
from face_utils import align_and_crop_face
def av_speech_collate_fn_pad(batch):
lower_faces, speeches, melspecs, face_crop = zip(*batch)
max_frames_in_batch = max([l.shape[0] for l in lower_faces])
max_samples_in_batch = max([s.shape[1] for s in speeches])
max_melspec_samples_in_batch = max([m.shape[1] for m in melspecs])
padded_lower_faces = torch.zeros(len(lower_faces), max_frames_in_batch, *tuple(lower_faces[0].shape[1:]))
padded_speeches = torch.zeros(len(speeches), 1, max_samples_in_batch)
padded_melspecs = torch.zeros(len(melspecs), melspecs[0].shape[0], max_melspec_samples_in_batch)
mel_gate_padded = torch.zeros(len(melspecs), max_melspec_samples_in_batch)
video_lengths = list()
audio_lengths = list()
melspec_lengths = list()
for idx, (lower_face, speech, melspec) in enumerate(zip(lower_faces, speeches, melspecs)):
T = lower_face.shape[0]
video_lengths.append(T)
padded_lower_faces[idx, :T, :, :, :] = lower_face
S = speech.shape[-1]
audio_lengths.append(S)
padded_speeches[idx, :, :S] = speech
M = melspec.shape[-1]
melspec_lengths.append(M)
padded_melspecs[idx, :, :M] = melspec
mel_gate_padded[idx, M-1:] = 1.0
face_crop_tensor = torch.cat([f.unsqueeze(0) for f in face_crop], dim=0)
padded_lower_faces = padded_lower_faces.permute(0, 2, 1, 3, 4)
padded_speeches = padded_speeches.squeeze(1)
video_lengths = torch.tensor(video_lengths)
audio_lengths = torch.tensor(audio_lengths)
melspec_lengths = torch.tensor(melspec_lengths)
return (padded_lower_faces, video_lengths), (padded_speeches, audio_lengths), (padded_melspecs, melspec_lengths, mel_gate_padded), face_crop_tensor
def x_round(x):
return math.floor(x * 4) / 4
def loadframes(filename):
with bz2.BZ2File(filename, 'r') as f:
data = pickle.load(f)
return [cv2.cvtColor(cv2.imdecode(imn, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB) for imn in data]
class WILD(Dataset):
def __init__(self, rootpth, face_size=(96, 96), mode='train', demo=False, duration=1, face_augmentation=None, *args, **kwargs):
super(WILD, self).__init__(*args, **kwargs)
assert mode in ('train', 'test')
self.rootpth = rootpth
self.face_recog_resize = transforms.Compose([
transforms.Resize((160, 160)),
transforms.Lambda(lambda im: (im.float() - 127.5) / 128.0),
])
self.face_size = face_size
self.face_resize = transforms.Compose([
transforms.Resize(face_size),
transforms.Lambda(lambda im: im.float() / 255.0),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
if face_augmentation is None:
self.face_augmentation = nn.Identity()
else:
self.face_augmentation = face_augmentation
self.mode = mode
self.demo = demo
self.items = dict()
index = 0
for root, _, filenames in os.walk(self.rootpth):
for filename in filenames:
if filename.endswith(('.mp4', '.mov', '.mpg')):
if filename.endswith('.mov'):
format = '.mov'
elif filename.endswith('.mpg'):
format = '.mpg'
elif filename.endswith('.mp4'):
format = '.mp4'
video_path = os.path.join(root, filename)
audio_path = os.path.join(root, filename.replace(format, '.wav'))
frame_info_path = os.path.join(root, filename.replace(format, '.json'))
spec_path = os.path.join(root, filename.replace(format, '.npz'))
face_path = video_path[:-4] + '_face.npz'
if os.path.isfile(audio_path) and os.path.isfile(frame_info_path) and os.path.isfile(spec_path):
self.items[index] = [video_path, audio_path, spec_path, face_path, frame_info_path]
index += 1
self.len = len(self.items)
self.duration = duration
print(f'Size of {type(self).__name__}: {self.len}')
random.shuffle(self.items)
def __len__(self):
return self.len
def __getitem__(self, idx):
video_path, audio_path, spec_path, face_path, frame_info_path = self.items[idx]
speech, sampling_rate = torchaudio.load(audio_path, normalize=True, format='wav')
melspec = torch.from_numpy(np.load(spec_path)['data'])
melspec = melspec.squeeze(0)
faces = [torch.from_numpy(face).permute(2, 0, 1) for face in loadframes(face_path)]
faces = self.face_augmentation(faces)
face_indices = (torch.rand(2) * len(faces)).int()
face_crop = torch.cat([self.face_recog_resize(faces[f_id]).unsqueeze(0) for f_id in face_indices], dim=0)
lower_faces = list()
for face in faces:
C, H, W = face.shape
lower_face = face[:, H//2:, :]
lower_faces.append(self.face_resize(lower_face).unsqueeze(0))
lower_faces = torch.cat(lower_faces, dim=0)
if self.demo:
return lower_faces, speech, melspec, face_crop, audio_path
return lower_faces, speech, melspec, face_crop
def main():
ds = WILD('/media/ssd/christen-rnd/Experiments/Lip2Speech/Datasets/WILD', mode='test', duration=1)
dl = DataLoader(ds,
batch_size=8,
shuffle=False,
num_workers=0,
pin_memory=False,
drop_last=True,
collate_fn=av_speech_collate_fn_pad)
from IPython.display import Audio, display
for bdx, batch in enumerate(dl):
(video, video_lengths), (speeches, audio_lengths), (melspecs, melspec_lengths, mel_gates), faces = batch
frames = video
print('video.shape', video.shape)
print('faces.shape ', faces.shape)
print('frames[0][0].shape ', frames[0][0].shape)
print('melspecs.shape ', melspecs.shape)
# print('speech.shape ', speech.shape)
# continue
B, C, T, H, W = video.shape
for k in range(B):
face = faces[k, 0, :, :, :].permute(1, 2, 0).numpy()
face = ((face * 128.0) + 127.5).astype(dtype=np.uint8)
cv2.imshow('face', face[:, :, :: -1])
for i in range(T):
image = frames[k, :, i, :, :].permute(1, 2, 0).numpy()
image = image * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
print(k, i, image.shape)
cv2.imshow('image', image[:, :, :: -1])
if ord('q') == cv2.waitKey(16):
exit()
# sample_rate = 16000
# effects = [
# ["lowpass", "-1", "700"], # apply single-pole lowpass filter
# # ["speed", "0.8"], # reduce the speed
# # This only changes sample rate, so it is necessary to
# # add `rate` effect with original sample rate after this.
# # ["rate", f"{sample_rate}"],
# # ["reverb", "-w"], # Reverbration gives some dramatic feeling
# ]
# aug_speech, sample_rate2 = torchaudio.sox_effects.apply_effects_tensor(
# speech[0], sample_rate, effects)
# torchaudio.save('test.wav', speech[0], 16000)
# torchaudio.save('aug_speech.wav', aug_speech, 16000)
# plot_waveform(waveform, sample_rate)
# plot_specgram(waveform, sample_rate)
# play_audio(waveform, sample_rate)
# images = images.numpy()
# lb = lb.numpy()
# for image, label in zip(images, lb):
# label = ds.vis_label(label)
# print(torch.unique(label))
# print(img.shape, label.shape)
if __name__ == "__main__":
main()
|
py | 1a3a9245c62214742d419b859a3066c8118a8b49 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-31 14:34
from __future__ import unicode_literals
import company.helpers
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0007_auto_20161030_1212'),
]
operations = [
migrations.AlterField(
model_name='company',
name='logo',
field=models.FileField(blank=True, null=True, upload_to=company.helpers.PathAndRename('/company_logos')),
),
]
|
py | 1a3a92c684965a8bad87e65bbdd4bc2cc2bf1b02 | import time
import ccxt
import pandas as pd
from unittest import TestCase
from crypto_data_fetcher.bybit import BybitFetcher
class TestBybitLinear(TestCase):
def test_fetch_ohlcv_initial(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
)
self.assertEqual(df['op'].iloc[0], 6500)
self.assertEqual(df['hi'].iloc[0], 6745.5)
self.assertEqual(df['lo'].iloc[0], 6500)
self.assertEqual(df['cl'].iloc[0], 6698.5)
self.assertEqual(df['volume'].iloc[0], 1809.520)
self.assertEqual(df.index[-1].timestamp() - df.index[0].timestamp(), (df.shape[0] - 1) * 24 * 60 * 60)
        # Confirm that no unconfirmed (still-open) candle is included
self.assertLess(df.index.max(), pd.to_datetime(time.time() // (24 * 60 * 60) * (24 * 60 * 60), unit='s', utc=True))
def test_fetch_ohlcv_start_time(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
start_time=pd.to_datetime('2021-01-01 00:00:00Z', utc=True),
)
self.assertEqual(df.index[0], pd.to_datetime('2021-01-01 00:00:00Z', utc=True))
def test_fetch_ohlcv_incremental(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
)
last_row = df.iloc[-1]
df = df.iloc[:-1]
df = fetcher.fetch_ohlcv(
df=df,
market='BTCUSDT',
interval_sec=24 * 60 * 60,
)
self.assertTrue(df.iloc[-1].equals(last_row))
self.assertEqual(df.index[-1].timestamp() - df.index[0].timestamp(), (df.shape[0] - 1) * 24 * 60 * 60)
def test_fetch_ohlcv_incremental_empty(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
)
before_count = df.shape[0]
df = fetcher.fetch_ohlcv(
df=df,
market='BTCUSDT',
interval_sec=24 * 60 * 60,
)
self.assertEqual(df.shape[0], before_count)
def test_fetch_ohlcv_mark(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
price_type='mark',
)
print(df)
self.assertEqual(df['op'].iloc[0], 6718.21)
self.assertEqual(df['hi'].iloc[0], 6955.88)
self.assertEqual(df['lo'].iloc[0], 6451.31)
self.assertEqual(df['cl'].iloc[0], 6677.77)
self.assertEqual(df.index[-1].timestamp() - df.index[0].timestamp(), (df.shape[0] - 1) * 24 * 60 * 60)
def test_fetch_ohlcv_index(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
price_type='index',
)
print(df)
self.assertEqual(df['op'].iloc[0], 6670.01)
self.assertEqual(df['hi'].iloc[0], 6956.84)
self.assertEqual(df['lo'].iloc[0], 6450.96)
self.assertEqual(df['cl'].iloc[0], 6677.77)
self.assertEqual(df.index[-1].timestamp() - df.index[0].timestamp(), (df.shape[0] - 1) * 24 * 60 * 60)
def test_fetch_ohlcv_premium_index(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
price_type='premium_index',
)
print(df)
self.assertEqual(df['op'].iloc[0], 0.000100)
self.assertEqual(df['hi'].iloc[0], 0.000360)
self.assertEqual(df['lo'].iloc[0], -0.000865)
self.assertEqual(df['cl'].iloc[0], -0.000097)
self.assertEqual(df.index[-1].timestamp() - df.index[0].timestamp(), (df.shape[0] - 1) * 24 * 60 * 60)
def test_calc_fr_from_premium_index(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df_premium_index = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
price_type='premium_index',
)
df = fetcher.calc_fr_from_premium_index(df_premium_index=df_premium_index)
print(df)
self.assertEqual(df.index[-1].timestamp() - df.index[0].timestamp(), (df.shape[0] - 1) * 24 * 60 * 60)
def test_fetch_ohlcv_initial_minute(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=60,
start_time=time.time() - 60 * 60
)
self.assertGreater(df.shape[0], 1)
self.assertLess(df.shape[0], 61)
def test_fetch_ohlcv_out_of_range(self):
bybit = ccxt.bybit()
fetcher = BybitFetcher(ccxt_client=bybit)
df = fetcher.fetch_ohlcv(
market='BTCUSDT',
interval_sec=24 * 60 * 60,
start_time=time.time() + 60 * 60
)
self.assertIsNone(df)
|
py | 1a3a93059cf9b33e5a3031b91328dbf53de6650c | from eot import Image
from eot import Camera
mCamera = Camera()
lenaColor = Image ("/mnt/sdcard/lena.png", Image.CCV_IO_RGB_COLOR)
lenaColor.show()
mSnapshot = mCamera.snapshot()
mSnapshot.show()
lenaGray = Image ("/mnt/sdcard/lena.png", Image.CCV_IO_GRAY)
lenaGray.show()
|
py | 1a3a9312e6522a875bbdde32e0d4bb7357bc63dc | from distutils import log
from weaverbird.backends.sql_translator.steps.utils.query_transformation import (
build_selection_query,
)
from weaverbird.backends.sql_translator.types import (
SQLPipelineTranslator,
SQLQuery,
SQLQueryDescriber,
SQLQueryExecutor,
SQLQueryRetriever,
)
from weaverbird.pipeline.steps import ReplaceStep
def translate_replace(
step: ReplaceStep,
query: SQLQuery,
index: int,
sql_query_retriever: SQLQueryRetriever = None,
sql_query_describer: SQLQueryDescriber = None,
sql_query_executor: SQLQueryExecutor = None,
sql_translate_pipeline: SQLPipelineTranslator = None,
subcall_from_other_pipeline_count: int = None,
) -> SQLQuery:
query_name = f'REPLACE_STEP_{index}'
log.debug(
'############################################################'
f'query_name: {query_name}\n'
'------------------------------------------------------------'
f'step.name: {step.name}\n'
f'step.search_column: {step.search_column}\n'
f'step.to_replace: {step.to_replace}\n'
f'query.transformed_query: {query.transformed_query}\n'
f'query.metadata_manager.query_metadata: {query.metadata_manager.retrieve_query_metadata()}\n'
)
def _clean_str(value):
if not isinstance(value, float) and not isinstance(value, int):
value = value.strip('"').strip("'").replace('"', "\'").replace("'", "\\'")
return f'\'{value}\''
return value
compiled_query: str = 'CASE '
for element_to_replace in step.to_replace:
from_value, to_value = element_to_replace
compiled_query += (
f'WHEN {step.search_column}={_clean_str(from_value)} THEN {_clean_str(to_value)} '
)
compiled_query += f'ELSE {step.search_column} END AS {step.search_column}'
completed_fields = query.metadata_manager.retrieve_query_metadata_columns_as_str(
columns_filter=[step.search_column]
)
new_query = SQLQuery(
query_name=query_name,
transformed_query=f"""{query.transformed_query}, {query_name} AS"""
f""" (SELECT {completed_fields},"""
f""" {compiled_query}"""
f""" FROM {query.query_name})""",
selection_query=build_selection_query(
query.metadata_manager.retrieve_query_metadata_columns(), query_name
),
metadata_manager=query.metadata_manager,
)
log.debug(
'------------------------------------------------------------'
f'SQLquery: {new_query.transformed_query}'
'############################################################'
)
return new_query
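# Illustrative output (not part of the original module): for a hypothetical step that
# replaces "btc" -> "Bitcoin" and "eth" -> "Ethereum" in column CURRENCY, the fragment
# appended to the transformed query would look roughly like:
#
#     REPLACE_STEP_1 AS (SELECT <other columns>,
#         CASE WHEN CURRENCY='btc' THEN 'Bitcoin'
#              WHEN CURRENCY='eth' THEN 'Ethereum'
#              ELSE CURRENCY END AS CURRENCY
#         FROM <previous query name>)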
|
py | 1a3a9397977fe99e5281d74d3101809aceb67fa8 | """ This module implements the A* path planning algorithm.
Two variants are included: grid-based, and mesh-based.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Mario Cobos Maestre"
__authors__ = ["Mario Cobos Maestre"]
__contact__ = "[email protected]"
__copyright__ = "Copyright 2019, UAH"
__credits__ = ["Mario Cobos Maestre"]
__date__ = "2019/03/29"
__deprecated__ = False
__email__ = "[email protected]"
__license__ = "GPLv3"
__maintainer__ = "Mario Cobos Maestre"
__status__ = "Development"
__version__ = "0.0.1"
"""
Code modified from https://gist.github.com/jamiees2/5531924
"""
import path_planning as pp
def children(point,grid):
"""
Calculates the children of a given node over a grid.
Inputs:
- point: node for which to calculate children.
- grid: grid over which to calculate children.
Outputs:
- list of children for the given node.
"""
x,y = point.grid_point
if x > 0 and x < len(grid) - 1:
if y > 0 and y < len(grid[0]) - 1:
links = [grid[d[0]][d[1]] for d in\
[(x-1, y),(x,y - 1),(x,y + 1),(x+1,y),\
(x-1, y-1), (x-1, y+1), (x+1, y-1),\
(x+1, y+1)]]
elif y > 0:
links = [grid[d[0]][d[1]] for d in\
[(x-1, y),(x,y - 1),(x+1,y),\
(x-1, y-1), (x+1, y-1)]]
else:
links = [grid[d[0]][d[1]] for d in\
[(x-1, y),(x,y + 1),(x+1,y),\
(x-1, y+1), (x+1, y+1)]]
elif x > 0:
if y > 0 and y < len(grid[0]) - 1:
links = [grid[d[0]][d[1]] for d in\
[(x-1, y),(x,y - 1),(x,y + 1),\
(x-1, y-1), (x-1, y+1)]]
elif y > 0:
links = [grid[d[0]][d[1]] for d in\
[(x-1, y),(x,y - 1),(x-1, y-1)]]
else:
links = [grid[d[0]][d[1]] for d in\
[(x-1, y), (x,y + 1), (x-1, y+1)]]
else:
if y > 0 and y < len(grid[0]) - 1:
links = [grid[d[0]][d[1]] for d in\
[(x+1, y),(x,y - 1),(x,y + 1),\
(x+1, y-1), (x+1, y+1)]]
elif y > 0:
links = [grid[d[0]][d[1]] for d in\
[(x+1, y),(x,y - 1),(x+1, y-1)]]
else:
links = [grid[d[0]][d[1]] for d in\
[(x+1, y), (x,y + 1), (x+1, y+1)]]
return [link for link in links if link.value != 9]
def aStar(start, goal, grid, heur='naive'):
"""
Executes the A* path planning algorithm over a given grid.
Inputs:
- start: node from which to start.
- goal: node to which it is desired to arrive.
- grid: grid over which to execute the algorithm
- heur: heuristic function to use for the algorithm,
expressed as a string. Results will vary depending on
        it. Must be implemented separately.
Outputs:
- ordered list of nodes representing the shortest path found
from start to goal.
"""
#The open and closed sets
openset = set()
closedset = set()
#Current point is the starting point
current = start
#Add the starting point to the open set
openset.add(current)
#While the open set is not empty
while openset:
#Find the item in the open set with the lowest G + H score
current = min(openset, key=lambda o:o.G + o.H)
pp.expanded_nodes += 1
#If it is the item we want, retrace the path and return it
if current == goal:
path = []
while current.parent:
path.append(current)
current = current.parent
path.append(current)
return path[::-1]
#Remove the item from the open set
openset.remove(current)
#Add it to the closed set
closedset.add(current)
#Loop through the node's children/siblings
for node in children(current,grid):
#If it is already in the closed set, skip it
if node in closedset:
continue
#Otherwise if it is already in the open set
if node in openset:
#Check if we beat the G score
new_g = current.G + current.move_cost(node)
if node.G > new_g:
#If so, update the node to have a new parent
node.G = new_g
node.parent = current
else:
#If it isn't in the open set, calculate the G and H score for the node
node.G = current.G + current.move_cost(node)
node.H = pp.heuristic[heur](node, goal)
#Set the parent to our current item
node.parent = current
#Add it to the set
openset.add(node)
#Throw an exception if there is no path
raise ValueError('No Path Found')
pp.register_search_method('A*', aStar)
def aStar_mesh(start, goal, grid, heur='naive'):
"""
Executes the A* path planning algorithm over a given nav mesh.
Inputs:
- start: node from which to start.
- goal: node to which it is desired to arrive.
- grid: mesh over which to execute the algorithm
- heur: heuristic function to use for the algorithm,
expressed as a string. Results will vary depending on
        it. Must be implemented separately.
Outputs:
- ordered list of nodes representing the shortest path found
from start to goal.
"""
#The open and closed sets
openset = set()
closedset = set()
#Current point is the starting point
current = start
#Add the starting point to the open set
openset.add(current)
#While the open set is not empty
while openset:
#Find the item in the open set with the lowest G + H score
current = min(openset, key=lambda o:o.G + o.H)
pp.expanded_nodes += 1
#If it is the item we want, retrace the path and return it
if current == goal:
path = []
while current.parent:
path.append(current)
current = current.parent
path.append(current)
return path[::-1]
#Remove the item from the open set
openset.remove(current)
#Add it to the closed set
closedset.add(current)
#Loop through the node's children/siblings
for node in current.neighbors.values():
#If it is already in the closed set, skip it
if node in closedset:
continue
#Otherwise if it is already in the open set
if node in openset:
#Check if we beat the G score
new_g = current.G + current.move_cost(node)
if node.G > new_g:
#If so, update the node to have a new parent
node.G = new_g
node.parent = current
else:
#If it isn't in the open set, calculate the G and H score for the node
node.G = current.G + current.move_cost(node)
node.H = pp.heuristic[heur](node, goal)
#Set the parent to our current item
node.parent = current
#Add it to the set
openset.add(node)
#Throw an exception if there is no path
raise ValueError('No Path Found')
pp.register_search_method('A* mesh', aStar_mesh)
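# A minimal smoke-test sketch (illustrative only). It assumes pp.heuristic already
# exposes a 'naive' entry and mimics the node attributes used above (grid_point,
# value, G, H, parent, move_cost); the real grid/node classes live in the
# path_planning module.
if __name__ == '__main__':
    class _Cell:
        def __init__(self, x, y, value=0):
            self.grid_point = (x, y)
            self.value = value
            self.G = 0
            self.H = 0
            self.parent = None
        def move_cost(self, other):
            return 1
    demo_grid = [[_Cell(x, y) for y in range(3)] for x in range(3)]
    demo_path = aStar(demo_grid[0][0], demo_grid[2][2], demo_grid)
    print([cell.grid_point for cell in demo_path])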
|
py | 1a3a94299f5a0a6c2e4d762a2b40b93022fe8249 | from route53.xml_parsers.common_change_info import parse_change_info
def delete_health_check_by_id_parser(root, connection):
"""
Parses the API responses for the
:py:meth:`route53.connection.Route53Connection.delete_health_check_by_id` method.
:param lxml.etree._Element root: The root node of the etree parsed
response from the API.
:param Route53Connection connection: The connection instance used to
query the API.
:rtype: dict
:returns: Details about the deletion.
"""
return root
# e_change_info = root.find('./{*}ChangeInfo')
#
# return parse_change_info(e_change_info)
|
py | 1a3a9541d51a2f9df3cc8103945036067ca530f5 | class Solution(object):
def cmpSort(self, versions):
def compare(x, y):
if x > y:
return 1
elif x < y:
return -1
else:
return 0
return sorted(versions, cmp=lambda x, y: compare(x, y))
test = Solution()
print test.cmpSort(["0.0.2a", "0.2.1", "0.0.1", "0.0.2b"]) |
py | 1a3a95b9a299cd62ed4bf8549b4072ba63ef07de | # 深海棲艦の装備一覧のURL
from typing import Dict, Tuple, List
import lxml.html
import requests
from model.weapon import Weapon
from model.weapon_type import WeaponType
ENEMY_WEAPON_DATA_URL = 'https://kancolle.fandom.com/wiki/List_of_equipment_used_by_the_enemy'
# Mapping from equipment-type text to WeaponType
WEAPON_TYPE_DICT: Dict[str, WeaponType] = {
'Small Caliber Main Gun': WeaponType.SCMG,
'Medium Caliber Main Gun': WeaponType.MCMG,
'Large Caliber Main Gun': WeaponType.LCMG,
'Secondary Gun': WeaponType.SG,
'Torpedo': WeaponType.TORPEDO,
'Midget Submarine': WeaponType.MS,
'Carrier-based Fighter Aircraft': WeaponType.CFA,
'Carrier-based Dive Bomber': WeaponType.CDB,
'Seaplane Bomber': WeaponType.SB,
'Carrier-based Torpedo Bomber': WeaponType.CTB,
'Reconnaissance Seaplane': WeaponType.MS,
'Small Radar': WeaponType.SR,
'Large Radar': WeaponType.LR,
'Engine Improvement': WeaponType.EI,
'Anti-Aircraft Shell': WeaponType.AAS,
'Armor Piercing Shell': WeaponType.APS,
'Anti-Aircraft Gun': WeaponType.AAG,
'Depth Charge': WeaponType.DC,
'Sonar': WeaponType.SONAR,
'Searchlight': WeaponType.S_LIGHT,
}
def read_weapon_name(td_tag: lxml.html.HtmlElement) -> str:
"""装備名を算出する
Parameters
----------
td_tag: lxml.html.HtmlElement
TDタグの中身
Returns
-------
装備名
"""
link_name: str = td_tag.cssselect('a')[0].text
name = td_tag.text_content().replace(link_name, '', 1)
return name.strip()
def read_weapon_parameters(td_tag: lxml.html.HtmlElement) -> Dict[str, int]:
"""装備のパラメーターを読み取る
Parameters
----------
td_tag: lxml.html.HtmlElement
TDタグの中身
Returns
-------
装備のパラメーター
"""
    # Read the icon information and the value information.
    # The values are split by exploiting the fact that every element is delimited
    # by "+", "-", or one of a handful of specific range strings.
icon_list = [x.get('title', '') for x in td_tag.cssselect('a')]
value_list = td_tag.text_content().replace('+', '\n+').replace('-', '\n+') \
.replace('Very Long', '\nVL').replace('Short', '\nShort')\
.replace('Medium', '\nMedium').replace('Long', '\nLong') \
.replace('VL', 'Very Long').split('\n')
value_list = [x for x in value_list if x != '']
    # Store the parsed information into a dict
parameters: Dict[str, int] = {}
for icon, value in zip(icon_list, value_list):
if icon == 'Range':
continue
parameters[icon] = int(value)
return parameters
def get_enemy_weapon_list() -> Tuple[List[Weapon], Dict[str, int]]:
"""深海棲艦の装備の一覧を取得する
Returns
-------
weapon_list[index] = 装備情報
weapon_url_dict[装備URL] = 装備ID
"""
    # Fetch the URL and parse the HTML
response = requests.get(ENEMY_WEAPON_DATA_URL)
dom: lxml.html.HtmlElement = lxml.html.fromstring(response.text)
    # Read each table row and store it as equipment data in weapon_list
weapon_list: List[Weapon] = [Weapon(
id=0,
name='',
type=WeaponType.NONE,
attack=0,
torpedo=0,
bomber=0,
anti_air=0,
anti_sub=0)]
weapon_url_dict: Dict[str, int] = {}
for tr_tag in dom.cssselect('table.wikitable tr'):
        # Since this is a table, each row can be split into columns
tr_tag: lxml.html.HtmlElement = tr_tag
td_tag_list: List[lxml.html.HtmlElement] = tr_tag.cssselect('td')
if len(td_tag_list) < 6:
continue
        # Read the equipment ID
weapon_id = int(td_tag_list[0].text)
        # Read the equipment name
weapon_name = read_weapon_name(td_tag_list[2])
        # Read the equipment URL
weapon_url = td_tag_list[2].cssselect('a')[0].get('href', '')
weapon_url_dict[weapon_url] = weapon_id
        # Read the equipment type
raw_weapon_type = td_tag_list[3].text.strip()
weapon_type = WEAPON_TYPE_DICT.get(raw_weapon_type, WeaponType.NONE)
        # Read the remaining parameters
parameters = read_weapon_parameters(td_tag_list[4])
weapon_attack = parameters.get('Firepower', 0)
weapon_torpedo = parameters.get('Torpedo', 0)
weapon_bomber = parameters.get('Bombing', 0)
weapon_antiair = parameters.get('AA', 0)
weapon_anti_sub = parameters.get('ASW', 0)
        # Build the weapon record and append it
weapon = Weapon(
id=weapon_id,
name=weapon_name,
type=weapon_type,
attack=weapon_attack,
torpedo=weapon_torpedo,
bomber=weapon_bomber,
anti_air=weapon_antiair,
anti_sub=weapon_anti_sub)
weapon_list.append(weapon)
return weapon_list, weapon_url_dict
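# Illustrative usage (performs a live HTTP request to the wiki when run directly):
if __name__ == '__main__':
    weapons, url_index = get_enemy_weapon_list()
    print(f'Scraped {len(weapons) - 1} weapons (index 0 is a placeholder entry)')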
|
py | 1a3a97d36179d7cd6664b3d9cc8411f07074434a | from django.db import models
from django.utils.translation import ugettext_lazy as _
from foundation_tenant.models.base.abstract_thing import AbstractThing
class TagManager(models.Manager):
def delete_all(self):
items = Tag.objects.all()
for item in items.all():
item.delete()
class Tag(models.Model):
class Meta:
app_label = 'foundation_tenant'
db_table = 'smeg_tags'
verbose_name = _('Tag')
verbose_name_plural = _('Tags')
objects = TagManager()
name = models.CharField(
_("Name"),
max_length=127,
help_text=_('The name of the Tag item.'),
unique=True,
)
is_program = models.BooleanField(
_("Is program"),
help_text=_('Indicates if this Tag is to be used for programs.'),
default=False,
blank=True,
)
# DEVELOPERS NOTES:
# - These fields should be set in a ETL.
entrepreneurs_count = models.PositiveSmallIntegerField(
_("entrepreneurs_count"),
        help_text=_('Keep track of how many entrepreneurs are assigned to this tag.'),
default=0,
null=True
)
entrepreneurs_average_stage_num = models.FloatField(
_("Entrepreneurs Average Stage"),
        help_text=_('Keep track of the average stage number that most entrepreneurs belonging to this tag fall under.'),
default=0,
null=True
)
def __str__(self):
return str(self.name)
|
py | 1a3a9bb2d79abd74fad71d32a96239294cd0eef8 | """
Run all examples and then compare to gallery versions, if available.
"""
from clawpack.clawutil import regression_tests, make_all
import os
env = os.environ
env['GIT_STATUS'] = 'True'
env['FFLAGS'] = '-O2 -fopenmp'
env['OMP_NUM_THREADS'] = '3'
make_all.make_all(make_clean_first=True, env=env)
print "\n-----------------------------------------------------------\n"
all_ok = regression_tests.test_subdirs()
if all_ok:
print "===> All tests pass"
else:
print "===> Some test(s) failed"
|
py | 1a3a9c0edc79dcb4cf348361271a2622dfe73a7a | from fasteve import Fasteve, BaseSchema, Resource, ObjectID, SubResource
from fasteve.utils import Unique, DataRelation
from typing import Optional, List, NewType, Union, Any
from pydantic import EmailStr, SecretStr, Field, BaseModel
from datetime import datetime
from time import sleep
class Data(BaseSchema):
date: datetime # datetime.date not supported by mongo
confirmed: int
deaths: int
recovered: int
country_id: ObjectID
data = Resource(
schema=Data, resource_methods=["GET", "POST", "DELETE"], item_name="datum"
)
class Leader(BaseSchema):
name: str
age: int
leader = Resource(schema=Leader, resource_methods=["GET", "POST", "DELETE"])
class Countries(BaseSchema):
name: Unique[str]
# leader: DataRelation = leader
data_sub_resource = SubResource(resource=data, id_field="country_id", name="data")
countries = Resource(
schema=Countries,
resource_methods=["GET", "POST", "DELETE"],
item_name="country",
alt_id="name",
sub_resources=[data_sub_resource], # GET /countries/<country_id|name>/data
)
resources = [countries, leader, data]
app = Fasteve(resources=resources, cors_origins=["*"])
@app.repeat_every(seconds=60 * 60 * 24) # every day
async def count_countries_in_db() -> None:
data, count = await app.data.find(countries)
print(f"There are {count} countries in the database!")
@app.get("/custom_endpoint")
def custom_endpoint():
return {"custom": "endpoint"}
|
gyp | 1a3a9c8241cbc2a97397ba019559ac416915ab19 | {
'conditions': [
['OS=="win"', {
'variables': {
'GTK_Root%': 'C:/GTK', # Set the location of GTK all-in-one bundle
'with_jpeg%': 'false',
'with_gif%': 'false',
'with_pango%': 'false',
'with_freetype%': 'false'
}
}, { # 'OS!="win"'
'variables': {
'with_jpeg%': '<!(./util/has_lib.sh jpeg)',
'with_gif%': '<!(./util/has_lib.sh gif)',
# disable pango as it causes issues with freetype.
'with_pango%': 'false',
'with_freetype%': '<!(./util/has_cairo_freetype.sh)'
}
}]
],
'targets': [
{
'target_name': 'canvas',
'include_dirs': ["<!(node -e \"require('nan')\")"],
'sources': [
'src/Canvas.cc',
'src/CanvasGradient.cc',
'src/CanvasPattern.cc',
'src/CanvasRenderingContext2d.cc',
'src/color.cc',
'src/Image.cc',
'src/ImageData.cc',
'src/init.cc',
'src/PixelArray.cc'
],
'conditions': [
['OS=="win"', {
'libraries': [
'-l<(GTK_Root)/lib/cairo.lib',
'-l<(GTK_Root)/lib/libpng.lib'
],
'include_dirs': [
'<(GTK_Root)/include',
],
'defines': [
'snprintf=_snprintf',
'_USE_MATH_DEFINES' # for M_PI
]
}, { # 'OS!="win"'
'libraries': [
'<!@(pkg-config pixman-1 --libs)',
'<!@(pkg-config cairo --libs)',
'<!@(pkg-config libpng --libs)'
],
'include_dirs': [
'<!@(pkg-config cairo --cflags-only-I | sed s/-I//g)',
'<!@(pkg-config libpng --cflags-only-I | sed s/-I//g)'
]
}],
['with_freetype=="true"', {
'defines': [
'HAVE_FREETYPE'
],
'sources': [
'src/FontFace.cc'
],
'conditions': [
['OS=="win"', {
# No support for windows right now.
}, { # 'OS!="win"'
'include_dirs': [ # tried to pass through cflags but failed.
# Need to include the header files of cairo AND freetype.
# Looking up the includes of cairo does both.
'<!@(pkg-config cairo --cflags-only-I | sed s/-I//g)'
]
}]
]
}],
['with_pango=="true"', {
'defines': [
'HAVE_PANGO'
],
'conditions': [
['OS=="win"', {
'libraries': [
'-l<(GTK_Root)/lib/pangocairo.lib'
]
}, { # 'OS!="win"'
'include_dirs': [ # tried to pass through cflags but failed
'<!@(pkg-config pangocairo --cflags-only-I | sed s/-I//g)'
],
'libraries': [
'<!@(pkg-config pangocairo --libs)'
]
}]
]
}],
['with_jpeg=="true"', {
'defines': [
'HAVE_JPEG'
],
'conditions': [
['OS=="win"', {
'libraries': [
'-l<(GTK_Root)/lib/jpeg.lib'
]
}, {
'libraries': [
'-ljpeg'
]
}]
]
}],
['with_gif=="true"', {
'defines': [
'HAVE_GIF'
],
'conditions': [
['OS=="win"', {
'libraries': [
'-l<(GTK_Root)/lib/gif.lib'
]
}, {
'libraries': [
'-lgif'
]
}]
]
}]
]
}
]
}
|
py | 1a3a9da43172272499da1cf4f4ea690233e9436a | import json
from os import urandom
import urllib
import urlparse
import flask
import requests
from requests_oauthlib import OAuth1 as OAuth1Manager
from oauthlib.oauth1.rfc5849 import SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
from oauthlib.oauth2.draft25 import tokens
from werkzeug.urls import url_decode
from foauth import OAuthError
BEARER = 'BEARER'
BEARER_HEADER = 'HEADER'
BEARER_BODY = 'BODY'
BEARER_URI = 'URI'
BEARER_TYPES = (BEARER_HEADER, BEARER_BODY, BEARER_URI)
class Bearer(object):
def __init__(self, token, bearer_type=BEARER_HEADER):
self.token = token
if bearer_type in BEARER_TYPES or callable(bearer_type):
self.bearer_type = bearer_type
else:
raise ValueError('Unknown bearer type %s' % bearer_type)
def __call__(self, r):
if self.bearer_type == BEARER_HEADER:
r.headers = tokens.prepare_bearer_headers(self.token, r.headers)
elif self.bearer_type == BEARER_BODY:
r.data = tokens.prepare_bearer_body(self.token, r.data)
elif self.bearer_type == BEARER_URI:
r.url = tokens.prepare_bearer_uri(self.token, r.url)
elif callable(self.bearer_type):
r = self.bearer_type(self.token, r)
return r
class OAuthMeta(type):
def __init__(cls, name, bases, attrs):
if 'alias' not in attrs:
cls.alias = cls.__name__.lower()
if 'api_domain' in attrs and 'api_domains' not in attrs:
cls.api_domains = [cls.api_domain]
if 'provider_url' in attrs and 'favicon_url' not in attrs:
# Use a favicon service when no favicon is supplied
primary = 'https://getfavicon.appspot.com/%s' % cls.provider_url
domain = urlparse.urlparse(cls.provider_url).netloc
backup = 'https://www.google.com/s2/favicons?domain=%s' % domain
cls.favicon_url = '%s?defaulticon=%s' % (primary, urllib.quote(backup))
if 'name' not in attrs:
cls.name = cls.__name__
class OAuth(object):
__metaclass__ = OAuthMeta
https = True
verify = True
signature_method = SIGNATURE_HMAC
signature_type = SIGNATURE_TYPE_AUTH_HEADER
permissions_widget = 'checkbox'
description = ''
disclaimer = ''
def __init__(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
def get_request_token_url(self):
return self.request_token_url
def get_access_token_url(self):
return self.access_token_url
def get_scope_string(self, scopes):
return ''
def get_authorize_url(self, redirect_uri, scopes):
params = self.get_authorize_params(redirect_uri=redirect_uri,
scopes=scopes)
req = requests.Request(url=self.authorize_url, params=params)
return req.prepare().url
def get_login_uri(self, redirect_uri):
params = self.get_authorize_params(redirect_uri=redirect_uri,
scopes=[])
req = requests.Request(url=self.authorize_url, params=params)
return req.prepare().url
# The remainder of the API must be implemented for each flavor of OAuth
def callback(self, data, redirect_uri):
"""
Receives the full callback from the service and returns a 2-tuple
containing the user token and user secret (if applicable).
"""
raise NotImplementedError("callback() must be defined in a subclass")
def api(self, key, domain, path, method='GET', params=None, data=None):
"""
Passes along an API request to the service and returns the response.
"""
raise NotImplementedError("api() must be defined in a subclass")
class OAuth1(OAuth):
returns_token = True
def parse_token(self, content):
content = url_decode(content)
return {
'access_token': content['oauth_token'],
'secret': content['oauth_token_secret'],
}
def get_request_token_params(self, redirect_uri, scopes):
return {}
def get_request_token_response(self, redirect_uri, scopes):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
callback_uri=redirect_uri,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.post(self.get_request_token_url(), auth=auth,
params=self.get_request_token_params(redirect_uri, scopes),
verify=self.verify)
def get_authorize_params(self, redirect_uri, scopes):
resp = self.get_request_token_response(redirect_uri, scopes)
try:
data = self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
flask.session['%s_temp_secret' % self.alias] = data['secret']
if not self.returns_token:
redirect_uri += ('?oauth_token=%s' % data['access_token'])
return {
'oauth_token': data['access_token'],
'oauth_callback': redirect_uri,
}
def get_access_token_response(self, token, secret, verifier=None):
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=token,
resource_owner_secret=secret,
verifier=verifier,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.post(self.get_access_token_url(), auth=auth,
verify=self.verify)
def callback(self, data, redirect_uri):
token = data['oauth_token']
verifier = data.get('oauth_verifier', None)
secret = flask.session['%s_temp_secret' % self.alias]
del flask.session['%s_temp_secret' % self.alias]
resp = self.get_access_token_response(token, secret, verifier)
try:
return self.parse_token(resp.content)
except Exception:
raise OAuthError('Unable to parse access token')
def api(self, key, domain, path, method='GET', params=None, data=None,
headers=None):
protocol = self.https and 'https' or 'http'
url = '%s://%s%s' % (protocol, domain, path)
auth = OAuth1Manager(client_key=self.client_id,
client_secret=self.client_secret,
resource_owner_key=key.access_token,
resource_owner_secret=key.secret,
signature_method=self.signature_method,
signature_type=self.signature_type)
return requests.request(method, url, auth=auth, params=params or {},
data=data or {}, headers=headers or {},
verify=self.verify, stream=True)
class OAuth2(OAuth):
token_type = BEARER
bearer_type = BEARER_HEADER
supports_state = True
auth = None
def parse_token(self, content):
return json.loads(content)
def get_scope_string(self, scopes):
return ' '.join(scopes)
def get_authorize_params(self, redirect_uri, scopes):
state = ''.join('%02x' % ord(x) for x in urandom(16))
flask.session['%s_state' % self.alias] = state
if not self.supports_state:
redirect_uri += ('?state=%s' % state)
params = {
'client_id': self.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'state': state,
}
if any(scopes):
params['scope'] = self.get_scope_string(scopes)
return params
def get_access_token_response(self, redirect_uri, data):
return requests.post(self.get_access_token_url(), {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': data['code'],
'redirect_uri': redirect_uri
}, verify=self.verify, auth=self.auth)
def callback(self, data, redirect_uri):
state = flask.session['%s_state' % self.alias]
if 'state' in data and state != data['state']:
flask.abort(403)
del flask.session['%s_state' % self.alias]
if not self.supports_state:
redirect_uri += ('?state=%s' % state)
resp = self.get_access_token_response(redirect_uri, data)
return self.parse_token(resp.content)
def refresh_token(self, token):
resp = requests.post(self.get_access_token_url(), {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh_token',
'refresh_token': token
}, verify=self.verify, auth=self.auth)
return self.parse_token(resp.content)
def api(self, key, domain, path, method='GET', params=None, data=None,
headers=None):
protocol = self.https and 'https' or 'http'
url = '%s://%s%s' % (protocol, domain, path)
if self.token_type == BEARER:
auth = Bearer(key.access_token, bearer_type=self.bearer_type)
return requests.request(method, url, auth=auth, params=params or {},
data=data or {}, headers=headers or {},
verify=self.verify, stream=True)
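# Illustrative provider sketch (hypothetical endpoints; real providers are defined
# elsewhere in the project and only need the attributes consumed above). Note that
# OAuth2's authorize step stores state in flask.session, so it must run inside a
# Flask request context.
#
#     class ExampleService(OAuth2):
#         provider_url = 'https://example.com'
#         api_domain = 'api.example.com'
#         authorize_url = 'https://example.com/oauth/authorize'
#         access_token_url = 'https://example.com/oauth/token'
#
#     service = ExampleService(client_id='...', client_secret='...')
#     url = service.get_authorize_url(redirect_uri='https://example.org/callback',
#                                     scopes=[])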
|
py | 1a3a9e22f734a79c879a6d4a837061eb55f29615 |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from replay_buffer import ReplayBuffer
def DeepQNetwork(lr, num_actions, input_dims, fc1, fc2):
q_net = Sequential()
q_net.add(Dense(fc1, input_dim=input_dims, activation='relu'))
q_net.add(Dense(fc2, activation='relu'))
q_net.add(Dense(num_actions, activation=None))
q_net.compile(optimizer=Adam(learning_rate=lr), loss='mse')
return q_net
class Agent:
def __init__(self, lr, discount_factor, num_actions, epsilon, batch_size, input_dims):
self.action_space = [i for i in range(num_actions)]
self.discount_factor = discount_factor
self.epsilon = epsilon
self.batch_size = batch_size
self.epsilon_decay = 0.001
self.epsilon_final = 0.01
self.update_rate = 100
self.step_counter = 0
self.buffer = ReplayBuffer(1000000, input_dims)
self.q_net = DeepQNetwork(lr, num_actions, input_dims, 256, 256)
self.q_target_net = DeepQNetwork(lr, num_actions, input_dims, 256, 256)
def store_tuple(self, state, action, reward, new_state, done):
self.buffer.store_tuples(state, action, reward, new_state, done)
def policy(self, observation):
if np.random.random() < self.epsilon:
action = np.random.choice(self.action_space)
else:
state = np.array([observation])
actions = self.q_net(state)
action = tf.math.argmax(actions, axis=1).numpy()[0]
return action
def train(self):
if self.buffer.counter < self.batch_size:
return
if self.step_counter % self.update_rate == 0:
self.q_target_net.set_weights(self.q_net.get_weights())
state_batch, action_batch, reward_batch, new_state_batch, done_batch = \
self.buffer.sample_buffer(self.batch_size)
q_predicted = self.q_net(state_batch)
q_next = self.q_target_net(new_state_batch)
        # max predicted Q-value over actions for each sampled next state
        q_max_next = tf.math.reduce_max(q_next, axis=1).numpy()
q_target = np.copy(q_predicted)
for idx in range(done_batch.shape[0]):
target_q_val = reward_batch[idx]
if not done_batch[idx]:
target_q_val += self.discount_factor*q_max_next[idx]
q_target[idx, action_batch[idx]] = target_q_val
self.q_net.train_on_batch(state_batch, q_target)
self.epsilon = self.epsilon - self.epsilon_decay if self.epsilon > self.epsilon_final else self.epsilon_final
self.step_counter += 1
def train_model(self, env, num_episodes, graph):
scores, episodes, avg_scores, obj = [], [], [], []
goal = -110
f = 0
txt = open("saved_networks.txt", "w")
for i in range(num_episodes):
done = False
score = 0.0
state = env.reset()
while not done:
action = self.policy(state)
new_state, reward, done, _ = env.step(action)
score += reward
self.store_tuple(state, action, reward, new_state, done)
state = new_state
self.train()
scores.append(score)
obj.append(goal)
episodes.append(i)
avg_score = np.mean(scores[-100:])
avg_scores.append(avg_score)
print("Episode {0}/{1}, Score: {2} ({3}), AVG Score: {4}".format(i, num_episodes, score, self.epsilon,
avg_score))
if avg_score >= -110 and score >= -108:
self.q_net.save(("saved_networks/dqn_model{0}".format(f)))
self.q_net.save_weights(("saved_networks/dqn_model{0}/net_weights{0}.h5".format(f)))
txt.write("Save {0} - Episode {1}/{2}, Score: {3} ({4}), AVG Score: {5}\n".format(f, i, num_episodes,
score, self.epsilon,
avg_score))
f += 1
print("Network saved")
txt.close()
if graph:
df = pd.DataFrame({'x': episodes, 'Score': scores, 'Average Score': avg_scores, 'Solved Requirement': obj})
plt.plot('x', 'Score', data=df, marker='', color='blue', linewidth=2, label='Score')
plt.plot('x', 'Average Score', data=df, marker='', color='orange', linewidth=2, linestyle='dashed',
label='AverageScore')
plt.plot('x', 'Solved Requirement', data=df, marker='', color='red', linewidth=2, linestyle='dashed',
label='Solved Requirement')
plt.legend()
plt.savefig('MountainCar_Train.png')
def test(self, env, num_episodes, file_type, file, graph):
if file_type == 'tf':
self.q_net = tf.keras.models.load_model(file)
elif file_type == 'h5':
            # run a few episodes first so the network weights exist before loading the .h5 file
            self.train_model(env, 5, False)
self.q_net.load_weights(file)
self.epsilon = 0.0
scores, episodes, avg_scores, obj = [], [], [], []
goal = -110
score = 0.0
for i in range(num_episodes):
state = env.reset()
done = False
episode_score = 0.0
while not done:
env.render()
action = self.policy(state)
new_state, reward, done, _ = env.step(action)
episode_score += reward
state = new_state
score += episode_score
scores.append(episode_score)
obj.append(goal)
episodes.append(i)
avg_score = np.mean(scores[-100:])
avg_scores.append(avg_score)
if graph:
df = pd.DataFrame({'x': episodes, 'Score': scores, 'Average Score': avg_scores, 'Solved Requirement': obj})
plt.plot('x', 'Score', data=df, marker='', color='blue', linewidth=2, label='Score')
plt.plot('x', 'Average Score', data=df, marker='', color='orange', linewidth=2, linestyle='dashed',
label='AverageScore')
plt.plot('x', 'Solved Requirement', data=df, marker='', color='red', linewidth=2, linestyle='dashed',
label='Solved Requirement')
plt.legend()
plt.savefig('MountainCar_Test.png')
env.close()
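# --- Illustrative usage sketch (not part of the original script) ---
# How the Agent above might be driven end to end; gym, the environment name,
# every hyperparameter value and the saved-model path are assumptions chosen
# for illustration, and the classic 4-tuple gym step API used by train_model
# is assumed.
if __name__ == '__main__':
    import gym
    env = gym.make('MountainCar-v0')
    agent = Agent(lr=0.001, discount_factor=0.99, num_actions=env.action_space.n,
                  epsilon=1.0, batch_size=64, input_dims=env.observation_space.shape[0])
    agent.train_model(env, num_episodes=1000, graph=True)
    agent.test(env, num_episodes=10, file_type='tf', file='saved_networks/dqn_model0', graph=True)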
|
py | 1a3a9f1ea28c66eb3b0ed5de76b83e9460bdcd62 | import re
from lxml import etree
from pyramid.settings import asbool
from .exception import ConfigurationError
def clean_oai_settings(settings):
"""Parse and validate OAI app settings in a dictionary.
Check that the settings required by the OAI app are in the settings
dictionary and have valid values. Convert them to correct types.
Required settings are:
admin_emails
deleted_records
item_list_limit
logging_config
repository_descriptions
repository_name
sqlalchemy.url
Parameters
----------
settings: dict from str to str
The settings dictionary.
Raises
------
ConfigurationError:
If some setting is missing or has an invalid value.
"""
cleaners = {
'admin_emails': _clean_admin_emails,
'deleted_records': _clean_deleted_records,
'item_list_limit': _clean_item_list_limit,
'logging_config': _clean_unicode,
'repository_descriptions': _load_repository_descriptions,
'repository_name': _clean_unicode,
'sqlalchemy.url': _clean_unicode,
}
_clean_settings(settings, cleaners)
def clean_importer_settings(settings):
"""Parse and validate metadata importer settings in a dictionary.
Check that the settings required by the metadata importer are in the
settings dictionary and have valid values. Convert them to correct
types. Required settings are:
deleted_records
dry_run
force_update
logging_config
sqlalchemy.url
timestamp_file
metadata_provider_class
metadata_provider_args
Parameters
----------
settings: dict from str to str
The settings dictionary.
Raises
------
ConfigurationError:
If some setting is missing or has an invalid value.
"""
cleaners = {
'deleted_records': _clean_deleted_records,
'dry_run': _clean_boolean,
'force_update': _clean_boolean,
'logging_config': _clean_unicode,
'sqlalchemy.url': _clean_unicode,
'timestamp_file': _clean_unicode,
'metadata_provider_args': _clean_unicode,
'metadata_provider_class': _clean_provider_class,
}
return _clean_settings(settings, cleaners)
def _clean_settings(settings, cleaners):
"""Check that settings are ok.
The parameter `cleaners` is a dict from setting names to functions.
Each cleaner function is called with the value of the corresponding
setting. The cleaners should raise an exception if the value is invalid
and otherwise return a cleaned value. The old value gets replaced by
the cleaned value.
Parameters
----------
settings: dict from str to str
The settings dictionary.
cleaners: dict from str to callable
Mapping from setting names to cleaner functions.
Raises
------
ConfigurationError:
If any setting is missing or invalid.
"""
for name, func in cleaners.items():
if name not in settings:
raise ConfigurationError('missing setting {0}'.format(name))
try:
cleaned = func(settings[name])
settings[name] = cleaned
except Exception as error:
raise ConfigurationError(
'invalid {0} setting: {1}'.format(name, error)
)
def _clean_admin_emails(value):
"""Check that the value is a list of valid email addresses."""
# email regex pattern defined in the OAI-PMH XML schema
pattern = re.compile(r'^\S+@(\S+\.)+\S+$', flags=re.UNICODE)
emails = _clean_unicode(value).split()
if not emails:
raise ValueError('no emails')
for email in emails:
if re.match(pattern, email) is None:
raise ValueError(
'invalid email address: {0}'
''.format(repr(email))
)
return emails
def _clean_deleted_records(value):
"""Check that value is one of "no", "transient", "persistent"."""
allowed_values = ['no', 'transient', 'persistent']
if value not in allowed_values:
raise ValueError('deleted_records must be one of {0}'.format(
allowed_values
))
return str(value)
def _clean_boolean(value):
"""Return the value as a bool."""
return asbool(value)
def _clean_item_list_limit(value):
"""Check that value is a positive integer."""
int_value = int(value)
if int_value <= 0:
raise ValueError('item_list_limit must be positive')
return int_value
def _clean_unicode(value):
"""Return the value as a unicode."""
if isinstance(value, bytes):
return value.decode('utf-8')
else:
return str(value)
def _clean_provider_class(value):
"""Split the value to module name and classname."""
modulename, classname = value.split(':')
if len(modulename) == 0:
raise ValueError('empty module name')
if len(classname) == 0:
raise ValueError('empty class name')
return (modulename, classname)
def _load_repository_descriptions(value):
"""Load XML fragments from files."""
def load_description(path):
"""Load a single description."""
with open(path, 'r') as file_:
contents = file_.read()
try:
doc = etree.fromstring(contents.encode('utf-8'))
except Exception as error:
raise ValueError(
'ill-formed XML in repository description {0}: '
'{1}'.format(repr(path), error)
)
xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance'
if doc.get('{{{0}}}schemaLocation'.format(xsi_ns)) is None:
raise ValueError('no schema location')
return contents
paths = value.split()
return list(map(load_description, paths))
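# --- Illustrative sketch (not part of the original module) ---
# A quick demonstration of the cleaner protocol defined above: every importer
# setting arrives as a string and is converted in place. The setting values
# and the provider path are made up for illustration.
def _demo_clean_importer_settings():
    settings = {
        'deleted_records': 'transient',
        'dry_run': 'false',
        'force_update': '0',
        'logging_config': 'logging.ini',
        'sqlalchemy.url': 'sqlite://',
        'timestamp_file': '/tmp/oai_timestamp',
        'metadata_provider_args': 'provider.ini',
        'metadata_provider_class': 'mypackage.providers:MyProvider',
    }
    clean_importer_settings(settings)
    assert settings['dry_run'] is False
    assert settings['force_update'] is False
    assert settings['metadata_provider_class'] == ('mypackage.providers', 'MyProvider')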
|
py | 1a3a9fa9203517313086a45c56669cc6b9f36a4b | import pygame.font
class Button():
"""Basic button, since pygame doesn't have it built-in"""
def __init__(self, ai_game, message):
"""Initialize button attributes"""
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
self._render_message(message)
def _render_message(self, message):
"""Turn message into a rendered image on center of the button"""
self.message_image = self.font.render(
message, True, self.text_color, self.button_color)
self.message_image_rect = self.message_image.get_rect()
self.message_image_rect.center = self.rect.center
def draw(self):
"""Draw button with message"""
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.message_image, self.message_image_rect)
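# --- Illustrative usage sketch (not part of the original module) ---
# Button expects an object exposing a .screen surface (called ai_game in
# __init__). A hypothetical event loop could use it like this; the helper
# name and the start_game callback are assumptions.
def run_play_button(ai_game, start_game):
    """Draw a Play button and invoke start_game when it is clicked."""
    play_button = Button(ai_game, "Play")
    for event in pygame.event.get():
        if event.type == pygame.MOUSEBUTTONDOWN:
            if play_button.rect.collidepoint(pygame.mouse.get_pos()):
                start_game()
    play_button.draw()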
|
py | 1a3aa0f91af47171a9e8f6588d5d779c2f0d8e07 | #! /usr/bin/python
import argparse
import glob
import os
import sys
import tarfile
def parse_args():
parser = argparse.ArgumentParser()
products = ["rdn", "obs_ort", "loc", "igm", "glt"]
formats = ["envi", "hdf"]
parser.add_argument("-p", "--product",
help=("Choose one of the following product types: " + ", ".join(products)))
parser.add_argument("-f", "--format",
help=("Choose one of the following formats: " + ", ".join(formats)))
args = parser.parse_args()
if args.product:
if args.product not in products:
print("ERROR: Product \"%s\" is not a valid product choice." % args.product)
sys.exit(1)
if args.format:
if args.format not in formats:
print("ERROR: Format \"%s\" is not a valid format choice." % f)
sys.exit(1)
return args
def main():
args = parse_args()
# Unzip and untar granules
input_dir = "input"
granule_paths = glob.glob(os.path.join(input_dir, "*.tar.gz"))
for g in granule_paths:
tar_file = tarfile.open(g)
tar_file.extractall(input_dir)
tar_file.close()
os.remove(g)
dirs = [d for d in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir, d))]
instrument = "PRISMA" if dirs[0][:3] == "PRS" else "AVIRIS"
# Get paths based on product type file matching
paths = []
if instrument == "PRISMA":
if args.product == "rdn":
paths = glob.glob(os.path.join(input_dir, "*", "*rdn_prj"))
elif args.product == "obs_ort":
paths = glob.glob(os.path.join(input_dir, "*", "*obs_prj"))
elif args.product == "loc":
paths = glob.glob(os.path.join(input_dir, "*", "*loc_prj"))
elif instrument == "AVIRIS":
if args.product == "rdn":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*rdn*img"))
elif args.product == "obs_ort":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*obs_ort"))
elif args.product == "loc":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*loc"))
elif args.product == "igm":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*igm"))
elif args.product == "glt":
paths = glob.glob(os.path.join(input_dir, "*rdn*", "*glt"))
print(",".join(paths))
if __name__ == "__main__":
main()
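# --- Illustrative invocation (not part of the original script) ---
# Assuming the granule .tar.gz files have been staged under ./input, a typical
# run (the script filename here is hypothetical) would be:
#   python list_products.py -p rdn -f envi
# which prints a comma-separated list of the matching product paths.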
|
py | 1a3aa1239aceb19e499dac84c831ef7694f27fcb | import logging
import os
from pythonjsonlogger import jsonlogger
def setup_logging(log_level):
logger = logging.getLogger()
logger.setLevel(log_level)
handler = logging.StreamHandler()
handler.setFormatter(
jsonlogger.JsonFormatter(
fmt='%(asctime)s %(levelname)s %(lambda)s %(message)s'
)
)
logger.addHandler(handler)
    # drop the handler at index 0 (e.g. the one the AWS Lambda runtime
    # pre-installs) so records are not emitted twice
    logger.removeHandler(logger.handlers[0])
def get_logger():
logger = logging.getLogger()
logger = logging.LoggerAdapter(
logger,
{'lambda': os.environ.get('AWS_LAMBDA_FUNCTION_NAME', '')}
)
return logger
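# --- Illustrative usage sketch (not part of the original module) ---
# Wiring the two helpers together inside a Lambda-style handler; the handler
# name, signature, log level and extra field are assumptions for illustration.
def example_handler(event, context):
    setup_logging(logging.INFO)
    logger = get_logger()
    logger.info('handler invoked', extra={'event_keys': sorted(event)})
    return {'status': 'ok'}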
|
py | 1a3aa2f3c80cdc1dbf631d61849e57a2e12eb984 | import speech_recognition
import re
name = re.compile(r'(name is | nome é)(.*)', re.IGNORECASE)
goodbye = re.compile(r'(.*)(goodbye)(.*)', re.IGNORECASE)
recognizer = speech_recognition.Recognizer()
with speech_recognition.Microphone() as source:
print("Say something!")
audio = recognizer.listen(source)
print("Google Speech Recognition thinks you said:")
print(recognizer.recognize_google(audio))
words = recognizer.recognize_google(audio)
if mo := name.search(words):
print(f"Hello, {mo.group(2)}")
elif mo := goodbye.search(words):
print(f"{mo.group(2)} to you!")
|
py | 1a3aa4461eb002a1a4bdcf09964eb166982962fe | # Generated by Django 3.0.2 on 2020-01-24 10:38
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImageFile',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('filename', models.TextField(max_length=255)),
],
),
]
|
py | 1a3aa5873d85c80eb880245aa9f339e478dc9790 | """
:ref:`chainladder.methods<methods>`.MackChainladder
===================================================
:ref:`MackChainladder<mack>` produces the same IBNR results as the deterministic
approach, but ldf selection happens in a regression framework that allows for
the calculation of prediction errors. The Mack Chainladder technique is the OG
stochastic method.
"""
import numpy as np
import pandas as pd
import copy
from chainladder.methods import Chainladder
class MackChainladder(Chainladder):
""" Basic stochastic chainladder method popularized by Thomas Mack
Parameters
----------
None
Attributes
----------
triangle
returns **X**
ultimate_
The ultimate losses per the method
ibnr_
The IBNR per the method
full_expectation_
The ultimates back-filled to each development period in **X** replacing
the known data
full_triangle_
The ultimates back-filled to each development period in **X** retaining
the known data
summary_
summary of the model
full_std_err_
The full standard error
total_process_risk_
The total process error
total_parameter_risk_
The total parameter error
mack_std_err_
The total prediction error by origin period
total_mack_std_err_
The total prediction error across all origin periods
"""
def fit(self, X, y=None, sample_weight=None):
"""Fit the model with X.
Parameters
----------
X : Triangle-like
Data to which the model will be applied.
y : Ignored
sample_weight : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
super().fit(X, y, sample_weight)
self._mack_recursion('param_risk')
self._mack_recursion('process_risk')
self._mack_recursion('total_param_risk')
return self
@property
def full_std_err_(self):
obj = copy.deepcopy(self.X_)
tri_array = self.full_triangle_.triangle
weight_dict = {'regression': 2, 'volume': 1, 'simple': 0}
val = np.array([weight_dict.get(item.lower(), 2)
for item in list(self.average_) + ['volume']])
for i in [2, 1, 0]:
val = np.repeat(np.expand_dims(val, 0), tri_array.shape[i], axis=0)
k, v, o, d = val.shape
weight = np.sqrt(tri_array[..., :len(self.X_.ddims)]**(2-val))
weight[weight == 0] = np.nan
obj.triangle = self.X_.sigma_.triangle / weight
w = np.concatenate((self.X_.w_, np.ones((k, v, o, 1))*np.nan), axis=3)
w[np.isnan(w)] = 1
obj.triangle = np.nan_to_num(obj.triangle) * w
obj.nan_override = True
return obj
@property
def total_process_risk_(self):
obj = copy.deepcopy(self.process_risk_)
obj.triangle = np.sqrt(np.nansum(self.process_risk_.triangle**2, 2))
obj.triangle = np.expand_dims(obj.triangle, 2)
obj.odims = ['tot_proc_risk']
return obj
def _mack_recursion(self, est):
obj = copy.deepcopy(self.X_)
# replace this with nan_x_latest
nans = np.expand_dims(np.expand_dims(self.X_.nan_triangle(), 0), 0)
k, v, o, d = self.X_.shape
nans = nans * np.ones((k, v, o, d))
nans = np.concatenate((nans, np.ones((k, v, o, 1))*np.nan), 3)
nans = 1-np.nan_to_num(nans)
properties = self.full_triangle_
obj.ddims, obj.valuation = properties.ddims, properties.valuation
obj.nan_override = True
risk_arr = np.zeros((k, v, o, 1))
if est == 'param_risk':
obj.triangle = self._get_risk(nans, risk_arr,
obj.std_err_.triangle)
self.parameter_risk_ = obj
elif est == 'process_risk':
obj.triangle = self._get_risk(nans, risk_arr,
self.full_std_err_.triangle)
self.process_risk_ = obj
else:
risk_arr = risk_arr[..., 0:1, :]
obj.triangle = self._get_tot_param_risk(risk_arr)
obj.odims = ['Total param risk']
self.total_parameter_risk_ = obj
def _get_risk(self, nans, risk_arr, std_err):
full_tri = self.full_triangle_.triangle[..., :len(self.X_.ddims)]
t1_t = (full_tri * std_err)**2
extend = self.X_.ldf_.shape[-1]-self.X_.shape[-1]+1
ldf = self.X_.ldf_.triangle[..., :len(self.X_.ddims)-1]
ldf = np.concatenate(
(ldf, np.prod(self.X_.ldf_.triangle[..., -extend:], -1,
keepdims=True)), -1)
for i in range(len(self.X_.ddims)):
t1 = t1_t[..., i:i+1]
t2 = (ldf[..., i:i+1] * risk_arr[..., i:i+1])**2
t_tot = np.sqrt(t1+t2)*nans[..., i+1:i+2]
risk_arr = np.concatenate((risk_arr, t_tot), 3)
return risk_arr
def _get_tot_param_risk(self, risk_arr):
""" This assumes triangle symmertry """
t1 = self.full_triangle_.triangle[..., :len(self.X_.ddims)] - \
np.nan_to_num(self.X_.triangle) + \
np.nan_to_num(self.X_.get_latest_diagonal(False).triangle)
t1 = np.expand_dims(np.sum(t1*self.X_.std_err_.triangle, 2), 2)
extend = self.X_.ldf_.shape[-1]-self.X_.shape[-1]+1
ldf = self.X_.ldf_.triangle[..., :len(self.X_.ddims)-1]
ldf = np.concatenate(
(ldf, np.prod(self.X_.ldf_.triangle[..., -extend:], -1,
keepdims=True)), -1)
ldf = np.unique(ldf, axis=-2)
for i in range(self.full_triangle_.shape[-1]-1):
t_tot = np.sqrt((t1[..., i:i+1])**2 + (ldf[..., i:i+1] *
risk_arr[..., -1:])**2)
risk_arr = np.concatenate((risk_arr, t_tot), -1)
return risk_arr
@property
def mack_std_err_(self):
obj = copy.deepcopy(self.parameter_risk_)
obj.triangle = np.sqrt(self.parameter_risk_.triangle**2 +
self.process_risk_.triangle**2)
return obj
@property
def total_mack_std_err_(self):
# This might be better as a dataframe
obj = copy.deepcopy(self.X_.latest_diagonal)
obj.triangle = np.sqrt(self.total_process_risk_.triangle**2 +
self.total_parameter_risk_.triangle**2)
obj.triangle = obj.triangle[..., -1:]
obj.ddims = ['Total Mack Std Err']
obj.odims = ['Total']
return obj
@property
def summary_(self):
# This might be better as a dataframe
obj = copy.deepcopy(self.X_)
obj.triangle = np.concatenate(
(self.X_.latest_diagonal.triangle,
self.ibnr_.triangle,
self.ultimate_.triangle,
self.mack_std_err_.triangle[..., -1:]), 3)
obj.ddims = ['Latest', 'IBNR', 'Ultimate', 'Mack Std Err']
obj.nan_override = True
return obj
|
py | 1a3aa5cb1777c4f8e95944a4ae011ef517a0daf7 | """
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding, \
UnclosingTextIOWrapper
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
from pygments.lexers.special import TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
load_formatter_from_file, get_formatter_for_filename, find_formatter_class
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-s] [-v] [-x] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -C
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
If -s is passed, lexing will be done in "streaming" mode, reading and
highlighting one line at a time. This will only work properly with
lexers that have no constructs spanning multiple lines!
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
The additional option -x allows custom lexers and formatters to be
loaded from a .py file relative to the current working directory. For
example, ``-l ./customlexer.py -x``. By default, this option expects a
file with a class named CustomLexer or CustomFormatter; you can also
specify your own class name with a colon (``-l ./lexer.py:MyLexer``).
Users should be very careful not to use this option with untrusted files,
because it will import and run them.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter".
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -C option is like -N, but prints out a lexer name based solely on
a given content from standard input.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -s option processes lines one at a time until EOF, rather than
waiting to process the entire file. This only works for stdin, and
is intended for streaming input such as you get from 'tail -f'.
Example usage: "tail -f sql.log | pygmentize -s -l sql"
The -v option prints a detailed traceback on unhandled exceptions,
which is useful for debugging and bug reports.
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str.strip():
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
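# Illustrative note (not part of the original module): given the parsing rules
# above, _parse_options(['bg=light,python=cool']) yields
# {'bg': 'light', 'python': 'cool'}, and _parse_filters(['keywordcase:case=upper'])
# yields [('keywordcase', {'case': 'upper'})].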
def _print_help(what, name):
try:
if what == 'lexer':
cls = get_lexer_by_name(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
return 0
except (AttributeError, ValueError):
print("%s not found!" % what, file=sys.stderr)
return 1
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
def main_inner(popts, args, usage):
opts = {}
O_opts = []
P_opts = []
F_opts = []
for opt, arg in popts:
if opt == '-O':
O_opts.append(arg)
elif opt == '-P':
P_opts.append(arg)
elif opt == '-F':
F_opts.append(arg)
opts[opt] = arg
if opts.pop('-h', None) is not None:
print(usage)
return 0
if opts.pop('-V', None) is not None:
print('Pygments version %s, (c) 2006-2021 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
print(usage, file=sys.stderr)
return 2
# print version
main(['', '-V'])
if not args:
args = ['lexer', 'formatter', 'filter', 'style']
for arg in args:
_print_list(arg.rstrip('s'))
return 0
# handle ``pygmentize -H``
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
print(usage, file=sys.stderr)
return 2
what, name = args # pylint: disable=unbalanced-tuple-unpacking
if what not in ('lexer', 'formatter', 'filter'):
print(usage, file=sys.stderr)
return 2
return _print_help(what, name)
# parse -O options
parsed_opts = _parse_options(O_opts)
opts.pop('-O', None)
# parse -P options
for p_opt in P_opts:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
opts.pop('-P', None)
# encodings
inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
# handle ``pygmentize -N``
infn = opts.pop('-N', None)
if infn is not None:
lexer = find_lexer_class_for_filename(infn)
if lexer is None:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -C``
infc = opts.pop('-C', None)
if infc is not None:
inp = sys.stdin.buffer.read()
try:
lexer = guess_lexer(inp, inencoding=inencoding)
except ClassNotFound:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = opts.pop('-S', None)
a_opt = opts.pop('-a', None)
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
print(usage, file=sys.stderr)
return 2
if opts or args:
print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
print(fmter.get_style_defs(a_opt or ''))
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
print(usage, file=sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(F_opts)
opts.pop('-F', None)
allow_custom_lexer_formatter = False
# -x: allow custom (eXternal) lexers and formatters
if opts.pop('-x', None) is not None:
allow_custom_lexer_formatter = True
# select lexer
lexer = None
# given by name?
lexername = opts.pop('-l', None)
if lexername:
# custom lexer, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in lexername:
try:
filename = None
name = None
if ':' in lexername:
filename, name = lexername.rsplit(':', 1)
if '.py' in name:
# This can happen on Windows: If the lexername is
# C:\lexer.py -- return to normal load path in that case
name = None
if filename and name:
lexer = load_lexer_from_file(filename, name,
**parsed_opts)
else:
lexer = load_lexer_from_file(lexername, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
lexer = get_lexer_by_name(lexername, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
# read input code
code = None
if args:
if len(args) > 1:
print(usage, file=sys.stderr)
return 2
if '-s' in opts:
print('Error: -s option not usable when input file specified',
file=sys.stderr)
return 2
infn = args[0]
try:
with open(infn, 'rb') as infp:
code = infp.read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not inencoding:
code, inencoding = guess_decode(code)
# do we have to guess the lexer?
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
elif '-s' not in opts: # treat stdin as full file (-s support is later)
# read code from terminal, always in binary mode since we want to
# decode ourselves and be tolerant with it
code = sys.stdin.buffer.read() # use .buffer to get a binary stream
if not inencoding:
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
# else the lexer will do the decoding
if not lexer:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else: # -s option needs a lexer with -l
if not lexer:
print('Error: when using -s a lexer has to be selected with -l',
file=sys.stderr)
return 2
# process filters
for fname, fopts in F_opts:
try:
lexer.add_filter(fname, **fopts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
# select formatter
outfn = opts.pop('-o', None)
fmter = opts.pop('-f', None)
if fmter:
# custom formatter, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in fmter:
try:
filename = None
name = None
if ':' in fmter:
# Same logic as above for custom lexer
filename, name = fmter.rsplit(':', 1)
if '.py' in name:
name = None
if filename and name:
fmter = load_formatter_from_file(filename, name,
**parsed_opts)
else:
fmter = load_formatter_from_file(fmter, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
if '256' in os.environ.get('TERM', ''):
fmter = Terminal256Formatter(**parsed_opts)
else:
fmter = TerminalFormatter(**parsed_opts)
outfile = sys.stdout.buffer
# determine output encoding if not explicitly selected
if not outencoding:
if outfn:
# output file? use lexer encoding for now (can still be None)
fmter.encoding = inencoding
else:
# else use terminal encoding
fmter.encoding = terminal_encoding(sys.stdout)
# provide coloring under Windows, if possible
if not outfn and sys.platform in ('win32', 'cygwin') and \
fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
# unfortunately colorama doesn't support binary streams on Py3
outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
fmter.encoding = None
try:
import colorama.initialise
except ImportError:
pass
else:
outfile = colorama.initialise.wrap_stream(
outfile, convert=None, strip=None, autoreset=False, wrap=True)
# When using the LaTeX formatter and the option `escapeinside` is
# specified, we need a special lexer which collects escaped text
# before running the chosen language lexer.
escapeinside = parsed_opts.get('escapeinside', '')
if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
left = escapeinside[0]
right = escapeinside[1]
lexer = LatexEmbeddedLexer(left, right, lexer)
# ... and do it!
if '-s' not in opts:
# process whole input as per normal...
try:
highlight(code, lexer, fmter, outfile)
finally:
if outfn:
outfile.close()
return 0
else:
# line by line processing of stdin (eg: for 'tail -f')...
try:
while 1:
line = sys.stdin.buffer.readline()
if not line:
break
if not inencoding:
line = guess_decode_from_terminal(line, sys.stdin)[0]
highlight(line, lexer, fmter, outfile)
if hasattr(outfile, 'flush'):
outfile.flush()
return 0
except KeyboardInterrupt: # pragma: no cover
return 0
finally:
if outfn:
outfile.close()
def main(args=sys.argv):
"""
Main command line entry point.
"""
usage = USAGE % ((args[0],) * 7)
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:CvhVHgsx")
except getopt.GetoptError:
print(usage, file=sys.stderr)
return 2
try:
return main_inner(popts, args, usage)
except Exception:
if '-v' in dict(popts):
print(file=sys.stderr)
print('*' * 65, file=sys.stderr)
print('An unhandled exception occurred while highlighting.',
file=sys.stderr)
print('Please report the whole traceback to the issue tracker at',
file=sys.stderr)
print('<https://github.com/pygments/pygments/issues>.',
file=sys.stderr)
print('*' * 65, file=sys.stderr)
print(file=sys.stderr)
raise
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
print('*** If this is a bug you want to report, please rerun with -v.',
file=sys.stderr)
return 1
|
py | 1a3aa5dfae1769f181b979357e7fd502eef96053 | # Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import py.test
import os
from bson.objectid import ObjectId
from pytransact import commit, context
from pytransact.contextbroker import ContextBroker
from pytransact.testsupport import ContextTests
blmdir = os.path.join(os.path.dirname(__file__), 'blm')
import blm
def setup_module(module):
global blm
from pytransact import blm
blm.addBlmPath(blmdir)
from blm import fundamental, testcommit
def teardown_module(module):
blm.removeBlmPath(blmdir)
blm.clear()
class FakeUser(object):
@classmethod
def _create(cls, user):
return user
class TestContext(ContextTests):
def setup_method(self, method):
super(TestContext, self).setup_method(method)
self.user = blm.fundamental.AccessHolder()
def test_createQuery(self):
query = self.ctx.createQuery(blm.testcommit.Test, {'name': 'test'})
assert isinstance(query, context.ServiceQuery)
def test_query_invisible(self):
toi = blm.testcommit.Test(name=['test'])
self.sync()
cctx = self.pushnewctx(user=self.user)
r = blm.testcommit.Test._query(name='test').run()
assert r == []
q = blm.testcommit.Test._query()
q.clear()
r = q.run()
assert r == []
def test_query_visible(self):
toi = blm.testcommit.Test(name=['test'], allowRead=[self.user])
self.sync()
cctx = self.pushnewctx(user=self.user)
r = blm.testcommit.Test._query(name='test').run()
assert r == [toi]
assert not r[0]._phantom
def test_query_with_data_fetching(self):
blm.testcommit.Test(name=['test'], reorder=['foo', 'bar'], unique=['baz'])
self.sync()
cctx = self.pushnewctx()
query = blm.testcommit.Test._query(name='test')
query.attrList = {'name', 'reorder', 'toirefmap'}
toi, = query.run()
assert toi._attrData == {'name': ['test'], 'reorder': ['foo', 'bar'],
'toirefmap': {}}
assert not toi._phantom
query = blm.testcommit.Test._query(name='test')
query.attrList = {'unique'}
toi, = query.run()
assert toi._attrData == {'name': ['test'], 'reorder': ['foo', 'bar'],
'toirefmap': {},
'unique': ['baz']}
# test that we don't explode on non existing attributes in attrlist
query = blm.testcommit.Test._query(name='test')
query.attrList = {'doesnotexist'}
toi, = query.run() # don't explode
assert toi._attrData == {'name': ['test'], 'reorder': ['foo', 'bar'],
'toirefmap': {},
'unique': ['baz']}
def test_clearTois(self):
toi = blm.testcommit.Test(name=['test'])
assert 'name' in toi._attrData
self.ctx.clearTois()
assert toi._attrData == {}
def test_clone(self):
clone = self.ctx.clone()
assert id(clone) != id(self.ctx)
assert type(clone) == type(self.ctx)
assert clone.user == self.ctx.user
assert clone.database == self.ctx.database
class OtherContext(context.ReadonlyContext):
pass
clone = OtherContext.clone()
assert type(clone) == OtherContext
assert clone.user == self.ctx.user
assert clone.database == self.ctx.database
clone = OtherContext.clone(self.ctx)
assert type(clone) == OtherContext
assert clone.user == self.ctx.user
assert clone.database == self.ctx.database
clone = OtherContext.clone(self.ctx, user=self.user)
assert type(clone) == OtherContext
assert clone.user == self.user
assert clone.database == self.ctx.database
def test_requestAttribute(self):
toi = blm.testcommit.Test(name=['foo'], reorder=['bar'], unique=['baz'])
id = toi.id[0]
self.sync()
ctx = self.pushnewctx()
toi = blm.testcommit.Test._create(id)
assert toi._phantom # toi is not known yet
assert toi.name == ['foo']
assert toi.toirefmap == {} # default for maps is a dict, not a list
assert not toi._phantom # toi is known
# toi not in the db and not newly created
toi = blm.testcommit.Test._create(ObjectId())
assert toi._phantom # toi is not known yet
assert toi.name == []
assert toi._phantom # toi is still not known
def test_requestAttribute_copy_default(self):
toi1 = blm.testcommit.Test()
toi2 = blm.testcommit.Test()
id1, id2 = toi1.id[0], toi2.id[0]
self.sync()
ctx = self.pushnewctx()
toi1, = blm.testcommit.Test._query(id=id1).run()
name = toi1.name.value
name.append('foo')
toi1.name = name
assert toi1.name == ['foo'] # sanity
toi2, = blm.testcommit.Test._query(id=id2).run()
# if we are not careful with *copying* the default value above
# we may end up with toi2.name == ['foo']
assert toi2.name == []
def test_preload(self):
toi = blm.testcommit.Test(name=['foo'], reorder=['bar'], unique=['baz'])
id = toi.id[0]
self.sync()
ctx = self.pushnewctx()
toi, = blm.testcommit.Test._query(id=id).run()
assert not toi._attrData
toi._preload(['reorder', 'unique'])
assert not toi._attrData
assert toi.name.value # load
assert toi._attrData == {
'name': ['foo'],
'reorder': ['bar'],
'unique': ['baz'],
}
def test_setUser(self):
# Make sure contexts have user objects that are reliable from
# within the context itself:
# context.user should give you a TO which is equivalent to the
# one you'd get from a blm.User query
# Specifically, we do not want any stale data from an outdated
# context to linger in the object.
# Thus, we make sure to always create a fresh, context specific
# copy of the user TO in setUser().
user = blm.testcommit.User(name=['foo'])
user.allowRead = [user]
self.sync()
ctx = self.pushnewctx(ContextClass=commit.CommitContext, user=user)
user = ctx.user
user.name = ['not commited!']
with self.pushnewctx(user=user) as newctx:
assert newctx.user.id == user.id
assert newctx.user is not user
assert newctx.user.name == ['foo']
assert newctx.user.name != ['not commited!']
assert newctx.user in newctx.__instances__
class TestMaybeWithContext(object):
def test_with_no_context(self):
py.test.raises(Exception, lambda: ContextBroker().context) # sanity
database = object()
@context.maybe_with_context()
def foo(arg):
assert isinstance(ContextBroker().context, context.ReadonlyContext)
assert ContextBroker().context.database is database
return arg
obj = object()
assert foo(obj, database=database) is obj
def test_with_factory(self):
class MyContext(context.ReadonlyContext):
pass
@context.maybe_with_context(MyContext)
def foo():
assert isinstance(ContextBroker().context, MyContext)
foo(database=object())
def test_with_correct_context_class(self):
@context.maybe_with_context()
def foo():
return ContextBroker().context
with context.ReadonlyContext(object()) as ctx:
assert foo() is ctx
class MyContext(context.ReadonlyContext):
pass
with MyContext(object()) as ctx:
assert foo() is ctx
class WeirdContext(object):
database = object()
user = FakeUser()
def __enter__(self):
ContextBroker().pushContext(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
assert ContextBroker().context == self
ContextBroker().popContext()
with WeirdContext():
ctx = foo()
assert isinstance(ctx, context.ReadonlyContext)
assert ctx.database is WeirdContext.database
def test_with_custom_context_class(self):
class MyContext(context.ReadonlyContext):
pass
database = object()
user = FakeUser()
@context.maybe_with_context(MyContext)
def foo():
return ContextBroker().context
with context.ReadonlyContext(database, user):
ctx = foo()
assert isinstance(ctx, MyContext)
assert ctx.database is database
assert ctx.user is user
def test_no_database(self):
@context.maybe_with_context()
def foo():
return ContextBroker().context
py.test.raises(ValueError, foo)
|
py | 1a3aa79799737c7213ececde191749f04b3621e7 | """
A stress-test of sorts for LLDB's handling of threads in the inferior.
This test sets a breakpoint in the main thread where test parameters (numbers of
threads) can be adjusted, runs the inferior to that point, and modifies the
locals that control the event thread counts. This test also sets a breakpoint in
breakpoint_func (the function executed by each 'breakpoint' thread) and a
watchpoint on a global modified in watchpoint_func. The inferior is continued
until exit or a crash takes place, and the number of events seen by LLDB is
verified to match the expected number of events.
"""
from __future__ import print_function
import unittest2
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ConcurrentEventsBase(TestBase):
# Concurrency is the primary test factor here, not debug info variants.
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
super(ConcurrentEventsBase, self).setUp()
# Find the line number for our breakpoint.
self.filename = 'main.cpp'
self.thread_breakpoint_line = line_number(
self.filename, '// Set breakpoint here')
self.setup_breakpoint_line = line_number(
self.filename, '// Break here and adjust num')
self.finish_breakpoint_line = line_number(
self.filename, '// Break here and verify one thread is active')
def describe_threads(self):
ret = []
for x in self.inferior_process:
id = x.GetIndexID()
reason = x.GetStopReason()
status = "stopped" if x.IsStopped() else "running"
reason_str = lldbutil.stop_reason_to_str(reason)
if reason == lldb.eStopReasonBreakpoint:
bpid = x.GetStopReasonDataAtIndex(0)
bp = self.inferior_target.FindBreakpointByID(bpid)
reason_str = "%s hit %d times" % (
lldbutil.get_description(bp), bp.GetHitCount())
elif reason == lldb.eStopReasonWatchpoint:
watchid = x.GetStopReasonDataAtIndex(0)
watch = self.inferior_target.FindWatchpointByID(watchid)
reason_str = "%s hit %d times" % (
lldbutil.get_description(watch), watch.GetHitCount())
elif reason == lldb.eStopReasonSignal:
signals = self.inferior_process.GetUnixSignals()
signal_name = signals.GetSignalAsCString(
x.GetStopReasonDataAtIndex(0))
reason_str = "signal %s" % signal_name
location = "\t".join([lldbutil.get_description(
x.GetFrameAtIndex(i)) for i in range(x.GetNumFrames())])
ret.append(
"thread %d %s due to %s at\n\t%s" %
(id, status, reason_str, location))
return ret
def add_breakpoint(self, line, descriptions):
""" Adds a breakpoint at self.filename:line and appends its description to descriptions, and
returns the LLDB SBBreakpoint object.
"""
bpno = lldbutil.run_break_set_by_file_and_line(
self, self.filename, line, num_expected_locations=-1)
bp = self.inferior_target.FindBreakpointByID(bpno)
descriptions.append(
": file = 'main.cpp', line = %d" %
            line)
return bp
def inferior_done(self):
""" Returns true if the inferior is done executing all the event threads (and is stopped at self.finish_breakpoint,
or has terminated execution.
"""
return self.finish_breakpoint.GetHitCount() > 0 or \
self.crash_count > 0 or \
self.inferior_process.GetState() == lldb.eStateExited
def count_signaled_threads(self):
count = 0
for thread in self.inferior_process:
if thread.GetStopReason() == lldb.eStopReasonSignal and thread.GetStopReasonDataAtIndex(
0) == self.inferior_process.GetUnixSignals().GetSignalNumberFromName('SIGUSR1'):
count += 1
return count
def do_thread_actions(self,
num_breakpoint_threads=0,
num_signal_threads=0,
num_watchpoint_threads=0,
num_crash_threads=0,
num_delay_breakpoint_threads=0,
num_delay_signal_threads=0,
num_delay_watchpoint_threads=0,
num_delay_crash_threads=0):
""" Sets a breakpoint in the main thread where test parameters (numbers of threads) can be adjusted, runs the inferior
to that point, and modifies the locals that control the event thread counts. Also sets a breakpoint in
breakpoint_func (the function executed by each 'breakpoint' thread) and a watchpoint on a global modified in
watchpoint_func. The inferior is continued until exit or a crash takes place, and the number of events seen by LLDB
is verified to match the expected number of events.
"""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Get the target
self.inferior_target = self.dbg.GetSelectedTarget()
expected_bps = []
# Initialize all the breakpoints (main thread/aux thread)
self.setup_breakpoint = self.add_breakpoint(
self.setup_breakpoint_line, expected_bps)
self.finish_breakpoint = self.add_breakpoint(
self.finish_breakpoint_line, expected_bps)
# Set the thread breakpoint
if num_breakpoint_threads + num_delay_breakpoint_threads > 0:
self.thread_breakpoint = self.add_breakpoint(
self.thread_breakpoint_line, expected_bps)
# Verify breakpoints
self.expect(
"breakpoint list -f",
"Breakpoint locations shown correctly",
substrs=expected_bps)
# Run the program.
self.runCmd("run", RUN_SUCCEEDED)
# Check we are at line self.setup_breakpoint
self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT,
substrs=["stop reason = breakpoint 1."])
# Initialize the (single) watchpoint on the global variable (g_watchme)
if num_watchpoint_threads + num_delay_watchpoint_threads > 0:
self.runCmd("watchpoint set variable g_watchme")
for w in self.inferior_target.watchpoint_iter():
self.thread_watchpoint = w
self.assertTrue(
"g_watchme" in str(
self.thread_watchpoint),
"Watchpoint location not shown correctly")
# Get the process
self.inferior_process = self.inferior_target.GetProcess()
# We should be stopped at the setup site where we can set the number of
# threads doing each action (break/crash/signal/watch)
self.assertEqual(
self.inferior_process.GetNumThreads(),
1,
'Expected to stop before any additional threads are spawned.')
self.runCmd("expr num_breakpoint_threads=%d" % num_breakpoint_threads)
self.runCmd("expr num_crash_threads=%d" % num_crash_threads)
self.runCmd("expr num_signal_threads=%d" % num_signal_threads)
self.runCmd("expr num_watchpoint_threads=%d" % num_watchpoint_threads)
self.runCmd(
"expr num_delay_breakpoint_threads=%d" %
num_delay_breakpoint_threads)
self.runCmd(
"expr num_delay_crash_threads=%d" %
num_delay_crash_threads)
self.runCmd(
"expr num_delay_signal_threads=%d" %
num_delay_signal_threads)
self.runCmd(
"expr num_delay_watchpoint_threads=%d" %
num_delay_watchpoint_threads)
# Continue the inferior so threads are spawned
self.runCmd("continue")
# Make sure we see all the threads. The inferior program's threads all synchronize with a pseudo-barrier; that is,
# the inferior program ensures all threads are started and running
# before any thread triggers its 'event'.
num_threads = self.inferior_process.GetNumThreads()
expected_num_threads = num_breakpoint_threads + num_delay_breakpoint_threads \
+ num_signal_threads + num_delay_signal_threads \
+ num_watchpoint_threads + num_delay_watchpoint_threads \
+ num_crash_threads + num_delay_crash_threads + 1
self.assertEqual(
num_threads,
expected_num_threads,
'Expected to see %d threads, but seeing %d. Details:\n%s' %
(expected_num_threads,
num_threads,
"\n\t".join(
self.describe_threads())))
self.signal_count = self.count_signaled_threads()
self.crash_count = len(
lldbutil.get_crashed_threads(
self, self.inferior_process))
# Run to completion (or crash)
while not self.inferior_done():
if self.TraceOn():
self.runCmd("thread backtrace all")
self.runCmd("continue")
self.signal_count += self.count_signaled_threads()
self.crash_count += len(
lldbutil.get_crashed_threads(
self, self.inferior_process))
if num_crash_threads > 0 or num_delay_crash_threads > 0:
# Expecting a crash
self.assertTrue(
self.crash_count > 0,
"Expecting at least one thread to crash. Details: %s" %
"\t\n".join(
self.describe_threads()))
# Ensure the zombie process is reaped
self.runCmd("process kill")
elif num_crash_threads == 0 and num_delay_crash_threads == 0:
# There should be a single active thread (the main one) which hit
# the breakpoint after joining
self.assertEqual(
1,
self.finish_breakpoint.GetHitCount(),
"Expected main thread (finish) breakpoint to be hit once")
num_threads = self.inferior_process.GetNumThreads()
self.assertEqual(
1,
num_threads,
"Expecting 1 thread but seeing %d. Details:%s" %
(num_threads,
"\n\t".join(
self.describe_threads())))
self.runCmd("continue")
# The inferior process should have exited without crashing
self.assertEqual(
0,
self.crash_count,
"Unexpected thread(s) in crashed state")
self.assertEqual(
self.inferior_process.GetState(),
lldb.eStateExited,
PROCESS_EXITED)
# Verify the number of actions took place matches expected numbers
expected_breakpoint_threads = num_delay_breakpoint_threads + num_breakpoint_threads
breakpoint_hit_count = self.thread_breakpoint.GetHitCount(
) if expected_breakpoint_threads > 0 else 0
self.assertEqual(
expected_breakpoint_threads,
breakpoint_hit_count,
"Expected %d breakpoint hits, but got %d" %
(expected_breakpoint_threads,
breakpoint_hit_count))
expected_signal_threads = num_delay_signal_threads + num_signal_threads
self.assertEqual(
expected_signal_threads,
self.signal_count,
"Expected %d stops due to signal delivery, but got %d" %
(expected_signal_threads,
self.signal_count))
expected_watchpoint_threads = num_delay_watchpoint_threads + num_watchpoint_threads
watchpoint_hit_count = self.thread_watchpoint.GetHitCount(
) if expected_watchpoint_threads > 0 else 0
self.assertEqual(
expected_watchpoint_threads,
watchpoint_hit_count,
"Expected %d watchpoint hits, got %d" %
(expected_watchpoint_threads,
watchpoint_hit_count))
|
py | 1a3aa7e9e8022782edf1b0a08bee33318f48fbdd | import socket
host = '127.0.0.1'
port = 4000
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
client.send(b'Hello! Is there anybody in there?!')
response = client.recv(4096)
print(response)
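# --- Illustrative counterpart (not part of the original script) ---
# A minimal echo server this client could talk to; call run_echo_server() in a
# separate process before starting the client. The backlog and buffer size are
# arbitrary choices.
def run_echo_server():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((host, port))
    server.listen(1)
    conn, addr = server.accept()
    conn.sendall(conn.recv(4096))  # echo the first message back
    conn.close()
    server.close()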
|
py | 1a3aa81cbc23b4896ece3a02cea969e655c29554 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import typing
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import Message
from rasa.nlu.training_data import TrainingData
from client_wrapper import ServiceClient
import nlpserv_pb2 as nlp_messages
import nlpserv_pb2_grpc as nlp_service
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from rasa.nlu.model import Metadata
class Cabocha(Component):
# name = "nlp_xxx"
name="sagas.provider.cabocha_utils.Cabocha"
provides = ["cabocha_doc", "cabocha"]
defaults = {
# name of the language model to load - if it is not set
# we will be looking for a language model that is named
# after the language of the model, e.g. `en`
"model": None,
# when retrieving word vectors, this will decide if the casing
# of the word is relevant. E.g. `hello` and `Hello` will
# retrieve the same vector, if set to `False`. For some
# applications and models it makes sense to differentiate
# between these two words, therefore setting this to `True`.
"case_sensitive": False,
"host": "localhost",
"port": 50051
}
def __init__(self, component_config=None, nlp=None):
# type: (Dict[Text, Any], ServiceClient) -> None
self.nlp = nlp
super(Cabocha, self).__init__(component_config)
@classmethod
def required_packages(cls):
# type: () -> List[Text]
return ["grpc"]
@classmethod
def create_client(cls, component_conf):
try:
rpc_host = component_conf.get("host")
rpc_port = component_conf.get("port")
# if no model is specified, we fall back to the language string
# if not spacy_model_name:
logger.info("Trying to connect cabocha rpc with "
"address '{}:{}'".format(rpc_host, rpc_port))
client = ServiceClient(nlp_service, 'CabochaNlpProcsStub', rpc_host, int(rpc_port))
return client
except ValueError as e: # pragma: no cover
raise Exception("cabocha init error. {}".format(e))
@classmethod
# def create(cls, cfg):
def create(
cls, component_config: Dict[Text, Any], config: RasaNLUModelConfig
) -> "Component":
component_conf = config.for_component(cls.name, cls.defaults)
# cls.ensure_proper_language_model(nlp)
client=cls.create_client(component_conf)
return Cabocha(component_conf, client)
def provide_context(self):
# type: () -> Dict[Text, Any]
return {"cabocha": self.nlp}
def doc_for_text(self, text):
request = nlp_messages.NlText(text=text)
response = self.nlp.Tokenizer(request)
return response
def train(self, training_data, config, **kwargs):
# type: (TrainingData, RasaNLUModelConfig, **Any) -> None
for example in training_data.training_examples:
example.set("cabocha_doc", self.doc_for_text(example.text))
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
message.set("cabocha_doc", self.doc_for_text(message.text))
# @classmethod
# def load(cls,
# model_dir=None,
# model_metadata=None,
# cached_component=None,
# **kwargs):
# # type: (Text, Metadata, Optional[Cabocha], **Any) -> Cabocha
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any,
) -> "Component":
if cached_component:
return cached_component
# component_config = model_metadata.for_component(cls.name)
# return cls(component_config, cls.create_client(component_config))
return cls(meta, cls.create_client(meta))
|
py | 1a3aa86e3024ea91692b1ba54fe999600f75a41b | import cv2
from bowutils import *
def generate_dictionary(imgs_data, dictionary_size):
# Extracting descriptors
desc = stack_array([img_data.descriptors for img_data in imgs_data])
# important, cv2.kmeans only accepts type32 descriptors
desc = np.float32(desc)
# Clustering
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 0.01)
flags = cv2.KMEANS_PP_CENTERS
# desc is a type32 numpy array of vstacked descriptors
compactness, labels, dictionary = cv2.kmeans(desc, dictionary_size, None, criteria, 1, flags)
np.save(params.DICT_PATH, dictionary)
return dictionary
def main():
dictionary_size = 512
# Loading images
"""imgs_data = [] # type: list[ImageData]
pos_imgs_path = "train/pos"
neg_imgs_path = "train/neg"
print("Loading images...")
# imreads returns a list of all images in that directory
pos_imgs = imreads(pos_imgs_path)
neg_imgs = imreads(neg_imgs_path)
img_count = 0
for img in pos_imgs:
img_data = ImageData(img)
img_data.set_class("pos")
imgs_data.insert(img_count, img_data)
img_count += 1
for img in neg_imgs:
img_data = ImageData(img)
img_data.set_class("neg")
imgs_data.insert(img_count, img_data)
img_count += 1"""
program_start = cv2.getTickCount()
print("Loading images...")
start = cv2.getTickCount()
paths = ["train/pos", "train/neg"]
class_names = ["pos", "neg"]
imgs_data = get_imgs_data(paths, class_names)
print("Loaded {} image(s)".format(len(imgs_data)))
print_duration(start)
print("Computing descriptors...")
start = cv2.getTickCount()
[img_data.compute_descriptors() for img_data in imgs_data]
print_duration(start)
print("Clustering...")
start = cv2.getTickCount()
dictionary = generate_dictionary(imgs_data, dictionary_size)
print_duration(start)
print("Generating histograms...")
start = cv2.getTickCount()
[img_data.generate_bow_hist(dictionary) for img_data in imgs_data]
print_duration(start)
    print(imgs_data[0].hog().shape)
    print(imgs_data[0].features.shape)
print("Training SVM...")
start = cv2.getTickCount()
# Begin training SVM
svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setC(2.67)
svm.setGamma(5.383)
# Compile samples
samples = get_samples(imgs_data)
responses = np.int32([img_data.response for img_data in imgs_data])
svm.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 1000, 1.e-06))
svm.train(samples, cv2.ml.ROW_SAMPLE, responses)
svm.save(params.SVM_PATH)
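    # Sanity check: re-predict the training samples and report the fraction that were misclassified.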
output = svm.predict(samples)[1].ravel()
error = (np.absolute(responses.ravel() - output).sum()) / float(output.shape[0])
    if error < 0.2:
        print("Successfully trained SVM with {}% error".format(error * 100))
    else:
        print("Failed to train SVM. {}% error".format(error * 100))
print_duration(start)
print("Finished training BOW detector. {}".format(format_time(get_elapsed_time(program_start))))
if __name__ == '__main__':
main()
|
py | 1a3aa8fe4dd6f7f05cc38c5e95362e29288ef0d6 | #!/usr/bin/env python
# https://github.com/rveachkc/pymsteams/
# reference: https://dev.outlook.com/connectors/reference
import requests
class cardsection:
def title(self, stitle):
# title of the section
self.payload["title"] = stitle
def activityTitle(self, sactivityTitle):
# Title of the event or action. Often this will be the name of the "actor".
self.payload["activityTitle"] = sactivityTitle
def activitySubtitle(self, sactivitySubtitle):
# A subtitle describing the event or action. Often this will be a summary of the action.
self.payload["activitySubtitle"] = sactivitySubtitle
def activityImage(self, sactivityImage):
# URL to image or a data URI with the base64-encoded image inline.
# An image representing the action. Often this is an avatar of the "actor" of the activity.
self.payload["activityImage"] = sactivityImage
def activityText(self, sactivityText):
# A full description of the action.
self.payload["activityText"] = sactivityText
def addFact(self, factname, factvalue):
if "facts" not in self.payload.keys():
self.payload["facts"] = []
newfact = {
"name" : factname,
"value" : factvalue
}
self.payload["facts"].append(newfact)
def addImage(self, simage, ititle=None):
if "images" not in self.payload.keys():
self.payload["images"] = []
imobj = {}
imobj["image"] = simage
if ititle:
imobj["title"] = ititle
self.payload["images"].append(imobj)
def text(self, stext):
self.payload["text"] = stext
def linkButton(self, buttontext, buttonurl):
self.payload["potentialAction"] = [
{
"@context" : "http://schema.org",
"@type" : "ViewAction",
"name" : buttontext,
"target" : [ buttonurl ]
}
]
def disableMarkdown(self):
self.payload["markdown"] = False
def enableMarkdown(self):
self.payload["markdown"] = True
def dumpSection(self):
return self.payload
def __init__(self):
self.payload = {}
class connectorcard:
def text(self, mtext):
self.payload["text"] = mtext
def title(self, mtitle):
self.payload["title"] = mtitle
def summary(self, msummary):
self.payload["summary"] = msummary
def color(self, mcolor):
if mcolor.lower() == "red":
self.payload["themeColor"] = "E81123"
else:
self.payload["themeColor"] = mcolor
def addLinkButton(self, buttontext, buttonurl):
if "potentialAction" not in self.payload:
self.payload["potentialAction"] = []
thisbutton = {
"@context" : "http://schema.org",
"@type" : "ViewAction",
"name" : buttontext,
"target" : [ buttonurl ]
}
self.payload["potentialAction"].append(thisbutton)
def newhookurl(self, nhookurl):
self.hookurl = nhookurl
def addSection(self, newsection):
# this function expects a cardsection object
if "sections" not in self.payload.keys():
self.payload["sections"] = []
self.payload["sections"].append(newsection.dumpSection())
def printme(self):
print("hookurl: %s" % self.hookurl)
print("payload: %s" % self.payload)
def send(self):
headers = {"Content-Type":"application/json"}
r = requests.post(self.hookurl, json=self.payload, headers=headers, proxies=self.proxies)
if r.status_code == requests.codes.ok:
return True
else:
print(r.text)
return False
def __init__(self, hookurl, http_proxy=None, https_proxy=None):
self.payload = {}
self.hookurl = hookurl
self.proxies = {}
if http_proxy:
self.proxies['http'] = http_proxy
if https_proxy:
self.proxies['https'] = https_proxy
if not self.proxies:
self.proxies = None
def formaturl(display, url):
mdurl = "[%s](%s)" % (display, url)
return mdurl
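# Illustrative usage (the webhook URL below is a placeholder, not a real endpoint):
# card = connectorcard("https://outlook.office.com/webhook/<your-webhook-id>")
# card.title("Build status")
# card.text("See the " + formaturl("build log", "https://example.com/log"))
# card.send()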
|
py | 1a3aa9aed83eda9575f95c21065d86b87576b104 | print(robot)
from dynamic_graph.sot.torque_control.tests.test_velocity_filters import *
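# Interactive test script: configures a joint velocity filter, switches the right-leg joints
# to torque control and plays a climbing trajectory while streaming debug signals to ROS topics.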
robot.timeStep=0.00125
dt = robot.timeStep
conf_traj = Bunch()
conf_list = conf_filter_list()
conf = get_default_conf();
#conf_traj.j_name="rk"
#conf_traj.x_f=1.5
#conf_traj.w_min=0.1
#conf_traj.w_max=0.4
#conf_traj.deltaT=30.0
conf_traj.j_name="rhp"
conf_traj.x_f=-1.5
conf_traj.w_min=0.05
conf_traj.w_max=0.2
conf_traj.deltaT=20.0
conf.joint_torque_controller.FRICTION_COMPENSATION_PERCENTAGE = 0.0
b_list = [tuple(b) for b in conf_list.b_list]
a_list = [tuple(a) for a in conf_list.a_list]
filter_index = 15
#setup_velocity_filter(robot, conf, (1.,0.), (1.,0.))
setup_velocity_filter(robot, conf, b_list[filter_index], a_list[filter_index])
robot.inv_dyn_ctrl.dynamicsError.value = (0.,)*36
robot.inv_dyn_ctrl.Kf.value = 30*(0.0,)
robot.inv_dyn_ctrl.Kd.value = 30*(7.0,)
robot.inv_dyn_ctrl.Kp.value = 30*(200.,)
start_sot()
create_topic(robot.ros, robot.estimator_kin.dx, 'dq_savgol', robot, robot.estimator_kin);
create_topic(robot.ros, robot.estimator_fd.dx, 'dq_fd', robot, robot.estimator_fd);
create_topic(robot.ros, robot.inv_dyn_ctrl.dqRef, 'dqRef', robot, robot.inv_dyn_ctrl);
create_topic(robot.ros, robot.inv_dyn_ctrl.qRef, 'qRef', robot, robot.inv_dyn_ctrl);
create_topic(robot.ros, robot.device.robotState, 'robotState', robot, robot.device)
create_topic(robot.ros, robot.inv_dyn_ctrl.qError, 'qError', robot, robot.inv_dyn_ctrl);
create_topic(robot.ros, robot.estimator_fd.x_filtered, 'q_fd', robot, robot.estimator_fd);
create_topic(robot.ros, robot.device.currents, 'i');
create_topic(robot.ros, robot.ctrl_manager.currents_real, 'i_real');
create_topic(robot.ros, robot.ctrl_manager.pwmDes, 'i_des')
create_topic(robot.ros, robot.estimator_ft.jointsTorques, 'tau');
create_topic(robot.ros, robot.torque_ctrl.jointsTorquesDesired, 'tau_des');
sleep(1.0)
go_to_position(robot.traj_gen, robot.halfSitting[6:], 10.0);
# start torque control on right leg joints
robot.ctrl_manager.setCtrlMode('rhp-rhy-rhr-rk-rar-rap','torque')
i_max = np.array(robot.ctrl_manager.max_current.value)
i_max[3] = 10.0
i_max[9] = 10.0
robot.ctrl_manager.max_current.value = tuple(i_max)
ctrl_max = np.array(robot.ctrl_manager.max_ctrl.value)
ctrl_max[:12] = 20.0
robot.ctrl_manager.max_ctrl.value = tuple(ctrl_max)
kp = np.array(robot.pos_ctrl.Kp.value)
kp[:12] = 100.0
robot.pos_ctrl.Kp.value = tuple(kp)
robot.traj_gen.playTrajectoryFile('/home/adelpret/devel/src/sot-torque-control/share/climbing32_1.25ms.pos')
#################################################################
robot.ctrl_manager.setCtrlMode(conf_traj.j_name, 'torque')
sleep(0.5)
#################################################################3
#filter_index -=1
#b = b_list[filter_index]
#a = a_list[filter_index]
#robot.estimator_fd.init(dt, NJ, b, a)
filter_index=0
robot.estimator_fd.switch_filter(b_list[filter_index], a_list[filter_index])
robot.estimator_fd.switch_filter((1.,0.), (1.0,0.0))
sleep(2.0)
#dir_name='/tmp/'+'climbing32_1.25ms.pos'+'/filter_'+str(filter_index)+'/'
#robot.tracer = create_tracer(robot, dir_name)
#robot.tracer.start()
#robot.traj_gen.playTrajectoryFile('/home/adelpret/devel/src/sot-torque-control/share/climbing32_1.25ms.pos')
sleep(5.0)
robot.traj_gen.playTrajectoryFile('/home/adelpret/devel/src/sot-torque-control/share/climbing32_1.25ms.pos')
sleep(12.0)
dump_stop_tracer(robot.tracer)
robot.ctrl_manager.setCtrlMode(conf_traj.j_name, 'pos')
go_to_position(robot.traj_gen, robot.halfSitting[6:], 10.0);
robot.pos_ctrl.jointsVelocities.value = 30*(0.,)
robot.estimator_ft.dq_filtered.value = 30*(0.,)
robot.torque_ctrl.jointsVelocities.value = 30*(0.,)
robot.inv_dyn_ctrl.jointsVelocities.value = 30*(0.,)
robot.ctrl_manager.dq.value = 30*(0.,)
sleep(0.5)
filter_index = 15
#robot.estimator_fd.switch_filter((1., 0.), (1., 0.))
sleep(5.0)
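# Re-plug the finite-difference velocity estimate into every entity that consumes joint velocities.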
plug(robot.estimator_fd.dx, robot.pos_ctrl.jointsVelocities);
plug(robot.estimator_fd.dx, robot.estimator_ft.dq_filtered);
plug(robot.estimator_fd.dx, robot.torque_ctrl.jointsVelocities);
plug(robot.estimator_fd.dx, robot.inv_dyn_ctrl.jointsVelocities);
plug(robot.estimator_fd.dx, robot.ctrl_manager.dq);
stop_sot()
|
py | 1a3aaadf6e6090a50f35ef7b9e0da94b219816e7 | import argparse
import datetime
import logging
import requests
import rdflib
import sys
import tempfile
import time
import urllib
import xml.etree.ElementTree as ET
def run():
parser = argparse.ArgumentParser(description='Finds vocabulary concepts (identifying them by a namespace) in a triplestore and enriches the triplestore with full concept definitions fetched from their URIs (assuming requesting concept\'s URI with HTTP Accept text/turtle header will provide concept\'s data in the turtle format)')
parser.add_argument('sparqlUrl', help="Triplestore's SPARQL endpoint URL")
parser.add_argument('conceptsNamespace', help="URI namespace of RDF nodes to be processed")
parser.add_argument('--sparqlUser', help='HTTP basic auth user name to be used when communicating with the triplestore')
parser.add_argument('--sparqlPswd', help='HTTP basic auth password to be used when communicating with the triplestore')
parser.add_argument('--sparqlGraph', help='Process only a given triplestore graph')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG if args.verbose else logging.INFO)
harvester = VocabularyHarvester(args)
harvester.harvest()
class VocabularyHarvester:
sparqlUrl = None
sparqlAuth = None
sparqlGraph = None
conceptsNmsp = None
def __init__(self, args):
self.sparqlUrl = args.sparqlUrl
if args.sparqlGraph:
self.sparqlGraph = rdflib.term.URIRef(args.sparqlGraph).n3()
self.conceptsNmsp = rdflib.term.Literal(args.conceptsNamespace).n3()
        if args.sparqlUser and args.sparqlPswd:
self.sparqlAuth = requests.auth.HTTPBasicAuth(args.sparqlUser, args.sparqlPswd)
def harvest(self):
fromGraph = "from named %s" % self.sparqlGraph if self.sparqlGraph else ''
query = """
select distinct ?g ?o %s
where {
graph ?g {
?s ?p ?o
filter strstarts(str(?o), %s)
}
}
""" % (fromGraph, self.conceptsNmsp)
response = requests.post(self.sparqlUrl, data={"query": query}, headers={"Accept": "application/json"}, auth=self.sparqlAuth)
        if response.status_code != 200:
            logging.error("Failed to find concepts in the triplestore with status code %d and response body: %s" % (response.status_code, response.text))
            return
data = response.json()
for i in data['results']['bindings']:
logging.info("Fetching concept %s" % i['o']['value'])
try:
conceptGraph = self.fetchConcept(i['o']['value'])
#print(conceptGraph.serialize(format='turtle'))
self.updateTriplestore(conceptGraph, i['g']['value'])
except Exception as e:
logging.warning("Failed to fetch data for concept %s:\n %s" % (i['o']['value'], str(e)))
def fetchConcept(self, url):
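        # Dereference the concept URI asking for text/turtle and parse the reply into an rdflib graph.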
response = requests.get(url, headers={"Accept": "text/turtle"})
graph = rdflib.Graph()
graph.parse(data=response.text, format='turtle')
return graph
def updateTriplestore(self, conceptGraph, graph):
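        # Serialize the fetched concept as N-Triples and INSERT it into the named graph the reference came from.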
graph = rdflib.term.URIRef(graph).n3()
query = "INSERT DATA { GRAPH " + graph + " { " + conceptGraph.serialize(format='nt') + " } }"
response = requests.post(self.sparqlUrl, data={'update': query}, auth=self.sparqlAuth)
if response.status_code != 200:
raise Exception("Sending data to the triplestore failed with code %d and response body: %s" % (response.status_code, response.text))
|
py | 1a3aab08617e0278e844189902c505c18ac26495 | # stdlib
from typing import Any
from typing import Optional
# third party
from torch import device
# relative
from ...core.common.serde.serializable import serializable
from ...proto.lib.torch.device_pb2 import Device as Device_PB
# use -2 to represent index=None
INDEX_NONE = -2
def object2proto(obj: device) -> "Device_PB":
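    # Flatten a torch.device into its protobuf form; a missing index is stored as the INDEX_NONE sentinel.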
proto = Device_PB()
proto.type = obj.type
proto.index = INDEX_NONE if obj.index is None else obj.index
return proto
def proto2object(proto: "Device_PB") -> Any:
device_type = proto.type
index: Optional[int] = None if proto.index == INDEX_NONE else proto.index
obj = device(device_type, index)
return obj
serializable(generate_wrapper=True)(
wrapped_type=device,
import_path="torch.device",
protobuf_scheme=Device_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
|
py | 1a3aab608bfe149fa262af2a2204ddf4e4ab80a1 | # -*- coding: utf-8 -*-
def command():
return "list-instance"
def init_argument(parser):
parser.add_argument("--farm-no", required=True)
def execute(requester, args):
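    # Issue the ListInstance API call for the given farm number and return the raw response.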
farm_no = args.farm_no
parameters = {}
parameters["FarmNo"] = farm_no
return requester.execute("/ListInstance", parameters)
|
py | 1a3aaf643b12ee2eeb74f0c596fafe1bef137379 | import os
import config
import assemble as assemble
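# Merge the PNet positive, part-face and negative annotation files into a single image list for training.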
if __name__ == '__main__':
anno_list = []
# pnet_landmark_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_LANDMARK_ANNO_FILENAME)
pnet_postive_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_POSTIVE_ANNO_FILENAME)
pnet_part_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_PART_ANNO_FILENAME)
pnet_neg_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_NEGATIVE_ANNO_FILENAME)
anno_list.append(pnet_postive_file)
anno_list.append(pnet_part_file)
anno_list.append(pnet_neg_file)
# anno_list.append(pnet_landmark_file)
imglist_filename = config.PNET_TRAIN_IMGLIST_FILENAME
anno_dir = config.ANNO_STORE_DIR
imglist_file = os.path.join(anno_dir, imglist_filename)
    chose_count = assemble.assemble_data(imglist_file, anno_list)
    print("PNet train annotation result file path:%s" % imglist_file)
|