the-stack_0_16897
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//kotlin/internal/utils:utils.bzl",
_utils = "utils",
)
KtJvmPluginInfo = provider(
doc = "This provider contains the plugin info for the JVM aspect",
fields = {
"annotation_processors": "a serializeable list of structs containing annotation processor definitions",
"transitive_runtime_jars": "set of jars required during annotation processor execution",
},
)
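# Illustrative sketch (not part of the original file): each entry in
# `annotation_processors` is a struct shaped like the one built by
# _kt_jvm_plugin_aspect_impl below. The label and paths here are hypothetical.
#
#   struct(
#       label = "//some/pkg:my_plugin",
#       processor_class = "com.example.MyProcessor",
#       classpath = ["path/to/processor.jar"],
#       generates_api = False,
#   )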
_EMPTY_PLUGIN_INFO = [KtJvmPluginInfo(annotation_processors = [], transitive_runtime_jars = depset())]
def merge_plugin_infos(attrs):
"""Merge all of the plugin infos found in the provided sequence of attributes.
Returns:
A KtJvmPluginInfo provider; each of the entries is serializable."""
tally = {}
annotation_processors = []
runtime_jars = depset()
for info in [a[KtJvmPluginInfo] for a in attrs]:
for p in info.annotation_processors:
if p.label not in tally:
tally[p.label] = True
annotation_processors.append(p)
runtime_jars += info.transitive_runtime_jars
return KtJvmPluginInfo(
annotation_processors = annotation_processors,
transitive_runtime_jars = runtime_jars,
)
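# Example (illustrative only): a consuming rule implementation could merge the
# plugin info collected from its attributes roughly like this, assuming those
# attributes carry kt_jvm_plugin_aspect. `_my_rule_impl` is a hypothetical name.
#
#   def _my_rule_impl(ctx):
#       plugin_info = merge_plugin_infos(ctx.attr.plugins + ctx.attr.deps)
#       processor_classes = [p.processor_class for p in plugin_info.annotation_processors]
#       ...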
def _kt_jvm_plugin_aspect_impl(target, ctx):
if ctx.rule.kind == "java_plugin":
processor = ctx.rule.attr
merged_deps = java_common.merge([j[JavaInfo] for j in processor.deps])
return [KtJvmPluginInfo(
annotation_processors = [
struct(
label = _utils.restore_label(ctx.label),
processor_class = processor.processor_class,
classpath = [cp.path for cp in merged_deps.transitive_runtime_jars],
generates_api = processor.generates_api,
),
],
transitive_runtime_jars = merged_deps.transitive_runtime_jars,
)]
elif ctx.rule.kind == "java_library":
return [merge_plugin_infos(ctx.rule.attr.exported_plugins)]
else:
return _EMPTY_PLUGIN_INFO
kt_jvm_plugin_aspect = aspect(
doc = """This aspect collects Java Plugins info and other Kotlin compiler plugin configurations from the graph.""",
attr_aspects = [
"plugins",
"exported_plugins",
],
provides = [KtJvmPluginInfo],
implementation = _kt_jvm_plugin_aspect_impl,
)
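# Example (not part of this file): a rule that wants the collected plugin info
# would typically request the aspect on the matching attributes, e.g.:
#
#   "plugins": attr.label_list(
#       default = [],
#       aspects = [kt_jvm_plugin_aspect],
#   ),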
the-stack_0_16898
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class NetlibScalapack(CMakePackage):
"""ScaLAPACK is a library of high-performance linear algebra routines for
parallel distributed memory machines
"""
homepage = "http://www.netlib.org/scalapack/"
url = "http://www.netlib.org/scalapack/scalapack-2.0.2.tgz"
version('2.0.2', '2f75e600a2ba155ed9ce974a1c4b536f')
version('2.0.1', '17b8cde589ea0423afe1ec43e7499161')
version('2.0.0', '9e76ae7b291be27faaad47cfc256cbfe')
# versions before 2.0.0 do not use CMake and require BLACS as
# a separate package
variant(
'shared',
default=True,
description='Build the shared library version'
)
variant(
'pic',
default=False,
description='Build position independent code'
)
provides('scalapack')
depends_on('mpi')
depends_on('lapack')
depends_on('blas')
depends_on('cmake', when='@2.0.0:', type='build')
# See: https://github.com/Reference-ScaLAPACK/scalapack/issues/9
patch("cmake_fortran_mangle.patch", when='@2.0.2:')
@property
def libs(self):
# Note that the default will be to search
# for 'libnetlib-scalapack.<suffix>'
shared = True if '+shared' in self.spec else False
return find_libraries(
'libscalapack', root=self.prefix, shared=shared, recursive=True
)
def cmake_args(self):
spec = self.spec
options = [
"-DBUILD_SHARED_LIBS:BOOL=%s" % ('ON' if '+shared' in spec else
'OFF'),
"-DBUILD_STATIC_LIBS:BOOL=%s" % ('OFF' if '+shared' in spec else
'ON')
]
# Make sure we use Spack's Lapack:
blas = spec['blas'].libs
lapack = spec['lapack'].libs
options.extend([
'-DLAPACK_FOUND=true',
'-DLAPACK_INCLUDE_DIRS=%s' % spec['lapack'].prefix.include,
'-DLAPACK_LIBRARIES=%s' % (lapack.joined(';')),
'-DBLAS_LIBRARIES=%s' % (blas.joined(';'))
])
if '+pic' in spec:
options.extend([
"-DCMAKE_C_FLAGS=%s" % self.compiler.pic_flag,
"-DCMAKE_Fortran_FLAGS=%s" % self.compiler.pic_flag
])
return options
@run_after('install')
def fix_darwin_install(self):
# The shared libraries are not installed correctly on Darwin:
if (sys.platform == 'darwin') and ('+shared' in self.spec):
fix_darwin_install_name(self.spec.prefix.lib)
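# Example usage (illustrative, not part of the package): the variants above can
# be selected on the command line, e.g.
#
#   spack install netlib-scalapack +pic              # position-independent code
#   spack install netlib-scalapack ~shared ^openmpi  # static libs, OpenMPI as MPI provider
#
# The concrete MPI provider is an assumption; any package satisfying the `mpi`,
# `lapack` and `blas` virtual dependencies will do.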
the-stack_0_16899
"""
Tests for Term.
"""
from collections import Counter
from itertools import product
from unittest import TestCase
from toolz import assoc
import pandas as pd
from zipline.assets import Asset, ExchangeInfo
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonWindowSafeInput,
NotDType,
TermInputsNotSpecified,
NonPipelineInputs,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.pipeline import (
Classifier,
CustomClassifier,
CustomFactor,
Factor,
Filter,
ExecutionPlan,
)
from zipline.pipeline.data import Column, DataSet
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.expression import NUMEXPR_MATH_FUNCS
from zipline.pipeline.factors import RecarrayField
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import AssetExists, Slice
from zipline.testing import parameter_space
from zipline.testing.fixtures import WithTradingSessions, ZiplineTestCase
from zipline.testing.predicates import (
assert_equal,
assert_raises,
assert_raises_regex,
assert_regex,
)
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
complex128_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
NoDefaultMissingValue,
)
class SomeDataSet(DataSet):
foo = Column(float64_dtype)
bar = Column(float64_dtype)
buzz = Column(float64_dtype)
class SubDataSet(SomeDataSet):
pass
class SubDataSetNewCol(SomeDataSet):
qux = Column(float64_dtype)
class SomeFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
SomeFactorAlias = SomeFactor
class SomeOtherFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.bar, SomeDataSet.buzz]
class DateFactor(Factor):
dtype = datetime64ns_dtype
window_length = 5
inputs = [SomeDataSet.bar, SomeDataSet.buzz]
class NoLookbackFactor(Factor):
dtype = float64_dtype
window_length = 0
class GenericCustomFactor(CustomFactor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo]
class MultipleOutputs(CustomFactor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
outputs = ['alpha', 'beta']
def some_method(self):
return
class GenericFilter(Filter):
dtype = bool_dtype
window_length = 0
inputs = []
class GenericClassifier(Classifier):
dtype = categorical_dtype
window_length = 0
inputs = []
def gen_equivalent_factors():
"""
Return an iterator of SomeFactor instances that should all be the same
object.
"""
yield SomeFactor()
yield SomeFactor(inputs=NotSpecified)
yield SomeFactor(SomeFactor.inputs)
yield SomeFactor(inputs=SomeFactor.inputs)
yield SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
yield SomeFactor(window_length=SomeFactor.window_length)
yield SomeFactor(window_length=NotSpecified)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=NotSpecified,
)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=SomeFactor.window_length,
)
yield SomeFactorAlias()
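# A quick illustration of the interning behaviour these generators rely on (and
# which ObjectIdentityTestCase below verifies): constructing a Term twice with
# equivalent parameters is expected to return the *same* object, e.g.
#
#   SomeFactor() is SomeFactor()                                     # -> True
#   SomeFactor() is SomeFactor(window_length=SomeFactor.window_length + 1)  # -> False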
def to_dict(l):
"""
Convert a list to a dict with keys drawn from '0', '1', '2', ...
Examples
--------
>>> to_dict([2, 3, 4]) # doctest: +SKIP
{'0': 2, '1': 3, '2': 4}
"""
return dict(zip(map(str, range(len(l))), l))
class DependencyResolutionTestCase(WithTradingSessions, ZiplineTestCase):
TRADING_CALENDAR_STRS = ('NYSE',)
START_DATE = pd.Timestamp('2014-01-02', tz='UTC')
END_DATE = pd.Timestamp('2014-12-31', tz='UTC')
execution_plan_start = pd.Timestamp('2014-06-01', tz='UTC')
execution_plan_end = pd.Timestamp('2014-06-30', tz='UTC')
def check_dependency_order(self, ordered_terms):
seen = set()
for term in ordered_terms:
for dep in term.dependencies:
self.assertIn(dep, seen)
seen.add(term)
def make_execution_plan(self, terms):
return ExecutionPlan(
terms,
self.nyse_sessions,
self.execution_plan_start,
self.execution_plan_end,
)
def test_single_factor(self):
"""
Test dependency resolution for a single factor.
"""
def check_output(graph):
resolution_order = list(graph.ordered())
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertIn(SomeDataSet.foo, resolution_order)
self.assertIn(SomeDataSet.bar, resolution_order)
self.assertIn(SomeFactor(), resolution_order)
self.assertEqual(
graph.graph.node[SomeDataSet.foo]['extra_rows'],
4,
)
self.assertEqual(
graph.graph.node[SomeDataSet.bar]['extra_rows'],
4,
)
for foobar in gen_equivalent_factors():
check_output(self.make_execution_plan(to_dict([foobar])))
def test_single_factor_instance_args(self):
"""
Test dependency resolution for a single factor with arguments passed to
the constructor.
"""
bar, buzz = SomeDataSet.bar, SomeDataSet.buzz
factor = SomeFactor([bar, buzz], window_length=5)
graph = self.make_execution_plan(to_dict([factor]))
resolution_order = list(graph.ordered())
# SomeFactor, its inputs, and AssetExists()
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertEqual(graph.extra_rows[AssetExists()], 4)
self.assertIn(bar, resolution_order)
self.assertIn(buzz, resolution_order)
self.assertIn(SomeFactor([bar, buzz], window_length=5),
resolution_order)
self.assertEqual(graph.extra_rows[bar], 4)
self.assertEqual(graph.extra_rows[buzz], 4)
def test_reuse_loadable_terms(self):
"""
Test that raw inputs only show up in the dependency graph once.
"""
f1 = SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
f2 = SomeOtherFactor([SomeDataSet.bar, SomeDataSet.buzz])
graph = self.make_execution_plan(to_dict([f1, f2]))
resolution_order = list(graph.ordered())
# bar should only appear once.
self.assertEqual(len(resolution_order), 6)
self.assertEqual(len(set(resolution_order)), 6)
self.check_dependency_order(resolution_order)
def test_disallow_recursive_lookback(self):
with self.assertRaises(NonWindowSafeInput):
SomeFactor(inputs=[SomeFactor(), SomeDataSet.foo])
def test_window_safety_one_window_length(self):
"""
Test that window safety problems are only raised if
the parent factor has window length greater than 1
"""
with self.assertRaises(NonWindowSafeInput):
SomeFactor(inputs=[SomeOtherFactor()])
SomeFactor(inputs=[SomeOtherFactor()], window_length=1)
class ObjectIdentityTestCase(TestCase):
def assertSameObject(self, *objs):
first = objs[0]
for obj in objs:
self.assertIs(first, obj)
def assertDifferentObjects(self, *objs):
id_counts = Counter(map(id, objs))
((most_common_id, count),) = id_counts.most_common(1)
if count > 1:
dupe = [o for o in objs if id(o) == most_common_id][0]
self.fail("%s appeared %d times in %s" % (dupe, count, objs))
def test_instance_caching(self):
self.assertSameObject(*gen_equivalent_factors())
self.assertIs(
SomeFactor(window_length=SomeFactor.window_length + 1),
SomeFactor(window_length=SomeFactor.window_length + 1),
)
self.assertIs(
SomeFactor(dtype=float64_dtype),
SomeFactor(dtype=float64_dtype),
)
self.assertIs(
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
mask = SomeFactor() + SomeOtherFactor()
self.assertIs(SomeFactor(mask=mask), SomeFactor(mask=mask))
def test_instance_caching_multiple_outputs(self):
self.assertIs(MultipleOutputs(), MultipleOutputs())
self.assertIs(
MultipleOutputs(),
MultipleOutputs(outputs=MultipleOutputs.outputs),
)
self.assertIs(
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
)
# Ensure that both methods of accessing our outputs return the same
# things.
multiple_outputs = MultipleOutputs()
alpha, beta = MultipleOutputs()
self.assertIs(alpha, multiple_outputs.alpha)
self.assertIs(beta, multiple_outputs.beta)
def test_instance_caching_of_slices(self):
my_asset = Asset(
1,
exchange_info=ExchangeInfo('TEST FULL', 'TEST', 'US'),
)
f = GenericCustomFactor()
f_slice = f[my_asset]
self.assertIs(f_slice, Slice(GenericCustomFactor(), my_asset))
f = GenericFilter()
f_slice = f[my_asset]
self.assertIs(f_slice, Slice(GenericFilter(), my_asset))
c = GenericClassifier()
c_slice = c[my_asset]
self.assertIs(c_slice, Slice(GenericClassifier(), my_asset))
def test_instance_non_caching(self):
f = SomeFactor()
# Different window_length.
self.assertIsNot(
f,
SomeFactor(window_length=SomeFactor.window_length + 1),
)
# Different dtype
self.assertIsNot(
f,
SomeFactor(dtype=datetime64ns_dtype)
)
# Reordering inputs changes semantics.
self.assertIsNot(
f,
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
def test_instance_non_caching_redefine_class(self):
orig_foobar_instance = SomeFactorAlias()
class SomeFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
self.assertIsNot(orig_foobar_instance, SomeFactor())
def test_instance_non_caching_multiple_outputs(self):
multiple_outputs = MultipleOutputs()
# Different outputs.
self.assertIsNot(
MultipleOutputs(), MultipleOutputs(outputs=['beta', 'gamma']),
)
# Reordering outputs.
self.assertIsNot(
multiple_outputs,
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
)
# Different factors sharing an output name should produce different
# RecarrayField factors.
orig_beta = multiple_outputs.beta
beta, gamma = MultipleOutputs(outputs=['beta', 'gamma'])
self.assertIsNot(beta, orig_beta)
def test_instance_caching_binops(self):
f = SomeFactor()
g = SomeOtherFactor()
for lhs, rhs in product([f, g], [f, g]):
self.assertIs((lhs + rhs), (lhs + rhs))
self.assertIs((lhs - rhs), (lhs - rhs))
self.assertIs((lhs * rhs), (lhs * rhs))
self.assertIs((lhs / rhs), (lhs / rhs))
self.assertIs((lhs ** rhs), (lhs ** rhs))
self.assertIs((1 + rhs), (1 + rhs))
self.assertIs((rhs + 1), (rhs + 1))
self.assertIs((1 - rhs), (1 - rhs))
self.assertIs((rhs - 1), (rhs - 1))
self.assertIs((2 * rhs), (2 * rhs))
self.assertIs((rhs * 2), (rhs * 2))
self.assertIs((2 / rhs), (2 / rhs))
self.assertIs((rhs / 2), (rhs / 2))
self.assertIs((2 ** rhs), (2 ** rhs))
self.assertIs((rhs ** 2), (rhs ** 2))
self.assertIs((f + g) + (f + g), (f + g) + (f + g))
def test_instance_caching_unary_ops(self):
f = SomeFactor()
self.assertIs(-f, -f)
self.assertIs(--f, --f)
self.assertIs(---f, ---f)
def test_instance_caching_math_funcs(self):
f = SomeFactor()
for funcname in NUMEXPR_MATH_FUNCS:
method = getattr(f, funcname)
self.assertIs(method(), method())
def test_instance_caching_grouped_transforms(self):
f = SomeFactor()
c = GenericClassifier()
m = GenericFilter()
for meth in f.demean, f.zscore, f.rank:
self.assertIs(meth(), meth())
self.assertIs(meth(groupby=c), meth(groupby=c))
self.assertIs(meth(mask=m), meth(mask=m))
self.assertIs(meth(groupby=c, mask=m), meth(groupby=c, mask=m))
class SomeFactorParameterized(SomeFactor):
params = ('a', 'b')
def test_parameterized_term(self):
f = self.SomeFactorParameterized(a=1, b=2)
self.assertEqual(f.params, {'a': 1, 'b': 2})
g = self.SomeFactorParameterized(a=1, b=3)
h = self.SomeFactorParameterized(a=2, b=2)
self.assertDifferentObjects(f, g, h)
f2 = self.SomeFactorParameterized(a=1, b=2)
f3 = self.SomeFactorParameterized(b=2, a=1)
self.assertSameObject(f, f2, f3)
self.assertEqual(f.params['a'], 1)
self.assertEqual(f.params['b'], 2)
self.assertEqual(f.window_length, SomeFactor.window_length)
self.assertEqual(f.inputs, tuple(SomeFactor.inputs))
def test_parameterized_term_non_hashable_arg(self):
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=[], b=1)
assert_equal(
str(e.exception),
"SomeFactorParameterized expected a hashable value for parameter"
" 'a', but got [] instead.",
)
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=1, b=[])
assert_equal(
str(e.exception),
"SomeFactorParameterized expected a hashable value for parameter"
" 'b', but got [] instead.",
)
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=[], b=[])
assert_regex(
str(e.exception),
r"SomeFactorParameterized expected a hashable value for parameter"
r" '(a|b)', but got \[\] instead\.",
)
def test_parameterized_term_default_value(self):
defaults = {'a': 'default for a', 'b': 'default for b'}
class F(Factor):
params = defaults
inputs = (SomeDataSet.foo,)
dtype = 'f8'
window_length = 5
assert_equal(F().params, defaults)
assert_equal(F(a='new a').params, assoc(defaults, 'a', 'new a'))
assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
assert_equal(
F(a='new a', b='new b').params,
{'a': 'new a', 'b': 'new b'},
)
def test_parameterized_term_default_value_with_not_specified(self):
defaults = {'a': 'default for a', 'b': NotSpecified}
class F(Factor):
params = defaults
inputs = (SomeDataSet.foo,)
dtype = 'f8'
window_length = 5
pattern = r"F expected a keyword parameter 'b'\."
with assert_raises_regex(TypeError, pattern):
F()
with assert_raises_regex(TypeError, pattern):
F(a='new a')
assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
assert_equal(
F(a='new a', b='new b').params,
{'a': 'new a', 'b': 'new b'},
)
def test_bad_input(self):
class SomeFactor(Factor):
dtype = float64_dtype
class SomeFactorDefaultInputs(SomeFactor):
inputs = (SomeDataSet.foo, SomeDataSet.bar)
class SomeFactorDefaultLength(SomeFactor):
window_length = 10
class SomeFactorNoDType(SomeFactor):
window_length = 10
inputs = (SomeDataSet.foo,)
dtype = NotSpecified
with self.assertRaises(TermInputsNotSpecified):
SomeFactor(window_length=1)
with self.assertRaises(TermInputsNotSpecified):
SomeFactorDefaultLength()
with self.assertRaises(NonPipelineInputs):
SomeFactor(window_length=1, inputs=[2])
with self.assertRaises(WindowLengthNotSpecified):
SomeFactor(inputs=(SomeDataSet.foo,))
with self.assertRaises(WindowLengthNotSpecified):
SomeFactorDefaultInputs()
with self.assertRaises(DTypeNotSpecified):
SomeFactorNoDType()
with self.assertRaises(NotDType):
SomeFactor(dtype=1)
with self.assertRaises(NoDefaultMissingValue):
SomeFactor(dtype=int64_dtype)
with self.assertRaises(UnsupportedDType):
SomeFactor(dtype=complex128_dtype)
with self.assertRaises(TermOutputsEmpty):
MultipleOutputs(outputs=[])
def test_bad_output_access(self):
with self.assertRaises(AttributeError) as e:
SomeFactor().not_an_attr
errmsg = str(e.exception)
self.assertEqual(
errmsg, "'SomeFactor' object has no attribute 'not_an_attr'",
)
mo = MultipleOutputs()
with self.assertRaises(AttributeError) as e:
mo.not_an_attr
errmsg = str(e.exception)
expected = (
"Instance of MultipleOutputs has no output named 'not_an_attr'."
" Possible choices are: ('alpha', 'beta')."
)
self.assertEqual(errmsg, expected)
with self.assertRaises(ValueError) as e:
alpha, beta = GenericCustomFactor()
errmsg = str(e.exception)
self.assertEqual(
errmsg, "GenericCustomFactor does not have multiple outputs.",
)
# Public method, user-defined method.
# Accessing these attributes should return the output, not the method.
conflicting_output_names = ['zscore', 'some_method']
mo = MultipleOutputs(outputs=conflicting_output_names)
for name in conflicting_output_names:
self.assertIsInstance(getattr(mo, name), RecarrayField)
# Non-callable attribute, private method, special method.
disallowed_output_names = ['inputs', '_init', '__add__']
for name in disallowed_output_names:
with self.assertRaises(InvalidOutputName):
GenericCustomFactor(outputs=[name])
def test_require_super_call_in_validate(self):
class MyFactor(Factor):
inputs = ()
dtype = float64_dtype
window_length = 0
def _validate(self):
"Woops, I didn't call super()!"
with self.assertRaises(AssertionError) as e:
MyFactor()
errmsg = str(e.exception)
self.assertEqual(
errmsg,
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
def test_latest_on_different_dtypes(self):
factor_dtypes = (float64_dtype, datetime64ns_dtype)
for column in TestingDataSet.columns:
if column.dtype == bool_dtype:
self.assertIsInstance(column.latest, Filter)
elif (column.dtype == int64_dtype
or column.dtype.kind in ('O', 'S', 'U')):
self.assertIsInstance(column.latest, Classifier)
elif column.dtype in factor_dtypes:
self.assertIsInstance(column.latest, Factor)
else:
self.fail(
"Unknown dtype %s for column %s" % (column.dtype, column)
)
# These should be the same value, plus this has the convenient
# property of correctly handling `NaN`.
self.assertIs(column.missing_value, column.latest.missing_value)
def test_failure_timing_on_bad_dtypes(self):
# Just constructing a bad column shouldn't fail.
Column(dtype=int64_dtype)
with self.assertRaises(NoDefaultMissingValue) as e:
class BadDataSet(DataSet):
bad_column = Column(dtype=int64_dtype)
float_column = Column(dtype=float64_dtype)
int_column = Column(dtype=int64_dtype, missing_value=3)
self.assertTrue(
str(e.exception.args[0]).startswith(
"Failed to create Column with name 'bad_column'"
)
)
Column(dtype=complex128_dtype)
with self.assertRaises(UnsupportedDType):
class BadDataSetComplex(DataSet):
bad_column = Column(dtype=complex128_dtype)
float_column = Column(dtype=float64_dtype)
int_column = Column(dtype=int64_dtype, missing_value=3)
class SubDataSetTestCase(TestCase):
def test_subdataset(self):
some_dataset_map = {
column.name: column for column in SomeDataSet.columns
}
sub_dataset_map = {
column.name: column for column in SubDataSet.columns
}
self.assertEqual(
{column.name for column in SomeDataSet.columns},
{column.name for column in SubDataSet.columns},
)
for k, some_dataset_column in some_dataset_map.items():
sub_dataset_column = sub_dataset_map[k]
self.assertIsNot(
some_dataset_column,
sub_dataset_column,
'subclass column %r should not have the same identity as'
' the parent' % k,
)
self.assertEqual(
some_dataset_column.dtype,
sub_dataset_column.dtype,
'subclass column %r should have the same dtype as the parent' %
k,
)
def test_add_column(self):
some_dataset_map = {
column.name: column for column in SomeDataSet.columns
}
sub_dataset_new_col_map = {
column.name: column for column in SubDataSetNewCol.columns
}
sub_col_names = {column.name for column in SubDataSetNewCol.columns}
# check our extra col
self.assertIn('qux', sub_col_names)
self.assertEqual(
sub_dataset_new_col_map['qux'].dtype,
float64_dtype,
)
self.assertEqual(
{column.name for column in SomeDataSet.columns},
sub_col_names - {'qux'},
)
for k, some_dataset_column in some_dataset_map.items():
sub_dataset_column = sub_dataset_new_col_map[k]
self.assertIsNot(
some_dataset_column,
sub_dataset_column,
'subclass column %r should not have the same identity as'
' the parent' % k,
)
self.assertEqual(
some_dataset_column.dtype,
sub_dataset_column.dtype,
'subclass column %r should have the same dtype as the parent' %
k,
)
@parameter_space(
dtype_=[categorical_dtype, int64_dtype],
outputs_=[('a',), ('a', 'b')],
)
def test_reject_multi_output_classifiers(self, dtype_, outputs_):
"""
Multi-output CustomClassifiers don't work because they use special
output allocation for string arrays.
"""
class SomeClassifier(CustomClassifier):
dtype = dtype_
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
outputs = outputs_
missing_value = dtype_.type('123')
expected_error = (
"SomeClassifier does not support custom outputs, "
"but received custom outputs={outputs}.".format(outputs=outputs_)
)
with self.assertRaises(ValueError) as e:
SomeClassifier()
self.assertEqual(str(e.exception), expected_error)
with self.assertRaises(ValueError) as e:
SomeClassifier()
self.assertEqual(str(e.exception), expected_error)
def test_unreasonable_missing_values(self):
for base_type, dtype_, bad_mv in ((Factor, float64_dtype, 'ayy'),
(Filter, bool_dtype, 'lmao'),
(Classifier, int64_dtype, 'lolwut'),
(Classifier, categorical_dtype, 7)):
class SomeTerm(base_type):
inputs = ()
window_length = 0
missing_value = bad_mv
dtype = dtype_
with self.assertRaises(TypeError) as e:
SomeTerm()
prefix = (
"^Missing value {mv!r} is not a valid choice "
"for term SomeTerm with dtype {dtype}.\n\n"
"Coercion attempt failed with:"
).format(mv=bad_mv, dtype=dtype_)
self.assertRegexpMatches(str(e.exception), prefix)
the-stack_0_16902
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Can be used with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
requirements: [ "boto3", "botocore" ]
options:
state:
description:
- register or deregister the instance
choices: ['present', 'absent']
default: present
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group. Use for classic load balancers.
target_group_arns:
description:
- List of target group ARNs to use for the group. Use for application load balancers.
version_added: "2.4"
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
If unspecified then the current group value will be used. One of launch_config_name or launch_template must be provided.
launch_template:
description:
- Dictionary describing the Launch Template to use
suboptions:
version:
description:
- The version number of the launch template to use. Defaults to latest version if not provided.
default: "latest"
launch_template_name:
description:
- The name of the launch template. Only one of launch_template_name or launch_template_id is required.
launch_template_id:
description:
- The id of the launch template. Only one of launch_template_name or launch_template_id is required.
version_added: "2.8"
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
placement_group:
description:
- Physical location of your cluster placement group created in Amazon EC2.
version_added: "2.3"
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
replace_all_instances:
description:
- In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration.
It increases the ASG size by C(replace_batch_size), waits for the new instances to be up and running.
After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced.
Once that's done the ASG size is reduced back to the expected size.
version_added: "1.8"
default: 'no'
type: bool
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch
configuration.
version_added: "1.8"
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
version_added: "1.8"
default: 'yes'
type: bool
lt_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_template or launch_template version.
version_added: "2.8"
default: 'yes'
type: bool
vpc_zone_identifier:
description:
- List of VPC subnets to use
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 300 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
- How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy",
try increasing this value.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: 'yes'
type: bool
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the
current termination policies are maintained.
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
notification_topic:
description:
- A SNS topic ARN to send auto scaling notifications to.
version_added: "2.2"
notification_types:
description:
- A list of auto scaling events to trigger notifications on.
default:
- 'autoscaling:EC2_INSTANCE_LAUNCH'
- 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
- 'autoscaling:EC2_INSTANCE_TERMINATE'
- 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
required: false
version_added: "2.2"
suspend_processes:
description:
- A list of scaling processes to suspend.
default: []
choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer']
version_added: "2.3"
metrics_collection:
description:
- Enable ASG metrics collection
type: bool
default: 'no'
version_added: "2.6"
metrics_granularity:
description:
- When metrics_collection is enabled this will determine granularity of metrics collected by CloudWatch
default: "1minute"
version_added: "2.6"
metrics_list:
description:
- List of autoscaling metrics to collect when enabling metrics_collection
default:
- 'GroupMinSize'
- 'GroupMaxSize'
- 'GroupDesiredCapacity'
- 'GroupInServiceInstances'
- 'GroupPendingInstances'
- 'GroupStandbyInstances'
- 'GroupTerminatingInstances'
- 'GroupTotalInstances'
version_added: "2.6"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration with Launch Configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
# Below is an example of how to assign a new launch config to an ASG and terminate old instances.
#
# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
# a rolling fashion with instances using the current launch configuration, "my_new_lc".
#
# This could also be considered a rolling deploy of a pre-baked AMI.
#
# If this is a newly created group, the instances will not be replaced since all instances
# will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
# To only replace a couple of instances instead of all of them, supply a list
# to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
# Basic Configuration with Launch Template
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_template:
version: '1'
launch_template_name: 'lt-example'
launch_template_id: 'lt-123456'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
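# A minimal sketch (placeholder values) combining the metrics collection and
# process suspension options documented above:
- ec2_asg:
    name: myasg
    launch_config_name: my_new_lc
    min_size: 1
    max_size: 5
    desired_capacity: 3
    metrics_collection: yes
    suspend_processes:
      - AZRebalance
      - AlarmNotification
    region: us-east-1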
'''
RETURN = '''
---
auto_scaling_group_name:
description: The unique name of the auto scaling group
returned: success
type: str
sample: "myasg"
auto_scaling_group_arn:
description: The unique ARN of the autoscaling group
returned: success
type: str
sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg"
availability_zones:
description: The availability zones for the auto scaling group
returned: success
type: list
sample: [
"us-east-1d"
]
created_time:
description: Timestamp of create time of the auto scaling group
returned: success
type: str
sample: "2017-11-08T14:41:48.272000+00:00"
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
healthcheck_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
healthcheck_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
healthy_instances:
description: Number of instances in a healthy state
returned: success
type: int
sample: 5
in_service_instances:
description: Number of instances in service
returned: success
type: int
sample: 3
instance_facts:
description: Dictionary of EC2 instances and their status as it relates to the ASG.
returned: success
type: dict
sample: {
"i-0123456789012": {
"health_status": "Healthy",
"launch_config_name": "public-webapp-production-1",
"lifecycle_state": "InService"
}
}
instances:
description: list of instance IDs in the ASG
returned: success
type: list
sample: [
"i-0123456789012"
]
launch_config_name:
description: >
Name of launch configuration associated with the ASG. Same as launch_configuration_name,
provided for compatibility with ec2_asg module.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancers:
description: List of load balancers names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
pending_instances:
description: Number of instances in pending state
returned: success
type: int
sample: 1
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
target_group_arns:
description: List of ARNs of the target groups that the ASG populates
returned: success
type: list
sample: [
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
]
target_group_names:
description: List of names of the target groups that the ASG populates
returned: success
type: list
sample: [
"target-group-host-hello",
"target-group-path-world"
]
termination_policies:
description: A list of termination policies for the group.
returned: success
type: str
sample: ["Default"]
unhealthy_instances:
description: Number of instances in an unhealthy state
returned: success
type: int
sample: 0
viable_instances:
description: Number of instances in a viable state
returned: success
type: int
sample: 1
vpc_zone_identifier:
description: VPC zone ID / subnet id for the auto scaling group
returned: success
type: str
sample: "subnet-a31ef45f"
metrics_collection:
description: List of enabled AutoScalingGroup metrics
returned: success
type: list
sample: [
{
"Granularity": "1Minute",
"Metric": "GroupInServiceInstances"
}
]
'''
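# Example of consuming the documented return values from a playbook (illustrative
# only; task values are placeholders):
#
#   - ec2_asg:
#       name: myasg
#       launch_config_name: my_new_lc
#       min_size: 1
#       max_size: 5
#     register: asg_result
#
#   - debug:
#       msg: "{{ asg_result.auto_scaling_group_name }} has {{ asg_result.viable_instances }} viable instances"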
import time
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup',
'TerminationPolicies', 'VPCZoneIdentifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
backoff_params = dict(tries=10, delay=3, backoff=1.5)
@AWSRetry.backoff(**backoff_params)
def describe_autoscaling_groups(connection, group_name):
pg = connection.get_paginator('describe_auto_scaling_groups')
return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
@AWSRetry.backoff(**backoff_params)
def deregister_lb_instances(connection, lb_name, instance_id):
connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
@AWSRetry.backoff(**backoff_params)
def describe_instance_health(connection, lb_name, instances):
params = dict(LoadBalancerName=lb_name)
if instances:
params.update(Instances=instances)
return connection.describe_instance_health(**params)
@AWSRetry.backoff(**backoff_params)
def describe_target_health(connection, target_group_arn, instances):
return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances)
@AWSRetry.backoff(**backoff_params)
def suspend_asg_processes(connection, asg_name, processes):
connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
@AWSRetry.backoff(**backoff_params)
def resume_asg_processes(connection, asg_name, processes):
connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
@AWSRetry.backoff(**backoff_params)
def describe_launch_configurations(connection, launch_config_name):
pg = connection.get_paginator('describe_launch_configurations')
return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result()
@AWSRetry.backoff(**backoff_params)
def describe_launch_templates(connection, launch_template):
if launch_template['launch_template_id'] is not None:
try:
lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']])
return lt
except (botocore.exceptions.ClientError) as e:
module.fail_json(msg="No launch template found matching: %s" % launch_template)
else:
try:
lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']])
return lt
except (botocore.exceptions.ClientError) as e:
module.fail_json(msg="No launch template found matching: %s" % launch_template)
@AWSRetry.backoff(**backoff_params)
def create_asg(connection, **params):
connection.create_auto_scaling_group(**params)
@AWSRetry.backoff(**backoff_params)
def put_notification_config(connection, asg_name, topic_arn, notification_types):
connection.put_notification_configuration(
AutoScalingGroupName=asg_name,
TopicARN=topic_arn,
NotificationTypes=notification_types
)
@AWSRetry.backoff(**backoff_params)
def del_notification_config(connection, asg_name, topic_arn):
connection.delete_notification_configuration(
AutoScalingGroupName=asg_name,
TopicARN=topic_arn
)
@AWSRetry.backoff(**backoff_params)
def attach_load_balancers(connection, asg_name, load_balancers):
connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
@AWSRetry.backoff(**backoff_params)
def detach_load_balancers(connection, asg_name, load_balancers):
connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
@AWSRetry.backoff(**backoff_params)
def attach_lb_target_groups(connection, asg_name, target_group_arns):
connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
@AWSRetry.backoff(**backoff_params)
def detach_lb_target_groups(connection, asg_name, target_group_arns):
connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
@AWSRetry.backoff(**backoff_params)
def update_asg(connection, **params):
connection.update_auto_scaling_group(**params)
@AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params)
def delete_asg(connection, asg_name, force_delete):
connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete)
@AWSRetry.backoff(**backoff_params)
def terminate_asg_instance(connection, instance_id, decrement_capacity):
connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id,
ShouldDecrementDesiredCapacity=decrement_capacity)
def enforce_required_arguments_for_create():
''' As many arguments are not required for autoscale group deletion,
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None:
module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create")
for arg in ('min_size', 'max_size'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict()
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
instance_facts = dict()
autoscaling_group_instances = autoscaling_group.get('Instances')
if autoscaling_group_instances:
properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
for i in autoscaling_group_instances:
if i.get('LaunchConfigurationName'):
instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState'],
'launch_config_name': i['LaunchConfigurationName']}
elif i.get('LaunchTemplate'):
instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState'],
'launch_template': i['LaunchTemplate']}
else:
instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState']}
if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
properties['viable_instances'] += 1
if i['HealthStatus'] == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i['LifecycleState'] == 'InService':
properties['in_service_instances'] += 1
if i['LifecycleState'] == 'Terminating':
properties['terminating_instances'] += 1
if i['LifecycleState'] == 'Pending':
properties['pending_instances'] += 1
else:
properties['instances'] = []
properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName')
properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN')
properties['availability_zones'] = autoscaling_group.get('AvailabilityZones')
properties['created_time'] = autoscaling_group.get('CreatedTime')
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
if autoscaling_group.get('LaunchConfigurationName'):
properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
else:
properties['launch_template'] = autoscaling_group.get('LaunchTemplate')
properties['tags'] = autoscaling_group.get('Tags')
properties['min_size'] = autoscaling_group.get('MinSize')
properties['max_size'] = autoscaling_group.get('MaxSize')
properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier')
properties['metrics_collection'] = autoscaling_group.get('EnabledMetrics')
if properties['target_group_arns']:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
elbv2_connection = boto3_conn(module,
conn_type='client',
resource='elbv2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
tg_result = tg_paginator.paginate(TargetGroupArns=properties['target_group_arns']).build_full_result()
target_groups = tg_result['TargetGroups']
else:
target_groups = []
properties['target_group_names'] = [tg['TargetGroupName'] for tg in target_groups]
return properties
def get_launch_object(connection, ec2_connection):
launch_object = dict()
launch_config_name = module.params.get('launch_config_name')
launch_template = module.params.get('launch_template')
if launch_config_name is None and launch_template is None:
return launch_object
elif launch_config_name:
try:
launch_configs = describe_launch_configurations(connection, launch_config_name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to describe launch configurations",
exception=traceback.format_exc())
if len(launch_configs['LaunchConfigurations']) == 0:
module.fail_json(msg="No launch config found with name %s" % launch_config_name)
launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']}
return launch_object
elif launch_template:
lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0]
if launch_template['version'] is not None:
launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}}
else:
launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}}
return launch_object
def elb_dreg(asg_connection, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
wait_timeout = module.params.get('wait_timeout')
count = 1
if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
elb_connection = boto3_conn(module,
conn_type='client',
resource='elb',
region=region,
endpoint=ec2_url,
**aws_connect_params)
else:
return
for lb in as_group['LoadBalancerNames']:
deregister_lb_instances(elb_connection, lb, instance_id)
module.debug("De-registering %s from ELB %s" % (instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group['LoadBalancerNames']:
lb_instances = describe_instance_health(elb_connection, lb, [])
for i in lb_instances['InstanceStates']:
if i['InstanceId'] == instance_id and i['State'] == "InService":
count += 1
module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description']))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, group_name):
healthy_instances = set()
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(InstanceId=instance))
module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
module.debug("ELB instance status:")
lb_instances = list()
for lb in as_group.get('LoadBalancerNames'):
# we catch a race condition that sometimes happens if the instance exists in the ASG
# but has not yet shown up in the ELB
try:
lb_instances = describe_instance_health(elb_connection, lb, instances)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidInstance':
return None
module.fail_json(msg="Failed to get load balancer.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to get load balancer.",
exception=traceback.format_exc())
for i in lb_instances.get('InstanceStates'):
if i['State'] == "InService":
healthy_instances.add(i['InstanceId'])
module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
return len(healthy_instances)
def tg_healthy(asg_connection, elbv2_connection, group_name):
healthy_instances = set()
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(Id=instance))
module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
module.debug("Target Group instance status:")
tg_instances = list()
for tg in as_group.get('TargetGroupARNs'):
# we catch a race condition that sometimes happens if the instance exists in the ASG
# but has not yet shown up in the ELB
try:
tg_instances = describe_target_health(elbv2_connection, tg, instances)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidInstance':
return None
module.fail_json(msg="Failed to get target group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to get target group.",
exception=traceback.format_exc())
for i in tg_instances.get('TargetHealthDescriptions'):
if i['TargetHealth']['State'] == "healthy":
healthy_instances.add(i['Target']['Id'])
module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
return len(healthy_instances)
def wait_for_elb(asg_connection, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
# status so as to avoid the health_check_grace period that is awarded to ASG instances
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
module.debug("Waiting for ELB to consider instances healthy.")
elb_connection = boto3_conn(module,
conn_type='client',
resource='elb',
region=region,
endpoint=ec2_url,
**aws_connect_params)
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
module.debug("ELB thinks %s instances are healthy." % healthy_instances)
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
def wait_for_target_group(asg_connection, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
# status so as to avoid the health_check_grace period that is awarded to ASG instances
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
module.debug("Waiting for Target Group to consider instances healthy.")
elbv2_connection = boto3_conn(module,
conn_type='client',
resource='elbv2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
wait_timeout = time.time() + wait_timeout
healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances)
def suspend_processes(ec2_connection, as_group):
suspend_processes = set(module.params.get('suspend_processes'))
try:
suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']])
except AttributeError:
# New ASG being created, no suspended_processes defined yet
suspended_processes = set()
if suspend_processes == suspended_processes:
return False
resume_processes = list(suspended_processes - suspend_processes)
if resume_processes:
resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes)
if suspend_processes:
suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes))
return True
def create_autoscaling_group(connection):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
target_group_arns = module.params['target_group_arns']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
launch_template = module.params.get('launch_template')
min_size = module.params['min_size']
max_size = module.params['max_size']
placement_group = module.params.get('placement_group')
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
default_cooldown = module.params.get('default_cooldown')
wait_for_instances = module.params.get('wait_for_instances')
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
notification_topic = module.params.get('notification_topic')
notification_types = module.params.get('notification_types')
metrics_collection = module.params.get('metrics_collection')
metrics_granularity = module.params.get('metrics_granularity')
metrics_list = module.params.get('metrics_list')
try:
as_groups = describe_autoscaling_groups(connection, group_name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to describe auto scaling groups.",
exception=traceback.format_exc())
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
ec2_connection = boto3_conn(module,
conn_type='client',
resource='ec2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
if vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
for k, v in tag.items():
if k != 'propagate_at_launch':
asg_tags.append(dict(Key=k,
Value=to_native(v),
PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)),
ResourceType='auto-scaling-group',
ResourceId=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for
zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]
enforce_required_arguments_for_create()
if desired_capacity is None:
desired_capacity = min_size
ag = dict(
AutoScalingGroupName=group_name,
MinSize=min_size,
MaxSize=max_size,
DesiredCapacity=desired_capacity,
Tags=asg_tags,
HealthCheckGracePeriod=health_check_period,
HealthCheckType=health_check_type,
DefaultCooldown=default_cooldown,
TerminationPolicies=termination_policies)
if vpc_zone_identifier:
ag['VPCZoneIdentifier'] = vpc_zone_identifier
if availability_zones:
ag['AvailabilityZones'] = availability_zones
if placement_group:
ag['PlacementGroup'] = placement_group
if load_balancers:
ag['LoadBalancerNames'] = load_balancers
if target_group_arns:
ag['TargetGroupARNs'] = target_group_arns
launch_object = get_launch_object(connection, ec2_connection)
if 'LaunchConfigurationName' in launch_object:
ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
elif 'LaunchTemplate' in launch_object:
ag['LaunchTemplate'] = launch_object['LaunchTemplate']
else:
module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate",
exception=traceback.format_exc())
try:
create_asg(connection, **ag)
if metrics_collection:
connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
all_ag = describe_autoscaling_groups(connection, group_name)
if len(all_ag) == 0:
module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
as_group = all_ag[0]
suspend_processes(connection, as_group)
if wait_for_instances:
wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
if load_balancers:
wait_for_elb(connection, group_name)
                # Wait for target group health if target group(s) defined
if target_group_arns:
wait_for_target_group(connection, group_name)
if notification_topic:
put_notification_config(connection, group_name, notification_topic, notification_types)
as_group = describe_autoscaling_groups(connection, group_name)[0]
asg_properties = get_properties(as_group)
changed = True
return changed, asg_properties
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to create Autoscaling Group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to create Autoscaling Group.",
exception=traceback.format_exc())
else:
as_group = as_groups[0]
initial_asg_properties = get_properties(as_group)
changed = False
if suspend_processes(connection, as_group):
changed = True
# process tag changes
if len(set_tags) > 0:
have_tags = as_group.get('Tags')
want_tags = asg_tags
dead_tags = []
have_tag_keyvals = [x['Key'] for x in have_tags]
want_tag_keyvals = [x['Key'] for x in want_tags]
for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
changed = True
dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'],
ResourceType='auto-scaling-group', Key=dead_tag))
have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
if dead_tags:
connection.delete_tags(Tags=dead_tags)
zipped = zip(have_tags, want_tags)
if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
changed = True
connection.create_or_update_tags(Tags=asg_tags)
# Handle load balancer attachments/detachments
# Attach load balancers if they are specified but none currently exist
if load_balancers and not as_group['LoadBalancerNames']:
changed = True
try:
attach_load_balancers(connection, group_name, load_balancers)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc())
# Update load balancers if they are specified and one or more already exists
elif as_group['LoadBalancerNames']:
change_load_balancers = load_balancers is not None
# Get differences
if not load_balancers:
load_balancers = list()
wanted_elbs = set(load_balancers)
has_elbs = set(as_group['LoadBalancerNames'])
# check if all requested are already existing
if has_elbs - wanted_elbs and change_load_balancers:
# if wanted contains less than existing, then we need to delete some
elbs_to_detach = has_elbs.difference(wanted_elbs)
if elbs_to_detach:
changed = True
try:
detach_load_balancers(connection, group_name, list(elbs_to_detach))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to detach load balancers %s: %s." % (elbs_to_detach, to_native(e)),
exception=traceback.format_exc())
if wanted_elbs - has_elbs:
# if has contains less than wanted, then we need to add some
elbs_to_attach = wanted_elbs.difference(has_elbs)
if elbs_to_attach:
changed = True
try:
attach_load_balancers(connection, group_name, list(elbs_to_attach))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)),
exception=traceback.format_exc())
# Handle target group attachments/detachments
# Attach target groups if they are specified but none currently exist
if target_group_arns and not as_group['TargetGroupARNs']:
changed = True
try:
attach_lb_target_groups(connection, group_name, target_group_arns)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to update Autoscaling Group.",
exception=traceback.format_exc())
# Update target groups if they are specified and one or more already exists
elif target_group_arns is not None and as_group['TargetGroupARNs']:
# Get differences
wanted_tgs = set(target_group_arns)
has_tgs = set(as_group['TargetGroupARNs'])
            # detach any attached target groups that are no longer requested
            if has_tgs - wanted_tgs:
# if wanted contains less than existing, then we need to delete some
tgs_to_detach = has_tgs.difference(wanted_tgs)
if tgs_to_detach:
changed = True
try:
detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)),
exception=traceback.format_exc())
            if wanted_tgs - has_tgs:
# if has contains less than wanted, then we need to add some
tgs_to_attach = wanted_tgs.difference(has_tgs)
if tgs_to_attach:
changed = True
try:
attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)),
exception=traceback.format_exc())
# check for attributes that aren't required for updating an existing ASG
# check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group['MinSize']
if max_size is None:
max_size = as_group['MaxSize']
if desired_capacity is None:
desired_capacity = as_group['DesiredCapacity']
ag = dict(
AutoScalingGroupName=group_name,
MinSize=min_size,
MaxSize=max_size,
DesiredCapacity=desired_capacity,
HealthCheckGracePeriod=health_check_period,
HealthCheckType=health_check_type,
DefaultCooldown=default_cooldown,
TerminationPolicies=termination_policies)
# Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not.
launch_object = get_launch_object(connection, ec2_connection)
if 'LaunchConfigurationName' in launch_object:
ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
elif 'LaunchTemplate' in launch_object:
ag['LaunchTemplate'] = launch_object['LaunchTemplate']
else:
try:
ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
except Exception:
launch_template = as_group['LaunchTemplate']
# Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}
if availability_zones:
ag['AvailabilityZones'] = availability_zones
if vpc_zone_identifier:
ag['VPCZoneIdentifier'] = vpc_zone_identifier
try:
update_asg(connection, **ag)
if metrics_collection:
connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
else:
connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e),
exception=traceback.format_exc())
if notification_topic:
try:
put_notification_config(connection, group_name, notification_topic, notification_types)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to update Autoscaling Group notifications.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to update Autoscaling Group notifications.",
exception=traceback.format_exc())
if wait_for_instances:
wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
            # Wait for ELB health if ELB(s) defined
if load_balancers:
module.debug('\tWAITING FOR ELB HEALTH')
wait_for_elb(connection, group_name)
            # Wait for target group health if target group(s) defined
if target_group_arns:
module.debug('\tWAITING FOR TG HEALTH')
wait_for_target_group(connection, group_name)
try:
as_group = describe_autoscaling_groups(connection, group_name)[0]
asg_properties = get_properties(as_group)
if asg_properties != initial_asg_properties:
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to read existing Autoscaling Groups.",
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to read existing Autoscaling Groups.",
exception=traceback.format_exc())
return changed, asg_properties
def delete_autoscaling_group(connection):
group_name = module.params.get('name')
notification_topic = module.params.get('notification_topic')
wait_for_instances = module.params.get('wait_for_instances')
wait_timeout = module.params.get('wait_timeout')
if notification_topic:
del_notification_config(connection, group_name, notification_topic)
groups = describe_autoscaling_groups(connection, group_name)
if groups:
wait_timeout = time.time() + wait_timeout
if not wait_for_instances:
delete_asg(connection, group_name, force_delete=True)
else:
updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
update_asg(connection, **updated_params)
instances = True
while instances and wait_for_instances and wait_timeout >= time.time():
tmp_groups = describe_autoscaling_groups(connection, group_name)
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.get('Instances'):
instances = False
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
delete_asg(connection, group_name, force_delete=False)
while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time():
time.sleep(5)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime())
return True
return False
def get_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
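# Illustrative example (not part of the original module):
#   list(get_chunks(['i-1', 'i-2', 'i-3'], 2)) == [['i-1', 'i-2'], ['i-3']]
# replace() uses this to walk the instance list in replace_batch_size batches.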
def update_size(connection, group, max_size, min_size, dc):
module.debug("setting ASG sizes")
module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
updated_group = dict()
updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
updated_group['MinSize'] = min_size
updated_group['MaxSize'] = max_size
updated_group['DesiredCapacity'] = dc
update_asg(connection, **updated_group)
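# Note (comment added for readability): replace() below performs a rolling
# replacement. It temporarily raises the ASG min/max/desired sizes by
# replace_batch_size, waits for the new instances and their ELB/target-group
# health, terminates old instances batch by batch via terminate_batch(), and
# finally restores the original sizes with update_size().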
def replace(connection):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
launch_config_name = module.params.get('launch_config_name')
    # lc_check defaults to 'true'; only honor it when a launch config is actually in use
if launch_config_name:
lc_check = module.params.get('lc_check')
else:
lc_check = False
# Mirror above behaviour for Launch Templates
launch_template = module.params.get('launch_template')
if launch_template:
lt_check = module.params.get('lt_check')
else:
lt_check = False
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
as_group = describe_autoscaling_groups(connection, group_name)[0]
if desired_capacity is None:
desired_capacity = as_group['DesiredCapacity']
wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
props = get_properties(as_group)
instances = props['instances']
if replace_all_instances:
# If replacing all instances, then set replace_instances to current set
        # This allows replace_instances and replace_all_instances to behave the same
replace_instances = instances
if replace_instances:
instances = replace_instances
# check to see if instances are replaceable if checking launch configs
if launch_config_name:
new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances)
elif launch_template:
new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check or lt_check:
if num_new_inst_needed == 0 and old_instances:
module.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, old_instances, instances, True)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
module.debug("Overriding batch size to %s" % num_new_inst_needed)
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
# check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group['MinSize']
if max_size is None:
max_size = as_group['MaxSize']
# set temporary settings and wait for them to be reached
# This should get overwritten if the number of instances left is less than the batch size.
as_group = describe_autoscaling_groups(connection, group_name)[0]
update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
wait_for_elb(connection, group_name)
wait_for_target_group(connection, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
module.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
wait_for_term_inst(connection, term_instances)
wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, group_name)
wait_for_target_group(connection, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
if break_early:
module.debug("breaking loop")
break
update_size(connection, as_group, max_size, min_size, desired_capacity)
as_group = describe_autoscaling_groups(connection, group_name)[0]
asg_properties = get_properties(as_group)
module.debug("Rolling update complete.")
changed = True
return(changed, asg_properties)
def get_instances_by_launch_config(props, lc_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props['instances']:
# Check if migrating from launch_template to launch_config first
if 'launch_template' in props['instance_facts'][i]:
old_instances.append(i)
elif props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
module.debug("Comparing initial instances with current: %s" % initial_instances)
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
return new_instances, old_instances
def get_instances_by_launch_template(props, lt_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch template or version of the same launch template
if lt_check:
for i in props['instances']:
# Check if migrating from launch_config_name to launch_template_name first
if 'launch_config_name' in props['instance_facts'][i]:
old_instances.append(i)
elif props['instance_facts'][i]['launch_template'] == props['launch_template']:
new_instances.append(i)
else:
old_instances.append(i)
else:
module.debug("Comparing initial instances with current: %s" % initial_instances)
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
return new_instances, old_instances
def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances):
instances_to_terminate = []
instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if module.params.get('launch_config_name'):
if lc_check:
for i in instances:
if 'launch_template' in props['instance_facts'][i]:
instances_to_terminate.append(i)
elif props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
elif module.params.get('launch_template'):
if lt_check:
for i in instances:
if 'launch_config_name' in props['instance_facts'][i]:
instances_to_terminate.append(i)
elif props['instance_facts'][i]['launch_template'] != props['launch_template']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
lt_check = module.params.get('lt_check')
decrement_capacity = False
break_loop = False
as_group = describe_autoscaling_groups(connection, group_name)[0]
if desired_capacity is None:
desired_capacity = as_group['DesiredCapacity']
props = get_properties(as_group)
desired_size = as_group['MinSize']
if module.params.get('launch_config_name'):
new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances)
else:
new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances)
module.debug("new instances needed: %s" % num_new_inst_needed)
module.debug("new instances: %s" % new_instances)
module.debug("old instances: %s" % old_instances)
module.debug("batch instances: %s" % ",".join(instances_to_terminate))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group['MinSize'] != min_size:
if min_size is None:
min_size = as_group['MinSize']
updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
update_asg(connection, **updated_params)
module.debug("Updating minimum size back to original of %s" % min_size)
        # if there are some leftover old instances, but we are already at capacity
        # with new ones, we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
module.debug("No new instances needed")
if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
module.debug("%s new instances needed" % num_new_inst_needed)
module.debug("decrementing capacity: %s" % decrement_capacity)
for instance_id in instances_to_terminate:
elb_dreg(connection, group_name, instance_id)
module.debug("terminating instance: %s" % instance_id)
terminate_asg_instance(connection, instance_id, decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, term_instances):
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
as_group = describe_autoscaling_groups(connection, group_name)[0]
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
module.debug("waiting for instances to terminate")
count = 0
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = (i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health))
if lifecycle.startswith('Terminating') or health == 'Unhealthy':
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
time.sleep(10)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
module.debug("Reached %s: %s" % (prop, desired_size))
return props
def asg_exists(connection):
group_name = module.params.get('name')
as_group = describe_autoscaling_groups(connection, group_name)
return bool(len(as_group))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
target_group_arns=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
launch_template=dict(type='dict',
default=None,
options=dict(
version=dict(type='str'),
launch_template_name=dict(type='str'),
launch_template_id=dict(type='str'),
),
),
min_size=dict(type='int'),
max_size=dict(type='int'),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
lt_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(type='list', default=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
]),
suspend_processes=dict(type='list', default=[]),
metrics_collection=dict(type='bool', default=False),
metrics_granularity=dict(type='str', default='1Minute'),
metrics_list=dict(type='list', default=[
'GroupMinSize',
'GroupMaxSize',
'GroupDesiredCapacity',
'GroupInServiceInstances',
'GroupPendingInstances',
'GroupStandbyInstances',
'GroupTerminatingInstances',
'GroupTotalInstances'
])
),
)
global module
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['replace_all_instances', 'replace_instances'],
['launch_config_name', 'launch_template']]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module,
conn_type='client',
resource='autoscaling',
region=region,
endpoint=ec2_url,
**aws_connect_params)
changed = create_changed = replace_changed = False
exists = asg_exists(connection)
if state == 'present':
create_changed, asg_properties = create_autoscaling_group(connection)
elif state == 'absent':
changed = delete_autoscaling_group(connection)
module.exit_json(changed=changed)
# Only replace instances if asg existed at start of call
if exists and (replace_all_instances or replace_instances) and (module.params.get('launch_config_name') or module.params.get('launch_template')):
replace_changed, asg_properties = replace(connection)
if create_changed or replace_changed:
changed = True
module.exit_json(changed=changed, **asg_properties)
if __name__ == '__main__':
main()
|
the-stack_0_16903 | # -*- coding: utf-8 -*-
import logging
from operator import mod
from typing import List
from nodedge.blocks.block import Block
from nodedge.blocks.block_config import BLOCKS_ICONS_PATH, registerNode
from nodedge.blocks.block_exception import EvaluationError
from nodedge.socket_type import SocketType
_LOG = logging.getLogger(__name__)
try:
from nodedge.blocks.block_config import OP_NODE_MODULO
except ImportError:
    _LOG.warning(f"Not registered block: {__name__}")
    OP_NODE_MODULO = -1  # placeholder so the registerNode decorator below still has a value
@registerNode(OP_NODE_MODULO)
class ModBlock(Block):
icon = f"{BLOCKS_ICONS_PATH}/percentage_100.png"
operationCode = OP_NODE_MODULO
operationTitle = "Modulo"
contentLabel = "%"
contentLabelObjectName = "BlockBackground"
evalString = "mod"
library = "operator"
inputSocketTypes: List[SocketType] = [
SocketType.Number,
SocketType.Number,
]
outputSocketTypes: List[SocketType] = [
SocketType.Number,
]
def evalImplementation(self):
inputs = []
for i in range(len(self.inputSockets)):
inputs.append(self.inputNodeAt(i))
try:
evaluatedInputs = [str(currentInput.eval()) for currentInput in inputs]
operation = f"{ModBlock.evalString}({', '.join(evaluatedInputs)})"
result = eval(operation)
except TypeError as e:
raise EvaluationError(e)
self.value = result
return self.value
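# Illustrative note (not part of the original file): with two input sockets
# evaluating to 7 and 3, evalImplementation() builds the string "mod(7, 3)",
# eval() resolves it against the operator.mod imported above, and self.value
# becomes 1.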
|
the-stack_0_16904 | def atomicdictionary():
    atomic = {"H": "Hydrogen", "He": "Helium", "Li": "Lithium", "Be": "Beryllium", "B": "Boron", "C": "Carbon", "N": "Nitrogen", "F": "Fluorine", "Ne": "Neon"}
print(atomic)
sym = input("Enter an existing symbol: ")
name = input("Enter an element name: ")
atomic[sym] = name
print(atomic)
sym = input("Enter a new symbol: ")
name = input("Enter a new element name: ")
atomic[sym] = name
print(atomic)
print("No. of elements: ",len(atomic))
    ele = input("Enter element to search: ")
    if ele in atomic:
        print("Element found!")
|
the-stack_0_16905 | import os, time
from datetime import datetime
from panda3d.core import *
from direct.distributed.MsgTypes import *
from direct.gui.DirectGui import *
from direct.gui.DirectGuiGlobals import NO_FADE_SORT_INDEX
from direct.fsm import StateData
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from otp.otpgui import OTPDialog
from otp.otpbase import OTPLocalizer
from otp.otpbase import OTPGlobals
from otp.uberdog.AccountDetailRecord import AccountDetailRecord, SubDetailRecord
import TTAccount
import GuiScreen
class LoginScreen(StateData.StateData, GuiScreen.GuiScreen):
AutoLoginName = base.config.GetString('%s-auto-login%s' % (game.name, os.getenv('otp_client', '')), '')
AutoLoginPassword = base.config.GetString('%s-auto-password%s' % (game.name, os.getenv('otp_client', '')), '')
notify = DirectNotifyGlobal.directNotify.newCategory('LoginScreen')
ActiveEntryColor = Vec4(1, 1, 1, 1)
InactiveEntryColor = Vec4(0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1)
def __init__(self, cr, doneEvent):
self.notify.debug('__init__')
StateData.StateData.__init__(self, doneEvent)
GuiScreen.GuiScreen.__init__(self)
self.cr = cr
self.loginInterface = self.cr.loginInterface
self.userName = ''
self.password = ''
self.fsm = ClassicFSM.ClassicFSM('LoginScreen', [
State.State('off', self.enterOff, self.exitOff, [
'login',
'waitForLoginResponse']),
State.State('login', self.enterLogin, self.exitLogin, [
'waitForLoginResponse',
'login',
'showLoginFailDialog']),
State.State('showLoginFailDialog', self.enterShowLoginFailDialog, self.exitShowLoginFailDialog, [
'login',
'showLoginFailDialog']),
State.State('waitForLoginResponse', self.enterWaitForLoginResponse, self.exitWaitForLoginResponse, [
'login',
'showLoginFailDialog',
'showConnectionProblemDialog']),
State.State('showConnectionProblemDialog', self.enterShowConnectionProblemDialog, self.exitShowConnectionProblemDialog, [
'login'])], 'off', 'off')
self.fsm.enterInitialState()
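        # State flow summary (comment added for readability): 'login' collects
        # credentials, 'waitForLoginResponse' submits them and waits for the
        # server reply; failures route to 'showLoginFailDialog' or
        # 'showConnectionProblemDialog', both of which can return to 'login'.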
def load(self):
self.notify.debug('load')
        masterScale = 0.8
        textScale = 0.1 * masterScale
        entryScale = 0.08 * masterScale
        lineHeight = 0.21 * masterScale
        buttonScale = 1.15 * masterScale
        buttonLineHeight = 0.14 * masterScale
self.frame = DirectFrame(parent = aspect2d, relief = None, sortOrder = 20)
self.frame.hide()
        linePos = -0.26
self.nameLabel = DirectLabel(parent = self.frame, relief = None, pos = (-0.20999999999999999, 0, linePos), text = OTPLocalizer.LoginScreenUserName, text_scale = textScale, text_align = TextNode.ARight)
self.nameEntry = DirectEntry(parent = self.frame, relief = DGG.SUNKEN, borderWidth = (0.10000000000000001, 0.10000000000000001), scale = entryScale, pos = (-0.125, 0.0, linePos), width = OTPGlobals.maxLoginWidth, numLines = 1, focus = 0, cursorKeys = 1)
linePos -= lineHeight
self.passwordLabel = DirectLabel(parent = self.frame, relief = None, pos = (-0.20999999999999999, 0, linePos), text = OTPLocalizer.LoginScreenPassword, text_scale = textScale, text_align = TextNode.ARight)
self.passwordEntry = DirectEntry(parent = self.frame, relief = DGG.SUNKEN, borderWidth = (0.10000000000000001, 0.10000000000000001), scale = entryScale, pos = (-0.125, 0.0, linePos), width = OTPGlobals.maxLoginWidth, numLines = 1, focus = 0, cursorKeys = 1, obscured = 1, command = self._LoginScreen__handleLoginPassword)
linePos -= lineHeight
buttonImageScale = (1.7, 1.1000000000000001, 1.1000000000000001)
self.loginButton = DirectButton(parent = self.frame, relief = DGG.RAISED, borderWidth = (0.01, 0.01), pos = (0, 0, linePos), scale = buttonScale, text = OTPLocalizer.LoginScreenLogin, text_scale = 0.059999999999999998, text_pos = (0, -0.02), command = self._LoginScreen__handleLoginButton)
linePos -= buttonLineHeight
self.createAccountButton = DirectButton(parent = self.frame, relief = DGG.RAISED, borderWidth = (0.01, 0.01), pos = (0, 0, linePos), scale = buttonScale, text = OTPLocalizer.LoginScreenCreateAccount, text_scale = 0.059999999999999998, text_pos = (0, -0.02), command = self._LoginScreen__handleCreateAccount)
linePos -= buttonLineHeight
self.quitButton = DirectButton(parent = self.frame, relief = DGG.RAISED, borderWidth = (0.01, 0.01), pos = (0, 0, linePos), scale = buttonScale, text = OTPLocalizer.LoginScreenQuit, text_scale = 0.059999999999999998, text_pos = (0, -0.02), command = self._LoginScreen__handleQuit)
linePos -= buttonLineHeight
self.dialogDoneEvent = 'loginDialogAck'
dialogClass = OTPGlobals.getGlobalDialogClass()
self.dialog = dialogClass(dialogName = 'loginDialog', doneEvent = self.dialogDoneEvent, message = '', style = OTPDialog.Acknowledge, sortOrder = NO_FADE_SORT_INDEX + 100)
self.dialog.hide()
self.failDialog = DirectFrame(parent = aspect2dp, relief = DGG.RAISED, borderWidth = (0.01, 0.01), pos = (0, 0.10000000000000001, 0), text = '', text_scale = 0.080000000000000002, text_pos = (0.0, 0.29999999999999999), text_wordwrap = 15, sortOrder = NO_FADE_SORT_INDEX)
        linePos = -0.05
self.failTryAgainButton = DirectButton(parent = self.failDialog, relief = DGG.RAISED, borderWidth = (0.01, 0.01), pos = (0, 0, linePos), scale = 0.90000000000000002, text = OTPLocalizer.LoginScreenTryAgain, text_scale = 0.059999999999999998, text_pos = (0, -0.02), command = self._LoginScreen__handleFailTryAgain)
linePos -= buttonLineHeight
self.failCreateAccountButton = DirectButton(parent = self.failDialog, relief = DGG.RAISED, borderWidth = (0.01, 0.01), pos = (0, 0, linePos), scale = 0.90000000000000002, text = OTPLocalizer.LoginScreenCreateAccount, text_scale = 0.059999999999999998, text_pos = (0, -0.02), command = self._LoginScreen__handleFailCreateAccount)
linePos -= buttonLineHeight
self.failDialog.hide()
self.connectionProblemDialogDoneEvent = 'loginConnectionProblemDlgAck'
dialogClass = OTPGlobals.getGlobalDialogClass()
self.connectionProblemDialog = dialogClass(dialogName = 'connectionProblemDialog', doneEvent = self.connectionProblemDialogDoneEvent, message = '', style = OTPDialog.Acknowledge, sortOrder = NO_FADE_SORT_INDEX + 100)
self.connectionProblemDialog.hide()
def unload(self):
self.notify.debug('unload')
self.nameEntry.destroy()
self.passwordEntry.destroy()
self.failTryAgainButton.destroy()
self.failCreateAccountButton.destroy()
self.createAccountButton.destroy()
self.loginButton.destroy()
self.quitButton.destroy()
self.dialog.cleanup()
del self.dialog
self.failDialog.destroy()
del self.failDialog
self.connectionProblemDialog.cleanup()
del self.connectionProblemDialog
self.frame.destroy()
del self.fsm
del self.loginInterface
del self.cr
def enter(self):
if self.cr.playToken:
self.userName = '*'
self.password = self.cr.playToken
self.fsm.request('waitForLoginResponse')
else:
self.fsm.request('login')
def exit(self):
self.frame.hide()
self.ignore(self.dialogDoneEvent)
self.fsm.requestFinalState()
def enterOff(self):
pass
def exitOff(self):
pass
def enterLogin(self):
self.cr.resetPeriodTimer(None)
self.userName = ''
self.password = ''
self.userName = launcher.getLastLogin()
if self.userName and self.nameEntry.get():
if self.userName != self.nameEntry.get():
self.userName = ''
self.frame.show()
self.nameEntry.enterText(self.userName)
self.passwordEntry.enterText(self.password)
self.focusList = [
self.nameEntry,
self.passwordEntry]
focusIndex = 0
if self.userName:
focusIndex = 1
self.startFocusMgmt(startFocus = focusIndex)
def exitLogin(self):
self.stopFocusMgmt()
def enterShowLoginFailDialog(self, msg):
base.transitions.fadeScreen(0.5)
self.failDialog['text'] = msg
self.failDialog.show()
def _LoginScreen__handleFailTryAgain(self):
self.fsm.request('login')
def _LoginScreen__handleFailCreateAccount(self):
messenger.send(self.doneEvent, [
{
'mode': 'createAccount' }])
def _LoginScreen__handleFailNoNewAccountsAck(self):
self.dialog.hide()
self.fsm.request('showLoginFailDialog', [
self.failDialog['text']])
def exitShowLoginFailDialog(self):
base.transitions.noTransitions()
self.failDialog.hide()
def _LoginScreen__handleLoginPassword(self, password):
if password != '':
if self.nameEntry.get() != '':
self._LoginScreen__handleLoginButton()
def _LoginScreen__handleLoginButton(self):
self.removeFocus()
self.userName = self.nameEntry.get()
self.password = self.passwordEntry.get()
if self.userName == '':
self.dialog.setMessage(OTPLocalizer.LoginScreenLoginPrompt)
self.dialog.show()
self.acceptOnce(self.dialogDoneEvent, self._LoginScreen__handleEnterLoginAck)
else:
self.fsm.request('waitForLoginResponse')
def _LoginScreen__handleQuit(self):
self.removeFocus()
messenger.send(self.doneEvent, [
{
'mode': 'quit' }])
def _LoginScreen__handleCreateAccount(self):
self.removeFocus()
messenger.send(self.doneEvent, [
{
'mode': 'createAccount' }])
def enterWaitForLoginResponse(self):
self.cr.handler = self.handleWaitForLoginResponse
self.cr.userName = self.userName
self.cr.password = self.password
try:
error = self.loginInterface.authorize(self.userName, self.password)
        except TTAccount.TTAccountException as e:
self.fsm.request('showConnectionProblemDialog', [
str(e)])
return None
if error:
self.notify.info(error)
freeTimeExpired = self.loginInterface.getErrorCode() == 10
if freeTimeExpired:
self.cr.logAccountInfo()
messenger.send(self.doneEvent, [
{
'mode': 'freeTimeExpired' }])
else:
self.fsm.request('showLoginFailDialog', [
error])
else:
self.loginInterface.sendLoginMsg()
self.waitForDatabaseTimeout(requestName = 'WaitForLoginResponse')
def exitWaitForLoginResponse(self):
self.cleanupWaitingForDatabase()
self.cr.handler = None
def enterShowConnectionProblemDialog(self, msg):
self.connectionProblemDialog.setMessage(msg)
self.connectionProblemDialog.show()
self.acceptOnce(self.connectionProblemDialogDoneEvent, self._LoginScreen__handleConnectionProblemAck)
def _LoginScreen__handleConnectionProblemAck(self):
self.connectionProblemDialog.hide()
self.fsm.request('login')
def exitShowConnectionProblemDialog(self):
pass
def handleWaitForLoginResponse(self, msgType, di):
if msgType == CLIENT_LOGIN_2_RESP:
self.handleLoginResponseMsg2(di)
elif msgType == CLIENT_LOGIN_RESP:
self.handleLoginResponseMsg(di)
elif msgType == CLIENT_LOGIN_3_RESP:
self.handleLoginResponseMsg3(di)
elif msgType == CLIENT_LOGIN_TOONTOWN_RESP:
self.handleLoginToontownResponse(di)
else:
self.cr.handleMessageType(msgType, di)
def getExtendedErrorMsg(self, errorString):
prefix = 'Bad DC Version Compare'
if len(errorString) < len(prefix):
return errorString
if errorString[:len(prefix)] == prefix:
return '%s%s' % (errorString, ', address=%s' % base.cr.getServerAddress())
return errorString
def handleLoginResponseMsg3(self, di):
now = time.time()
returnCode = di.getInt8()
errorString = self.getExtendedErrorMsg(di.getString())
self.notify.info('Login response return code %s' % returnCode)
if returnCode != 0:
self.notify.info('Login failed: %s' % errorString)
messenger.send(self.doneEvent, [
{
'mode': 'reject' }])
return None
accountDetailRecord = AccountDetailRecord()
accountDetailRecord.openChatEnabled = di.getString() == 'YES'
accountDetailRecord.createFriendsWithChat = di.getString() == 'YES'
chatCodeCreation = di.getString()
accountDetailRecord.chatCodeCreation = chatCodeCreation == 'YES'
parentControlledChat = chatCodeCreation == 'PARENT'
access = di.getString()
if access == 'VELVET':
access = OTPGlobals.AccessVelvetRope
elif access == 'FULL':
access = OTPGlobals.AccessFull
else:
self.notify.warning('Unknown access: %s' % access)
access = OTPGlobals.AccessUnknown
accountDetailRecord.piratesAccess = access
accountDetailRecord.familyAccountId = di.getInt32()
accountDetailRecord.playerAccountId = di.getInt32()
accountDetailRecord.playerName = di.getString()
accountDetailRecord.playerNameApproved = di.getInt8()
accountDetailRecord.maxAvatars = di.getInt32()
self.cr.openChatAllowed = accountDetailRecord.openChatEnabled
if not accountDetailRecord.chatCodeCreation:
pass
self.cr.secretChatAllowed = parentControlledChat
self.cr.setIsPaid(accountDetailRecord.piratesAccess)
self.userName = accountDetailRecord.playerName
self.cr.userName = accountDetailRecord.playerName
accountDetailRecord.numSubs = di.getUint16()
for i in range(accountDetailRecord.numSubs):
subDetailRecord = SubDetailRecord()
subDetailRecord.subId = di.getUint32()
subDetailRecord.subOwnerId = di.getUint32()
subDetailRecord.subName = di.getString()
subDetailRecord.subActive = di.getString()
access = di.getString()
if access == 'VELVET':
access = OTPGlobals.AccessVelvetRope
elif access == 'FULL':
access = OTPGlobals.AccessFull
else:
access = OTPGlobals.AccessUnknown
subDetailRecord.subAccess = access
subDetailRecord.subLevel = di.getUint8()
subDetailRecord.subNumAvatars = di.getUint8()
subDetailRecord.subNumConcur = di.getUint8()
subDetailRecord.subFounder = di.getString() == 'YES'
accountDetailRecord.subDetails[subDetailRecord.subId] = subDetailRecord
accountDetailRecord.WLChatEnabled = di.getString() == 'YES'
if accountDetailRecord.WLChatEnabled:
self.cr.whiteListChatEnabled = 1
else:
self.cr.whiteListChatEnabled = 0
self.notify.info('End of DISL token parse')
self.notify.info('accountDetailRecord: %s' % accountDetailRecord)
self.cr.accountDetailRecord = accountDetailRecord
self._LoginScreen__handleLoginSuccess()
def handleLoginResponseMsg2(self, di):
self.notify.debug('handleLoginResponseMsg2')
if self.notify.getDebug():
dgram = di.getDatagram()
dgram.dumpHex(ostream)
now = time.time()
returnCode = di.getUint8()
errorString = self.getExtendedErrorMsg(di.getString())
self.userName = di.getString()
self.cr.userName = self.userName
accountDetailRecord = AccountDetailRecord()
self.cr.accountDetailRecord = accountDetailRecord
canChat = di.getUint8()
self.cr.secretChatAllowed = canChat
self.notify.info('Chat from game server login: %s' % canChat)
sec = di.getUint32()
usec = di.getUint32()
serverTime = sec + usec / 1000000.0
self.cr.serverTimeUponLogin = serverTime
self.cr.clientTimeUponLogin = now
self.cr.globalClockRealTimeUponLogin = globalClock.getRealTime()
if hasattr(self.cr, 'toontownTimeManager'):
self.cr.toontownTimeManager.updateLoginTimes(serverTime, now, self.cr.globalClockRealTimeUponLogin)
serverDelta = serverTime - now
self.cr.setServerDelta(serverDelta)
self.notify.setServerDelta(serverDelta, 28800)
self.isPaid = di.getUint8()
self.cr.setIsPaid(self.isPaid)
if self.isPaid:
launcher.setPaidUserLoggedIn()
self.notify.info('Paid from game server login: %s' % self.isPaid)
self.cr.resetPeriodTimer(None)
if di.getRemainingSize() >= 4:
minutesRemaining = di.getInt32()
self.notify.info('Minutes remaining from server %s' % minutesRemaining)
if minutesRemaining >= 0:
self.notify.info('Spawning period timer')
self.cr.resetPeriodTimer(minutesRemaining * 60)
elif self.isPaid:
self.notify.warning('Negative minutes remaining for paid user (?)')
else:
self.notify.warning('Not paid, but also negative minutes remaining (?)')
else:
self.notify.info('Minutes remaining not returned from server; not spawning period timer')
familyStr = di.getString()
WhiteListResponse = di.getString()
if WhiteListResponse == 'YES':
self.cr.whiteListChatEnabled = 1
else:
self.cr.whiteListChatEnabled = 0
if di.getRemainingSize() > 0:
self.cr.accountDays = self.parseAccountDays(di.getInt32())
else:
self.cr.accountDays = 100000
if di.getRemainingSize() > 0:
self.lastLoggedInStr = di.getString()
self.notify.info('last logged in = %s' % self.lastLoggedInStr)
else:
self.lastLoggedInStr = ''
self.cr.lastLoggedIn = datetime.now()
if hasattr(self.cr, 'toontownTimeManager'):
self.cr.lastLoggedIn = self.cr.toontownTimeManager.convertStrToToontownTime(self.lastLoggedInStr)
self.cr.withParentAccount = False
self.notify.info('Login response return code %s' % returnCode)
if returnCode == 0:
self._LoginScreen__handleLoginSuccess()
elif returnCode == -13:
self.notify.info('Period Time Expired')
self.fsm.request('showLoginFailDialog', [
OTPLocalizer.LoginScreenPeriodTimeExpired])
else:
self.notify.info('Login failed: %s' % errorString)
messenger.send(self.doneEvent, [
{
'mode': 'reject' }])
def handleLoginResponseMsg(self, di):
self.notify.debug('handleLoginResponseMsg1')
if self.notify.getDebug():
dgram = di.getDatagram() |
the-stack_0_16906 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import exception
from nova.i18n import _
from nova import servicegroup
authorize = extensions.extension_authorizer('compute', 'hypervisors')
class HypervisorsController(object):
"""The Hypervisors API controller for the OpenStack API."""
def __init__(self, ext_mgr):
self.host_api = compute.HostAPI()
self.servicegroup_api = servicegroup.API()
super(HypervisorsController, self).__init__()
self.ext_mgr = ext_mgr
def _view_hypervisor(self, hypervisor, service, detail, servers=None,
**kwargs):
hyp_dict = {
'id': hypervisor.id,
'hypervisor_hostname': hypervisor.hypervisor_hostname,
}
ext_status_loaded = self.ext_mgr.is_loaded('os-hypervisor-status')
if ext_status_loaded:
alive = self.servicegroup_api.service_is_up(service)
hyp_dict['state'] = 'up' if alive else "down"
hyp_dict['status'] = (
'disabled' if service.disabled else 'enabled')
if detail and not servers:
fields = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
'hypervisor_type', 'hypervisor_version',
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least')
ext_loaded = self.ext_mgr.is_loaded('os-extended-hypervisors')
if ext_loaded:
fields += ('host_ip',)
for field in fields:
hyp_dict[field] = hypervisor[field]
hyp_dict['service'] = {
'id': service.id,
'host': hypervisor.host,
}
if ext_status_loaded:
hyp_dict['service'].update(
disabled_reason=service.disabled_reason)
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
for serv in servers]
# Add any additional info
if kwargs:
hyp_dict.update(kwargs)
return hyp_dict
def index(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in compute_nodes])
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
True)
for hyp in compute_nodes])
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
service = self.host_api.service_get_by_compute_host(context, hyp.host)
return dict(hypervisor=self._view_hypervisor(hyp, service, True))
def uptime(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
# Get the uptime
try:
host = hyp.host
uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
service = self.host_api.service_get_by_compute_host(context, host)
return dict(hypervisor=self._view_hypervisor(hyp, service, False,
uptime=uptime))
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
hypervisors = self.host_api.compute_node_search_by_hypervisor(
context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in hypervisors])
else:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_search_by_hypervisor(
context, id)
if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
hypervisors = []
for compute_node in compute_nodes:
instances = self.host_api.instance_get_all_by_host(context,
compute_node.host)
service = self.host_api.service_get_by_compute_host(
context, compute_node.host)
hyp = self._view_hypervisor(compute_node, service, False,
instances)
hypervisors.append(hyp)
return dict(hypervisors=hypervisors)
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.ExtensionDescriptor):
"""Admin-only hypervisor administration."""
name = "Hypervisors"
alias = "os-hypervisors"
namespace = "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1"
updated = "2012-06-21T00:00:00Z"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hypervisors',
HypervisorsController(self.ext_mgr),
collection_actions={'detail': 'GET',
'statistics': 'GET'},
member_actions={'uptime': 'GET',
'search': 'GET',
'servers': 'GET'})]
return resources
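# Illustrative route layout (inferred from the registration above, not part
# of the original file): the extension exposes GET /os-hypervisors,
# /os-hypervisors/detail and /os-hypervisors/statistics (collection), plus
# GET /os-hypervisors/{id}, /{id}/uptime, /{id}/search and /{id}/servers
# (member), mapping to index/detail/statistics/show/uptime/search/servers
# on the controller.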
|
the-stack_0_16907 | # Copyright Action Without Borders, Inc., the Alfajor authors and contributors.
# All rights reserved. See AUTHORS.
#
# This file is part of 'Alfajor' and is distributed under the BSD license.
# See LICENSE for more details.
"""An in-process browser that acts as a WSGI server."""
from __future__ import absolute_import
import cookielib
from cookielib import Cookie
import dummy_threading
from cStringIO import StringIO
from logging import getLogger
import os.path
from urlparse import urljoin, urlparse, urlunparse
from time import time
import urllib2
from wsgiref.util import request_uri
from blinker import signal
from werkzeug import (
BaseResponse,
FileStorage,
MultiDict,
create_environ,
parse_cookie,
run_wsgi_app,
url_encode,
)
from werkzeug.test import encode_multipart
from alfajor.browsers._lxml import (
ButtonElement,
DOMElement,
DOMMixin,
FormElement,
InputElement,
SelectElement,
TextareaElement,
html_parser_for,
)
from alfajor.browsers._waitexpr import WaitExpression
from alfajor.utilities import lazy_property, to_pairs
from alfajor._compat import property
__all__ = ['WSGI']
logger = getLogger('tests.browser')
after_browser_activity = signal('after_browser_activity')
before_browser_activity = signal('before_browser_activity')
class WSGI(DOMMixin):
capabilities = [
'in-process',
'cookies',
'headers',
'status',
]
wait_expression = WaitExpression
_wsgi_server = {
'multithread': False,
'multiprocess': False,
'run_once': False,
}
user_agent = {
'browser': 'wsgi',
'platform': 'python',
'version': '1.0',
}
def __init__(self, wsgi_app, base_url=None):
# accept additional request headers? (e.g. user agent)
self._wsgi_app = wsgi_app
self._base_url = base_url
self._referrer = None
self._request_environ = None
self._cookie_jar = CookieJar()
self._charset = 'utf-8'
self.status_code = 0
self.status = ''
self.response = None
self.headers = ()
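    # Illustrative usage sketch (assumes `app` is any WSGI callable; not part
    # of the original file):
    #   browser = WSGI(app, base_url='http://localhost/')
    #   browser.open('/')
    #   browser.status, browser.cookies, browser.document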
def open(self, url, wait_for=None, timeout=0):
"""Open web page at *url*."""
self._open(url, refer=False)
def reset(self):
self._cookie_jar = CookieJar()
@property
def location(self):
if not self._request_environ:
return None
return request_uri(self._request_environ)
def wait_for(self, condition, timeout=None):
pass
def sync_document(self):
"""The document is always synced."""
_sync_document = DOMMixin.sync_document
@property
def cookies(self):
if not (self._cookie_jar and self.location):
return {}
request = urllib2.Request(self.location)
policy = self._cookie_jar._policy
policy._now = int(time())
# return ok will only return a cookie if the following attrs are set
        # correctly: "version", "verifiability", "secure", "expires",
# "port", "domain"
return dict((c.name, c.value.strip('"'))
for c in self._cookie_jar if policy.return_ok(c, request))
def set_cookie(self, name, value, domain=None, path=None,
session=True, expires=None, port=None, request=None):
"""
:param expires: Seconds from epoch
:param port: must match request port
:param domain: the fqn of your server hostname
"""
# Cookie(version, name, value, port, port_specified,
# domain, domain_specified, domain_initial_dot,
# path, path_specified, secure, expires,
# discard, comment, comment_url, rest,
# rfc2109=False):
cookie = Cookie(0, name, value, port, bool(port),
domain or '', bool(domain),
(domain and domain.startswith('.')),
path or '', bool(path), False, expires,
session, None, None, {}, False)
self._cookie_jar.set_cookie(cookie)
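    # Example (illustrative, not part of the original file):
    #   browser.set_cookie('session', 'abc123', domain='localhost', path='/')
    # adds a cookie to the jar that will be exported into the WSGI environ of
    # later open() calls whose URL matches that domain and path.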
def delete_cookie(self, name, domain=None, path=None):
try:
self._cookie_jar.clear(domain, path, name)
except KeyError:
pass
# Internal methods
@lazy_property
def _lxml_parser(self):
return html_parser_for(self, wsgi_elements)
def _open(self, url, method='GET', data=None, refer=True, content_type=None):
before_browser_activity.send(self)
open_started = time()
environ = self._create_environ(url, method, data, refer, content_type)
# keep a copy, the app may mutate the environ
request_environ = dict(environ)
logger.info('%s(%s) == %s', method, url, request_uri(environ))
request_started = time()
rv = run_wsgi_app(self._wsgi_app, environ)
response = BaseResponse(*rv)
# TODO:
# response.make_sequence() # werkzeug 0.6+
# For now, must:
response.response = list(response.response)
if hasattr(rv[0], 'close'):
rv[0].close()
# end TODO
# request is complete after the app_iter (rv[0]) has been fully read +
# closed down.
request_ended = time()
self._request_environ = request_environ
self._cookie_jar.extract_from_werkzeug(response, environ)
self.status_code = response.status_code
# Automatically follow redirects
if 301 <= self.status_code <= 302:
logger.debug("Redirect to %s", response.headers['Location'])
after_browser_activity.send(self)
self._open(response.headers['Location'])
return
# redirects report the original referrer
self._referrer = request_uri(environ)
self.status = response.status
self.headers = response.headers
# TODO: unicodify
self.response = response.data
self._sync_document()
# TODO: what does a http-equiv redirect report for referrer?
if 'meta[http-equiv=refresh]' in self.document:
refresh = self.document['meta[http-equiv=refresh]'][0]
if 'content' in refresh.attrib:
parts = refresh.get('content').split(';url=', 1)
if len(parts) == 2:
logger.debug("HTTP-EQUIV Redirect to %s", parts[1])
after_browser_activity.send(self)
self._open(parts[1])
return
open_ended = time()
request_time = request_ended - request_started
logger.info("Fetched %s in %0.3fsec + %0.3fsec browser overhead",
url, request_time,
open_ended - open_started - request_time)
after_browser_activity.send(self)
def _create_environ(self, url, method, data, refer, content_type=None):
"""Return an environ to request *url*, including cookies."""
environ_args = dict(self._wsgi_server, method=method)
base_url = self._referrer if refer else self._base_url
environ_args.update(self._canonicalize_url(url, base_url))
environ_args.update(self._prep_input(method, data, content_type))
environ = create_environ(**environ_args)
if refer and self._referrer:
environ['HTTP_REFERER'] = self._referrer
environ.setdefault('REMOTE_ADDR', '127.0.0.1')
self._cookie_jar.export_to_environ(environ)
return environ
def _canonicalize_url(self, url, base_url):
"""Return fully qualified URL components formatted for environ."""
if '?' in url:
url, query_string = url.split('?', 1)
else:
query_string = None
canonical = {'query_string': query_string}
# canonicalize against last request (add host/port, resolve
# relative paths)
if base_url:
url = urljoin(base_url, url)
parsed = urlparse(url)
if not parsed.scheme:
raise RuntimeError(
"No base url available for resolving relative url %r" % url)
canonical['path'] = urlunparse((
'', '', parsed.path, parsed.params, '', ''))
canonical['base_url'] = urlunparse((
parsed.scheme, parsed.netloc, '', '', '', ''))
return canonical
def _prep_input(self, method, data, content_type):
"""Return encoded and packed POST data."""
if data is None or method != 'POST':
prepped = {
'input_stream': None,
'content_length': None,
'content_type': None,
}
if method == 'GET' and data:
qs = MultiDict()
for key, value in to_pairs(data):
qs.setlistdefault(key).append(value)
prepped['query_string'] = url_encode(qs)
return prepped
else:
payload = url_encode(MultiDict(to_pairs(data)))
content_type = 'application/x-www-form-urlencoded'
return {
'input_stream': StringIO(payload),
'content_length': len(payload),
'content_type': content_type
}
def _wrap_file(filename, content_type):
"""Open the file *filename* and wrap in a FileStorage object."""
assert os.path.isfile(filename), "File does not exist."
return FileStorage(
stream=open(filename, 'rb'),
filename=os.path.basename(filename),
content_type=content_type
)
class FormElement(FormElement):
"""A <form/> that can be submitted."""
def submit(self, wait_for=None, timeout=0, _extra_values=()):
"""Submit the form's values.
Equivalent to hitting 'return' in a browser form: the data is
submitted without the submit button's key/value pair.
"""
if _extra_values and hasattr(_extra_values, 'items'):
_extra_values = _extra_values.items()
values = self.form_values()
values.extend(_extra_values)
method = self.method or 'GET'
if self.action:
action = self.action
elif self.browser._referrer:
action = urlparse(self.browser._referrer).path
else:
action = '/'
self.browser._open(action, method=method, data=values,
content_type=self.get('enctype'))
class InputElement(InputElement):
"""An <input/> tag."""
# Toss aside checkbox code present in the base lxml @value
@property
def value(self):
return self.get('value')
@value.setter
def value(self, value):
self.set('value', value)
@value.deleter
def value(self):
if 'value' in self.attrib:
del self.attrib['value']
def click(self, wait_for=None, timeout=None):
if self.checkable:
self.checked = not self.checked
return
if self.type != 'submit':
super(InputElement, self).click(wait_for, timeout)
return
for element in self.iterancestors():
if element.tag == 'form':
break
else:
# Not in a form: clicking does nothing.
# TODO: probably not true
return
extra = ()
if 'name' in self.attrib:
extra = [[self.attrib['name'], self.attrib.get('value', 'Submit')]]
element.submit(wait_for=wait_for, timeout=timeout, _extra_values=extra)
class ButtonElement(object):
"""Buttons that can be .click()ed."""
def click(self, wait_for=None, timeout=0):
# TODO: process type=submit|reset|button?
for element in self.iterancestors():
if element.tag == 'form':
break
else:
# Not in a form: clicking does nothing.
return
pairs = []
name = self.attrib.get('name', False)
if name:
pairs.append((name, self.attrib.get('value', '')))
return element.submit(_extra_values=pairs)
class LinkElement(object):
"""Links that can be .click()ed."""
def click(self, wait_for=None, timeout=0):
try:
link = self.attrib['href']
        except KeyError:
pass
else:
self.browser._open(link, 'GET')
wsgi_elements = {
'*': DOMElement,
'a': LinkElement,
'button': ButtonElement,
'form': FormElement,
'input': InputElement,
'select': SelectElement,
'textarea': TextareaElement,
}
class CookieJar(cookielib.CookieJar):
"""A lock-less CookieJar that can clone itself."""
def __init__(self, policy=None):
if policy is None:
policy = cookielib.DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
self._cookies_lock = dummy_threading.RLock()
def export_to_environ(self, environ):
if len(self):
u_request = _WSGI_urllib2_request(environ)
self.add_cookie_header(u_request)
def extract_from_werkzeug(self, response, request_environ):
headers = response.headers
if 'Set-Cookie' in headers or 'Set-Cookie2' in headers:
u_response = _Werkzeug_urlib2_response(response)
u_request = _WSGI_urllib2_request(request_environ)
self.extract_cookies(u_response, u_request)
class _Duck(object):
"""Has arbitrary attributes assigned at construction time."""
def __init__(self, **kw):
for attr, value in kw.iteritems():
setattr(self, attr, value)
class _Werkzeug_urlib2_response(object):
__slots__ = 'response',
def __init__(self, response):
self.response = response
def info(self):
return _Duck(getallmatchingheaders=self.response.headers.getlist,
getheaders=self.response.headers.getlist)
class _WSGI_urllib2_request(object):
def __init__(self, environ):
self.environ = environ
self.url = request_uri(self.environ)
self.url_parts = urlparse(self.url)
def get_full_url(self):
return self.url
def get_host(self):
return self.url_parts.hostname
def get_type(self):
return self.url_parts.scheme
def is_unverifiable(self):
return False
def get_origin_req_host(self):
raise Exception('fixme need previous request')
def has_header(self, header):
key = header.replace('-', '_').upper()
return key in self.environ or 'HTTP_%s' % key in self.environ
def get_header(self, header):
return self.environ.get('HTTP_%s' % header.replace('-', '_').upper())
def header_items(self):
items = []
for key, value in self.environ.iteritems():
if ((key.startswith('HTTP_') or key.startswith('CONTENT_')) and
isinstance(value, basestring)):
if key.startswith('HTTP_'):
key = key[5:]
key = key.replace('_', '-').title()
items.append((key, value))
return items
def add_unredirected_header(self, key, value):
if key == 'Cookie':
            self.environ['HTTP_COOKIE'] = value
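# Usage sketch (illustrative only; `app` stands for any WSGI callable and the
# URLs are made up -- none of this is part of the original module):
#
#     browser = WSGI(app, base_url='http://localhost/')
#     browser.open('/')
#     print browser.status_code, browser.cookies
#     form = browser.document['form'][0]
#     form.submit()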
|
the-stack_0_16910 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyMplhepData(PythonPackage):
"""Font (Data) sub-package for mplhep"""
homepage = "https://github.com/Scikit-HEP/mplhep_data"
pypi = "mplhep_data/mplhep_data-0.0.3.tar.gz"
version('0.0.3', sha256='b54d257f3f53c93a442cda7a6681ce267277e09173c0b41fd78820f78321772f')
    depends_on('python@3.7:', type=('build', 'run'))
depends_on('py-setuptools@42:', type='build')
    depends_on('py-setuptools-scm@3.4:+toml', type='build')
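    # Installation sketch (illustrative): with this recipe on a Spack repo path,
    # the package is installed with `spack install py-mplhep-data`.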
|
the-stack_0_16912 | import FWCore.ParameterSet.Config as cms
process = cms.Process("DTT0Analyzer")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag.globaltag = ''
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Geometry.DTGeometry.dtGeometry_cfi")
process.DTGeometryESModule.applyAlignment = False
process.DTGeometryESModule.fromDDD = False
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.dtT0Analyzer = cms.EDAnalyzer("DTT0Analyzer",
rootFileName = cms.untracked.string("")
)
process.p = cms.Path(process.dtT0Analyzer)
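# Run sketch (illustrative; assumes the empty globaltag and rootFileName above
# are filled in and this configuration is saved as e.g. dtT0Analyzer_cfg.py):
#
#     cmsRun dtT0Analyzer_cfg.py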
|
the-stack_0_16913 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
    The class is analogous to `threading.Thread`
'''
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = next(_current_process._counter)
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, str), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
@property
def authkey(self):
return self._authkey
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
        Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from . import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
if sys.stdin is not None:
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
_current_process = self
util._finalizer_registry.clear()
util._run_after_forkers()
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit as e:
if not e.args:
exitcode = 1
elif type(e.args[0]) is int:
exitcode = e.args[0]
else:
sys.stderr.write(e.args[0] + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
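# Usage sketch (illustrative, not part of the original module); the API mirrors
# threading.Thread:
#
#     def work(x):
#         print('working on', x)
#
#     p = Process(target=work, args=(42,))
#     p.start()
#     p.join()
#     print(p.name, p.exitcode)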
|
the-stack_0_16914 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from . import geom
import math
import random
from math import sqrt, hypot
# Points are 3-tuples or 2-tuples of reals: (x,y,z) or (x,y)
# Faces are lists of integers (vertex indices into coord lists)
# After triangulation/quadrangulation, the tris and quads will
# be tuples instead of lists.
# Vmaps are lists taking vertex index -> Point
TOL = 1e-7 # a tolerance for fuzzy equality
GTHRESH = 75 # threshold above which use greedy to _Quandrangulate
ANGFAC = 1.0 # weighting for angles in quad goodness measure
DEGFAC = 10.0 # weighting for degree in quad goodness measure
# Angle kind constants
Ang0 = 1
Angconvex = 2
Angreflex = 3
Angtangential = 4
Ang360 = 5
def TriangulateFace(face, points):
"""Triangulate the given face.
Uses an easy triangulation first, followed by a constrained delauney
triangulation to get better shaped triangles.
Args:
face: list of int - indices in points, assumed CCW-oriented
points: geom.Points - holds coordinates for vertices
Returns:
list of (int, int, int) - 3-tuples are CCW-oriented vertices of
triangles making up the triangulation
"""
if len(face) <= 3:
return [tuple(face)]
tris = EarChopTriFace(face, points)
bord = _BorderEdges([face])
triscdt = _CDT(tris, bord, points)
return triscdt
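# Usage sketch (illustrative; assumes geom.Points exposes a `pos` list of
# coordinate tuples, as indexed throughout this module):
#
#     pts = geom.Points()
#     pts.pos = [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]
#     tris = TriangulateFace([0, 1, 2, 3], pts)   # two CCW triangles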
def TriangulateFaceWithHoles(face, holes, points):
"""Like TriangulateFace, but with holes inside the face.
Works by making one complex polygon that has segments to
and from the holes ("islands"), and then using the same method
as TriangulateFace.
Args:
face: list of int - indices in points, assumed CCW-oriented
holes: list of list of int - each sublist is like face
but CW-oriented and assumed to be inside face
points: geom.Points - holds coordinates for vertices
Returns:
list of (int, int, int) - 3-tuples are CCW-oriented vertices of
triangles making up the triangulation
"""
if len(holes) == 0:
return TriangulateFace(face, points)
allfaces = [face] + holes
sholes = [_SortFace(h, points) for h in holes]
joinedface = _JoinIslands(face, sholes, points)
tris = EarChopTriFace(joinedface, points)
bord = _BorderEdges(allfaces)
triscdt = _CDT(tris, bord, points)
return triscdt
def QuadrangulateFace(face, points):
"""Quadrangulate the face (subdivide into convex quads and tris).
Like TriangulateFace, but after triangulating, join as many pairs
of triangles as possible into convex quadrilaterals.
Args:
face: list of int - indices in points, assumed CCW-oriented
points: geom.Points - holds coordinates for vertices
Returns:
list of 3-tuples or 4-tuples of ints - CCW-oriented vertices of
quadrilaterals and triangles making up the quadrangulation.
"""
if len(face) <= 3:
return [tuple(face)]
tris = EarChopTriFace(face, points)
bord = _BorderEdges([face])
triscdt = _CDT(tris, bord, points)
qs = _Quandrangulate(triscdt, bord, points)
return qs
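# Usage sketch (illustrative; continues the TriangulateFace example above):
#
#     quads = QuadrangulateFace([0, 1, 2, 3], pts)   # ideally a single CCW quad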
def QuadrangulateFaceWithHoles(face, holes, points):
"""Like QuadrangulateFace, but with holes inside the faces.
Args:
face: list of int - indices in points, assumed CCW-oriented
holes: list of list of int - each sublist is like face
but CW-oriented and assumed to be inside face
points: geom.Points - holds coordinates for vertices
Returns:
list of 3-tuples or 4-tuples of ints - CCW-oriented vertices of
quadrilaterals and triangles making up the quadrangulation.
"""
if len(holes) == 0:
return QuadrangulateFace(face, points)
allfaces = [face] + holes
sholes = [_SortFace(h, points) for h in holes]
joinedface = _JoinIslands(face, sholes, points)
tris = EarChopTriFace(joinedface, points)
bord = _BorderEdges(allfaces)
triscdt = _CDT(tris, bord, points)
qs = _Quandrangulate(triscdt, bord, points)
return qs
def _SortFace(face, points):
"""Rotate face so leftmost vertex is first, where face is
list of indices in points."""
n = len(face)
if n <= 1:
return face
lefti = 0
leftv = face[0]
for i in range(1, n):
# following comparison is lexicographic on n-tuple
# so sorts on x first, using lower y as tie breaker.
if points.pos[face[i]] < points.pos[leftv]:
lefti = i
leftv = face[i]
return face[lefti:] + face[0:lefti]
def EarChopTriFace(face, points):
"""Triangulate given face, with coords given by indexing into points.
Return list of faces, each of which will be a triangle.
Use the ear-chopping method."""
# start with lowest coord in 2d space to try
# to get a pleasing uniform triangulation if starting with
# a regular structure (like a grid)
start = _GetLeastIndex(face, points)
ans = []
incr = 1
n = len(face)
while n > 3:
i = _FindEar(face, n, start, incr, points)
vm1 = face[(i - 1) % n]
v0 = face[i]
v1 = face[(i + 1) % n]
face = _ChopEar(face, i)
n = len(face)
incr = - incr
if incr == 1:
start = i % n
else:
start = (i - 1) % n
ans.append((vm1, v0, v1))
ans.append(tuple(face))
return ans
def _GetLeastIndex(face, points):
"""Return index of coordinate that is leftmost, lowest in face."""
bestindex = 0
bestpos = points.pos[face[0]]
for i in range(1, len(face)):
pos = points.pos[face[i]]
if pos[0] < bestpos[0] or \
(pos[0] == bestpos[0] and pos[1] < bestpos[1]):
bestindex = i
bestpos = pos
return bestindex
def _FindEar(face, n, start, incr, points):
"""An ear of a polygon consists of three consecutive vertices
v(-1), v0, v1 such that v(-1) can connect to v1 without intersecting
the polygon.
Finds an ear, starting at index 'start' and moving
in direction incr. (We attempt to alternate directions, to find
'nice' triangulations for simple convex polygons.)
    Returns index into face of v0 (will always find one, because it
    uses a desperation mode if it fails to find one with the above rule)."""
angk = _ClassifyAngles(face, n, points)
for mode in range(0, 5):
i = start
while True:
if _IsEar(face, i, n, angk, points, mode):
return i
i = (i + incr) % n
if i == start:
break # try next higher desperation mode
def _IsEar(face, i, n, angk, points, mode):
"""Return true, false depending on ear status of vertices
with indices i-1, i, i+1.
mode is amount of desperation: 0 is Normal mode,
mode 1 allows degenerate triangles (with repeated vertices)
mode 2 allows local self crossing (folded) ears
mode 3 allows any convex vertex (should always be one)
mode 4 allows anything (just to be sure loop terminates!)"""
k = angk[i]
vm2 = face[(i - 2) % n]
vm1 = face[(i - 1) % n]
v0 = face[i]
v1 = face[(i + 1) % n]
v2 = face[(i + 2) % n]
if vm1 == v0 or v0 == v1:
return (mode > 0)
b = (k == Angconvex or k == Angtangential or k == Ang0)
c = _InCone(vm1, v0, v1, v2, angk[(i + 1) % n], points) and \
_InCone(v1, vm2, vm1, v0, angk[(i - 1) % n], points)
if b and c:
return _EarCheck(face, n, angk, vm1, v0, v1, points)
if mode < 2:
return False
if mode == 3:
return SegsIntersect(vm2, vm1, v0, v1, points)
if mode == 4:
return b
return True
def _EarCheck(face, n, angk, vm1, v0, v1, points):
"""Return True if the successive vertices vm1, v0, v1
forms an ear. We already know that it is not a reflex
Angle, and that the local cone containment is ok.
What remains to check is that the edge vm1-v1 doesn't
intersect any other edge of the face (besides vm1-v0
and v0-v1). Equivalently, there can't be a reflex Angle
inside the triangle vm1-v0-v1. (Well, there are
messy cases when other points of the face coincide with
v0 or touch various lines involved in the ear.)"""
for j in range(0, n):
fv = face[j]
k = angk[j]
b = (k == Angreflex or k == Ang360) \
and not(fv == vm1 or fv == v0 or fv == v1)
if b:
# Is fv inside closure of triangle (vm1,v0,v1)?
c = not(Ccw(v0, vm1, fv, points) \
or Ccw(vm1, v1, fv, points) \
or Ccw(v1, v0, fv, points))
fvm1 = face[(j - 1) % n]
fv1 = face[(j + 1) % n]
# To try to deal with some degenerate cases,
# also check to see if either segment attached to fv
# intersects either segment of potential ear.
d = SegsIntersect(fvm1, fv, vm1, v0, points) or \
SegsIntersect(fvm1, fv, v0, v1, points) or \
SegsIntersect(fv, fv1, vm1, v0, points) or \
SegsIntersect(fv, fv1, v0, v1, points)
if c or d:
return False
return True
def _ChopEar(face, i):
"""Return a copy of face (of length n), omitting element i."""
return face[0:i] + face[i + 1:]
def _InCone(vtest, a, b, c, bkind, points):
"""Return true if point with index vtest is in Cone of points with
indices a, b, c, where Angle ABC has AngleKind Bkind.
The Cone is the set of points inside the left face defined by
segments ab and bc, disregarding all other segments of polygon for
purposes of inside test."""
if bkind == Angreflex or bkind == Ang360:
if _InCone(vtest, c, b, a, Angconvex, points):
return False
return not((not(Ccw(b, a, vtest, points)) \
and not(Ccw(b, vtest, a, points)) \
and Ccw(b, a, vtest, points))
or
(not(Ccw(b, c, vtest, points)) \
and not(Ccw(b, vtest, c, points)) \
and Ccw(b, a, vtest, points)))
else:
return Ccw(a, b, vtest, points) and Ccw(b, c, vtest, points)
def _JoinIslands(face, holes, points):
"""face is a CCW face containing the CW faces in the holes list,
where each hole is sorted so the leftmost-lowest vertex is first.
faces and holes are given as lists of indices into points.
    The holes should be sorted by _SortFace.
Add edges to make a new face that includes the holes (a Ccw traversal
of the new face will have the inside always on the left),
and return the new face."""
while len(holes) > 0:
(hole, holeindex) = _LeftMostFace(holes, points)
holes = holes[0:holeindex] + holes[holeindex + 1:]
face = _JoinIsland(face, hole, points)
return face
def _JoinIsland(face, hole, points):
"""Return a modified version of face that splices in the
vertices of hole (which should be sorted)."""
if len(hole) == 0:
return face
hv0 = hole[0]
d = _FindDiag(face, hv0, points)
newface = face[0:d + 1] + hole + [hv0] + face[d:]
return newface
def _LeftMostFace(holes, points):
"""Return (hole,index of hole in holes) where hole has
the leftmost first vertex. To be able to handle empty
holes gracefully, call an empty hole 'leftmost'.
    Assumes holes are sorted by _SortFace."""
assert(len(holes) > 0)
lefti = 0
lefthole = holes[0]
if len(lefthole) == 0:
return (lefthole, lefti)
leftv = lefthole[0]
for i in range(1, len(holes)):
ihole = holes[i]
if len(ihole) == 0:
return (ihole, i)
iv = ihole[0]
if points.pos[iv] < points.pos[leftv]:
(lefti, lefthole, leftv) = (i, ihole, iv)
return (lefthole, lefti)
def _FindDiag(face, hv, points):
"""Find a vertex in face that can see vertex hv, if possible,
and return the index into face of that vertex.
Should be able to find a diagonal that connects a vertex of face
    left of hv to hv without crossing face, but try two
more desperation passes after that to get SOME diagonal, even if
it might cross some edge somewhere.
First desperation pass (mode == 1): allow points right of hv.
Second desperation pass (mode == 2): allow crossing boundary poly"""
besti = - 1
bestdist = 1e30
for mode in range(0, 3):
for i in range(0, len(face)):
v = face[i]
if mode == 0 and points.pos[v] > points.pos[hv]:
continue # in mode 0, only want points left of hv
dist = _DistSq(v, hv, points)
if dist < bestdist:
if _IsDiag(i, v, hv, face, points) or mode == 2:
(besti, bestdist) = (i, dist)
if besti >= 0:
break # found one, so don't need other modes
assert(besti >= 0)
return besti
def _IsDiag(i, v, hv, face, points):
"""Return True if vertex v (at index i in face) can see vertex hv.
v and hv are indices into points.
(v, hv) is a diagonal if hv is in the cone of the Angle at index i on face
    and no segment in face intersects (v, hv).
"""
n = len(face)
vm1 = face[(i - 1) % n]
v1 = face[(i + 1) % n]
k = _AngleKind(vm1, v, v1, points)
if not _InCone(hv, vm1, v, v1, k, points):
return False
for j in range(0, n):
vj = face[j]
vj1 = face[(j + 1) % n]
if SegsIntersect(v, hv, vj, vj1, points):
return False
return True
def _DistSq(a, b, points):
"""Return distance squared between coords with indices a and b in points.
"""
diff = Sub2(points.pos[a], points.pos[b])
return Dot2(diff, diff)
def _BorderEdges(facelist):
"""Return a set of (u,v) where u and v are successive vertex indices
in some face in the list in facelist."""
ans = set()
for i in range(0, len(facelist)):
f = facelist[i]
for j in range(1, len(f)):
ans.add((f[j - 1], f[j]))
ans.add((f[-1], f[0]))
return ans
def _CDT(tris, bord, points):
"""Tris is a list of triangles ((a,b,c), CCW-oriented indices into points)
Bord is a set of border edges (u,v), oriented so that tris
is a triangulation of the left face of the border(s).
Make the triangulation "Constrained Delaunay" by flipping "reversed"
    quadrilaterals until no more can be flipped.
Return list of triangles in new triangulation."""
td = _TriDict(tris)
re = _ReveresedEdges(tris, td, bord, points)
ts = set(tris)
# reverse the reversed edges until done.
    # reversing an edge adds new edges, which may or
# may not be reversed or border edges, to re for
# consideration, but the process will stop eventually.
while len(re) > 0:
(a, b) = e = re.pop()
if e in bord or not _IsReversed(e, td, points):
continue
# rotate e in quad adbc to get other diagonal
erev = (b, a)
tl = td.get(e)
tr = td.get(erev)
if not tl or not tr:
continue # shouldn't happen
c = _OtherVert(tl, a, b)
d = _OtherVert(tr, a, b)
if c is None or d is None:
continue # shouldn't happen
newt1 = (c, d, b)
newt2 = (c, a, d)
del td[e]
del td[erev]
td[(c, d)] = newt1
td[(d, b)] = newt1
td[(b, c)] = newt1
td[(d, c)] = newt2
td[(c, a)] = newt2
td[(a, d)] = newt2
if tl in ts:
ts.remove(tl)
if tr in ts:
ts.remove(tr)
ts.add(newt1)
ts.add(newt2)
re.extend([(d, b), (b, c), (c, a), (a, d)])
return list(ts)
def _TriDict(tris):
"""tris is a list of triangles (a,b,c), CCW-oriented indices.
Return dict mapping all edges in the triangles to the containing
    triangle."""
ans = dict()
for i in range(0, len(tris)):
(a, b, c) = t = tris[i]
ans[(a, b)] = t
ans[(b, c)] = t
ans[(c, a)] = t
return ans
def _ReveresedEdges(tris, td, bord, points):
"""Return list of reversed edges in tris.
Only want edges not in bord, and only need one representative
of (u,v)/(v,u), so choose the one with u < v.
td is dictionary from _TriDict, and is used to find left and right
triangles of edges."""
ans = []
for i in range(0, len(tris)):
(a, b, c) = tris[i]
for e in [(a, b), (b, c), (c, a)]:
if e in bord:
continue
(u, v) = e
if u < v:
if _IsReversed(e, td, points):
ans.append(e)
return ans
def _IsReversed(e, td, points):
"""If e=(a,b) is a non-border edge, with left-face triangle tl and
right-face triangle tr, then it is 'reversed' if the circle through
a, b, and (say) the other vertex of tl contains the other vertex of tr.
td is a _TriDict, for finding triangles containing edges, and points
gives the coordinates for vertex indices used in edges."""
tl = td.get(e)
if not tl:
return False
(a, b) = e
tr = td.get((b, a))
if not tr:
return False
c = _OtherVert(tl, a, b)
d = _OtherVert(tr, a, b)
if c is None or d is None:
return False
return InCircle(a, b, c, d, points)
def _OtherVert(tri, a, b):
"""tri should be a tuple of 3 vertex indices, two of which are a and b.
Return the third index, or None if all vertices are a or b"""
for v in tri:
if v != a and v != b:
return v
return None
def _ClassifyAngles(face, n, points):
"""Return vector of anglekinds of the Angle around each point in face."""
return [_AngleKind(face[(i - 1) % n], face[i], face[(i + 1) % n], points) \
for i in list(range(0, n))]
def _AngleKind(a, b, c, points):
"""Return one of the Ang... constants to classify Angle formed by ABC,
in a counterclockwise traversal of a face,
where a, b, c are indices into points."""
if Ccw(a, b, c, points):
return Angconvex
elif Ccw(a, c, b, points):
return Angreflex
else:
vb = points.pos[b]
udotv = Dot2(Sub2(vb, points.pos[a]), Sub2(points.pos[c], vb))
if udotv > 0.0:
return Angtangential
else:
return Ang0 # to fix: return Ang360 if "inside" spur
def _Quandrangulate(tris, bord, points):
"""Tris is list of triangles, forming a triangulation of region whose
border edges are in set bord.
Combine adjacent triangles to make quads, trying for "good" quads where
possible. Some triangles will probably remain uncombined"""
(er, td) = _ERGraph(tris, bord, points)
if len(er) == 0:
return tris
if len(er) > GTHRESH:
match = _GreedyMatch(er)
else:
match = _MaxMatch(er)
return _RemoveEdges(tris, match)
def _RemoveEdges(tris, match):
"""tris is list of triangles.
    match is as returned from _MaxMatch or _GreedyMatch.
Return list of (A,D,B,C) resulting from deleting edge (A,B) causing a merge
of two triangles; append to that list the remaining unmatched triangles."""
ans = []
triset = set(tris)
while len(match) > 0:
(_, e, tl, tr) = match.pop()
(a, b) = e
if tl in triset:
triset.remove(tl)
if tr in triset:
triset.remove(tr)
c = _OtherVert(tl, a, b)
d = _OtherVert(tr, a, b)
if c is None or d is None:
continue
ans.append((a, d, b, c))
return ans + list(triset)
def _ERGraph(tris, bord, points):
"""Make an 'Edge Removal Graph'.
Given a list of triangles, the 'Edge Removal Graph' is a graph whose
nodes are the triangles (think of a point in the center of them),
and whose edges go between adjacent triangles (they share a non-border
edge), such that it would be possible to remove the shared edge
and form a convex quadrilateral. Forming a quadrilateralization
is then a matter of finding a matching (set of edges that don't
share a vertex - remember, these are the 'face' vertices).
    For better quadrilateralization, we'll make the Edge Removal Graph
edges have weights, with higher weights going to the edges that
are more desirable to remove. Then we want a maximum weight matching
in this graph.
We'll return the graph in a kind of implicit form, using edges of
the original triangles as a proxy for the edges between the faces
(i.e., the edge of the triangle is the shared edge). We'll arbitrarily
pick the triangle graph edge with lower-index start vertex.
Also, to aid in traversing the implicit graph, we'll keep the left
and right triangle triples with edge 'ER edge'.
Finally, since we calculate it anyway, we'll return a dictionary
mapping edges of the triangles to the triangle triples they're in.
Args:
tris: list of (int, int, int) giving a triple of vertex indices for
triangles, assumed CCW oriented
bord: set of (int, int) giving vertex indices for border edges
points: geom.Points - for mapping vertex indices to coords
Returns:
(list of (weight,e,tl,tr), dict)
where edge e=(a,b) is non-border edge
with left face tl and right face tr (each a triple (i,j,k)),
where removing the edge would form an "OK" quad (no concave angles),
with weight representing the desirability of removing the edge
The dict maps int pairs (a,b) to int triples (i,j,k), that is,
mapping edges to their containing triangles.
"""
td = _TriDict(tris)
dd = _DegreeDict(tris)
ans = []
ctris = tris[:] # copy, so argument not affected
while len(ctris) > 0:
(i, j, k) = tl = ctris.pop()
for e in [(i, j), (j, k), (k, i)]:
if e in bord:
continue
(a, b) = e
# just consider one of (a,b) and (b,a), to avoid dups
if a > b:
continue
erev = (b, a)
tr = td.get(erev)
if not tr:
continue
c = _OtherVert(tl, a, b)
d = _OtherVert(tr, a, b)
if c is None or d is None:
continue
# calculate amax, the max of the new angles that would
# be formed at a and b if tl and tr were combined
amax = max(Angle(c, a, b, points) + Angle(d, a, b, points),
Angle(c, b, a, points) + Angle(d, b, a, points))
if amax > 180.0:
continue
weight = ANGFAC * (180.0 - amax) + DEGFAC * (dd[a] + dd[b])
ans.append((weight, e, tl, tr))
return (ans, td)
def _GreedyMatch(er):
"""er is list of (weight,e,tl,tr).
Find maximal set so that each triangle appears in at most
one member of set"""
# sort in order of decreasing weight
er.sort(key=lambda v: v[0], reverse=True)
match = set()
ans = []
while len(er) > 0:
(_, _, tl, tr) = q = er.pop()
if tl not in match and tr not in match:
match.add(tl)
match.add(tr)
ans.append(q)
return ans
def _MaxMatch(er):
"""Like _GreedyMatch, but use divide and conquer to find best possible set.
Args:
er: list of (weight,e,tl,tr) - see _ERGraph
Returns:
list that is a subset of er giving a maximum weight match
"""
(ans, _) = _DCMatch(er)
return ans
def _DCMatch(er):
"""Recursive helper for _MaxMatch.
Divide and Conquer approach to finding max weight matching.
If we're lucky, there's an edge in er that separates the edge removal
graph into (at least) two separate components. Then the max weight
is either one that includes that edge or excludes it - and we can
use a recursive call to _DCMatch to handle each component separately
on what remains of the graph after including/excluding the separating edge.
If we're not lucky, we fall back on _EMatch (see below).
Args:
er: list of (weight, e, tl, tr) (see _ERGraph)
Returns:
(list of (weight, e, tl, tr), float) - the subset forming a maximum
matching, and the total weight of the match.
"""
if not er:
return ([], 0.0)
if len(er) == 1:
return (er, er[0][0])
match = []
matchw = 0.0
for i in range(0, len(er)):
(nc, comp) = _FindComponents(er, i)
if nc == 1:
# er[i] doesn't separate er
continue
(wi, _, tl, tr) = er[i]
if comp[tl] != comp[tr]:
# case 1: er separates graph
# compare the matches that include er[i] versus
# those that exclude it
(a, b) = _PartitionComps(er, comp, i, comp[tl], comp[tr])
ax = _CopyExcluding(a, tl, tr)
bx = _CopyExcluding(b, tl, tr)
(axmatch, wax) = _DCMatch(ax)
(bxmatch, wbx) = _DCMatch(bx)
if len(ax) == len(a):
wa = wax
amatch = axmatch
else:
(amatch, wa) = _DCMatch(a)
if len(bx) == len(b):
wb = wbx
bmatch = bxmatch
else:
(bmatch, wb) = _DCMatch(b)
w = wa + wb
wx = wax + wbx + wi
if w > wx:
match = amatch + bmatch
matchw = w
else:
match = [er[i]] + axmatch + bxmatch
matchw = wx
else:
# case 2: er not needed to separate graph
(a, b) = _PartitionComps(er, comp, -1, 0, 0)
(amatch, wa) = _DCMatch(a)
(bmatch, wb) = _DCMatch(b)
match = amatch + bmatch
matchw = wa + wb
if match:
break
if not match:
return _EMatch(er)
return (match, matchw)
def _EMatch(er):
"""Exhaustive match helper for _MaxMatch.
This is the case when we were unable to find a single edge
separating the edge removal graph into two components.
So pick a single edge and try _DCMatch on the two cases of
including or excluding that edge. We may be lucky in these
subcases (say, if the graph is currently a simple cycle, so
only needs one more edge after the one we pick here to separate
it into components). Otherwise, we'll end up back in _EMatch
    again, and the worst case will be exponential.
Pick a random edge rather than say, the first, to hopefully
avoid some pathological cases.
Args:
er: list of (weight, el, tl, tr) (see _ERGraph)
Returns:
(list of (weight, e, tl, tr), float) - the subset forming a maximum
matching, and the total weight of the match.
"""
if not er:
return ([], 0.0)
if len(er) == 1:
        return (er, er[0][0])
i = random.randint(0, len(er) - 1)
eri = (wi, _, tl, tr) = er[i]
# case a: include eri. exclude other edges that touch tl or tr
a = _CopyExcluding(er, tl, tr)
a.append(eri)
(amatch, wa) = _DCMatch(a)
wa += wi
if len(a) == len(er) - 1:
# if a excludes only eri, then er didn't touch anything else
# in the graph, and the best match will always include er
# and we can skip the call for case b
wb = -1.0
bmatch = []
else:
b = er[:i] + er[i + 1:]
(bmatch, wb) = _DCMatch(b)
if wa > wb:
match = amatch
match.append(eri)
matchw = wa
else:
match = bmatch
matchw = wb
return (match, matchw)
def _FindComponents(er, excepti):
"""Find connected components induced by edges, excluding one edge.
Args:
er: list of (weight, el, tl, tr) (see _ERGraph)
excepti: index in er of edge to be excluded
Returns:
(int, dict): int is number of connected components found,
dict maps triangle triple ->
connected component index (starting at 1)
"""
ncomps = 0
comp = dict()
for i in range(0, len(er)):
(_, _, tl, tr) = er[i]
for t in [tl, tr]:
if t not in comp:
ncomps += 1
_FCVisit(er, excepti, comp, t, ncomps)
return (ncomps, comp)
def _FCVisit(er, excepti, comp, t, compnum):
"""Helper for _FindComponents depth-first-search."""
comp[t] = compnum
for i in range(0, len(er)):
if i == excepti:
continue
(_, _, tl, tr) = er[i]
if tl == t or tr == t:
s = tl
if s == t:
s = tr
if s not in comp:
_FCVisit(er, excepti, comp, s, compnum)
def _PartitionComps(er, comp, excepti, compa, compb):
"""Partition the edges of er by component number, into two lists.
Generally, put odd components into first list and even into second,
except that component compa goes in the first and compb goes in the second,
and we ignore edge er[excepti].
Args:
er: list of (weight, el, tl, tr) (see _ERGraph)
comp: dict - mapping triangle triple -> connected component index
excepti: int - index in er to ignore (unless excepti==-1)
compa: int - component to go in first list of answer (unless 0)
compb: int - component to go in second list of answer (unless 0)
Returns:
(list, list) - a partition of er according to above rules
"""
parta = []
partb = []
for i in range(0, len(er)):
if i == excepti:
continue
tl = er[i][2]
c = comp[tl]
if c == compa or (c != compb and (c & 1) == 1):
parta.append(er[i])
else:
partb.append(er[i])
return (parta, partb)
def _CopyExcluding(er, s, t):
"""Return a copy of er, excluding all those involving triangles s and t.
Args:
er: list of (weight, e, tl, tr) - see _ERGraph
s: 3-tuple of int - a triangle
t: 3-tuple of int - a triangle
Returns:
Copy of er excluding those with tl or tr == s or t
"""
ans = []
for e in er:
(_, _, tl, tr) = e
if tl == s or tr == s or tl == t or tr == t:
continue
ans.append(e)
return ans
def _DegreeDict(tris):
"""Return a dictionary mapping vertices in tris to the number of triangles
    that they touch."""
ans = dict()
for t in tris:
for v in t:
if v in ans:
ans[v] = ans[v] + 1
else:
ans[v] = 1
return ans
def PolygonPlane(face, points):
"""Return a Normal vector for the face with 3d coords given by indexing
into points."""
if len(face) < 3:
return (0.0, 0.0, 1.0) # arbitrary, we really have no idea
else:
coords = [points.pos[i] for i in face]
return Normal(coords)
# This Normal appears to be on the CCW-traversing side of a polygon
def Normal(coords):
"""Return an average Normal vector for the point list, 3d coords."""
if len(coords) < 3:
return (0.0, 0.0, 1.0) # arbitrary
(ax, ay, az) = coords[0]
(bx, by, bz) = coords[1]
(cx, cy, cz) = coords[2]
if len(coords) == 3:
sx = (ay - by) * (az + bz) + \
(by - cy) * (bz + cz) + \
(cy - ay) * (cz + az)
sy = (az - bz) * (ax + bx) + \
(bz - cz) * (bx + cx) + \
(cz - az) * (cx + ax)
        sz = (ax - bx) * (ay + by) + \
(bx - cx) * (by + cy) + \
(cx - ax) * (cy + ay)
return Norm3(sx, sy, sz)
else:
sx = (ay - by) * (az + bz) + (by - cy) * (bz + cz)
sy = (az - bz) * (ax + bx) + (bz - cz) * (bx + cx)
sz = (ax - bx) * (ay + by) + (bx - cx) * (by + cy)
return _NormalAux(coords[3:], coords[0], sx, sy, sz)
def _NormalAux(rest, first, sx, sy, sz):
(ax, ay, az) = rest[0]
if len(rest) == 1:
(bx, by, bz) = first
else:
(bx, by, bz) = rest[1]
nx = sx + (ay - by) * (az + bz)
ny = sy + (az - bz) * (ax + bx)
nz = sz + (ax - bx) * (ay + by)
if len(rest) == 1:
return Norm3(nx, ny, nz)
else:
return _NormalAux(rest[1:], first, nx, ny, nz)
def Norm3(x, y, z):
"""Return vector (x,y,z) normalized by dividing by squared length.
Return (0.0, 0.0, 1.0) if the result is undefined."""
sqrlen = x * x + y * y + z * z
if sqrlen < 1e-100:
return (0.0, 0.0, 1.0)
else:
try:
d = sqrt(sqrlen)
return (x / d, y / d, z / d)
except:
return (0.0, 0.0, 1.0)
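# Note: Normal() above implements Newell's method -- summing terms of the form
# (y_i - y_j) * (z_i + z_j) over consecutive vertex pairs -- which yields a
# stable average normal even for concave or slightly non-planar polygons.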
# We're using right-hand coord system, where
# forefinger=x, middle=y, thumb=z on right hand.
# Then, e.g., (1,0,0) x (0,1,0) = (0,0,1)
def Cross3(a, b):
"""Return the cross product of two vectors, a x b."""
(ax, ay, az) = a
(bx, by, bz) = b
return (ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx)
def Dot2(a, b):
"""Return the dot product of two 2d vectors, a . b."""
return a[0] * b[0] + a[1] * b[1]
def Perp2(a, b):
"""Return a sort of 2d cross product."""
return a[0] * b[1] - a[1] * b[0]
def Sub2(a, b):
"""Return difference of 2d vectors, a-b."""
return (a[0] - b[0], a[1] - b[1])
def Add2(a, b):
"""Return the sum of 2d vectors, a+b."""
return (a[0] + b[0], a[1] + b[1])
def Length2(v):
"""Return length of vector v=(x,y)."""
return hypot(v[0], v[1])
def LinInterp2(a, b, alpha):
"""Return the point alpha of the way from a to b."""
beta = 1 - alpha
return (beta * a[0] + alpha * b[0], beta * a[1] + alpha * b[1])
def Normalized2(p):
"""Return vector p normlized by dividing by its squared length.
Return (0.0, 1.0) if the result is undefined."""
(x, y) = p
sqrlen = x * x + y * y
if sqrlen < 1e-100:
return (0.0, 1.0)
else:
try:
d = sqrt(sqrlen)
return (x / d, y / d)
except:
return (0.0, 1.0)
def Angle(a, b, c, points):
"""Return Angle abc in degrees, in range [0,180),
where a,b,c are indices into points."""
u = Sub2(points.pos[c], points.pos[b])
v = Sub2(points.pos[a], points.pos[b])
n1 = Length2(u)
n2 = Length2(v)
if n1 == 0.0 or n2 == 0.0:
return 0.0
else:
costheta = Dot2(u, v) / (n1 * n2)
if costheta > 1.0:
costheta = 1.0
if costheta < - 1.0:
costheta = - 1.0
return math.acos(costheta) * 180.0 / math.pi
def SegsIntersect(ixa, ixb, ixc, ixd, points):
"""Return true if segment AB intersects CD,
false if they just touch. ixa, ixb, ixc, ixd are indices
into points."""
a = points.pos[ixa]
b = points.pos[ixb]
c = points.pos[ixc]
d = points.pos[ixd]
u = Sub2(b, a)
v = Sub2(d, c)
w = Sub2(a, c)
pp = Perp2(u, v)
if abs(pp) > TOL:
si = Perp2(v, w) / pp
ti = Perp2(u, w) / pp
return 0.0 < si < 1.0 and 0.0 < ti < 1.0
else:
# parallel or overlapping
if Dot2(u, u) == 0.0 or Dot2(v, v) == 0.0:
return False
else:
pp2 = Perp2(w, v)
if abs(pp2) > TOL:
return False # parallel, not collinear
z = Sub2(b, c)
(vx, vy) = v
(wx, wy) = w
(zx, zy) = z
if vx == 0.0:
(t0, t1) = (wy / vy, zy / vy)
else:
(t0, t1) = (wx / vx, zx / vx)
return 0.0 < t0 < 1.0 and 0.0 < t1 < 1.0
def Ccw(a, b, c, points):
"""Return true if ABC is a counterclockwise-oriented triangle,
where a, b, and c are indices into points.
Returns false if not, or if colinear within TOL."""
(ax, ay) = (points.pos[a][0], points.pos[a][1])
(bx, by) = (points.pos[b][0], points.pos[b][1])
(cx, cy) = (points.pos[c][0], points.pos[c][1])
d = ax * by - bx * ay - ax * cy + cx * ay + bx * cy - cx * by
return d > TOL
def InCircle(a, b, c, d, points):
"""Return true if circle through points with indices a, b, c
contains point with index d (indices into points).
Except: if ABC forms a counterclockwise oriented triangle
then the test is reversed: return true if d is outside the circle.
Will get false, no matter what orientation, if d is cocircular, with TOL^2.
| xa ya xa^2+ya^2 1 |
| xb yb xb^2+yb^2 1 | > 0
| xc yc xc^2+yc^2 1 |
| xd yd xd^2+yd^2 1 |
"""
(xa, ya, za) = _Icc(points.pos[a])
(xb, yb, zb) = _Icc(points.pos[b])
(xc, yc, zc) = _Icc(points.pos[c])
(xd, yd, zd) = _Icc(points.pos[d])
det = xa * (yb * zc - yc * zb - yb * zd + yd * zb + yc * zd - yd * zc) \
- xb * (ya * zc - yc * za - ya * zd + yd * za + yc * zd - yd * zc) \
+ xc * (ya * zb - yb * za - ya * zd + yd * za + yb * zd - yd * zb) \
- xd * (ya * zb - yb * za - ya * zc + yc * za + yb * zc - yc * zb)
return det > TOL * TOL
def _Icc(p):
(x, y) = (p[0], p[1])
return (x, y, x * x + y * y)
|
the-stack_0_16915 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from product_management import create_product, delete_product
from reference_image_management import (
create_reference_image, delete_reference_image, list_reference_images)
PROJECT_ID = os.getenv('GCLOUD_PROJECT')
LOCATION = 'us-west1'
PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing'
PRODUCT_CATEGORY = 'homegoods'
PRODUCT_ID = 'fake_product_id_for_testing'
REFERENCE_IMAGE_ID = 'fake_reference_image_id_for_testing'
GCS_URI = 'gs://python-docs-samples-tests/product_search/shoes_1.jpg'
@pytest.fixture
def product():
# set up
create_product(
PROJECT_ID, LOCATION, PRODUCT_ID,
PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY)
yield None
# tear down
delete_product(PROJECT_ID, LOCATION, PRODUCT_ID)
def test_create_reference_image(capsys, product):
list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID)
out, _ = capsys.readouterr()
assert REFERENCE_IMAGE_ID not in out
create_reference_image(
PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID,
GCS_URI)
list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID)
out, _ = capsys.readouterr()
assert REFERENCE_IMAGE_ID in out
delete_product(PROJECT_ID, LOCATION, PRODUCT_ID)
def test_delete_reference_image(capsys, product):
create_reference_image(
PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID,
GCS_URI)
list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID)
out, _ = capsys.readouterr()
assert REFERENCE_IMAGE_ID in out
delete_reference_image(
PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID)
list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID)
out, _ = capsys.readouterr()
assert REFERENCE_IMAGE_ID not in out
delete_product(PROJECT_ID, LOCATION, PRODUCT_ID)
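# Run sketch (illustrative; assumes pytest plus Google Cloud credentials are
# configured and that this file is named e.g. reference_image_management_test.py):
#
#     GCLOUD_PROJECT=<project-id> pytest reference_image_management_test.py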
|
the-stack_0_16916 | import math
from torch import Tensor
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
import random
def evaluate(model, crit, batches):
model.eval()
    with torch.no_grad():
        postfix = {}
        total_loss = 0
        mem = hidden = None
pbar = tqdm(desc='eval', total=len(batches) // bptt, postfix=postfix)
for i in range(0, len(batches), bptt):
chunk = batches[i:i+1+bptt]
x, target = chunk[:-1], chunk[1:]
y, mem, hidden = model(x, mem, hidden)
loss = crit(y.flatten(end_dim=1), target.flatten())
total_loss += loss.item()
# progress bar
pbar.update(1)
cur_loss = total_loss / pbar.n
postfix['loss'] = f"{cur_loss:.3f}"
if cur_loss < 20:
postfix['ppl'] = f"{math.exp(cur_loss):.3f}"
postfix['bpc'] = f"{cur_loss / math.log(2):.3f}"
pbar.set_postfix(postfix)
pbar.close()
return total_loss / pbar.n
def train(model, crit, optim, sched, dataset, epochs):
    hid, mem = None, None
    for epoch in range(epochs):
        model.train()
        batches = dataset.train_data
        postfix = {'lr': optim.param_groups[0]['lr']}
        total_loss = 0
        pbar = tqdm(desc=f"train[{epoch+1}]", total=len(batches) // bptt, postfix=postfix)
        i = 0  # cursor into the training batches, reset each epoch
        while True:
            seq_len = random.randint(bptt - 5, bptt + 5)
            if i + seq_len > len(batches):
                break
            chunk = batches[i:i+1+seq_len]
            x, target = chunk[:-1], chunk[1:]
            i += seq_len
            if hid is None:
                y, mem, hid = model(x)
            else:
                y, mem, hid = model(x, mem, hid)
            mem = [m.detach() for m in mem]
            hid = [(h1.detach(), h2.detach()) for h1, h2 in hid]
loss = crit(y.flatten(end_dim=1), target.flatten())
# loss = 0
# for j in range(len(x)):
# y, mem, hidden = model.forward(x[j].unsqueeze(0), mem, hidden)
# loss += crit(y[-1], target[j])
if False:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
else:
scaler.scale(loss).backward()
scaler.unscale_(optim) # for clipping
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
scaler.step(optim)
scaler.update()
optim.zero_grad()
total_loss += loss.item()
# progress bar accounting
pbar.update(1)
cur_loss = total_loss / pbar.n
postfix['loss'] = f"{cur_loss:.3f}"
if cur_loss < 20:
postfix['ppl'] = f"{math.exp(cur_loss):.3f}"
postfix['bpc'] = f"{cur_loss / math.log(2):.3f}"
pbar.set_postfix(postfix)
pbar.close()
val_loss = evaluate(model, crit, dataset.valid_data)
sched.step(val_loss)
with open('model.pt', 'wb') as f:
torch.save(model, f)
if __name__ == '__main__':
from tqdm import tqdm
from model import SHARNN
from data import enwik8
fresh = True
cuda = True
distributed = False
bsz = 16
epochs = 40
bptt = 1024
device = 'cuda' if cuda else 'cpu'
if distributed:
torch.distributed.init_process_group(backend='nccl')
rank = torch.distributed.get_rank()
torch.cuda.set_device(rank)
dataset = enwik8()
if not fresh:
with open('model.pt', 'rb') as f:
model = torch.load(f)
else:
model = SHARNN(n_token=dataset.n_token, embed_dim=1024, hidden_dim=4096, ff_dim=2048, n_layers=4, heads=1, max_len=5000, dropout=0.1, tied=True)
model.to(device)
if distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank], output_device=rank, dim=1, find_unused_parameters=True)
# optim = torch.optim.Adam(model.parameters(), lr=0.002)
from pytorch_lamb import Lamb
optim = Lamb(model.parameters(), lr=0.002, min_trust=0.25)
crit = nn.CrossEntropyLoss().to(device)
# sched = torch.optim.lr_scheduler.CosineAnnealingLR(optim, epochs)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, patience=2)
scaler = torch.cuda.amp.GradScaler()
if True:
train(model, crit, optim, sched, dataset, epochs)
test_loss = evaluate(model, crit, dataset.test_data)
print(f"Test | loss {test_loss:.3f} | ppl {math.exp(test_loss):.3f} | bpc {test_loss / math.log(2):.3f}")
exit()
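# Launch sketch (illustrative; the script filename and process count are
# assumptions, not part of the original):
#
#     python main.py                                                # single process
#     python -m torch.distributed.launch --nproc_per_node=2 main.py # distributed = True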
|
the-stack_0_16917 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
from ax.core.types import TCandidateMetadata, TConfig
from ax.models.torch.alebo import ei_or_nei
from ax.models.torch.botorch import BotorchModel
from ax.models.torch.cbo_sac import generate_model_space_decomposition
from ax.models.torch_base import TorchModel
from ax.utils.common.docutils import copy_doc
from ax.utils.common.logger import get_logger
from botorch.fit import fit_gpytorch_model
from botorch.models.contextual import LCEAGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from torch import Tensor
MIN_OBSERVED_NOISE_LEVEL = 1e-7
logger = get_logger(__name__)
def get_map_model(
train_X: Tensor,
train_Y: Tensor,
train_Yvar: Tensor,
decomposition: Dict[str, List[int]],
train_embedding: bool = True,
cat_feature_dict: Optional[Dict] = None,
embs_feature_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
context_weight_dict: Optional[Dict] = None,
) -> Tuple[LCEAGP, ExactMarginalLogLikelihood]:
"""Obtain MAP fitting of Latent Context Embedding Additive (LCE-A) GP."""
# assert train_X is non-batched
assert train_X.dim() < 3, "Don't support batch training"
model = LCEAGP(
train_X=train_X,
train_Y=train_Y,
train_Yvar=train_Yvar,
decomposition=decomposition,
train_embedding=train_embedding,
embs_dim_list=embs_dim_list,
cat_feature_dict=cat_feature_dict,
embs_feature_dict=embs_feature_dict,
context_weight_dict=context_weight_dict,
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
return model, mll
class LCEABO(BotorchModel):
r"""Does Bayesian optimization with Latent Context Embedding Additive (LCE-A) GP.
The parameter space decomposition must be provided.
Args:
decomposition: Keys are context names. Values are the lists of parameter
names belong to the context, e.g.
{'context1': ['p1_c1', 'p2_c1'],'context2': ['p1_c2', 'p2_c2']}.
gp_model_args: Dictionary of kwargs to pass to GP model training.
            - train_embedding: Boolean. If true, we will train context embedding;
            otherwise, we use pre-trained embeddings from embs_feature_dict only.
Default is True.
"""
def __init__(
self,
decomposition: Dict[str, List[str]],
cat_feature_dict: Optional[Dict] = None,
embs_feature_dict: Optional[Dict] = None,
context_weight_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
gp_model_args: Optional[Dict[str, Any]] = None,
) -> None:
# add validation for input decomposition
for param_list in list(decomposition.values()):
assert len(param_list) == len(
list(decomposition.values())[0]
), "Each Context should contain same number of parameters"
self.decomposition = decomposition
self.cat_feature_dict = cat_feature_dict
self.embs_feature_dict = embs_feature_dict
self.context_weight_dict = context_weight_dict
self.embs_dim_list = embs_dim_list
self.gp_model_args = gp_model_args if gp_model_args is not None else {}
self.feature_names: List[str] = []
self.train_embedding = self.gp_model_args.get("train_embedding", True)
super().__init__(
model_constructor=self.get_and_fit_model,
acqf_constructor=ei_or_nei, # pyre-ignore
)
@copy_doc(TorchModel.fit)
def fit(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
bounds: List[Tuple[float, float]],
task_features: List[int],
feature_names: List[str],
metric_names: List[str],
fidelity_features: List[int],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
) -> None:
if len(feature_names) == 0:
raise ValueError("feature names are required for LCEABO")
self.feature_names = feature_names
super().fit(
Xs=Xs,
Ys=Ys,
Yvars=Yvars,
bounds=bounds,
task_features=task_features,
feature_names=feature_names,
metric_names=metric_names,
fidelity_features=fidelity_features,
)
@copy_doc(TorchModel.best_point)
def best_point(
self,
bounds: List[Tuple[float, float]],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
fixed_features: Optional[Dict[int, float]] = None,
model_gen_options: Optional[TConfig] = None,
target_fidelities: Optional[Dict[int, float]] = None,
) -> Optional[Tensor]:
raise NotImplementedError
def get_and_fit_model(
self,
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
task_features: List[int],
fidelity_features: List[int],
metric_names: List[str],
state_dict: Optional[Dict[str, Tensor]] = None,
fidelity_model_id: Optional[int] = None,
**kwargs: Any,
) -> GPyTorchModel:
"""Get a fitted LCEAGP model for each outcome.
Args:
Xs: X for each outcome.
Ys: Y for each outcome.
Yvars: Noise variance of Y for each outcome.
Returns: Fitted LCEAGP model.
"""
# generate model space decomposition dict
decomp_index = generate_model_space_decomposition(
decomposition=self.decomposition, feature_names=self.feature_names
)
models = []
for i, X in enumerate(Xs):
Yvar = Yvars[i].clamp_min_(MIN_OBSERVED_NOISE_LEVEL)
gp_m, _ = get_map_model(
train_X=X,
train_Y=Ys[i],
train_Yvar=Yvar,
decomposition=decomp_index,
train_embedding=self.train_embedding,
cat_feature_dict=self.cat_feature_dict,
embs_feature_dict=self.embs_feature_dict,
embs_dim_list=self.embs_dim_list,
context_weight_dict=self.context_weight_dict,
)
models.append(gp_m)
if len(models) == 1:
model = models[0]
else:
model = ModelListGP(*models)
model.to(Xs[0])
return model
|
the-stack_0_16919 | #-*- coding:utf-8 -*-
# &Author AnFany
import pandas as pd
data = pd.read_csv(r'C:\Users\GWT9\Desktop\iris.csv')
# y values (Species labels, later one-hot encoded for Softmax)
ydata = data['Species'].values
# x values (features)
xdata = data.iloc[:, 1:5].values
# Data preprocessing
import numpy as np
# Standardize the x data (zero mean, unit variance)
handle_x_data = (xdata - np.mean(xdata, axis=0)) / np.std(xdata, axis=0)
# One-hot encode the y data
ydata = pd.get_dummies(data['Species']).values
# The classes are grouped together in the file, which makes training harder, so shuffle the data
# First stack the x data and y data together
xydata = np.hstack((handle_x_data, ydata))
# Shuffle the rows
np.random.shuffle(xydata)
# Split back into x and y
X_DATA = xydata[:, :4]
Y_DATA = xydata[:, 4:]
Data = [X_DATA, Y_DATA]
# Data shapes
# X_DATA.shape = (number of samples, number of features)
# Y_DATA.shape = (number of samples, number of classes)
# Class encodings
# setosa [1,0,0]
# versicolor [0,1,0]
# virginica [0,0,1]
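# Optional sanity check (sketch): with the standard 150-row iris file,
# X_DATA.shape == (150, 4) and Y_DATA.shape == (150, 3)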
|
the-stack_0_16920 | """Tests for certbot.helpful_parser"""
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
from certbot import errors
from certbot._internal.cli import HelpfulArgumentParser
from certbot._internal.cli import _DomainsAction
from certbot._internal import constants
class TestScanningFlags(unittest.TestCase):
'''Test the prescan_for_flag method of HelpfulArgumentParser'''
def test_prescan_no_help_flag(self):
arg_parser = HelpfulArgumentParser(['run'], {})
detected_flag = arg_parser.prescan_for_flag('--help',
['all', 'certonly'])
self.assertFalse(detected_flag)
detected_flag = arg_parser.prescan_for_flag('-h',
['all, certonly'])
self.assertFalse(detected_flag)
def test_prescan_unvalid_topic(self):
arg_parser = HelpfulArgumentParser(['--help', 'all'], {})
detected_flag = arg_parser.prescan_for_flag('--help',
['potato'])
self.assertIs(detected_flag, True)
detected_flag = arg_parser.prescan_for_flag('-h',
arg_parser.help_topics)
self.assertFalse(detected_flag)
def test_prescan_valid_topic(self):
arg_parser = HelpfulArgumentParser(['-h', 'all'], {})
detected_flag = arg_parser.prescan_for_flag('-h',
arg_parser.help_topics)
self.assertEqual(detected_flag, 'all')
detected_flag = arg_parser.prescan_for_flag('--help',
arg_parser.help_topics)
self.assertFalse(detected_flag)
class TestDetermineVerbs(unittest.TestCase):
'''Tests for determine_verb methods of HelpfulArgumentParser'''
def test_determine_verb_wrong_verb(self):
arg_parser = HelpfulArgumentParser(['potato'], {})
self.assertEqual(arg_parser.verb, "run")
self.assertEqual(arg_parser.args, ["potato"])
def test_determine_verb_help(self):
arg_parser = HelpfulArgumentParser(['--help', 'everything'], {})
self.assertEqual(arg_parser.verb, "help")
self.assertEqual(arg_parser.args, ["--help", "everything"])
arg_parser = HelpfulArgumentParser(['-d', 'some_domain', '--help',
'all'], {})
self.assertEqual(arg_parser.verb, "help")
self.assertEqual(arg_parser.args, ['-d', 'some_domain', '--help',
'all'])
def test_determine_verb(self):
arg_parser = HelpfulArgumentParser(['certonly'], {})
self.assertEqual(arg_parser.verb, 'certonly')
self.assertEqual(arg_parser.args, [])
arg_parser = HelpfulArgumentParser(['auth'], {})
self.assertEqual(arg_parser.verb, 'certonly')
self.assertEqual(arg_parser.args, [])
arg_parser = HelpfulArgumentParser(['everything'], {})
self.assertEqual(arg_parser.verb, 'run')
self.assertEqual(arg_parser.args, [])
class TestAdd(unittest.TestCase):
'''Tests for add method in HelpfulArgumentParser'''
def test_add_trivial_argument(self):
arg_parser = HelpfulArgumentParser(['run'], {})
arg_parser.add(None, "--hello-world")
parsed_args = arg_parser.parser.parse_args(['--hello-world',
'Hello World!'])
self.assertIs(parsed_args.hello_world, 'Hello World!')
self.assertFalse(hasattr(parsed_args, 'potato'))
def test_add_expected_argument(self):
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid", action="store",
metavar="EAB_KID",
help="Key Identifier for External Account Binding")
parsed_args = arg_parser.parser.parse_args(["--eab-kid", None])
self.assertIs(parsed_args.eab_kid, None)
self.assertTrue(hasattr(parsed_args, 'eab_kid'))
class TestAddGroup(unittest.TestCase):
'''Test add_group method of HelpfulArgumentParser'''
def test_add_group_no_input(self):
arg_parser = HelpfulArgumentParser(['run'], {})
self.assertRaises(TypeError, arg_parser.add_group)
def test_add_group_topic_not_visible(self):
        # The user requested help on "run". A topic that is given somewhere else in the
        # args won't be added to the groups in the parser.
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add_group("auth",
description="description of auth")
self.assertEqual(arg_parser.groups, {})
def test_add_group_topic_requested_help(self):
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add_group("run",
description="description of run")
self.assertTrue(arg_parser.groups["run"])
arg_parser.add_group("certonly", description="description of certonly")
with self.assertRaises(KeyError):
self.assertFalse(arg_parser.groups["certonly"])
class TestParseArgsErrors(unittest.TestCase):
'''Tests for errors that should be met for some cases in parse_args method
in HelpfulArgumentParser'''
def test_parse_args_renew_force_interactive(self):
arg_parser = HelpfulArgumentParser(['renew', '--force-interactive'],
{})
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
with self.assertRaises(errors.Error):
arg_parser.parse_args()
def test_parse_args_non_interactive_and_force_interactive(self):
arg_parser = HelpfulArgumentParser(['--force-interactive',
'--non-interactive'], {})
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
arg_parser.add(
None, "--non-interactive", dest="noninteractive_mode",
action="store_true"
)
with self.assertRaises(errors.Error):
arg_parser.parse_args()
def test_parse_args_subset_names_wildcard_domain(self):
arg_parser = HelpfulArgumentParser(['--domain',
'*.example.com,potato.example.com',
'--allow-subset-of-names'], {})
# The following arguments are added because they have to be defined
# in order for arg_parser to run completely. They are not used for the
# test.
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
arg_parser.add(
None, "--non-interactive", dest="noninteractive_mode",
action="store_true")
arg_parser.add(
None, "--staging"
)
arg_parser.add(None, "--dry-run")
arg_parser.add(None, "--csr")
arg_parser.add(None, "--must-staple")
arg_parser.add(None, "--validate-hooks")
arg_parser.add(None, "-d", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction)
arg_parser.add(None, "--allow-subset-of-names")
# with self.assertRaises(errors.Error):
# arg_parser.parse_args()
def test_parse_args_hosts_and_auto_hosts(self):
arg_parser = HelpfulArgumentParser(['--hsts', '--auto-hsts'], {})
arg_parser.add(
None, "--hsts", action="store_true", dest="hsts")
arg_parser.add(
None, "--auto-hsts", action="store_true", dest="auto_hsts")
# The following arguments are added because they have to be defined
# in order for arg_parser to run completely. They are not used for the
# test.
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
arg_parser.add(
None, "--non-interactive", dest="noninteractive_mode",
action="store_true")
arg_parser.add(None, "--staging")
arg_parser.add(None, "--dry-run")
arg_parser.add(None, "--csr")
arg_parser.add(None, "--must-staple")
arg_parser.add(None, "--validate-hooks")
arg_parser.add(None, "--allow-subset-of-names")
with self.assertRaises(errors.Error):
arg_parser.parse_args()
class TestAddDeprecatedArgument(unittest.TestCase):
"""Tests for add_deprecated_argument method of HelpfulArgumentParser"""
@mock.patch.object(HelpfulArgumentParser, "modify_kwargs_for_default_detection")
def test_no_default_detection_modifications(self, mock_modify):
arg_parser = HelpfulArgumentParser(["run"], {}, detect_defaults=True)
arg_parser.add_deprecated_argument("--foo", 0)
arg_parser.parse_args()
mock_modify.assert_not_called()
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
the-stack_0_16921 | import os
import win32api
import time
import win32gui
import sys
import platform
import ctypes
import winproc
from win32.lib import win32con
oFolds = []
openedExe = {}
isFolder = False
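# This script enumerates open Windows Explorer windows (class "CabinetWClass"), records the
# folder path shown in their address toolbar into oFolds, and closes them by posting WM_CLOSE.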
def foChild(hwnd,cc):
global oFolds;
global isFolder
#print(hwnd," : "+win32gui.GetClassName(hwnd))
cname=win32gui.GetClassName(hwnd)
if cname=="ToolbarWindow32":
tname=win32gui.GetWindowText(hwnd)
#print("cname:"+tname)
if tname.find("地址: ")>=0:
addr=tname.replace("地址: ","")
isFolder=True
#print("addr:"+addr);
oFolds.append(addr)
def findChild(hwnd,className):
tchild=1;
rst=hwnd;
index=0;
while tchild>0:
tchild=win32gui.FindWindowEx(hwnd,index,None,None)
index=tchild;
if tchild>0:
print("child:"+win32gui.GetClassName(tchild))
def foo(hwnd,mouse):
global isFolder
isFolder=False
clz=win32gui.GetClassName(hwnd)
if not clz=="CabinetWClass":
return;
#print(hwnd," : ")
if 1==1:
#print(win32gui.GetWindowText(hwnd))
win32gui.EnumChildWindows(hwnd,foChild ,None)
if isFolder:
print(win32gui.GetWindowText(hwnd))
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
def getOpenFolds():
win32gui.EnumWindows(foo, 0)
if __name__ == "__main__":
print("args:",sys.argv)
args=sys.argv
getOpenFolds()
|
the-stack_0_16922 | # qubit number=3
# total number=83
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
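# Note: bitwise_xor returns the bit string reversed, e.g. bitwise_xor("011", "101") == "011"
# (the element-wise xor is "110", which res[::-1] then reverses).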
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=70
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.y(input_qubit[2]) # number=56
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC453.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_16923 | #!/usr/bin/env python
'''
===============================================================================
Interactive Image Segmentation using GrabCut algorithm.
This sample shows interactive image segmentation using grabcut algorithm.
USAGE:
python grabcut.py <filename>
README FIRST:
Two windows will show up, one for input and one for output.
At first, in input window, draw a rectangle around the object using
mouse right button. Then press 'n' to segment the object (once or a few times)
For any finer touch-ups, you can press any of the keys below and draw lines on
the areas you want. Then again press 'n' for updating the output.
Key '0' - To select areas of sure background
Key '1' - To select areas of sure foreground
Key '2' - To select areas of probable background
Key '3' - To select areas of probable foreground
Key 'n' - To update the segmentation
Key 'r' - To reset the setup
Key 's' - To save the results
===============================================================================
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
import sys
BLUE = [255,0,0] # rectangle color
RED = [0,0,255] # PR BG
GREEN = [0,255,0] # PR FG
BLACK = [0,0,0] # sure BG
WHITE = [255,255,255] # sure FG
DRAW_BG = {'color' : BLACK, 'val' : 0}
DRAW_FG = {'color' : WHITE, 'val' : 1}
DRAW_PR_FG = {'color' : GREEN, 'val' : 3}
DRAW_PR_BG = {'color' : RED, 'val' : 2}
# setting up flags
rect = (0,0,1,1)
drawing = False # flag for drawing curves
rectangle = False # flag for drawing rect
rect_over = False # flag to check if rect drawn
rect_or_mask = 100 # flag for selecting rect or mask mode
value = DRAW_FG # drawing initialized to FG
thickness = 3 # brush thickness
def onmouse(event,x,y,flags,param):
global img,img2,drawing,value,mask,rectangle,rect,rect_or_mask,ix,iy,rect_over
# Draw Rectangle
if event == cv2.EVENT_RBUTTONDOWN:
rectangle = True
ix,iy = x,y
elif event == cv2.EVENT_MOUSEMOVE:
if rectangle == True:
img = img2.copy()
cv2.rectangle(img,(ix,iy),(x,y),BLUE,2)
rect = (min(ix,x),min(iy,y),abs(ix-x),abs(iy-y))
rect_or_mask = 0
elif event == cv2.EVENT_RBUTTONUP:
rectangle = False
rect_over = True
cv2.rectangle(img,(ix,iy),(x,y),BLUE,2)
rect = (min(ix,x),min(iy,y),abs(ix-x),abs(iy-y))
rect_or_mask = 0
print(" Now press the key 'n' a few times until no further change \n")
# draw touchup curves
if event == cv2.EVENT_LBUTTONDOWN:
if rect_over == False:
print("first draw rectangle \n")
else:
drawing = True
cv2.circle(img,(x,y),thickness,value['color'],-1)
cv2.circle(mask,(x,y),thickness,value['val'],-1)
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
cv2.circle(img,(x,y),thickness,value['color'],-1)
cv2.circle(mask,(x,y),thickness,value['val'],-1)
elif event == cv2.EVENT_LBUTTONUP:
if drawing == True:
drawing = False
cv2.circle(img,(x,y),thickness,value['color'],-1)
cv2.circle(mask,(x,y),thickness,value['val'],-1)
if __name__ == '__main__':
# print documentation
print(__doc__)
# Loading images
if len(sys.argv) == 2:
filename = sys.argv[1] # for drawing purposes
else:
print("No input image given, so loading default image, ../data/lena.jpg \n")
print("Correct Usage: python grabcut.py <filename> \n")
filename = '../data/lena.jpg'
img = cv2.imread(filename)
img2 = img.copy() # a copy of original image
mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape,np.uint8) # output image to be shown
# input and output windows
cv2.namedWindow('output')
cv2.namedWindow('input')
cv2.setMouseCallback('input',onmouse)
cv2.moveWindow('input',img.shape[1]+10,90)
print(" Instructions: \n")
print(" Draw a rectangle around the object using right mouse button \n")
while(1):
cv2.imshow('output',output)
cv2.imshow('input',img)
k = cv2.waitKey(1)
# key bindings
if k == 27: # esc to exit
break
elif k == ord('0'): # BG drawing
print(" mark background regions with left mouse button \n")
value = DRAW_BG
elif k == ord('1'): # FG drawing
print(" mark foreground regions with left mouse button \n")
value = DRAW_FG
elif k == ord('2'): # PR_BG drawing
value = DRAW_PR_BG
elif k == ord('3'): # PR_FG drawing
value = DRAW_PR_FG
elif k == ord('s'): # save image
bar = np.zeros((img.shape[0],5,3),np.uint8)
res = np.hstack((img2,bar,img,bar,output))
cv2.imwrite('grabcut_output.png',res)
print(" Result saved as image \n")
elif k == ord('r'): # reset everything
print("resetting \n")
rect = (0,0,1,1)
drawing = False
rectangle = False
rect_or_mask = 100
rect_over = False
value = DRAW_FG
img = img2.copy()
mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape,np.uint8) # output image to be shown
elif k == ord('n'): # segment the image
print(""" For finer touchups, mark foreground and background after pressing keys 0-3
and again press 'n' \n""")
if (rect_or_mask == 0): # grabcut with rect
bgdmodel = np.zeros((1,65),np.float64)
fgdmodel = np.zeros((1,65),np.float64)
cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_RECT)
rect_or_mask = 1
elif rect_or_mask == 1: # grabcut with mask
bgdmodel = np.zeros((1,65),np.float64)
fgdmodel = np.zeros((1,65),np.float64)
cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_MASK)
mask2 = np.where((mask==1) + (mask==3),255,0).astype('uint8')
output = cv2.bitwise_and(img2,img2,mask=mask2)
cv2.destroyAllWindows()
|
the-stack_0_16924 | # Copyright 2020 Graphcore Ltd.
import os
import numpy as np
from tensorflow.python import ipu
from tensorflow.python.ipu.scopes import ipu_scope
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
cfg = ipu.utils.create_ipu_config(profiling=True, use_poplar_text_report=True)
cfg = ipu.utils.auto_select_ipus(cfg, 1)
ipu.utils.configure_ipu_system(cfg)
size = 5
with tf.device("cpu"):
x_data = tf.placeholder(np.float32, [size])
y_data = tf.placeholder(np.float32, [size])
def add_op(x, y):
outputs = {
"output_types": [tf.float32],
"output_shapes": [tf.TensorShape([size])],
}
base_path = os.getcwd()
lib_path = os.path.join(base_path, "libcustom_op.so")
gp_path = os.path.join(base_path, "custom_codelet.gp")
return ipu.custom_ops.precompiled_user_op([x, y],
lib_path,
gp_path,
outs=outputs)
with ipu_scope("/device:IPU:0"):
xla_result = ipu.ipu_compiler.compile(add_op, [x_data, y_data])
with tf.Session() as sess:
a = np.random.rand(size)
b = np.random.rand(size)
result = sess.run(xla_result, feed_dict = {x_data: a, y_data: b})
# Show result from the IPU:
print("IPU:", result[0])
# Same calculation on host for comparison:
print("numpy:", a + b)
|
the-stack_0_16926 |
# coding: utf-8
# In[1]:
# Load dependencies
import numpy as np
import pandas as pd
import sys
sys.path.insert(0,'../../../statistics_helper/')
from excel_utils import *
# # Estimating the total biomass of humans
# To estimate the total biomass of humans, we rely on estimates of the total human population from the [UN World Population Prospects of 2017](https://esa.un.org/unpd/wpp/Download/Standard/Population/) (File - 'Total Population - Both Sexes'). We use the estimate for the total human population in 2015
# In[2]:
#Load data from the UN
data = pd.read_excel('humans_data.xlsx',index_col=0,skiprows=16)
# Use data from 2015, multiply by 1000 because data is given in thousands
tot_human_pop = data.loc[1,'2015']*1e3
print('The UN estimate for the total human population is ≈%.1e' %tot_human_pop)
# We use an estimate for the average body mass of humans of ≈50 kg from [Hern](http://link.springer.com/article/10.1023/A:1022153110536). We convert the average body weight to carbon mass assuming 70% water content and 50% carbon out of the dry weight:
# In[3]:
wet_to_c = 0.15
human_cc = 5e4*wet_to_c
# We estimate the total biomass of humans by multiplying the total number of humans by the average carbon content of a single human:
# In[4]:
best_estimate = tot_human_pop*human_cc
print('Our best estimate for the total biomass of humans is ≈%.2f Gt C' %(best_estimate/1e15))
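# Rough arithmetic check (assuming the 2015 UN population of ~7.3e9): 7.3e9 people x
# (5e4 g x 0.15 = 7.5e3 g C per person) is about 5.5e13 g C, i.e. roughly 0.06 Gt C.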
# In[5]:
# Feed results to the chordate biomass data
old_results = pd.read_excel('../../animal_biomass_estimate.xlsx',index_col=0)
result = old_results.copy()
result.loc['Humans',(['Biomass [Gt C]','Uncertainty'])] = (best_estimate/1e15,None)
result.to_excel('../../animal_biomass_estimate.xlsx')
# Feed results to Table 1 & Fig. 1
update_results(sheet='Table1 & Fig1',
row=('Animals','Humans'),
col='Biomass [Gt C]',
values=best_estimate/1e15,
path='../../../results.xlsx')
# Feed results to Table S1
update_results(sheet='Table S1',
row=('Animals','Humans'),
col='Number of individuals',
values=tot_human_pop,
path='../../../results.xlsx')
|
the-stack_0_16928 | def from_file_h5(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.forms.api_file_h5 import to_mdtraj_Topology as file_h5_to_mdtraj_Topology
from molsysmt.native.io.topology import from_mdtraj_Topology as mdtraj_Topology_to_molsysmt_Topology
tmp_item, _ = file_h5_to_mdtraj_Topology(item)
tmp_item, _ = mdtraj_Topology_to_molsysmt_Topology(tmp_item, atom_indices=atom_indices)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices)
else:
tmp_molecular_system = None
return tmp_item, tmp_molecular_system
|
the-stack_0_16930 | import json, os
from threading import Thread
from igdb.wrapper import IGDBWrapper
global wrapper #Global variable to reference wrapper so it can be used across all methods and getters
"""
Use formatted strings in getters
def example(str):
return f'my {str} is formatted'
def example2(str):
return 'my {0} is formatted'.format(str)
"""
"""
GETTERS FOR API INFORMATION
Main Idea:
-> Given a certain reference ID, make a query extracting the name, url, etc specifed and return it in a string
-> Said string will contain the information to be replaced in the main hastable that stores our game infromation
-> We have to iterate through the transfromed byte array given from the query lookign for the key that is not the id associated
-> Hence why we have a double for loop in each getter
-> Every 'for key in endpoint[0]:' has a in index ([0]) because the api gives us an array of one element,
the hashtable containing the requested url, name or array of items
"""
def getCoverUrl(id):
covers = json.loads(wrapper.api_request('covers', f'fields url; where id = {id};'))
for key in covers[0]:
if key == "url":
return ("https:" + covers[0]['url']).replace('thumb',"1080p")
def getPlatforms(id):
platforms = json.loads(wrapper.api_request('platforms', f'fields name; where id = {id};'))
for key in platforms[0]:
if key == "name":
if platforms[0][key] == "Xbox Series":
return platforms[0][key] + " X"
return platforms[0][key]
def getGenres(id):
genres = json.loads(wrapper.api_request('genres', f'fields name; where id = {id};'))
for key in genres[0]:
if key == "name":
return genres[0][key]
def getInvolvedCompanies(id):
    # Define this helper locally so we don't add a module-level function that is only used here
def getCompany(id):
company = json.loads(wrapper.api_request('companies', f'fields name; where id = {id};'))
for key in company[0]:
if key == "name":
return company[0][key]
involved_companies = json.loads(wrapper.api_request('involved_companies', f'fields company; where id = {id};'))
for key in involved_companies[0]:
if key == "company":
# Internal method is called and it's value is returned in the main method
return getCompany(involved_companies[0][key])
def getSummary(id, wrapper):
# This method is intended to be used externally, import it where needed
# summary is a list of dictionaries that follows a json format
summary = json.loads(wrapper.api_request('games', f'fields storyline, summary; where id = {id};'))
# Since some games do not have a storyline description, we can use the summary of the game
# or just simply put that it has no summary yet
# summary[0] is the first dictionary that is in the list of json formatted dictionaries
if "storyline" in summary[0]:
return summary[0]['storyline'] # summary[0][key], since summary[0] is a dictionary
elif "summary" in summary[0]:
return summary[0]['summary']
else:
return "This game has no summary yet"
"""""""""""""""""""""""""""""""""""""""""MAIN METHOD FOR EXTRACTING GAMES"""""""""""""""""""""""""""""""""""""""""""""""
def extractAPIGames(endpoint: str, query: str, fileNumber:int):
    byte_array = wrapper.api_request(endpoint, query)  # byte array that stores the information returned by the API for the given endpoint & query
games = json.loads(byte_array) #Convert the byte array into a json-like hashtable for easy extraction and iteration
print(f"Started Game Extraction for file {fileNumber}")
gamesExtracted = 0
"""
MAIN FOR LOOP TO EXTRACT DATA FROM API
Main Idea:
-> games is a hashtable that is modeled in a json format to extract data from API
-> Every value from the hashtable is an id reference to the actual information from the API
    -> We iterate through each key and extract the information related to that key using a getter
-> Some keys have values that are arrays of ID's, so we have to call the getter for each individual ID in
the array for that key
"""
for game in games:
gamesExtracted += 1
print(f"Games: {gamesExtracted} - File: {fileNumber}")
for key in game:
if key == "cover":
game[key] = getCoverUrl(game[key])
elif key == "platforms":
#game[key] is an array of platforms ids
for i in range(len(game[key])):
#game[key][i] is a platform ID in the array that must be extracted with the getter
game[key][i] = getPlatforms(game[key][i])
elif key == "genres":
for i in range(len(game[key])):
game[key][i] = getGenres(game[key][i])
elif key == "involved_companies":
for i in range(len(game[key])):
game[key][i] = getInvolvedCompanies(game[key][i])
#We parse the hashtable information to a .json file we deliver as output using json.dump()
with open(f'../res/data_{fileNumber}.json', 'w') as outfile:
json.dump(games, outfile, indent=4)
print(f"Games Extracted: {gamesExtracted}")
print(f"Finished, check your data_{fileNumber}.json")
#Command to initialize game extraction every time this file is ran
if __name__ == "__main__":
"""
TO FIX UNAUTHORIZED URL FROM API:
SEND POST REQUEST TO THIS ENDPOINT:
https://id.twitch.tv/oauth2/token?client_id=yourClientID&client_secret=yourClientSecret&grant_type=client_credentials
"""
wrapper = IGDBWrapper("2zu4l0leu7rrc9i8ysagqlxuu5rh89", "9tvwz8wnwyjuqvn5h4nmq8k413wzwt")
# extractAPIGames('games', 'fields name,genres,platforms,cover,involved_companies; where platforms=48 & category=0; limit 200;',1) #PS4
# extractAPIGames('games', 'fields name,genres,platforms,cover,involved_companies; where platforms=49 & category=0; limit 200;', 2) #XB1
# extractAPIGames('games', 'fields name,genres,platforms,cover,involved_companies; where platforms=130 & category=0; limit 200;',3) #Switch
# extractAPIGames('games', 'fields name,genres,platforms,cover,involved_companies; where platforms=6 & category=0; limit 200;', 4) #PC
# extractAPIGames('games', 'fields name,genres,platforms,cover,involved_companies; where platforms=167; limit 200;', 5) #PS5
# extractAPIGames('games', 'fields name,genres,platforms,cover,involved_companies; where platforms=169; limit 200;',6) #XB Series X
|
the-stack_0_16931 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates a TFGAN trained compression model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import data_provider
import networks
import summaries
import tensorflow as tf
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_string('checkpoint_dir', '/tmp/compression/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/compression/',
'Directory where the results are saved to.')
flags.DEFINE_integer('max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_string('dataset_dir', None, 'Location of data.')
# Compression-specific flags.
flags.DEFINE_integer('batch_size', 32, 'The number of images in each batch.')
flags.DEFINE_integer('patch_size', 32, 'The size of the patches to train on.')
flags.DEFINE_integer('bits_per_patch', 1230,
'The number of bits to produce per patch.')
flags.DEFINE_integer('model_depth', 64,
'Number of filters for compression model')
def main(_, run_eval_loop=True):
with tf.name_scope('inputs'):
images = data_provider.provide_data(
'validation', FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir,
patch_size=FLAGS.patch_size)
# In order for variables to load, use the same variable scope as in the
# train job.
with tf.variable_scope('generator'):
reconstructions, _, prebinary = networks.compression_model(
images,
num_bits=FLAGS.bits_per_patch,
depth=FLAGS.model_depth,
is_training=False)
summaries.add_reconstruction_summaries(images, reconstructions, prebinary)
# Visualize losses.
pixel_loss_per_example = tf.reduce_mean(
tf.abs(images - reconstructions), axis=[1, 2, 3])
pixel_loss = tf.reduce_mean(pixel_loss_per_example)
tf.summary.histogram('pixel_l1_loss_hist', pixel_loss_per_example)
tf.summary.scalar('pixel_l1_loss', pixel_loss)
# Create ops to write images to disk.
uint8_images = data_provider.float_image_to_uint8(images)
uint8_reconstructions = data_provider.float_image_to_uint8(reconstructions)
uint8_reshaped = summaries.stack_images(uint8_images, uint8_reconstructions)
image_write_ops = tf.write_file(
'%s/%s'% (FLAGS.eval_dir, 'compression.png'),
tf.image.encode_png(uint8_reshaped[0]))
# For unit testing, use `run_eval_loop=False`.
if not run_eval_loop: return
tf.contrib.training.evaluate_repeatedly(
FLAGS.checkpoint_dir,
master=FLAGS.master,
hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
tf.contrib.training.StopAfterNEvalsHook(1)],
eval_ops=image_write_ops,
max_number_of_evaluations=FLAGS.max_number_of_evaluations)
if __name__ == '__main__':
app.run()
|
the-stack_0_16932 | from collections import deque
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as T
import random
from LearningAgents.LearningAgent import LearningAgent
from Utils.LevelSelection import LevelSelectionSchema
from LearningAgents.Memory import ReplayMemory
from SBEnvironment.SBEnvironmentWrapper import SBEnvironmentWrapper
from torch.utils.tensorboard import SummaryWriter
from einops import rearrange, reduce
class MultiHeadRelationalModuleImage(nn.Module):
def __init__(self, h, w, outputs, device, ch_in):
super(MultiHeadRelationalModuleImage, self).__init__()
self.device = device
self.input_type = 'image'
self.output_type = 'discrete'
self.transform = T.Compose([T.ToPILImage(), T.Resize((h, w)),
T.ToTensor(), T.Normalize((.5, .5, .5), (.5, .5, .5))])
self.conv1_ch = 8
self.conv2_ch = 10 # dim `F` in paper
self.conv3_ch = 24
self.conv4_ch = 30
self.H = h
self.W = w
self.node_size = 64 # entity embedding size
self.lin_hid = 100
self.out_dim = outputs # actions
self.outputs = outputs
self.ch_in = ch_in
self.sp_coord_dim = 2
self.N = int(self.H * self.W)
self.n_heads = 1
self.conv1 = nn.Conv2d(self.ch_in, self.conv1_ch, kernel_size=(7, 7), padding=1)
self.conv2 = nn.Conv2d(self.conv1_ch, self.conv2_ch, kernel_size=(3, 3), padding=1)
self.feature_head = nn.Sequential(self.conv1, self.conv2)
self.proj_shape = (self.conv2_ch + self.sp_coord_dim, self.n_heads * self.node_size)
# Multihead attention
self.k_proj = nn.Linear(*self.proj_shape)
self.q_proj = nn.Linear(*self.proj_shape)
self.v_proj = nn.Linear(*self.proj_shape)
# Compute shape by doing one forward pass
with torch.no_grad():
self.N = int(self.feature_head(torch.rand(size=(1, self.ch_in, self.H, self.W))).flatten().size(0)/self.conv2_ch)
self.k_lin = nn.Linear(self.node_size, self.N)
self.q_lin = nn.Linear(self.node_size, self.N)
self.a_lin = nn.Linear(self.N, self.N)
self.node_shape = (self.n_heads, self.N, self.node_size)
self.k_norm = nn.LayerNorm(self.node_shape, elementwise_affine=True)
self.q_norm = nn.LayerNorm(self.node_shape, elementwise_affine=True)
self.v_norm = nn.LayerNorm(self.node_shape, elementwise_affine=True)
self.linear1 = nn.Linear(self.n_heads * self.node_size, self.node_size)
self.norm1 = nn.LayerNorm([self.N, self.node_size], elementwise_affine=False)
self.linear2 = nn.Linear(self.node_size, self.out_dim)
def forward(self, x):
N, Cin, H, W = x.shape
x = self.conv1(x)
x = torch.relu(x)
x = self.conv2(x)
x = torch.relu(x)
with torch.no_grad():
self.conv_map = x.clone()
_, _, cH, cW = x.shape
xcoords = torch.arange(cW).repeat(cH, 1).float() / cW
ycoords = torch.arange(cH).repeat(cW, 1).transpose(1, 0).float() / cH
spatial_coords = torch.stack([xcoords, ycoords], dim=0)
spatial_coords = spatial_coords.unsqueeze(dim=0)
spatial_coords = spatial_coords.repeat(N, 1, 1, 1).to(x.device)
x = torch.cat([x, spatial_coords], dim=1)
x = x.permute(0, 2, 3, 1) # batch_size, H, W, C
x = x.flatten(1, 2) # batch_size, HxW, C
# key, query, value separation
K = rearrange(self.k_proj(x), "b n (head d) -> b head n d", head=self.n_heads)
K = self.k_norm(K)
Q = rearrange(self.q_proj(x), "b n (head d) -> b head n d", head=self.n_heads)
Q = self.q_norm(Q)
V = rearrange(self.v_proj(x), "b n (head d) -> b head n d", head=self.n_heads)
V = self.v_norm(V)
A = torch.nn.functional.elu(self.q_lin(Q) + self.k_lin(K))
A = self.a_lin(A)
A = torch.nn.functional.softmax(A, dim=3)
with torch.no_grad():
self.att_map = A.cpu().clone()
E = torch.einsum('bhfc,bhcd->bhfd', A, V)
# collapse head dimension
E = rearrange(E, 'b head n d -> b n (head d)')
# B N D' . D' D -> B N D
E = self.linear1(E)
E = torch.relu(E)
E = self.norm1(E)
# B N D -> B D
E = E.max(dim=1)[0]
y = self.linear2(E)
y = torch.nn.functional.elu(y)
return y
def train_model_memory(self, target_net: torch.nn.Module, total_train_time: int, train_time: int, train_batch: int,
gamma: float, memory: ReplayMemory, optimizer: torch.optim, lr=0.001, sample_eps=1):
pass
def transform(self, state):
t = T.Compose([T.ToPILImage(), T.Resize((self.H, self.W)),
T.ToTensor(), T.Normalize((.5, .5, .5), (.5, .5, .5))])
return t(state)
|
the-stack_0_16933 | import rospy
import tf2_ros as tf2
import math
from tf.transformations import quaternion_from_euler
from tf2_geometry_msgs import PoseStamped
from geometry_msgs.msg import Point, Quaternion
from tf.transformations import quaternion_from_euler
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Vector3
from std_msgs.msg import ColorRGBA
from actionlib_msgs.msg import GoalStatus
from dynamic_stack_decider.abstract_action_element import AbstractActionElement
class GoToBall(AbstractActionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(GoToBall, self).__init__(blackboard, dsd, parameters)
if 'target' not in parameters.keys():
            rospy.logerr('The parameter "target" could not be used to decide whether map information is accessible')
else:
self.target = parameters['target']
self.blocking = parameters.get('blocking', True)
self.distance = parameters.get('distance', self.blackboard.config['ball_approach_dist'])
def perform(self, reevaluate=False):
if 'map_goal' == self.target:
goal_angle = self.blackboard.world_model.get_map_based_opp_goal_angle_from_ball()
ball_x, ball_y = self.blackboard.world_model.get_ball_position_xy()
goal_x = ball_x - math.cos(goal_angle) * self.distance
goal_y = ball_y - math.sin(goal_angle) * self.distance
ball_point = (goal_x, goal_y, goal_angle, self.blackboard.map_frame)
elif 'detection_goal' == self.target:
x_dist = self.blackboard.world_model.get_detection_based_goal_position_uv()[0] - \
self.blackboard.world_model.get_ball_position_uv()[0]
y_dist = self.blackboard.world_model.get_detection_based_goal_position_uv()[1] - \
self.blackboard.world_model.get_ball_position_uv()[1]
goal_angle = math.atan2(y_dist, x_dist)
ball_u, ball_v = self.blackboard.world_model.get_ball_position_uv()
goal_u = ball_u + math.cos(goal_angle) * self.distance
goal_v = ball_v + math.sin(goal_angle) * self.distance
ball_point = (goal_u, goal_v, goal_angle, self.blackboard.world_model.base_footprint_frame)
elif 'none' == self.target or 'current_orientation' == self.target:
ball_u, ball_v = self.blackboard.world_model.get_ball_position_uv()
ball_point = (ball_u, ball_v, 0, self.blackboard.world_model.base_footprint_frame)
elif 'close' == self.target:
ball_u, ball_v = self.blackboard.world_model.get_ball_position_uv()
angle = math.atan2(ball_v, ball_u)
ball_point = (ball_u, ball_v, angle, self.blackboard.world_model.base_footprint_frame)
else:
rospy.logerr("Target %s for go_to_ball action not specified.", self.target)
return
pose_msg = PoseStamped()
pose_msg.header.stamp = rospy.Time.now()
pose_msg.header.frame_id = ball_point[3]
pose_msg.pose.position = Point(ball_point[0], ball_point[1], 0)
quaternion = quaternion_from_euler(0, 0, ball_point[2])
pose_msg.pose.orientation.x = quaternion[0]
pose_msg.pose.orientation.y = quaternion[1]
pose_msg.pose.orientation.z = quaternion[2]
pose_msg.pose.orientation.w = quaternion[3]
self.blackboard.pathfinding.publish(pose_msg)
approach_marker = Marker()
approach_marker.pose.position.x = self.distance
approach_marker.type = Marker.SPHERE
approach_marker.action = Marker.MODIFY
approach_marker.id = 1
color = ColorRGBA()
color.r = 1.0
color.g = 1.0
color.b = 1.0
color.a = 1.0
approach_marker.color = color
approach_marker.lifetime = rospy.Duration(nsecs=0.5)
scale = Vector3(0.2, 0.2, 0.2)
approach_marker.scale = scale
approach_marker.header.stamp = rospy.Time.now()
approach_marker.header.frame_id = self.blackboard.world_model.base_footprint_frame
self.blackboard.pathfinding.approach_marker_pub.publish(approach_marker)
if self.blackboard.pathfinding.status in [GoalStatus.SUCCEEDED, GoalStatus.ABORTED] or not self.blocking:
self.pop()
|
the-stack_0_16935 | import json
import re
from urllib.parse import urlparse
import scrapy
from scrapy import Request
from config.DBInfo import SessionFactory
from db.transaction import lianjia_transaction
from tools import tools
class LianJiaSpider(scrapy.Spider):
name = "lianjia"
start_urls = [
"https://www.lianjia.com/city/"
]
def parse(self, response):
tools.writeLog("lianjia", "start")
hreflist = response.selector.css(".city_list_ul a::attr(href)").extract()
for url in hreflist:
yield Request(url + "chengjiao", callback=self.mainPage)
def mainPage(self, response):
title = response.xpath('//title/text()').extract()
if "|成交查询" in title[0]:
res=self.recursiveListUrl(response)
for url in res:
yield Request(url, callback=self.getList)
yield Request(response.url + "pg1/", callback=self.getList)
def getList(self, response):
res = self.recursiveListUrl(response)
for url in res:
yield Request(url, callback=self.getList)
infourl = response.selector.css('.listContent .title a::attr(href)').extract()
for url in infourl:
yield Request(url, callback=self.detail)
try:
strpageinfo = response.selector.css('.page-box .house-lst-page-box ::attr(page-data)').extract()[0]
pageinfo = json.loads(strpageinfo)
cur = pageinfo['curPage']
total = pageinfo['totalPage']
ourl=response.url
result = urlparse(ourl)
if "pg" not in result.path:
ourl+="pg1/"
result = urlparse(ourl)
if cur == 1:
while cur < total:
cur += 1
res = re.sub(r'pg\d+', "pg" + str(cur), result.path)
res = result.scheme + "://" + result.netloc + res
yield Request(res, callback=self.getList)
except:
pass
def detail(self, response):
        # transaction date (成交时间)
date = response.selector.css('.house-title .wrapper span ::text').extract()[0][0:-2]
price = response.selector.css('.info.fr .price i ::text').extract()[0]
avgPrice = response.selector.css('.info.fr .price b ::text').extract()[0]
ljID = response.selector.css('.transaction .content li:first-child ::text').extract()[1]
address = ''
address1 = ''
address2 = ''
address3 = ''
address4 = ''
address5 = ''
address6 = ''
address7 = ''
address8 = ''
address9 = ''
address10 = ''
index = 1
for i in response.selector.css('.deal-bread ::text').extract()[1:-1]:
i = i.replace("二手房成交价格", "")
address += i
if i != '' and i != '>':
if index == 1:
address1 = i
if index == 2:
address2 = i
if index == 3:
address3 = i
if index == 4:
address4 = i
if index == 5:
address5 = i
if index == 6:
address6 = i
if index == 7:
address7 = i
if index == 8:
address8 = i
if index == 9:
address9 = i
if index == 10:
address10 = i
index += 1
data = lianjia_transaction({
'transactiondate': date,
'price': float(price) * 10000,
'avgPrice': avgPrice,
'ljID': ljID.strip(),
'address': address,
'address1': address1,
'address2': address2,
'address3': address3,
'address4': address4,
'address5': address5,
'address6': address6,
'address7': address7,
'address8': address8,
'address9': address9,
'address10': address10,
'url': response.url
})
session = SessionFactory()
        # add the record to the session
        session.add(data)
        # commit to persist it to the database
        try:
            session.commit()
        except Exception:
            # commit failed (e.g. a duplicate record); ignore and fall through to close
            pass
        # close the session
        session.close()
def writelog(self, url):
with open("log.txt", 'a') as f:
f.write(url + "\n")
def recursiveListUrl(self, response):
host = urlparse(response.url)
host = host.scheme + "://" + host.netloc
areaList = response.selector.css('.position a::attr(href)').extract()
ret = []
for url in areaList:
if "https://" in url:
ret.append(url)
else:
ret.append(host + url)
return ret |
the-stack_0_16937 | from typing import Any, Dict, Type
from .awac import AWAC
from .awr import AWR, DiscreteAWR
from .base import AlgoBase
from .bc import BC, DiscreteBC
from .bcq import BCQ, DiscreteBCQ
from .bear import BEAR
from .combo import COMBO
from .cql import CQL, DiscreteCQL
from .crr import CRR
from .ddpg import DDPG
from .dqn import DQN, DoubleDQN
from .iql import IQL
from .mopo import MOPO
from .plas import PLAS, PLASWithPerturbation
from .random_policy import DiscreteRandomPolicy, RandomPolicy
from .sac import SAC, DiscreteSAC
from .td3 import TD3
from .td3_plus_bc import TD3PlusBC
__all__ = [
"AlgoBase",
"AWAC",
"AWR",
"DiscreteAWR",
"BC",
"DiscreteBC",
"BCQ",
"DiscreteBCQ",
"BEAR",
"COMBO",
"CQL",
"DiscreteCQL",
"CRR",
"DDPG",
"DQN",
"DoubleDQN",
"IQL",
"MOPO",
"PLAS",
"PLASWithPerturbation",
"SAC",
"DiscreteSAC",
"TD3",
"TD3PlusBC",
"RandomPolicy",
"DiscreteRandomPolicy",
"get_algo",
"create_algo",
]
DISCRETE_ALGORITHMS: Dict[str, Type[AlgoBase]] = {
"awr": DiscreteAWR,
"bc": DiscreteBC,
"bcq": DiscreteBCQ,
"cql": DiscreteCQL,
"dqn": DQN,
"double_dqn": DoubleDQN,
"sac": DiscreteSAC,
"random": DiscreteRandomPolicy,
}
CONTINUOUS_ALGORITHMS: Dict[str, Type[AlgoBase]] = {
"awac": AWAC,
"awr": AWR,
"bc": BC,
"bcq": BCQ,
"bear": BEAR,
"combo": COMBO,
"cql": CQL,
"crr": CRR,
"ddpg": DDPG,
"iql": IQL,
"mopo": MOPO,
"plas": PLASWithPerturbation,
"sac": SAC,
"td3": TD3,
"td3_plus_bc": TD3PlusBC,
"random": RandomPolicy,
}
def get_algo(name: str, discrete: bool) -> Type[AlgoBase]:
"""Returns algorithm class from its name.
Args:
name (str): algorithm name in snake_case.
discrete (bool): flag to use discrete action-space algorithm.
Returns:
type: algorithm class.
"""
if discrete:
if name in DISCRETE_ALGORITHMS:
return DISCRETE_ALGORITHMS[name]
raise ValueError(f"{name} does not support discrete action-space.")
if name in CONTINUOUS_ALGORITHMS:
return CONTINUOUS_ALGORITHMS[name]
raise ValueError(f"{name} does not support continuous action-space.")
def create_algo(name: str, discrete: bool, **params: Any) -> AlgoBase:
"""Returns algorithm object from its name.
Args:
name (str): algorithm name in snake_case.
discrete (bool): flag to use discrete action-space algorithm.
params (any): arguments for algorithm.
Returns:
d3rlpy.algos.base.AlgoBase: algorithm.
"""
return get_algo(name, discrete)(**params)
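# Example (sketch): look up a class or build an instance by name.
#     get_algo("dqn", discrete=True)       # -> DQN class
#     create_algo("cql", discrete=False)   # -> CQL() instance with default parameters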
|
the-stack_0_16939 | # -*- coding: utf-8 -*-
"""
sphinx.domains.rst
~~~~~~~~~~~~~~~~~~
The reStructuredText domain.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from six import iteritems
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.locale import _
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
if False:
# For type annotation
from typing import Any, Dict, Iterator, List, Tuple # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
dir_sig_re = re.compile(r'\.\. (.+?)::(.*)$')
class ReSTMarkup(ObjectDescription):
"""
Description of generic reST markup.
"""
def add_target_and_index(self, name, sig, signode):
# type: (unicode, unicode, addnodes.desc_signature) -> None
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['rst']['objects']
key = (self.objtype, name)
if key in objects:
self.state_machine.reporter.warning(
'duplicate description of %s %s, ' % (self.objtype, name) +
'other instance in ' + self.env.doc2path(objects[key]),
line=self.lineno)
objects[key] = self.env.docname
indextext = self.get_index_text(self.objtype, name)
if indextext:
self.indexnode['entries'].append(('single', indextext,
targetname, '', None))
def get_index_text(self, objectname, name):
# type: (unicode, unicode) -> unicode
if self.objtype == 'directive':
return _('%s (directive)') % name
elif self.objtype == 'role':
return _('%s (role)') % name
return ''
def parse_directive(d):
# type: (unicode) -> Tuple[unicode, unicode]
"""Parse a directive signature.
Returns (directive, arguments) string tuple. If no arguments are given,
returns (directive, '').
"""
dir = d.strip()
if not dir.startswith('.'):
# Assume it is a directive without syntax
return (dir, '')
m = dir_sig_re.match(dir)
if not m:
return (dir, '')
parsed_dir, parsed_args = m.groups()
return (parsed_dir.strip(), ' ' + parsed_args.strip())
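# Examples (illustrative):
#   parse_directive('.. function:: foo(x)')  ->  ('function', ' foo(x)')
#   parse_directive('function')              ->  ('function', '')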
class ReSTDirective(ReSTMarkup):
"""
Description of a reST directive.
"""
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> unicode
name, args = parse_directive(sig)
desc_name = '.. %s::' % name
signode += addnodes.desc_name(desc_name, desc_name)
if len(args) > 0:
signode += addnodes.desc_addname(args, args)
return name
class ReSTRole(ReSTMarkup):
"""
Description of a reST role.
"""
def handle_signature(self, sig, signode):
# type: (unicode, addnodes.desc_signature) -> unicode
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
class ReSTDomain(Domain):
"""ReStructuredText domain."""
name = 'rst'
label = 'reStructuredText'
object_types = {
'directive': ObjType(_('directive'), 'dir'),
'role': ObjType(_('role'), 'role'),
}
directives = {
'directive': ReSTDirective,
'role': ReSTRole,
}
roles = {
'dir': XRefRole(),
'role': XRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
} # type: Dict[unicode, Dict[unicode, Tuple[unicode, ObjType]]]
def clear_doc(self, docname):
# type: (unicode) -> None
for (typ, name), doc in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][typ, name]
def merge_domaindata(self, docnames, otherdata):
# type: (List[unicode], Dict) -> None
# XXX check duplicates
for (typ, name), doc in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][typ, name] = doc
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
objects = self.data['objects']
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
if (objtype, target) in objects:
return make_refnode(builder, fromdocname,
objects[objtype, target],
objtype + '-' + target,
contnode, target + ' ' + objtype)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] # NOQA
objects = self.data['objects']
results = []
for objtype in self.object_types:
if (objtype, target) in self.data['objects']:
results.append(('rst:' + self.role_for_objtype(objtype),
make_refnode(builder, fromdocname,
objects[objtype, target],
objtype + '-' + target,
contnode, target + ' ' + objtype)))
return results
def get_objects(self):
# type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
for (typ, name), docname in iteritems(self.data['objects']):
yield name, name, typ, docname, typ + '-' + name, 1
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_domain(ReSTDomain)
return {
'version': 'builtin',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
the-stack_0_16940 | # -*- coding: utf-8 -*-
import io
import demjson
import pandas as pd
import requests
from zvdata.api import df_to_db, init_entities
from zvdata.recorder import Recorder
from zvt.api.common import china_stock_code_to_id
from zvt.domain import StockIndex
from zvdata.utils.time_utils import to_pd_timestamp
class ChinaIndexListSpider(Recorder):
data_schema = StockIndex
def __init__(self, batch_size=10, force_update=False, sleeping_time=2.0, provider='exchange') -> None:
self.provider = provider
super(ChinaIndexListSpider, self).__init__(batch_size, force_update, sleeping_time)
def run(self):
        # SSE and CSI indices (上证、中证)
        self.fetch_csi_index()
        # SZSE indices (深证)
        self.fetch_szse_index()
        # CNI indices (国证)
        self.fetch_cni_index()
def fetch_csi_index(self) -> None:
"""
        Fetch the list of SSE (上证) and CSI (中证) indices.
"""
url = 'http://www.csindex.com.cn/zh-CN/indices/index' \
'?page={}&page_size={}&data_type=json&class_1=1&class_2=2&class_7=7&class_10=10'
index_list = []
page = 1
page_size = 50
while True:
query_url = url.format(page, page_size)
response = requests.get(query_url)
response_dict = demjson.decode(response.text)
response_index_list = response_dict.get('list', [])
if len(response_index_list) == 0:
break
index_list.extend(response_index_list)
self.logger.info(f'上证、中证指数第 {page} 页抓取完成...')
page += 1
self.sleep()
df = pd.DataFrame(index_list)
df = df[['base_date', 'base_point', 'index_code', 'indx_sname', 'online_date', 'class_eseries']]
df.columns = ['timestamp', 'base_point', 'code', 'name', 'list_date', 'class_eseries']
df['category'] = df['class_eseries'].apply(lambda x: x.split(' ')[0].lower())
df = df.drop('class_eseries', axis=1)
df = df.loc[df['code'].str.contains(r'^\d{6}$')]
self.persist_index(df)
self.logger.info('上证、中证指数列表抓取完成...')
        # fetch the constituent stocks of the SSE/CSI indices
self.fetch_csi_index_component(df)
self.logger.info('上证、中证指数成分股抓取完成...')
def fetch_csi_index_component(self, df: pd.DataFrame):
"""
        Fetch the constituent stocks of the SSE and CSI indices.
"""
query_url = 'http://www.csindex.com.cn/uploads/file/autofile/cons/{}cons.xls'
for _, index in df.iterrows():
index_code = index['code']
url = query_url.format(index_code)
try:
response = requests.get(url)
response.raise_for_status()
except requests.HTTPError as error:
self.logger.error(f'{index["name"]} - {index_code} 成分股抓取错误 ({error})')
continue
response_df = pd.read_excel(io.BytesIO(response.content))
index_id = f'index_cn_{index_code}'
response_df = response_df[['成分券代码Constituent Code']].rename(columns={'成分券代码Constituent Code': 'stock_code'})
response_df['id'] = response_df['stock_code'].apply(
lambda x: f'{index_id}_{china_stock_code_to_id(str(x))}')
response_df['entity_id'] = response_df['id']
response_df['stock_id'] = response_df['stock_code'].apply(lambda x: china_stock_code_to_id(str(x)))
response_df['index_id'] = index_id
response_df.drop('stock_code', axis=1, inplace=True)
df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
self.logger.info(f'{index["name"]} - {index_code} 成分股抓取完成...')
self.sleep()
def fetch_szse_index(self) -> None:
"""
        Fetch the list of SZSE (深证) indices.
"""
url = 'http://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1812_zs&TABKEY=tab1'
response = requests.get(url)
df = pd.read_excel(io.BytesIO(response.content), dtype='str')
df.columns = ['code', 'name', 'timestamp', 'base_point', 'list_date']
df['category'] = 'szse'
df = df.loc[df['code'].str.contains(r'^\d{6}$')]
self.persist_index(df)
        self.logger.info('SZSE index list fetched...')
        # fetch the constituents of SZSE indices
        self.fetch_szse_index_component(df)
        self.logger.info('SZSE index constituents fetched...')
    def fetch_szse_index_component(self, df: pd.DataFrame):
        """
        Fetch the constituents of SZSE indices.
        """
query_url = 'http://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1747_zs&TABKEY=tab1&ZSDM={}'
for _, index in df.iterrows():
index_code = index['code']
url = query_url.format(index_code)
response = requests.get(url)
response_df = pd.read_excel(io.BytesIO(response.content), dtype='str')
index_id = f'index_cn_{index_code}'
response_df = response_df[['证券代码']]
response_df['id'] = response_df['证券代码'].apply(lambda x: f'{index_id}_{china_stock_code_to_id(str(x))}')
response_df['entity_id'] = response_df['id']
response_df['stock_id'] = response_df['证券代码'].apply(lambda x: china_stock_code_to_id(str(x)))
response_df['index_id'] = index_id
response_df.drop('证券代码', axis=1, inplace=True)
df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
            self.logger.info(f'{index["name"]} - {index_code} constituents fetched...')
self.sleep()
def fetch_cni_index(self) -> None:
"""
        Fetch the list of CNI (cnindex.com.cn) indices.
"""
url = 'http://www.cnindex.com.cn/zstx/jcxl/'
response = requests.get(url)
response.encoding = 'utf-8'
dfs = pd.read_html(response.text)
        # tables after the 9th contain non-stock indices
dfs = dfs[1:9]
result_df = pd.DataFrame()
for df in dfs:
header = df.iloc[0]
df = df[1:]
df.columns = header
df.astype('str')
result_df = pd.concat([result_df, df])
result_df = result_df.drop('样本股数量', axis=1)
result_df.columns = ['name', 'code', 'timestamp', 'base_point', 'list_date']
result_df['timestamp'] = result_df['timestamp'].apply(lambda x: x.replace('-', ''))
result_df['list_date'] = result_df['list_date'].apply(lambda x: x.replace('-', ''))
result_df['category'] = 'csi'
result_df = result_df.loc[result_df['code'].str.contains(r'^\d{6}$')]
self.persist_index(result_df)
        self.logger.info('CNI index list fetched...')
        # fetch the constituents of CNI indices
        self.fetch_cni_index_component(result_df)
        self.logger.info('CNI index constituents fetched...')
    def fetch_cni_index_component(self, df: pd.DataFrame):
        """
        Fetch the constituents of CNI indices.
        """
query_url = 'http://www.cnindex.com.cn/docs/yb_{}.xls'
for _, index in df.iterrows():
index_code = index['code']
url = query_url.format(index_code)
try:
response = requests.get(url)
response.raise_for_status()
except requests.HTTPError as error:
                self.logger.error(f'{index["name"]} - {index_code} constituents fetch failed ({error})')
continue
response_df = pd.read_excel(io.BytesIO(response.content), dtype='str')
index_id = f'index_cn_{index_code}'
try:
response_df = response_df[['样本股代码']]
except KeyError:
response_df = response_df[['证券代码']]
response_df.columns = ['stock_code']
response_df['id'] = response_df['stock_code'].apply(
lambda x: f'{index_id}_{china_stock_code_to_id(str(x))}')
response_df['entity_id'] = response_df['id']
response_df['stock_id'] = response_df['stock_code'].apply(lambda x: china_stock_code_to_id(str(x)))
response_df['index_id'] = index_id
response_df.drop('stock_code', axis=1, inplace=True)
df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
            self.logger.info(f'{index["name"]} - {index_code} constituents fetched...')
self.sleep()
def persist_index(self, df) -> None:
df['timestamp'] = df['timestamp'].apply(lambda x: to_pd_timestamp(x))
df['list_date'] = df['list_date'].apply(lambda x: to_pd_timestamp(x))
df['id'] = df['code'].apply(lambda code: f'index_cn_{code}')
df['entity_id'] = df['id']
df['exchange'] = 'cn'
df['entity_type'] = 'index'
df['is_delisted'] = False
df = df.dropna(axis=0, how='any')
df = df.drop_duplicates(subset='id', keep='last')
init_entities(df, entity_type='index', provider=self.provider)
if __name__ == '__main__':
spider = ChinaIndexListSpider(provider='exchange')
spider.run()
|
the-stack_0_16941 | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Dict
import re
from jina import Executor, DocumentArray, requests, Document
from jina.logging.logger import JinaLogger
class Sentencizer(Executor):
"""
:class:`Sentencizer` split the text on the doc-level
into sentences on the chunk-level with a rule-base strategy.
The text is split by the punctuation characters listed in ``punct_chars``.
The sentences that are shorter than the ``min_sent_len``
or longer than the ``max_sent_len`` after stripping will be discarded.
:param min_sent_len: the minimal number of characters,
(including white spaces) of the sentence, by default 1.
:param max_sent_len: the maximal number of characters,
(including white spaces) of the sentence, by default 512.
:param punct_chars: the punctuation characters to split on,
whatever is in the list will be used,
for example ['!', '.', '?'] will use '!', '.' and '?'
    :param uniform_weight: if True, every chunk gets a uniform weight of 1.0;
        otherwise the weight is proportional to the chunk's share of the text
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(self,
min_sent_len: int = 1,
max_sent_len: int = 512,
punct_chars: Optional[List[str]] = None,
uniform_weight: bool = True,
default_traversal_path: Optional[List[str]] = None,
*args, **kwargs):
"""Set constructor."""
super().__init__(*args, **kwargs)
self.min_sent_len = min_sent_len
self.max_sent_len = max_sent_len
self.punct_chars = punct_chars
self.uniform_weight = uniform_weight
self.logger = JinaLogger(self.__class__.__name__)
self.default_traversal_path = default_traversal_path or ['r']
if not punct_chars:
self.punct_chars = ['!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '‼', '‽', '⁇', '⁈', '⁉', '⸮', '﹖', '﹗',
'!', '.', '?', '。', '。', '\n']
if self.min_sent_len > self.max_sent_len:
self.logger.warning('the min_sent_len (={}) should be smaller or equal to the max_sent_len (={})'.format(
self.min_sent_len, self.max_sent_len))
        self._slit_pat = re.compile(r'\s*([^{0}]+)(?<!\s)[{0}]*'.format(''.join(set(self.punct_chars))))
@requests
def segment(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
Split the text into sentences.
:param docs: Documents that contain the text
:param parameters: Dictionary of parameters
:param kwargs: Additional keyword arguments
:return: a list of chunk dicts with the split sentences
"""
traversal_path = parameters.get('traversal_paths', self.default_traversal_path)
flat_docs = docs.traverse_flat(traversal_path)
for doc in flat_docs:
text = doc.text
ret = [(m.group(0), m.start(), m.end()) for m in
re.finditer(self._slit_pat, text)]
if not ret:
ret = [(text, 0, len(text))]
for ci, (r, s, e) in enumerate(ret):
f = re.sub('\n+', ' ', r).strip()
f = f[:self.max_sent_len]
if len(f) > self.min_sent_len:
doc.chunks.append(
Document(
text=f,
offset=ci,
weight=1.0 if self.uniform_weight else len(f) / len(text),
location=[s, e])
) |
the-stack_0_16943 | # -*- coding: utf-8 -*-
# © Toons
from binascii import unhexlify
import cSecp256k1 as secp256k1
from cSecp256k1 import _ecdsa
from cSecp256k1 import _schnorr
msg = secp256k1.hash_sha256(b"message to sign")
_msg = secp256k1.hash_sha256(b"bad message to check")
pr_key = secp256k1.hash_sha256(b"secret")
pu_key = secp256k1.PublicKey.from_secret(b"secret")
enc_pu_key = secp256k1.PublicKey.from_secret(b"secret").encode()
k = b"%064x" % secp256k1.rand_k()
rfc6979_k = b"%064x" % secp256k1.rfc6979_k(
unhexlify(msg), unhexlify(pr_key)
)[0]
class TestCSecp256k1Signatures:
def test_C_ecdsa_sign(self, benchmark):
signer = _ecdsa.sign
sig = benchmark(signer, msg, pr_key, k, 1).contents
assert not _ecdsa.verify(_msg, pu_key.x, pu_key.y, sig.r, sig.s)
def test_C_ecdsa_verify(self, benchmark):
sig = _ecdsa.sign(msg, pr_key, k, 1).contents
verifier = _ecdsa.verify
assert benchmark(verifier, msg, pu_key.x, pu_key.y, sig.r, sig.s)
def test_C_ecdsa_rfc6949_sign(self, benchmark):
signer = _ecdsa.sign
sig = benchmark(signer, msg, pr_key, rfc6979_k, 1).contents
assert not _ecdsa.verify(_msg, pu_key.x, pu_key.y, sig.r, sig.s)
def test_C_ecdsa_rfc6949_verify(self, benchmark):
sig = _ecdsa.sign(msg, pr_key, rfc6979_k, 1).contents
verifier = _ecdsa.verify
assert benchmark(verifier, msg, pu_key.x, pu_key.y, sig.r, sig.s)
def test_C_schnorr_bcrypto410_sign(self, benchmark):
signer = _schnorr.bcrypto410_sign
sig = benchmark(signer, msg, pr_key).contents
assert not _schnorr.bcrypto410_verify(
_msg, pu_key.x, pu_key.y, sig.r, sig.s
)
def test_C_schnorr_bcrypto410_verify(self, benchmark):
sig = _schnorr.bcrypto410_sign(msg, pr_key).contents
verifier = _schnorr.bcrypto410_verify
assert benchmark(verifier, msg, pu_key.x, pu_key.y, sig.r, sig.s)
def test_C_schnorr_sign(self, benchmark):
signer = _schnorr.sign
sig = benchmark(signer, msg, pr_key, k).contents
assert not _schnorr.verify(_msg, pu_key.x, sig.r, sig.s)
def test_C_schnorr_verify(self, benchmark):
sig = _schnorr.sign(msg, pr_key, k).contents
verifier = _schnorr.verify
assert benchmark(verifier, msg, pu_key.x, sig.r, sig.s)
try:
from pySecp256k1 import schnorr
import binascii
class TestCompare:
def test_schnorr(self):
signer = _schnorr.bcrypto410_sign
sig = signer(msg, pr_key).contents
assert sig.raw() == binascii.hexlify(
schnorr.bcrypto410_sign(
binascii.unhexlify(msg), binascii.unhexlify(pr_key)
)
)
except ImportError:
pass
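# Illustrative invocation (assumes pytest with the pytest-benchmark plugin installed;
# the file name below is a placeholder):
#   pytest test_csecp256k1_bench.py --benchmark-only --benchmark-sort=mean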
|
the-stack_0_16946 | import json
from pathlib import Path
from blspy import AugSchemeMPL, PublicKeyMPL, SignatureMPL
from pipscoin.util.byte_types import hexstr_to_bytes
from pipscoin.util.hash import std_hash
def validate_alert_file(file_path: Path, pubkey: str) -> bool:
text = file_path.read_text()
validated = validate_alert(text, pubkey)
return validated
def validate_alert(text: str, pubkey: str) -> bool:
json_obj = json.loads(text)
data = json_obj["data"]
message = bytes(data, "UTF-8")
signature = json_obj["signature"]
signature = SignatureMPL.from_bytes(hexstr_to_bytes(signature))
pubkey_bls = PublicKeyMPL.from_bytes(hexstr_to_bytes(pubkey))
sig_match_my = AugSchemeMPL.verify(pubkey_bls, message, signature)
return sig_match_my
def create_alert_file(alert_file_path: Path, key, genesis_challenge_preimage: str):
bytes_preimage = bytes(genesis_challenge_preimage, "UTF-8")
genesis_challenge = std_hash(bytes_preimage)
file_dict = {
"ready": True,
"genesis_challenge": genesis_challenge.hex(),
"genesis_challenge_preimage": genesis_challenge_preimage,
}
data: str = json.dumps(file_dict)
signature = AugSchemeMPL.sign(key, bytes(data, "utf-8"))
file_data = {"data": data, "signature": f"{signature}"}
file_data_json = json.dumps(file_data)
alert_file_path.write_text(file_data_json)
def create_not_ready_alert_file(alert_file_path: Path, key):
file_dict = {
"ready": False,
}
data: str = json.dumps(file_dict)
signature = AugSchemeMPL.sign(key, bytes(data, "utf-8"))
file_data = {"data": data, "signature": f"{signature}"}
file_data_json = json.dumps(file_data)
alert_file_path.write_text(file_data_json)
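# Minimal round-trip sketch (illustrative only: the seed, path and the blspy key-derivation
# calls below are assumptions, not part of the original module):
if __name__ == "__main__":
    from blspy import PrivateKey  # AugSchemeMPL is already imported above
    seed = bytes([7] * 32)  # hypothetical 32-byte seed
    sk: PrivateKey = AugSchemeMPL.key_gen(seed)  # assumed blspy API: key_gen / get_g1
    pubkey_hex = bytes(sk.get_g1()).hex()
    alert_path = Path("/tmp/alert_file.json")  # placeholder location
    create_alert_file(alert_path, sk, "example genesis preimage")
    print("alert file valid:", validate_alert_file(alert_path, pubkey_hex))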
|
the-stack_0_16947 | """Control Action item definition."""
from gaphas.geometry import Rectangle
from gaphor.core.modeling import DrawContext
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import Box, Text, draw_border
from gaphor.diagram.support import represents
from gaphor.diagram.text import FontStyle, FontWeight
from gaphor.i18n import gettext
from gaphor.RAAML import raaml
from gaphor.UML.recipes import stereotypes_str
@represents(raaml.ControlAction)
class ControlActionItem(Classified, ElementPresentation):
def __init__(self, diagram, id=None):
super().__init__(diagram, id)
self.watch("subject[NamedElement].name").watch(
"subject[NamedElement].namespace.name"
)
def update_shapes(self, event=None):
self.shape = Box(
Box(
Text(
text=lambda: stereotypes_str(
self.subject, [gettext("Control Action")]
),
),
Text(
text=lambda: self.subject.name or "",
width=lambda: self.width - 4,
style={
"font-weight": FontWeight.BOLD,
"font-style": FontStyle.NORMAL,
},
),
Text(
text=lambda: from_package_str(self),
style={"font-size": "x-small"},
),
style={"padding": (12, 4, 12, 4)},
),
draw=draw_control_action,
)
def draw_control_action(box, context: DrawContext, bounding_box: Rectangle):
draw_border(box, context, bounding_box)
|
the-stack_0_16950 | """
Read demography data from input database.
"""
from typing import List
from functools import lru_cache
import numpy as np
import pandas as pd
from autumn.inputs.database import get_input_db
INF = float("inf")
def _get_death_rates(country_iso_code: str):
input_db = get_input_db()
death_df = input_db.query("deaths", conditions=[f"iso3='{country_iso_code}'"],)
pop_df = input_db.query(
"population", conditions=[f"iso3='{country_iso_code}'", "region IS NULL",],
)
# Calculate mean year and time period
death_df["mean_year"] = (death_df["start_year"] + death_df["end_year"]) / 2
death_df["period"] = death_df["end_year"] - death_df["start_year"]
# Combine population and total death data so we can calulate death rate.
# Throws away data for population over 100 y.o.
rate_df = pd.merge(
death_df, pop_df, left_on=["start_year", "start_age"], right_on=["year", "start_age"]
)
# Calculate death rate.
rate_df["death_rate"] = rate_df["death_count"] / (rate_df["population"] * rate_df["period"])
cols = ["mean_year", "start_age", "death_rate"]
rate_df = rate_df.drop(columns=[c for c in rate_df.columns if c not in cols])
rate_df = rate_df.sort_values(["mean_year", "start_age"])
return rate_df
def _get_life_expectancy(country_iso_code: str):
input_db = get_input_db()
expectancy_df = input_db.query("life_expectancy", conditions=[f"iso3='{country_iso_code}'"],)
# Calculate mean year
expectancy_df["mean_year"] = (expectancy_df["start_year"] + expectancy_df["end_year"]) / 2
cols = ["mean_year", "start_age", "life_expectancy"]
expectancy_df = expectancy_df.drop(columns=[c for c in expectancy_df.columns if c not in cols])
expectancy_df = expectancy_df.sort_values(["mean_year", "start_age"])
return expectancy_df
def get_death_rates_by_agegroup(age_breakpoints: List[float], country_iso_code: str):
"""
Find death rates from UN data that are specific to the age groups provided.
Returns a list of death rates and a list of years.
"""
assert age_breakpoints == sorted(age_breakpoints)
assert age_breakpoints[0] == 0
input_db = get_input_db()
rate_df = _get_death_rates(country_iso_code)
years = rate_df["mean_year"].unique().tolist()
orig_ages = rate_df["start_age"].unique().tolist()
year_step = 5
year_rates = {}
for year in years:
orig_rates = rate_df[rate_df["mean_year"] == year]["death_rate"].tolist()
new_rates = downsample_rate(orig_rates, orig_ages, year_step, age_breakpoints)
year_rates[year] = new_rates
death_rates_by_agegroup = {}
for i, age in enumerate(age_breakpoints):
death_rates_by_agegroup[age] = [year_rates[y][i] for y in years]
return death_rates_by_agegroup, years
def get_life_expectancy_by_agegroup(age_breakpoints: List[float], country_iso_code: str):
"""
Find life expectancy from UN data that are specific to the age groups provided.
Returns a list of life expectancy and a list of years.
"""
assert age_breakpoints == sorted(age_breakpoints)
assert age_breakpoints[0] == 0
life_expectancy_df = _get_life_expectancy(country_iso_code)
years = life_expectancy_df["mean_year"].unique().tolist()
orig_ages = life_expectancy_df["start_age"].unique().tolist()
year_step = 5
year_expectancy = {}
for year in years:
orig_expectancy = life_expectancy_df[life_expectancy_df["mean_year"] == year]["life_expectancy"].tolist()
new_expectancy = downsample_rate(orig_expectancy, orig_ages, year_step, age_breakpoints)
year_expectancy[year] = new_expectancy
life_expectancy_by_agegroup = {}
for i, age in enumerate(age_breakpoints):
life_expectancy_by_agegroup[age] = [year_expectancy[y][i] for y in years]
return life_expectancy_by_agegroup, years
def get_iso3_from_country_name(country_name: str):
"""
Return the iso3 code matching with a given country name.
"""
input_db = get_input_db()
country_df = input_db.query("countries", conditions=[f"country='{country_name}'"])
results = country_df["iso3"].tolist()
if results:
return results[0]
else:
raise ValueError(f"Country name {country_name} not found")
def get_crude_birth_rate(country_iso_code: str):
"""
Gets crude birth rate over time for a given country.
Returns a list of birth rates and a list of years.
"""
input_db = get_input_db()
birth_df = input_db.query("birth_rates", conditions=[f"iso3='{country_iso_code}'"])
birth_df = birth_df.sort_values(["mean_year"])
return birth_df["birth_rate"].tolist(), birth_df["mean_year"].tolist()
def get_population_by_agegroup(
age_breakpoints: List[float], country_iso_code: str, region: str = None, year: int = 2020
):
"""
Find population for age bins.
Returns a list of ints, each item being the population for that age bracket.
"""
assert age_breakpoints == sorted(age_breakpoints)
assert age_breakpoints[0] == 0
input_db = get_input_db()
pop_df = input_db.query(
"population",
conditions=[
f"iso3='{country_iso_code}'",
f"year={year}",
f"region='{region}'" if region else "region IS NULL",
],
)
pop_df = pop_df.sort_values(["start_age"])
orig_ages = pop_df["start_age"].tolist()
orig_pop = pop_df["population"].tolist()
assert len(orig_ages) == len(orig_pop)
population = downsample_quantity(orig_pop, orig_ages, age_breakpoints)
return [int(p) for p in population]
def downsample_rate(
orig_rates: List[float], orig_bins: List[float], orig_step: float, new_bins: List[float]
):
"""
Downsample original rates from their current bins to new bins
Assume new bins are smaller than, or equal to, the original bins.
Requires that original values are equispaced by `orig_step` amount.
"""
num_orig_bins = len(orig_bins)
num_new_bins = len(new_bins)
weights = get_bin_weights(orig_bins, new_bins)
new_rates = [0 for _ in range(num_new_bins)]
orig_rates = np.array(orig_rates)
for i_n in range(num_new_bins):
time_chunks = np.zeros(num_orig_bins)
for i_o in range(num_orig_bins):
time_chunks[i_o] = weights[i_o, i_n] * orig_step
new_rates[i_n] = (orig_rates * time_chunks).sum() / time_chunks.sum()
return new_rates
def downsample_quantity(orig_vals: List[float], orig_bins: List[float], new_bins: List[float]):
"""
Downsample original values from their current bins to new bins
Assume new bins are smaller than, or equal to, the original bins
"""
num_orig_bins = len(orig_bins)
num_new_bins = len(new_bins)
weights = get_bin_weights(orig_bins, new_bins)
new_vals = [0 for _ in range(num_new_bins)]
for i_n in range(num_new_bins):
for i_o in range(num_orig_bins):
new_vals[i_n] += weights[i_o, i_n] * orig_vals[i_o]
assert sum(orig_vals) - sum(new_vals) < 1e-3
return new_vals
def get_bin_weights(orig_bins: List[float], new_bins: List[float]):
"""
Gets 2D weight matrix for moving from orig bins to new bins.
"""
num_orig_bins = len(orig_bins)
num_new_bins = len(new_bins)
weights = np.zeros([num_orig_bins, num_new_bins])
for i_n, new_start in enumerate(new_bins):
# Find the new bin end
if i_n == num_new_bins - 1:
new_end = INF
else:
new_end = new_bins[i_n + 1]
# Loop through all old bins, take matching proportion
for i_o, orig_start in enumerate(orig_bins):
# Find the orig bin end
if i_o == len(orig_bins) - 1:
orig_end = INF
else:
orig_end = orig_bins[i_o + 1]
is_new_bin_inside_old_one = new_start > orig_start and new_end < orig_end
assert not is_new_bin_inside_old_one, "New bin inside old bin"
if orig_end == INF and new_end == INF:
# Final bins, add everything
assert new_start <= orig_start, "Cannot slice up infinity"
weights[i_o, i_n] = 1
elif orig_start <= new_start < orig_end:
# New bin starts at start, or half way through an old bin
# We get a fraction of the end of the bin
weights[i_o, i_n] = (orig_end - new_start) / (orig_end - orig_start)
elif new_start < orig_start and new_end >= orig_end:
# New bin encompasses old bin, add the whole thing
weights[i_o, i_n] = 1
elif orig_start < new_end < orig_end:
# New bin ends inside an old bin, take a fraction of the start.
weights[i_o, i_n] = (new_end - orig_start) / (orig_end - orig_start)
return weights
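# Minimal self-check sketch (synthetic numbers only, not part of the original module):
if __name__ == "__main__":
    # 10 people aged 0-4, 20 aged 5-9 and 30 aged 10+, regrouped into 0-9 and 10+ bins.
    vals = downsample_quantity([10, 20, 30], [0, 5, 10], [0, 10])
    assert vals == [30, 30]
    print("downsample_quantity([10, 20, 30], [0, 5, 10], [0, 10]) ->", vals)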
|
the-stack_0_16953 | from typing import List
from financial_data.extensions.database import db
from .interface import EfficiencyIndicatorsInterface
from .model import EfficiencyIndicators
class EfficiencyIndicatorsService:
@staticmethod
def get_all() -> List[EfficiencyIndicators]:
return EfficiencyIndicators.query.all()
@staticmethod
def get_by_id(asset_id: int) -> EfficiencyIndicators:
return EfficiencyIndicators.query.filter(EfficiencyIndicators.asset_id == asset_id).first()
@staticmethod
def get_by_symbol(asset_symbol: str) -> EfficiencyIndicators:
return EfficiencyIndicators.query.filter(EfficiencyIndicators.asset_symbol == asset_symbol.upper()).first()
@staticmethod
def update(ei: EfficiencyIndicators, ei_changes: EfficiencyIndicatorsInterface) -> EfficiencyIndicators:
ei.update(ei_changes)
db.session.commit()
return ei
@staticmethod
def delete_by_symbol(asset_symbol: str) -> List[str]:
ei = EfficiencyIndicators.query.filter(EfficiencyIndicators.asset_symbol == asset_symbol.upper()).first()
if not ei:
return []
db.session.delete(ei)
db.session.commit()
        return [asset_symbol.upper()]
@staticmethod
def create(new_attrs: EfficiencyIndicatorsInterface) -> EfficiencyIndicators:
new_ei = EfficiencyIndicators(
asset_symbol=new_attrs['asset_symbol'].upper(),
search_date=new_attrs['search_date'],
gross_margin=new_attrs['gross_margin'],
ebitda_margin=new_attrs['ebitda_margin'],
ebit_margin=new_attrs['ebit_margin'],
net_margin=new_attrs['net_margin']
)
db.session.add(new_ei)
db.session.commit()
return new_ei
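# Illustrative usage inside an application/request context (the symbol and values are
# hypothetical, not part of the original service):
#
#   ei = EfficiencyIndicatorsService.get_by_symbol("abcd3")
#   if ei is not None:
#       EfficiencyIndicatorsService.update(ei, {"net_margin": 0.18})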
|
the-stack_0_16954 | #! /usr/bin/env python3
'''
FAVITES: FrAmework for VIral Transmission and Evolution Simulation
'''
import argparse
from json import loads
from os import makedirs
from os.path import abspath,expanduser,isdir,isfile
from sys import platform,stderr
from subprocess import call,check_output,CalledProcessError,STDOUT
from tempfile import NamedTemporaryFile
from warnings import warn
from urllib.error import URLError
from urllib.request import urlopen
DOCKER_IMAGE = "niemasd/favites"
MAIN_VERSION_SYMBOLS = {'0','1','2','3','4','5','6','7','8','9','.'}
# return True if the given tag (string) is a main version (e.g. '1.1.1') or False if not (e.g. '1.1.1a')
def is_main_version(tag):
for c in tag:
if c not in MAIN_VERSION_SYMBOLS:
return False
return True
# get the latest FAVITES Docker image main version
def get_latest_version():
try:
DOCKER_TAGS = list(); curr_url = "https://hub.docker.com/v2/repositories/%s/tags/?page=1" % DOCKER_IMAGE
while curr_url is not None:
tmp = loads(urlopen(curr_url).read().decode('utf-8'))
DOCKER_TAGS += [e['name'] for e in tmp['results']]
curr_url = tmp['next']
DOCKER_TAGS = [tag for tag in DOCKER_TAGS if is_main_version(tag)] # remove non-main-version
DOCKER_TAGS = [tuple(int(i) for i in tag.split('.')) for tag in DOCKER_TAGS] # convert to tuple of ints
DOCKER_TAGS.sort() # sort in ascending order
return '.'.join(str(i) for i in DOCKER_TAGS[-1])
except Exception as e:
raise RuntimeError("Failed to use Python 3 urllib to connect to FAVITES Docker repository webpage\n%s" % str(e))
# if Mac OS X, use portable TMPDIR
if platform == 'darwin':
from os import environ
environ['TMPDIR'] = '/tmp/docker_tmp'
# parse user args
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config', required=True, type=str, help="Configuration file")
parser.add_argument('-o', '--out_dir', required=False, type=str, help="Output directory")
parser.add_argument('-s', '--random_number_seed', required=False, type=int, help="Random number seed")
parser.add_argument('-v', '--verbose', action="store_true", help="Print verbose messages to stderr")
parser.add_argument('-u', '--update', nargs='*', help="Update Docker image (-u to pull newest version, -u <VERSION> to pull <VERSION>)")
args = parser.parse_args()
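# Example invocation (script name, paths and version below are placeholders, not prescriptive):
#   python3 run_favites_docker.py -c my_config.json -o favites_output -s 42 -u 1.2.10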
# check user args
CONFIG = abspath(expanduser(args.config))
assert isfile(CONFIG), "ERROR: Cannot open configuration file: %s" % CONFIG
try:
CONFIG_DICT = eval(open(CONFIG).read())
except:
raise SyntaxError("Malformed FAVITES configuration file. Must be valid JSON")
if args.out_dir is not None:
if 'out_dir' in CONFIG_DICT:
warn("Output directory specified in command line (%s) and config file (%s). Command line will take precedence" % (args.out_dir, CONFIG_DICT['out_dir']))
CONFIG_DICT['out_dir'] = args.out_dir
assert 'out_dir' in CONFIG_DICT, "Parameter 'out_dir' is not in the configuration file!"
OUTPUT_DIR = abspath(expanduser(CONFIG_DICT['out_dir']))
if args.random_number_seed is not None:
if "random_number_seed" in CONFIG_DICT:
warn("Random number seed specified in command line (%d) and config file (%s). Command line will take precedence" % (args.random_number_seed, CONFIG_DICT['random_number_seed']))
CONFIG_DICT["random_number_seed"] = args.random_number_seed
if "random_number_seed" not in CONFIG_DICT:
CONFIG_DICT["random_number_seed"] = ""
CN_FILE = None
if 'contact_network_file' in CONFIG_DICT:
CN_FILE = abspath(expanduser(CONFIG_DICT['contact_network_file']))
assert isfile(CN_FILE), "File not found: %s" % CONFIG_DICT['contact_network_file']
CONFIG_DICT['contact_network_file'] = '/FAVITES_MOUNT/%s' % CN_FILE.split('/')[-1]
TN_FILE = None
if 'transmission_network_file' in CONFIG_DICT:
TN_FILE = abspath(expanduser(CONFIG_DICT['transmission_network_file']))
assert isfile(TN_FILE), "File not found: %s" % CONFIG_DICT['transmission_network_file']
CONFIG_DICT['transmission_network_file'] = '/FAVITES_MOUNT/%s' % TN_FILE.split('/')[-1]
SAMPLE_TIME_FILE = None
if 'sample_time_file' in CONFIG_DICT:
SAMPLE_TIME_FILE = abspath(expanduser(CONFIG_DICT['sample_time_file']))
assert isfile(SAMPLE_TIME_FILE), "File not found: %s" % CONFIG_DICT['sample_time_file']
CONFIG_DICT['sample_time_file'] = '/FAVITES_MOUNT/%s' % SAMPLE_TIME_FILE.split('/')[-1]
TREE_FILE = None
if 'tree_file' in CONFIG_DICT:
TREE_FILE = abspath(expanduser(CONFIG_DICT['tree_file']))
assert isfile(TREE_FILE), "File not found: %s" % CONFIG_DICT['tree_file']
CONFIG_DICT['tree_file'] = '/FAVITES_MOUNT/%s' % TREE_FILE.split('/')[-1]
ERRORFREE_SEQ_FILE = None
if 'errorfree_sequence_file' in CONFIG_DICT:
ERRORFREE_SEQ_FILE = abspath(expanduser(CONFIG_DICT['errorfree_sequence_file']))
assert isfile(ERRORFREE_SEQ_FILE), "File not found: %s" % CONFIG_DICT['errorfree_sequence_file']
CONFIG_DICT['errorfree_sequence_file'] = '/FAVITES_MOUNT/%s' % ERRORFREE_SEQ_FILE.split('/')[-1]
HMMBUILD_MSA_FILE = None
if 'hmmbuild_msafile' in CONFIG_DICT:
HMMBUILD_MSA_FILE = abspath(expanduser(CONFIG_DICT['hmmbuild_msafile']))
assert isfile(HMMBUILD_MSA_FILE), "File not found: %s" % CONFIG_DICT['hmmbuild_msafile']
CONFIG_DICT['hmmbuild_msafile'] = '/FAVITES_MOUNT/%s' % HMMBUILD_MSA_FILE.split('/')[-1]
TMP_CONFIG = NamedTemporaryFile('w')
TMP_CONFIG.write(str(CONFIG_DICT).replace(": inf",": float('inf')"))
TMP_CONFIG.flush()
# pull the newest versioned Docker image (if applicable)
if args.update is None:
version = None
try:
o = check_output(['docker','images']).decode().splitlines()
for l in o:
if l.startswith(DOCKER_IMAGE):
version = '%s:%s' % (DOCKER_IMAGE,l.split()[1]); break
except CalledProcessError as e:
raise RuntimeError("docker images command failed\n%s"%e.output)
if version is None:
args.update = []
if args.update is not None:
assert len(args.update) < 2, "More than one Docker image version specified. Must either specify just -u or -u <VERSION>"
if len(args.update) == 0:
tag = get_latest_version()
else:
tag = args.update[0]
version = '%s:%s'%(DOCKER_IMAGE,tag)
try:
need_to_pull = True
if tag != 'latest':
o = check_output(['docker','images']).decode().splitlines()
for l in o:
if l.startswith(DOCKER_IMAGE) and l.split()[1] == version.split(':')[1]:
need_to_pull = False; break
except CalledProcessError as e:
raise RuntimeError("docker images command failed\n%s"%e.output)
if need_to_pull:
print("Pulling Docker image (%s)..." % tag, end=' ', file=stderr); stderr.flush()
try:
o = check_output(['docker','pull',version], stderr=STDOUT)
print("done", file=stderr); stderr.flush()
except Exception as e:
if "manifest for %s not found"%version in e.output.decode():
raise ValueError("Invalid FAVITES version specified: %s"%tag)
else:
raise RuntimeError("docker pull command failed\n%s"%e.output)
try:
print("Removing old Docker images...", end=' ', file=stderr); stderr.flush()
o = check_output(['docker','images']).decode().splitlines()
for l in o:
if l.startswith(DOCKER_IMAGE):
p = l.split()
if tag != p[1]:
check_output(['docker','image','rm','--force',p[2]])
print("done", file=stderr)
except:
print("Failed to remove old Docker images", file=stderr); stderr.flush()
# create output directory
try:
makedirs(OUTPUT_DIR)
except:
if isdir(OUTPUT_DIR):
response = 'x'
while len(response) == 0 or response[0] not in {'y','n'}:
response = input("ERROR: Output directory exists. Overwrite? All contents will be deleted. (y/n) ").strip().lower()
if response[0] == 'y':
from shutil import rmtree
rmtree(OUTPUT_DIR); makedirs(OUTPUT_DIR)
else:
exit(-1)
# call Docker image for user
COMMAND = ['docker','run',] # Docker command
COMMAND += ['-v',TMP_CONFIG.name+':/FAVITES_MOUNT/USER_CONFIG.JSON'] # mount config file
COMMAND += ['-v',OUTPUT_DIR+':/FAVITES_MOUNT/OUTPUT_DIR'] # mount output directory
COMMAND += ['-v',TMP_CONFIG.name+':/USER_CONFIG.JSON'] # compatibility for older Docker images
COMMAND += ['-v',OUTPUT_DIR+':/OUTPUT_DIR']
###################
# hack to access ccm config and supporting file
COMMAND += ['-v','/Users/b37v456/GIT/social_sampling_in_epidemics/simulations/20220610/contact_config.json:/FAVITES_MOUNT/contact_config.json']
COMMAND += ['-v','/Users/b37v456/GIT/social_sampling_in_epidemics/simulations/20220610/initial_contact_graph.csv:/FAVITES_MOUNT/initial_contact_graph.csv']
###################
if CN_FILE is not None: # mount contact network file (if need be)
COMMAND += ['-v',CN_FILE+':'+CONFIG_DICT['contact_network_file']]
if TN_FILE is not None: # mount transmission network file (if need be)
COMMAND += ['-v',TN_FILE+':'+CONFIG_DICT['transmission_network_file']]
if SAMPLE_TIME_FILE is not None:
COMMAND += ['-v',SAMPLE_TIME_FILE+':'+CONFIG_DICT['sample_time_file']]
if TREE_FILE is not None:
COMMAND += ['-v',TREE_FILE+':'+CONFIG_DICT['tree_file']]
if ERRORFREE_SEQ_FILE is not None:
COMMAND += ['-v',ERRORFREE_SEQ_FILE+':'+CONFIG_DICT['errorfree_sequence_file']]
if HMMBUILD_MSA_FILE is not None:
COMMAND += ['-v',HMMBUILD_MSA_FILE+':'+CONFIG_DICT['hmmbuild_msafile']]
if not platform.startswith('win'): # if not Windows,
from os import geteuid,getegid
COMMAND += ['-u',str(geteuid())+':'+str(getegid())] # make output files owned by user instead of root
COMMAND += [version] # Docker image
try:
if args.verbose:
print("\n\nRunning FAVITES Docker command:\n%s\n\n" % ' '.join(COMMAND))
call(COMMAND)
except:
exit(-1)
TMP_CONFIG.close()
|
the-stack_0_16955 | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Defines base session class."""
from __future__ import annotations
import weakref
from typing import TYPE_CHECKING
import _ba
if TYPE_CHECKING:
from weakref import ReferenceType
from typing import Sequence, List, Dict, Any, Optional, Set
import ba
class Session:
"""Defines a high level series of activities with a common purpose.
category: Gameplay Classes
Examples of sessions are ba.FreeForAllSession, ba.DualTeamSession, and
ba.CoopSession.
A Session is responsible for wrangling and transitioning between various
ba.Activity instances such as mini-games and score-screens, and for
maintaining state between them (players, teams, score tallies, etc).
Attributes:
teams
All the ba.Teams in the Session. Most things should use the team
list in ba.Activity; not this.
players
All ba.Players in the Session. Most things should use the player
list in ba.Activity; not this. Some players, such as those who have
not yet selected a character, will only appear on this list.
min_players
The minimum number of Players who must be present for the Session
to proceed past the initial joining screen.
max_players
The maximum number of Players allowed in the Session.
lobby
The ba.Lobby instance where new ba.Players go to select a
Profile/Team/etc. before being added to games.
Be aware this value may be None if a Session does not allow
any such selection.
campaign
The ba.Campaign instance this Session represents, or None if
there is no associated Campaign.
"""
# Note: even though these are instance vars, we annotate them at the
# class level so that docs generation can access their types.
campaign: Optional[ba.Campaign]
lobby: ba.Lobby
max_players: int
min_players: int
players: List[ba.Player]
teams: List[ba.Team]
def __init__(self,
depsets: Sequence[ba.DependencySet],
team_names: Sequence[str] = None,
team_colors: Sequence[Sequence[float]] = None,
use_team_colors: bool = True,
min_players: int = 1,
max_players: int = 8,
allow_mid_activity_joins: bool = True):
"""Instantiate a session.
depsets should be a sequence of successfully resolved ba.DependencySet
instances; one for each ba.Activity the session may potentially run.
"""
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=cyclic-import
from ba._lobby import Lobby
from ba._stats import Stats
from ba._gameutils import sharedobj
from ba._gameactivity import GameActivity
from ba._team import Team
from ba._error import DependencyError
from ba._dependency import Dependency, AssetPackage
# First off, resolve all dependency-sets we were passed.
# If things are missing, we'll try to gather them into a single
# missing-deps exception if possible to give the caller a clean
# path to download missing stuff and try again.
missing_asset_packages: Set[str] = set()
for depset in depsets:
try:
depset.resolve()
except DependencyError as exc:
# Gather/report missing assets only; barf on anything else.
if all(issubclass(d.cls, AssetPackage) for d in exc.deps):
for dep in exc.deps:
assert isinstance(dep.config, str)
missing_asset_packages.add(dep.config)
else:
missing_info = [(d.cls, d.config) for d in exc.deps]
raise RuntimeError(
f'Missing non-asset dependencies: {missing_info}')
# Throw a combined exception if we found anything missing.
if missing_asset_packages:
raise DependencyError([
Dependency(AssetPackage, set_id)
for set_id in missing_asset_packages
])
# Ok; looks like our dependencies check out.
# Now give the engine a list of asset-set-ids to pass along to clients.
required_asset_packages: Set[str] = set()
for depset in depsets:
required_asset_packages.update(depset.get_asset_package_ids())
# print('Would set host-session asset-reqs to:',
# required_asset_packages)
# First thing, wire up our internal engine data.
self._sessiondata = _ba.register_session(self)
self.tournament_id: Optional[str] = None
# FIXME: This stuff shouldn't be here.
self.sharedobjs: Dict[str, Any] = {}
# TeamGameActivity uses this to display a help overlay on the first
# activity only.
self.have_shown_controls_help_overlay = False
self.campaign = None
# FIXME: Should be able to kill this I think.
self.campaign_state: Dict[str, str] = {}
self._use_teams = (team_names is not None)
self._use_team_colors = use_team_colors
self._in_set_activity = False
self._allow_mid_activity_joins = allow_mid_activity_joins
self.teams = []
self.players = []
self._next_team_id = 0
self._activity_retained: Optional[ba.Activity] = None
self.launch_end_session_activity_time: Optional[float] = None
self._activity_end_timer: Optional[ba.Timer] = None
# Hacky way to create empty weak ref; must be a better way.
class _EmptyObj:
pass
self._activity_weak: ReferenceType[ba.Activity]
self._activity_weak = weakref.ref(_EmptyObj()) # type: ignore
if self._activity_weak() is not None:
raise Exception('Error creating empty activity weak ref.')
self._next_activity: Optional[ba.Activity] = None
self.wants_to_end = False
self._ending = False
self.min_players = min_players
self.max_players = max_players
# Create Teams.
if self._use_teams:
assert team_names is not None
assert team_colors is not None
for i, color in enumerate(team_colors):
team = Team(team_id=self._next_team_id,
name=GameActivity.get_team_display_string(
team_names[i]),
color=color)
self.teams.append(team)
self._next_team_id += 1
try:
with _ba.Context(self):
self.on_team_join(team)
except Exception:
from ba import _error
_error.print_exception(
f'Error in on_team_join for {self}.')
self.lobby = Lobby()
self.stats = Stats()
# Instantiate our session globals node
# (so it can apply default settings).
sharedobj('globals')
@property
def use_teams(self) -> bool:
"""(internal)"""
return self._use_teams
@property
def use_team_colors(self) -> bool:
"""(internal)"""
return self._use_team_colors
def on_player_request(self, player: ba.Player) -> bool:
"""Called when a new ba.Player wants to join the Session.
This should return True or False to accept/reject.
"""
from ba._lang import Lstr
# Limit player counts *unless* we're in a stress test.
if _ba.app.stress_test_reset_timer is None:
if len(self.players) >= self.max_players:
# Print a rejection message *only* to the client trying to
# join (prevents spamming everyone else in the game).
_ba.playsound(_ba.getsound('error'))
_ba.screenmessage(
Lstr(resource='playerLimitReachedText',
subs=[('${COUNT}', str(self.max_players))]),
color=(0.8, 0.0, 0.0),
clients=[player.get_input_device().client_id],
transient=True)
return False
_ba.playsound(_ba.getsound('dripity'))
return True
def on_player_leave(self, player: ba.Player) -> None:
"""Called when a previously-accepted ba.Player leaves the session."""
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=cyclic-import
from ba._freeforallsession import FreeForAllSession
from ba._lang import Lstr
from ba import _error
# Remove them from the game rosters.
if player in self.players:
_ba.playsound(_ba.getsound('playerLeft'))
team: Optional[ba.Team]
# The player will have no team if they are still in the lobby.
try:
team = player.team
except _error.TeamNotFoundError:
team = None
activity = self._activity_weak()
# If he had no team, he's in the lobby.
# If we have a current activity with a lobby, ask them to
# remove him.
if team is None:
with _ba.Context(self):
try:
self.lobby.remove_chooser(player)
except Exception:
_error.print_exception(
'Error in Lobby.remove_chooser()')
# *If* they were actually in the game, announce their departure.
if team is not None:
_ba.screenmessage(
Lstr(resource='playerLeftText',
subs=[('${PLAYER}', player.get_name(full=True))]))
# Remove him from his team and session lists.
# (he may not be on the team list since player are re-added to
# team lists every activity)
if team is not None and player in team.players:
# Testing; can remove this eventually.
if isinstance(self, FreeForAllSession):
if len(team.players) != 1:
_error.print_error('expected 1 player in FFA team')
team.players.remove(player)
# Remove player from any current activity.
if activity is not None and player in activity.players:
activity.players.remove(player)
# Run the activity callback unless its been expired.
if not activity.is_expired():
try:
with _ba.Context(activity):
activity.on_player_leave(player)
except Exception:
_error.print_exception(
'exception in on_player_leave for activity',
activity)
else:
_error.print_error('expired activity in on_player_leave;'
" shouldn't happen")
player.set_activity(None)
player.set_node(None)
# Reset the player; this will remove its actor-ref and clear
# its calls/etc
try:
with _ba.Context(activity):
player.reset()
except Exception:
_error.print_exception(
'exception in player.reset in'
' on_player_leave for player', player)
# If we're a non-team session, remove the player's team completely.
if not self._use_teams and team is not None:
# If the team's in an activity, call its on_team_leave
# callback.
if activity is not None and team in activity.teams:
activity.teams.remove(team)
if not activity.is_expired():
try:
with _ba.Context(activity):
activity.on_team_leave(team)
except Exception:
_error.print_exception(
'exception in on_team_leave for activity',
activity)
else:
_error.print_error(
'expired activity in on_player_leave p2'
"; shouldn't happen")
# Clear the team's game-data (so dying stuff will
# have proper context).
try:
with _ba.Context(activity):
team.reset_gamedata()
except Exception:
_error.print_exception(
'exception clearing gamedata for team:', team,
'for player:', player, 'in activity:', activity)
# Remove the team from the session.
self.teams.remove(team)
try:
with _ba.Context(self):
self.on_team_leave(team)
except Exception:
_error.print_exception(
'exception in on_team_leave for session', self)
# Clear the team's session-data (so dying stuff will
# have proper context).
try:
with _ba.Context(self):
team.reset_sessiondata()
except Exception:
_error.print_exception(
'exception clearing sessiondata for team:', team,
'in session:', self)
# Now remove them from the session list.
self.players.remove(player)
else:
print('ERROR: Session.on_player_leave called'
' for player not in our list.')
def end(self) -> None:
"""Initiates an end to the session and a return to the main menu.
Note that this happens asynchronously, allowing the
session and its activities to shut down gracefully.
"""
self.wants_to_end = True
if self._next_activity is None:
self.launch_end_session_activity()
def launch_end_session_activity(self) -> None:
"""(internal)"""
from ba import _error
from ba._activitytypes import EndSessionActivity
from ba._enums import TimeType
with _ba.Context(self):
curtime = _ba.time(TimeType.REAL)
if self._ending:
# Ignore repeats unless its been a while.
assert self.launch_end_session_activity_time is not None
since_last = (curtime - self.launch_end_session_activity_time)
if since_last < 30.0:
return
_error.print_error(
'launch_end_session_activity called twice (since_last=' +
str(since_last) + ')')
self.launch_end_session_activity_time = curtime
self.set_activity(_ba.new_activity(EndSessionActivity))
self.wants_to_end = False
self._ending = True # Prevent further activity-mucking.
def on_team_join(self, team: ba.Team) -> None:
"""Called when a new ba.Team joins the session."""
def on_team_leave(self, team: ba.Team) -> None:
"""Called when a ba.Team is leaving the session."""
def _complete_end_activity(self, activity: ba.Activity,
results: Any) -> None:
# Run the subclass callback in the session context.
try:
with _ba.Context(self):
self.on_activity_end(activity, results)
except Exception:
from ba import _error
_error.print_exception(
'exception in on_activity_end() for session', self, 'activity',
activity, 'with results', results)
def end_activity(self, activity: ba.Activity, results: Any, delay: float,
force: bool) -> None:
"""Commence shutdown of a ba.Activity (if not already occurring).
'delay' is the time delay before the Activity actually ends
(in seconds). Further calls to end() will be ignored up until
this time, unless 'force' is True, in which case the new results
will replace the old.
"""
from ba._general import Call
from ba._enums import TimeType
# Only pay attention if this is coming from our current activity.
if activity is not self._activity_retained:
return
# If this activity hasn't begun yet, just set it up to end immediately
# once it does.
if not activity.has_begun():
activity.set_immediate_end(results, delay, force)
# The activity has already begun; get ready to end it.
else:
if (not activity.has_ended()) or force:
activity.set_has_ended(True)
# Set a timer to set in motion this activity's demise.
self._activity_end_timer = _ba.Timer(
delay,
Call(self._complete_end_activity, activity, results),
timetype=TimeType.BASE)
def handlemessage(self, msg: Any) -> Any:
"""General message handling; can be passed any message object."""
from ba._lobby import PlayerReadyMessage
from ba._error import UNHANDLED
from ba._messages import PlayerProfilesChangedMessage
if isinstance(msg, PlayerReadyMessage):
self._on_player_ready(msg.chooser)
return None
if isinstance(msg, PlayerProfilesChangedMessage):
# If we have a current activity with a lobby, ask it to reload
# profiles.
with _ba.Context(self):
self.lobby.reload_profiles()
return None
return UNHANDLED
def set_activity(self, activity: ba.Activity) -> None:
"""Assign a new current ba.Activity for the session.
Note that this will not change the current context to the new
Activity's. Code must be run in the new activity's methods
(on_transition_in, etc) to get it. (so you can't do
session.set_activity(foo) and then ba.newnode() to add a node to foo)
"""
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
from ba import _error
from ba._gameutils import sharedobj
from ba._enums import TimeType
# Sanity test: make sure this doesn't get called recursively.
if self._in_set_activity:
raise Exception(
'Session.set_activity() cannot be called recursively.')
if activity.session is not _ba.getsession():
raise Exception("Provided Activity's Session is not current.")
# Quietly ignore this if the whole session is going down.
if self._ending:
return
if activity is self._activity_retained:
_error.print_error('activity set to already-current activity')
return
if self._next_activity is not None:
raise Exception('Activity switch already in progress (to ' +
str(self._next_activity) + ')')
self._in_set_activity = True
prev_activity = self._activity_retained
if prev_activity is not None:
with _ba.Context(prev_activity):
gprev = sharedobj('globals')
else:
gprev = None
with _ba.Context(activity):
# Now that it's going to be front and center,
# set some global values based on what the activity wants.
glb = sharedobj('globals')
glb.use_fixed_vr_overlay = activity.use_fixed_vr_overlay
glb.allow_kick_idle_players = activity.allow_kick_idle_players
if activity.inherits_slow_motion and gprev is not None:
glb.slow_motion = gprev.slow_motion
else:
glb.slow_motion = activity.slow_motion
if activity.inherits_music and gprev is not None:
glb.music_continuous = True # Prevent restarting same music.
glb.music = gprev.music
glb.music_count += 1
if activity.inherits_camera_vr_offset and gprev is not None:
glb.vr_camera_offset = gprev.vr_camera_offset
if activity.inherits_vr_overlay_center and gprev is not None:
glb.vr_overlay_center = gprev.vr_overlay_center
glb.vr_overlay_center_enabled = gprev.vr_overlay_center_enabled
# If they want to inherit tint from the previous activity.
if activity.inherits_tint and gprev is not None:
glb.tint = gprev.tint
glb.vignette_outer = gprev.vignette_outer
glb.vignette_inner = gprev.vignette_inner
# Let the activity do its thing.
activity.start_transition_in()
self._next_activity = activity
# If we have a current activity, tell it it's transitioning out;
# the next one will become current once this one dies.
if prev_activity is not None:
# pylint: disable=protected-access
prev_activity._transitioning_out = True
# pylint: enable=protected-access
# Activity will be None until the next one begins.
with _ba.Context(prev_activity):
prev_activity.on_transition_out()
# Setting this to None should free up the old activity to die,
# which will call begin_next_activity.
# We can still access our old activity through
# self._activity_weak() to keep it up to date on player
# joins/departures/etc until it dies.
self._activity_retained = None
# There's no existing activity; lets just go ahead with the begin call.
else:
self.begin_next_activity()
# Tell the C layer that this new activity is now 'foregrounded'.
# This means that its globals node controls global stuff and stuff
# like console operations, keyboard shortcuts, etc will run in it.
# pylint: disable=protected-access
# noinspection PyProtectedMember
activity._activity_data.make_foreground()
# pylint: enable=protected-access
# We want to call _destroy() for the previous activity once it should
# tear itself down, clear out any self-refs, etc. If the new activity
# has a transition-time, set it up to be called after that passes;
# otherwise call it immediately. After this call the activity should
# have no refs left to it and should die (which will trigger the next
# activity to run).
if prev_activity is not None:
if activity.transition_time > 0.0:
# FIXME: We should tweak the activity to not allow
# node-creation/etc when we call _destroy (or after).
with _ba.Context('ui'):
# pylint: disable=protected-access
# noinspection PyProtectedMember
_ba.timer(activity.transition_time,
prev_activity._destroy,
timetype=TimeType.REAL)
# Just run immediately.
else:
# noinspection PyProtectedMember
prev_activity._destroy() # pylint: disable=protected-access
self._in_set_activity = False
def getactivity(self) -> Optional[ba.Activity]:
"""Return the current foreground activity for this session."""
return self._activity_weak()
def get_custom_menu_entries(self) -> List[Dict[str, Any]]:
"""Subclasses can override this to provide custom menu entries.
The returned value should be a list of dicts, each containing
a 'label' and 'call' entry, with 'label' being the text for
the entry and 'call' being the callable to trigger if the entry
is pressed.
"""
return []
def _request_player(self, player: ba.Player) -> bool:
# If we're ending, allow no new players.
if self._ending:
return False
# Ask the user.
try:
with _ba.Context(self):
result = self.on_player_request(player)
except Exception:
from ba import _error
_error.print_exception('error in on_player_request call for', self)
result = False
# If the user said yes, add the player to the session list.
if result:
self.players.append(player)
# If we have a current activity with a lobby,
# ask it to bring up a chooser for this player.
# otherwise they'll have to wait around for the next activity.
with _ba.Context(self):
try:
self.lobby.add_chooser(player)
except Exception:
from ba import _error
_error.print_exception('exception in lobby.add_chooser()')
return result
def on_activity_end(self, activity: ba.Activity, results: Any) -> None:
"""Called when the current ba.Activity has ended.
The ba.Session should look at the results and start
another ba.Activity.
"""
def begin_next_activity(self) -> None:
"""Called once the previous activity has been totally torn down.
This means we're ready to begin the next one
"""
if self._next_activity is not None:
# We store both a weak and a strong ref to the new activity;
# the strong is to keep it alive and the weak is so we can access
# it even after we've released the strong-ref to allow it to die.
self._activity_retained = self._next_activity
self._activity_weak = weakref.ref(self._next_activity)
self._next_activity = None
# Lets kick out any players sitting in the lobby since
# new activities such as score screens could cover them up;
# better to have them rejoin.
self.lobby.remove_all_choosers_and_kick_players()
activity = self._activity_weak()
assert activity is not None
activity.begin(self)
def _on_player_ready(self, chooser: ba.Chooser) -> None:
"""Called when a ba.Player has checked themself ready."""
from ba._lang import Lstr
lobby = chooser.lobby
activity = self._activity_weak()
# In joining activities, we wait till all choosers are ready
# and then create all players at once.
if activity is not None and activity.is_joining_activity:
if lobby.check_all_ready():
choosers = lobby.get_choosers()
min_players = self.min_players
if len(choosers) >= min_players:
for lch in lobby.get_choosers():
self._add_chosen_player(lch)
lobby.remove_all_choosers()
# Get our next activity going.
self._complete_end_activity(activity, {})
else:
_ba.screenmessage(Lstr(resource='notEnoughPlayersText',
subs=[('${COUNT}', str(min_players))
]),
color=(1, 1, 0))
_ba.playsound(_ba.getsound('error'))
else:
return
# Otherwise just add players on the fly.
else:
self._add_chosen_player(chooser)
lobby.remove_chooser(chooser.getplayer())
def _add_chosen_player(self, chooser: ba.Chooser) -> ba.Player:
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
from ba import _error
from ba._lang import Lstr
from ba._team import Team
from ba import _freeforallsession
player = chooser.getplayer()
if player not in self.players:
_error.print_error('player not found in session '
'player-list after chooser selection')
activity = self._activity_weak()
assert activity is not None
# We need to reset the player's input here, as it is currently
# referencing the chooser which could inadvertently keep it alive.
player.reset_input()
# Pass it to the current activity if it has already begun
# (otherwise it'll get passed once begin is called).
pass_to_activity = (activity.has_begun()
and not activity.is_joining_activity)
# If we're not allowing mid-game joins, don't pass; just announce
# the arrival.
if pass_to_activity:
if not self._allow_mid_activity_joins:
pass_to_activity = False
with _ba.Context(self):
_ba.screenmessage(Lstr(resource='playerDelayedJoinText',
subs=[('${PLAYER}',
player.get_name(full=True))
]),
color=(0, 1, 0))
# If we're a non-team game, each player gets their own team
# (keeps mini-game coding simpler if we can always deal with teams).
if self._use_teams:
team = chooser.get_team()
else:
our_team_id = self._next_team_id
team = Team(team_id=our_team_id,
name=chooser.getplayer().get_name(full=True,
icon=False),
color=chooser.get_color())
self.teams.append(team)
self._next_team_id += 1
try:
with _ba.Context(self):
self.on_team_join(team)
except Exception:
_error.print_exception(f'exception in on_team_join for {self}')
if pass_to_activity:
if team in activity.teams:
_error.print_error(
'Duplicate team ID in ba.Session._add_chosen_player')
activity.teams.append(team)
try:
with _ba.Context(activity):
activity.on_team_join(team)
except Exception:
_error.print_exception(
f'ERROR: exception in on_team_join for {activity}')
player.set_data(team=team,
character=chooser.get_character_name(),
color=chooser.get_color(),
highlight=chooser.get_highlight())
self.stats.register_player(player)
if pass_to_activity:
if isinstance(self, _freeforallsession.FreeForAllSession):
if player.team.players:
_error.print_error('expected 0 players in FFA team')
# Don't actually add the player to their team list if we're not
# in an activity. (players get (re)added to their team lists
# when the activity begins).
player.team.players.append(player)
if player in activity.players:
_error.print_exception(
f'Dup player in ba.Session._add_chosen_player: {player}')
else:
activity.players.append(player)
player.set_activity(activity)
pnode = activity.create_player_node(player)
player.set_node(pnode)
try:
with _ba.Context(activity):
activity.on_player_join(player)
except Exception:
_error.print_exception(
f'Error on on_player_join for {activity}')
return player
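# Illustrative sketch of a minimal Session subclass (comments only; the activity class and
# dependency-set plumbing referenced here are hypothetical):
#
#   class MyMiniGameSession(Session):
#       def __init__(self, depsets: Sequence[ba.DependencySet]):
#           super().__init__(depsets, min_players=1, max_players=4)
#
#       def on_activity_end(self, activity: ba.Activity, results: Any) -> None:
#           # Decide what runs next, e.g. a score screen or the next mini-game:
#           self.set_activity(_ba.new_activity(MyScoreScreenActivity))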
|
the-stack_0_16957 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import codecs
import numpy as np
import re
import itertools
from collections import Counter
import os
# from gensim.models import word2vec
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
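# Illustrative example of the cleaning above (the input string is made up):
#   clean_str("I don't like it!")  ->  "i do n't like it !"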
def get_chinese_text():
if not os.path.isdir("data/"):
os.system("mkdir data/")
if (not os.path.exists('data/pos.txt')) or \
            (not os.path.exists('data/neg.txt')):
os.system("wget -q https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/chinese_text.zip -P data/")
os.chdir("./data")
os.system("unzip -u chinese_text.zip")
os.chdir("..")
def load_data_and_labels():
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# download dataset
get_chinese_text()
# Load data from files
positive_examples = list(codecs.open("./data/pos.txt", "r", "utf-8").readlines())
positive_examples = [s.strip() for s in positive_examples]
positive_examples = [pe for pe in positive_examples if len(pe) < 100]
negative_examples = list(codecs.open("./data/neg.txt", "r", "utf-8").readlines())
negative_examples = [s.strip() for s in negative_examples]
negative_examples = [ne for ne in negative_examples if len(ne) < 100]
# Split by words
x_text = positive_examples + negative_examples
# x_text = [clean_str(sent) for sent in x_text]
x_text = [list(s) for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def pad_sentences(sentences, padding_word="</s>"):
"""
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
def build_vocab(sentences):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary):
"""
Maps sentencs and labels to vectors based on a vocabulary.
"""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y]
def build_input_data_with_word2vec(sentences, labels, word2vec):
"""Map sentences and labels to vectors based on a pretrained word2vec"""
x_vec = []
for sent in sentences:
vec = []
for word in sent:
if word in word2vec:
vec.append(word2vec[word])
else:
vec.append(word2vec['</s>'])
x_vec.append(vec)
x_vec = np.array(x_vec)
y_vec = np.array(labels)
return [x_vec, y_vec]
def load_data_with_word2vec(word2vec):
"""
Loads and preprocessed data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels()
sentences_padded = pad_sentences(sentences)
# vocabulary, vocabulary_inv = build_vocab(sentences_padded)
return build_input_data_with_word2vec(sentences_padded, labels, word2vec)
def load_data():
"""
Loads and preprocessed data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels()
sentences_padded = pad_sentences(sentences)
vocabulary, vocabulary_inv = build_vocab(sentences_padded)
x, y = build_input_data(sentences_padded, labels, vocabulary)
return [x, y, vocabulary, vocabulary_inv]
def batch_iter(data, batch_size, num_epochs):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data) / batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
def load_pretrained_word2vec(infile):
if isinstance(infile, str):
infile = open(infile)
word2vec = {}
for idx, line in enumerate(infile):
if idx == 0:
vocab_size, dim = line.strip().split()
else:
tks = line.strip().split()
            word2vec[tks[0]] = list(map(float, tks[1:]))  # materialise; map() is lazy in Python 3
return word2vec
def load_google_word2vec(path):
    # Requires gensim; the module-level `from gensim.models import word2vec`
    # above is commented out, so import it here where it is actually used.
    from gensim.models import word2vec
    model = word2vec.Word2Vec.load_word2vec_format(path, binary=True)
    return model
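# Minimal usage sketch (illustrative): exercise the helpers above end-to-end.
# It assumes the dataset download in get_chinese_text() succeeds.
if __name__ == '__main__':
    x, y, vocabulary, vocabulary_inv = load_data()
    print('data:', x.shape, 'labels:', y.shape, 'vocab size:', len(vocabulary))
    for batch in batch_iter(list(zip(x, y)), batch_size=64, num_epochs=1):
        print('first batch size:', len(batch))
        break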
|
the-stack_0_16960 | #!/usr/bin/env python3
""" Parent class for color Adjustments for faceswap.py converter """
import logging
import numpy as np
from plugins.convert._config import Config
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def get_config(plugin_name, configfile=None):
""" Return the config for the requested model """
return Config(plugin_name, configfile=configfile).config_dict
class Adjustment():
""" Parent class for adjustments """
def __init__(self, configfile=None, config=None):
logger.debug("Initializing %s: (configfile: %s, config: %s)",
self.__class__.__name__, configfile, config)
self.config = self.set_config(configfile, config)
logger.debug("config: %s", self.config)
logger.debug("Initialized %s", self.__class__.__name__)
def set_config(self, configfile, config):
""" Set the config to either global config or passed in config """
section = ".".join(self.__module__.split(".")[-2:])
if config is None:
retval = get_config(section, configfile)
else:
config.section = section
retval = config.config_dict
config.section = None
logger.debug("Config: %s", retval)
return retval
def process(self, old_face, new_face, raw_mask):
""" Override for specific color adjustment process """
raise NotImplementedError
def run(self, old_face, new_face, raw_mask):
""" Perform selected adjustment on face """
logger.trace("Performing color adjustment")
# Remove Mask for processing
reinsert_mask = False
if new_face.shape[2] == 4:
reinsert_mask = True
final_mask = new_face[:, :, -1]
new_face = new_face[:, :, :3]
new_face = self.process(old_face, new_face, raw_mask)
new_face = np.clip(new_face, 0.0, 1.0)
if reinsert_mask and new_face.shape[2] != 4:
# Reinsert Mask
new_face = np.concatenate((new_face, np.expand_dims(final_mask, axis=-1)), -1)
logger.trace("Performed color adjustment")
return new_face
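# Illustrative only: a minimal subclass sketch showing how ``process`` is meant
# to be overridden. The class name and the simple mean-shift logic below are
# assumptions for demonstration, not one of the shipped faceswap plugins.
class _ExampleMeanShift(Adjustment):
    """ Shift the new face towards the mean colour of the old face """
    def process(self, old_face, new_face, raw_mask):
        # Per-channel difference of masked means; purely for illustration
        diff = (old_face * raw_mask).mean(axis=(0, 1)) - (new_face * raw_mask).mean(axis=(0, 1))
        return new_face + diff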
|
the-stack_0_16961 | # Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
from typing import Optional, Tuple, Union
import attr
from netaddr import IPAddress # type: ignore
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IResponse
from zope.interface import implementer
from sydent.http.httpcommon import read_body_with_max_size
from sydent.http.srvresolver import SrvResolver, pick_server_from_list
from sydent.util import json_decoder
from sydent.util.ttlcache import TTLCache
# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
# jitter to add to the .well-known default cache ttl
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
# The maximum size (in bytes) to allow a well-known file to be.
WELL_KNOWN_MAX_SIZE = 50 * 1024 # 50 KiB
logger = logging.getLogger(__name__)
well_known_cache = TTLCache("well-known")
@implementer(IAgent)
class MatrixFederationAgent:
"""An Agent-like thing which provides a `request` method which will look up a matrix
server and send an HTTP request to it.
Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
:param reactor: twisted reactor to use for underlying requests
:type reactor: IReactor
:param tls_client_options_factory: Factory to use for fetching client tls
options, or none to disable TLS.
:type tls_client_options_factory: ClientTLSOptionsFactory, None
:param _well_known_tls_policy: TLS policy to use for fetching .well-known
files. None to use a default (browser-like) implementation.
:type _well_known_tls_policy: IPolicyForHTTPS, None
:param _srv_resolver: SRVResolver impl to use for looking up SRV records.
None to use a default implementation.
:type _srv_resolver: SrvResolver, None
:param _well_known_cache: TTLCache impl for storing cached well-known
lookups. Omit to use a default implementation.
:type _well_known_cache: TTLCache
"""
def __init__(
self,
reactor,
tls_client_options_factory,
_well_known_tls_policy=None,
_srv_resolver: Optional["SrvResolver"] = None,
_well_known_cache: "TTLCache" = well_known_cache,
) -> None:
self._reactor = reactor
self._tls_client_options_factory = tls_client_options_factory
if _srv_resolver is None:
_srv_resolver = SrvResolver()
self._srv_resolver = _srv_resolver
self._pool = HTTPConnectionPool(reactor)
self._pool.retryAutomatically = False
self._pool.maxPersistentPerHost = 5
self._pool.cachedConnectionTimeout = 2 * 60
agent_args = {}
if _well_known_tls_policy is not None:
# the param is called 'contextFactory', but actually passing a
# contextfactory is deprecated, and it expects an IPolicyForHTTPS.
agent_args["contextFactory"] = _well_known_tls_policy
_well_known_agent = RedirectAgent(
Agent(self._reactor, pool=self._pool, **agent_args),
)
self._well_known_agent = _well_known_agent
# our cache of .well-known lookup results, mapping from server name
# to delegated name. The values can be:
# `bytes`: a valid server-name
# `None`: there is no (valid) .well-known here
self._well_known_cache = _well_known_cache
@defer.inlineCallbacks
def request(
self,
method: bytes,
uri: bytes,
headers: Optional["Headers"] = None,
bodyProducer: Optional["IBodyProducer"] = None,
) -> IResponse:
"""
:param method: HTTP method (GET/POST/etc).
:param uri: Absolute URI to be retrieved.
:param headers: HTTP headers to send with the request, or None to
send no extra headers.
:param bodyProducer: An object which can generate bytes to make up the
body of this request (for example, the properly encoded contents of
a file for a file upload). Or None if the request is to have
no body.
:returns a deferred that fires when the header of the response has
been received (regardless of the response status code). Fails if
there is any problem which prevents that response from being received
(including problems that prevent the request from being sent).
"""
parsed_uri = URI.fromBytes(uri, defaultPort=-1)
res = yield defer.ensureDeferred(self._route_matrix_uri(parsed_uri))
# set up the TLS connection params
#
# XXX disabling TLS is really only supported here for the benefit of the
# unit tests. We should make the UTs cope with TLS rather than having to make
# the code support the unit tests.
if self._tls_client_options_factory is None:
tls_options = None
else:
tls_options = self._tls_client_options_factory.get_options(
res.tls_server_name.decode("ascii")
)
# make sure that the Host header is set correctly
if headers is None:
headers = Headers()
else:
headers = headers.copy()
assert headers is not None
if not headers.hasHeader(b"host"):
headers.addRawHeader(b"host", res.host_header)
class EndpointFactory:
@staticmethod
def endpointForURI(_uri):
ep = LoggingHostnameEndpoint(
self._reactor,
res.target_host,
res.target_port,
)
if tls_options is not None:
ep = wrapClientTLS(tls_options, ep)
return ep
agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
res = yield agent.request(method, uri, headers, bodyProducer)
return res
async def _route_matrix_uri(
self, parsed_uri: "URI", lookup_well_known: bool = True
) -> "_RoutingResult":
"""Helper for `request`: determine the routing for a Matrix URI
:param parsed_uri: uri to route. Note that it should be parsed with
URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1 if there
is no explicit port given.
:param lookup_well_known: True if we should look up the .well-known
file if there is no SRV record.
:returns a routing result.
"""
# check for an IP literal
try:
ip_address = IPAddress(parsed_uri.host.decode("ascii"))
except Exception:
# not an IP address
ip_address = None
if ip_address:
port = parsed_uri.port
if port == -1:
port = 8448
return _RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=parsed_uri.host,
target_port=port,
)
if parsed_uri.port != -1:
# there is an explicit port
return _RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=parsed_uri.host,
target_port=parsed_uri.port,
)
if lookup_well_known:
# try a .well-known lookup
well_known_server = await self._get_well_known(parsed_uri.host)
if well_known_server:
# if we found a .well-known, start again, but don't do another
# .well-known lookup.
# parse the server name in the .well-known response into host/port.
# (This code is lifted from twisted.web.client.URI.fromBytes).
if b":" in well_known_server:
well_known_host, well_known_port = well_known_server.rsplit(b":", 1)
try:
well_known_port = int(well_known_port)
except ValueError:
# the part after the colon could not be parsed as an int
# - we assume it is an IPv6 literal with no port (the closing
# ']' stops it being parsed as an int)
well_known_host, well_known_port = well_known_server, -1
else:
well_known_host, well_known_port = well_known_server, -1
new_uri = URI(
scheme=parsed_uri.scheme,
netloc=well_known_server,
host=well_known_host,
port=well_known_port,
path=parsed_uri.path,
params=parsed_uri.params,
query=parsed_uri.query,
fragment=parsed_uri.fragment,
)
res = await self._route_matrix_uri(new_uri, lookup_well_known=False)
return res
# try a SRV lookup
service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
server_list = await self._srv_resolver.resolve_service(service_name)
if not server_list:
target_host = parsed_uri.host
port = 8448
logger.debug(
"No SRV record for %s, using %s:%i",
parsed_uri.host.decode("ascii"),
target_host.decode("ascii"),
port,
)
else:
target_host, port = pick_server_from_list(server_list)
logger.debug(
"Picked %s:%i from SRV records for %s",
target_host.decode("ascii"),
port,
parsed_uri.host.decode("ascii"),
)
return _RoutingResult(
host_header=parsed_uri.netloc,
tls_server_name=parsed_uri.host,
target_host=target_host,
target_port=port,
)
async def _get_well_known(self, server_name: bytes) -> Optional[bytes]:
"""Attempt to fetch and parse a .well-known file for the given server
:param server_name: Name of the server, from the requested url.
:returns either the new server name, from the .well-known, or None if
there was no .well-known file.
"""
try:
result = self._well_known_cache[server_name]
except KeyError:
# TODO: should we linearise so that we don't end up doing two .well-known
# requests for the same server in parallel?
result, cache_period = await self._do_get_well_known(server_name)
if cache_period > 0:
self._well_known_cache.set(server_name, result, cache_period)
return result
async def _do_get_well_known(
self, server_name: bytes
) -> Tuple[Union[bytes, None, object], int]:
"""Actually fetch and parse a .well-known, without checking the cache
:param server_name: Name of the server, from the requested url
:returns a tuple of (result, cache period), where result is one of:
- the new server name from the .well-known (as a `bytes`)
- None if there was no .well-known file.
- INVALID_WELL_KNOWN if the .well-known was invalid
"""
uri = b"https://%s/.well-known/matrix/server" % (server_name,)
uri_str = uri.decode("ascii")
logger.info("Fetching %s", uri_str)
try:
response = await self._well_known_agent.request(b"GET", uri)
body = await read_body_with_max_size(response, WELL_KNOWN_MAX_SIZE)
if response.code != 200:
raise Exception("Non-200 response %s" % (response.code,))
parsed_body = json_decoder.decode(body.decode("utf-8"))
logger.info("Response from .well-known: %s", parsed_body)
if not isinstance(parsed_body, dict):
raise Exception("not a dict")
if "m.server" not in parsed_body:
raise Exception("Missing key 'm.server'")
except Exception as e:
logger.info("Error fetching %s: %s", uri_str, e)
# add some randomness to the TTL to avoid a stampeding herd every hour
# after startup
cache_period: float = WELL_KNOWN_INVALID_CACHE_PERIOD
cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
return (None, cache_period)
result = parsed_body["m.server"].encode("ascii")
cache_period = _cache_period_from_headers(
response.headers,
time_now=self._reactor.seconds,
)
if cache_period is None:
cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd every 24 hours
# after startup
cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
else:
cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
return (result, cache_period)
@implementer(IStreamClientEndpoint)
class LoggingHostnameEndpoint:
"""A wrapper for HostnameEndpint which logs when it connects"""
def __init__(self, reactor, host, port, *args, **kwargs):
self.host = host
self.port = port
self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs)
logger.info("Endpoint created with %s:%d", host, port)
def connect(self, protocol_factory):
logger.info("Connecting to %s:%i", self.host.decode("ascii"), self.port)
return self.ep.connect(protocol_factory)
def _cache_period_from_headers(headers, time_now=time.time):
cache_controls = _parse_cache_control(headers)
if b"no-store" in cache_controls:
return 0
if b"max-age" in cache_controls:
try:
max_age = int(cache_controls[b"max-age"])
return max_age
except ValueError:
pass
expires = headers.getRawHeaders(b"expires")
if expires is not None:
try:
expires_date = stringToDatetime(expires[-1])
return expires_date - time_now()
except ValueError:
# RFC7234 says 'A cache recipient MUST interpret invalid date formats,
# especially the value "0", as representing a time in the past (i.e.,
# "already expired").
return 0
return None
def _parse_cache_control(headers):
cache_controls = {}
for hdr in headers.getRawHeaders(b"cache-control", []):
for directive in hdr.split(b","):
splits = [x.strip() for x in directive.split(b"=", 1)]
k = splits[0].lower()
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
@attr.s
class _RoutingResult:
"""The result returned by `_route_matrix_uri`.
Contains the parameters needed to direct a federation connection to a particular
server.
Where a SRV record points to several servers, this object contains a single server
chosen from the list.
"""
host_header = attr.ib()
"""
The value we should assign to the Host header (host:port from the matrix
URI, or .well-known).
:type: bytes
"""
tls_server_name = attr.ib()
"""
The server name we should set in the SNI (typically host, without port, from the
matrix URI or .well-known)
:type: bytes
"""
target_host = attr.ib()
"""
The hostname (or IP literal) we should route the TCP connection to (the target of the
SRV record, or the hostname from the URL/.well-known)
:type: bytes
"""
target_port = attr.ib()
"""
The port we should route the TCP connection to (the target of the SRV record, or
the port from the URL/.well-known, or 8448)
:type: int
"""
|
the-stack_0_16962 | from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as func
from torchsupport.interacting.off_policy_training import OffPolicyTraining
class AWACTraining(OffPolicyTraining):
def __init__(self, policy, value, agent, environment,
beta=1.0, clip=None, tau=5e-3, **kwargs):
        # Placeholder so the attribute exists before the parent constructor
        # registers the real value network from the auxiliary-network dict below.
        self.value = ...
super().__init__(
policy, agent, environment,
{"value": value}, **kwargs
)
self.beta = beta
self.clip = clip
self.tau = tau
self.target = deepcopy(value)
def update_target(self):
with torch.no_grad():
tp = self.target.parameters()
ap = self.value.parameters()
for t, a in zip(tp, ap):
t *= (1 - self.tau)
t += self.tau * a
def action_nll(self, policy, action):
return func.cross_entropy(policy, action, reduction='none')
def policy_loss(self, policy, action, advantage):
weight = torch.exp(advantage / self.beta)
if self.clip is not None:
weight = weight.clamp(0, self.clip)
negative_log_likelihood = self.action_nll(policy, action)
weighted_loss = negative_log_likelihood * weight
return weighted_loss.mean()
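    # Worked illustration of the weighting above (numbers are made up):
    #   advantage = +0.5, beta = 1.0 -> weight = exp(0.5)  ~= 1.65
    #   advantage = -2.0, beta = 1.0 -> weight = exp(-2.0) ~= 0.14
    # so actions that outperform the current value estimate dominate the
    # weighted negative log-likelihood, which is the advantage-weighted
    # regression step of AWAC.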
def state_value(self, state, value=None):
value = value or self.value
action_value = value(state)
policy = self.policy(state)
expected = action_value * policy.softmax(dim=1)
expected = expected.sum(dim=1)
return expected
def run_policy(self, sample):
initial_state = sample.initial_state
action = sample.action
with torch.no_grad():
action_value = self.value(initial_state)
inds = torch.arange(action.size(0), device=action.device)
action_value = action_value[inds, action]
value = self.state_value(initial_state)
advantage = action_value - value
self.current_losses["mean advantage"] = float(advantage.mean())
policy = self.policy(initial_state)
return policy, action, advantage
def auxiliary_loss(self, value, target):
return func.mse_loss(value.view(-1), target.view(-1))
def run_auxiliary(self, sample):
self.update_target()
initial_state = sample.initial_state
final_state = sample.final_state
action = sample.action
rewards = sample.rewards
action_value = self.value(initial_state)
inds = torch.arange(action.size(0), device=action.device)
action_value = action_value[inds, action]
with torch.no_grad():
state_value = self.state_value(
final_state, value=self.target
)
done_mask = 1.0 - sample.done.float()
target = rewards + self.discount * done_mask * state_value
self.current_losses["mean state value"] = float(state_value.mean())
self.current_losses["mean target value"] = float(target.mean())
return action_value, target
|
the-stack_0_16963 | # modified from https://github.com/raoyongming/DynamicViT and https://github.com/facebookresearch/deit
import argparse
import numpy as np
import torch.backends.cudnn as cudnn
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from deit.datasets import build_dataset2, get_post_process, build_dataset
import utils
from timm.utils import accuracy, ModelEma
from torchvision import utils as vutils
import torch
from torchvision import transforms
import models
from PIL import Image
import os
import pdb
import torch.nn.functional as F
import torch.nn as nn
def get_transform(input_size):
t = []
resize_im = (input_size != 224)
if resize_im:
        size = int((256 / 224) * input_size)
        t.append(
            transforms.Resize(size, interpolation=3),  # to maintain same ratio w.r.t. 224 images
        )
        t.append(transforms.CenterCrop(input_size))
t.append(transforms.ToTensor())
else:
t.append(transforms.ToTensor())
return transforms.Compose(t)
def get_keep_indices(decisions):
keep_indices = []
for i in range(3):
if i == 0:
keep_indices.append(decisions[i])
else:
keep_indices.append(keep_indices[-1][decisions[i]])
return keep_indices
def gen_masked_tokens(tokens, indices, alpha=0.3):
indices = [i for i in range(196) if i not in indices]
tokens = tokens.copy()
tokens[indices] = alpha * tokens[indices] + (1 - alpha) * 255
return tokens
def recover_image(tokens):
# image: (C, 196, 16, 16)
image = tokens.reshape(14, 14, 16, 16, 3).swapaxes(1, 2).reshape(224, 224, 3)
return image
def gen_visualization(image, keep_indices):
# keep_indices = get_keep_indices(decisions)
image_tokens = image.reshape(14, 16, 14, 16, 3).swapaxes(1, 2).reshape(196, 16, 16, 3)
viz = recover_image(gen_masked_tokens(image_tokens, keep_indices))
return viz
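# Shape contract for the helpers above (illustrative): `image` is a 224x224x3
# array treated as a 14x14 grid of 16x16 patches, and `keep_indices` lists the
# patch indices (0..195) that stay visible, e.g.
#   viz = gen_visualization(image, keep_indices=[0, 1, 14, 15])
# masks everything except the top-left 2x2 block of patches.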
def get_args_parser():
parser = argparse.ArgumentParser('DeiT training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
# Model parameters
parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters
parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./test_img/', help='path where to save')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_false', help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--excel_filename', type=str, default='attention_matrix_cls', help='filename of saving excel')
# visualization
parser.add_argument('--img-path', default='', type=str,
help='path to images to be visualized. Set '' to visualize batch images in imagenet val.')
parser.add_argument('--save-name', default='', type=str,
help='name to save when visualizing a single image. Set '' to save name as the original image.')
parser.add_argument('--layer-wise-prune', action='store_true',
help='set true when visualize a model trained without layer to stage training strategy')
return parser
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def unnormalize(input_tensor):
return (input_tensor*IMAGENET_DEFAULT_STD)+IMAGENET_DEFAULT_MEAN
def save_image_tensor(input_tensor: torch.Tensor, filename):
"""
"""
assert ((len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1) or len(input_tensor.shape) == 3)
input_tensor = input_tensor.clone().detach()
input_tensor = input_tensor.to(torch.device('cpu'))
vutils.save_image(input_tensor, filename)
@torch.no_grad()
def visualize_single_img(img_input, model, device, transform, post_process, save_name):
model.eval()
# img: 1, 3, H, W
image_raw = transform(img_input)
save_image_tensor(image_raw, Path(args.output_dir, '{}.jpg'.format(save_name)))
images = post_process(image_raw)
images = images.unsqueeze(0)
images = images.to(device, non_blocking=True)
print(images.shape)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
vis_dict = model.get_vis_dict()
image_raw = image_raw * 255
image_raw = image_raw.squeeze(0).permute(1, 2, 0).cpu().numpy()
for k in vis_dict:
keep_indices = vis_dict[k]
viz = gen_visualization(image_raw, keep_indices)
viz = torch.from_numpy(viz).permute(2, 0, 1)
viz = viz / 255
save_image_tensor(viz,
Path(args.output_dir, '{}_{}.jpg'.format(save_name, k)))
print("Visualization finished")
@torch.no_grad()
def visualize(data_loader, model, device, post_process):
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
# set stage_wise_prune = True if the trained model is under layer-to-stage training strategy
model.stage_wise_prune = not args.layer_wise_prune
threshold = 0.7 # the exit threshold of coarse stage
all_index = 0
for images_raw_full, target_full in metric_logger.log_every(data_loader, 10, header):
B = images_raw_full.shape[0]
for index in range(B):
all_index += 1
images_raw = images_raw_full[index:index + 1]
target = target_full[index:index + 1]
assert images_raw.shape[0] == 1
images = post_process(images_raw)
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# input
images_list = []
resized_img = F.interpolate(images, (112, 112), mode='bilinear', align_corners=True)
images_list.append(resized_img)
images_list.append(images)
# compute output
with torch.cuda.amp.autocast():
output = model(images_list)
            if nn.functional.softmax(output[0], dim=-1).max() > threshold:
output = output[0]
exit_stage = "coarse"
else:
output = output[1]
exit_stage = "fine"
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if acc1 == 0:
judger = 'wrong'
elif acc1 == 100:
judger = 'right'
else:
raise ValueError('xxxx')
if exit_stage == "coarse":
name = 'label{}_{}_coarse_index{}.jpg'.format(str(target.item()),judger,all_index)
save_image_tensor(images_raw, Path(args.output_dir, name))
continue
informative_index = model.get_vis_data()
images_raw = images_raw * 255
images_raw = images_raw.squeeze(0).permute(1, 2, 0).cpu().numpy()
keep_indices = informative_index.tolist()[0]
viz = gen_visualization(images_raw, keep_indices)
viz = torch.from_numpy(viz).permute(2, 0, 1)
viz = viz / 255
name = 'label{}_{}_{}_index{}.jpg'.format(
str(target.item()), judger,exit_stage, all_index)
save_image_tensor(viz, Path(args.output_dir, name))
batch_size = images.shape[0]
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
print("Visualization finished")
# gather the stats from all processes
metric_logger.synchronize_between_processes()
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def vis_single(args):
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
transform = get_transform(input_size=224) # set input_size to other value if the test image is not 224*224
post_process = get_post_process()
print("Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=1000,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None,
)
model.to(device)
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'])
img_input = Image.open(args.img_path)
if args.save_name == '':
save_name = os.path.basename(args.img_path).split('.')[0]
else:
save_name = args.save_name
if args.eval:
test_stats = visualize_single_img(img_input, model, device, transform, post_process, save_name=save_name)
return
def vis_batch(args):
utils.init_distributed_mode(args)
print(args)
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_val, args.nb_classes = build_dataset2(is_train=False, args=args)
post_process = get_post_process()
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
else:
sampler_val = torch.utils.data.RandomSampler(dataset_val)
else:
sampler_val = torch.utils.data.RandomSampler(dataset_val)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
)
print("Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None,
)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
if args.distillation_type != 'none':
assert args.teacher_path, 'need to specify teacher-path when using distillation'
print("Creating teacher model: {args.teacher_model}")
teacher_model = create_model(
args.teacher_model,
pretrained=False,
num_classes=args.nb_classes,
global_pool='avg',
)
if args.teacher_path.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.teacher_path, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.teacher_path, map_location='cpu')
teacher_model.load_state_dict(checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.model_ema:
utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
visualize(data_loader_val, model, device, post_process=post_process)
if __name__ == '__main__':
parser = argparse.ArgumentParser('DeiT training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
args.eval = True
if args.img_path == '':
# To visualize batch images of imagenet val, please run this:
vis_batch(args)
else:
# To visualize a single image, please run this:
vis_single(args)
|
the-stack_0_16964 | """为diagnosis应用定义路由"""
from django.urls import path
from django.conf.urls import url
from . import views
app_name = 'diagnosis'
urlpatterns = [
path('pic_upload/', views.PicUploadView.as_view(), name='pic_upload'),
path('pic_process/', views.PicProcessView.as_view(), name='pic_process'),
path('result/', views.ResultView.as_view(), name='result'),
path('main_layout/', views.MainLayoutView.as_view(), name='main_layout'),
path('result_list/', views.ResultListView.as_view(), name='result_list'),
path('result_detail/<num>/', views.ResultDetailView.as_view(), name='detail'),
path('append_result/<img_url>/', views.PicProcessView.getPicture),
path('update_result/<checked>/<result>/<idx>/', views.UpdateResultView.as_view())
]
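# Example (illustrative): reversing these routes from a view or template,
# assuming the app is included under the 'diagnosis/' prefix in the project
# URLconf:
#   reverse('diagnosis:detail', kwargs={'num': 3})  -> '/diagnosis/result_detail/3/'
#   {% url 'diagnosis:pic_upload' %}                -> '/diagnosis/pic_upload/'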
|
the-stack_0_16967 | from matplotlib import pyplot as plt
import numpy as np
def configure_ax_asia(ax, extent=None, tight_layout=True):
ax.coastlines(resolution='50m')
xticks = range(60, 160, 20)
ax.set_xticks(xticks)
ax.set_xticklabels([f'${t}\\degree$ E' for t in xticks])
ax.set_xticks(np.linspace(58, 150, 47), minor=True)
yticks = range(20, 60, 20)
ax.set_yticks(yticks)
ax.set_yticklabels([f'${t}\\degree$ N' for t in yticks])
ax.set_yticks(np.linspace(2, 56, 28), minor=True)
ax.tick_params(labelbottom=True, labeltop=False, labelleft=True, labelright=False,
bottom=True, top=True, left=True, right=True, which='both')
if extent is not None:
ax.set_xlim((extent[0], extent[1]))
ax.set_ylim((extent[2], extent[3]))
else:
ax.set_xlim((58, 150))
ax.set_ylim((2, 56))
if tight_layout:
plt.tight_layout()
|
the-stack_0_16968 | """
Configuration for django-hosts. Sets urlconf on the request to a module
under hipikat.urls matching the requested CNAME, if it's one of 'www' or
'blog'. (Outside of testing and development, it should be; configure the
web server should to redirect requests on hipikat.org/foo/bar
to www.hipikat.org/foo/bar)
"""
from django_hosts import patterns, host
host_patterns = patterns(
'hipikat.urls',
host(r'www', 'www', name='main_site'),
host(r'blog', 'blog', name='blog'),
host(r'broken', 'broken', name='not_configured'),
)
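# Example (illustrative): with this configuration django-hosts serves a request
# for blog.hipikat.org/2015/ with request.urlconf = 'hipikat.urls.blog', while
# www.hipikat.org falls through to 'hipikat.urls.www'.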
|
the-stack_0_16970 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave elicoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop elicoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing elicoind/elicoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: elicoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From" , f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ELICOIND", "elicoind"),
help="elicoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("ELICOIND", "elicoind"),
help="elicoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
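# Illustrative only: the smallest possible test built on BitcoinTestFramework.
# ExampleTest is a sketch for demonstration, not part of the framework itself.
class ExampleTest(BitcoinTestFramework):
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
    def run_test(self):
        # mine ten blocks on node 0 and check they propagate
        self.nodes[0].generate(10)
        self.sync_all()
        assert self.nodes[0].getblockcount() == 10
# A real test module would finish with:
#   if __name__ == '__main__':
#       ExampleTest().main()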
|
the-stack_0_16971 | import torch
from torch.cuda.amp import GradScaler, autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
import torch.utils.data.distributed as ddist
import torch.utils.data as td
from torch import nn, optim
from pytorch_quik import arg, ddp, io, metrics
from contextlib import nullcontext
from argparse import ArgumentParser, Namespace
from typing import Any, Callable, Dict, Optional
from dataclasses import dataclass, field, asdict
from tqdm import tqdm
from .mlflow import QuikMlflow
@dataclass
class World:
"""World related data."""
device: torch.device = field(init=False)
node_id: int = 0
total_nodes: int = 1
gpu_id: int = None
total_gpus: int = None
rank_id: int = field(init=False)
world_size: int = field(init=False)
use_ray_tune: bool = False
use_init_group: bool = False
is_ddp: bool = field(init=False)
is_logger: bool = field(init=False)
def __post_init__(self):
if self.gpu_id is None:
self.device = torch.device("cpu")
self.master_port = None
self.rank_id = None
self.world_size = None
self.is_ddp = False
else:
if self.use_ray_tune:
self.device = torch.device("cuda")
else:
self.device = torch.device("cuda", self.gpu_id)
self.rank_id = self.node_id * self.total_gpus + self.gpu_id
self.world_size = self.total_gpus * self.total_nodes
self.is_ddp = True
self.is_logger = not self.is_ddp
if self.gpu_id == 0:
self.is_logger = True
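    # Worked example (values are made up): with total_nodes=2, total_gpus=4,
    # node_id=1 and gpu_id=2, rank_id = 1 * 4 + 2 = 6 and world_size = 4 * 2 = 8.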
@dataclass
class DlKwargs:
"""Data loader keyword arguments."""
batch_size: int = 24
shuffle: bool = False
pin_memory: bool = True
num_workers: int = 0
@dataclass
class OptKwargs:
"""Optimizer keyword arguments"""
lr: int
weight_decay: int
eps: int
betas: tuple
class QuikTrek:
"""A class for maintaining the general data for the full trek to
be shared between travelers.
"""
def __init__(
self,
gpu: Optional[int] = None,
args: Optional[Namespace] = None,
):
"""Constructor, primarily adding learning arguments
and creating dataclasses."""
if args is None:
parser = arg.add_ddp_args(ArgumentParser())
parser = arg.add_learn_args(parser)
parser = arg.add_mlflow_args(parser)
parser = arg.add_ray_tune_args(parser)
args = parser.parse_args()
self.args = args
self.epochs = args.epochs
self.create_dataclasses(gpu, args)
self.trek_prep(args)
def create_dataclasses(self, gpu: int, args: Namespace):
"""Create a World, DlKwargs, and OptKwargs dataclass"""
self.world = World(
args.nr,
args.nodes,
gpu,
args.gpus,
getattr(args, "use_ray_tune", False),
getattr(args, "use_init_group", False),
)
self.dlkwargs = DlKwargs(
batch_size=args.bs,
num_workers=args.num_workers,
)
self.optkwargs = OptKwargs(
lr=args.lr,
weight_decay=args.weight_decay,
eps=args.eps,
betas=args.betas,
)
self.args.device = self.world.device
def trek_prep(self, args: Namespace):
"""Create the MLFlow run, clear the cuda cache,
and setup ddp (if ddp and not ray tune)."""
if args.use_mlflow and self.world.is_logger:
self.mlflow = QuikMlflow(self.args)
self.mlflow.create_run(
[
self.dlkwargs,
self.optkwargs,
self.world,
]
)
if self.world.device.type == "cuda":
torch.cuda.empty_cache()
if self.world.gpu_id is not None and not getattr(
self.args, "use_ray_tune", False
):
torch.cuda.set_device(self.world.device)
ddp.setup(self.world.gpu_id, self.world)
class QuikTraveler:
"""A class for traversing a model either in training, validation, or
testing. Is always a singular run - but can be part of a multi-GPU run.
"""
metrics = metrics.LossMetrics(0.99)
def __init__(self, trek, type: Optional[str] = None):
"""Constructor, primarily adding learning arguments
and creating the QuikAmp."""
self.type = type
self.world = trek.world
self.args = trek.args
self.trek = trek
self.find_unused_parameters = trek.args.find_unused_parameters
self.amp = QuikAmp(trek.args.mixed_precision)
def set_criterion(
self,
criterion_fcn: Callable[..., nn.Module],
kwargs: Optional[Dict[str, Any]] = {},
):
"""Add the criterion to the traveler"""
if callable(criterion_fcn):
self.criterion = criterion_fcn(**kwargs)
self.criterion.to(self.world.device)
def set_optimizer(
self,
optimizer_fcn: Callable[..., optim.Optimizer],
kwargs: Optional[Dict[str, Any]] = {},
):
"""Add the optimizer to the traveler"""
if hasattr(self.model, "module"):
self.optimizer = optimizer_fcn(
self.model.module.parameters(),
**asdict(self.trek.optkwargs),
**kwargs,
)
else:
self.optimizer = optimizer_fcn(
self.model.parameters(),
**asdict(self.trek.optkwargs),
**kwargs,
)
def set_scheduler(
self,
scheduler_fcn: Callable[..., optim.Optimizer],
kwargs: Optional[Dict[str, Any]] = {},
):
"""Add the scheduler to the traveler"""
self.scheduler = scheduler_fcn(
self.optimizer,
**kwargs,
)
def add_model(
self,
model: nn.Module,
state_dict: Optional[Dict[str, torch.Tensor]] = None,
):
"""Add the model to the traveler"""
self.model = model
if state_dict is not None:
self.model.load_state_dict(state_dict)
self.model.to(self.world.device)
if self.world.is_ddp:
self.model = DDP(
self.model,
device_ids=[self.world.device],
output_device=self.world.device,
find_unused_parameters=self.find_unused_parameters,
)
def add_data(self, tensorDataset: td.TensorDataset):
"""Add the dataloader (via QuikData) to the traveler"""
self.data = QuikData(
tensorDataset, self.world, self.trek.dlkwargs, self.trek.epochs
)
if self.type == "train":
self.metrics.steps = self.data.steps
def backward(self, loss: torch.Tensor, clip: Optional[bool] = True):
"""Run the model.backward (plus consider a scaler)."""
if hasattr(self.amp, "scaler"):
self.amp.backward(self, loss, clip)
else:
loss.backward()
            # step here to mirror the amp path, where QuikAmp.backward steps the optimizer
            if hasattr(self, "optimizer"):
self.optimizer.step()
def add_loss(self, loss: torch.Tensor, pbar: tqdm, epoch: int):
"""Add the training loss to the model metrics."""
self.metrics.add_loss(loss)
if self.args.use_mlflow and pbar is not None:
loss = self.metrics.metric_dict["train_loss"]
step = pbar.n + (self.metrics.steps * epoch)
self.trek.mlflow.log_metric("train_loss", loss, step)
def add_vloss(self, vlosses, nums, epoch):
"""Add the valid loss to the model metrics."""
self.metrics.add_vloss(vlosses, nums)
if self.args.use_mlflow and self.world.is_logger:
vloss = self.metrics.metric_dict["valid_loss"]
step = self.metrics.steps * (epoch + 1)
self.trek.mlflow.log_metric("valid_loss", vloss, step)
def save_state_dict(self, epoch):
""" "Run save_state_dict within the traveler"""
sd_id = io.save_state_dict(self.model, self.args, epoch)
if self.args.use_mlflow and self.world.is_logger:
self.trek.mlflow.log_artifact(str(sd_id))
def record_results(
self,
label_names,
accuracy: Optional[bool] = True,
f1: Optional[bool] = True,
confusion: Optional[bool] = True,
):
"""Record the confusion matrix and classification report both in
MLFlow and in the traveler."""
cm_id = io.id_str("confusion", self.args)
self.cm = metrics.build_confusion_matrix(
self.data.predictions,
self.data.labels,
label_names,
cm_id,
)
self.cr = metrics.build_class_dict(
self.data.predictions,
self.data.labels,
label_names,
)
if self.args.use_mlflow and self.world.is_logger:
self.trek.mlflow.log_artifact(cm_id)
            for metric, value in self.cr.items():
                self.trek.mlflow.log_metric(metric, value, 0)
class QuikData:
"""A class for providing data to a traveler."""
def __init__(
self,
tensorDataset: td.TensorDataset,
world: World,
dlkwargs: Dict[str, Any],
epochs: int,
):
"""Constructor, primarily adding the dataset and dataloader."""
self.dataset = tensorDataset
self.labels = self.dataset.tensors[2].cpu().numpy()
self.dlkwargs = dlkwargs
self.add_data_loader(world)
self.steps = len(self.data_loader)
self.total_steps = self.steps * epochs
def add_sampler(self, world, sampler_fcn=None, kwargs={}):
"""Adds the data sampler to the data"""
if world.is_ddp:
self.sampler = ddist.DistributedSampler(
self.dataset,
num_replicas=world.world_size,
rank=world.rank_id,
)
elif callable(sampler_fcn):
self.sampler = sampler_fcn(**kwargs)
else:
self.sampler = sampler_fcn
def add_data_loader(self, world):
"""Adds the data loader to the data"""
if not hasattr(self, "sampler"):
self.add_sampler(world)
self.data_loader = td.DataLoader(
dataset=self.dataset,
sampler=self.sampler,
**asdict(self.dlkwargs),
)
def add_results(self, predictions: torch.Tensor, labels: torch.Tensor):
"""Adds the predictions df and labels df to the data"""
self.predictions = predictions
self.labels = labels
class QuikAmp:
"""A class to manage automatic mixed precision. Provides
a nullcontext to your forward function if it's not being
used.
"""
def __init__(self, mixed_precision: bool):
"""Constructor, add the automatic mixed precision values of scaler and
autocast. If there's no amp, it adds a nullcontext as a callable for
the with statement."""
if mixed_precision:
self.scaler = GradScaler()
self.caster = autocast()
else:
self.caster = nullcontext()
def backward(
self,
trvlr: QuikTraveler,
loss: torch.Tensor,
clip: Optional[bool] = True,
):
"""Backward propogation with automatic mixed precision."""
self.scaler.scale(loss).backward()
# https://pytorch.org/docs/stable/notes/amp_examples.html#working-with-unscaled-gradients
if clip:
self.scaler.unscale_(trvlr.optimizer)
# https://discuss.pytorch.org/t/about-torch-nn-utils-clip-grad-norm/13873
if hasattr(trvlr.model, "module"):
clip_grad_norm_(trvlr.model.module.parameters(), 1.0)
else:
clip_grad_norm_(trvlr.model.parameters(), 1.0)
self.scaler.step(trvlr.optimizer)
self.scaler.update()
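# Hedged, standalone sketch (not used by the classes above): the canonical
# torch.cuda.amp training step that QuikAmp.backward mirrors -- scale the loss,
# unscale before clipping, then step and update the scaler. It assumes the
# autocast/clip_grad_norm_ names imported at the top of this module.
def _example_amp_step(model, optimizer, scaler, batch, targets, criterion):
    optimizer.zero_grad()
    with autocast():
        loss = criterion(model(batch), targets)
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)  # so clipping sees unscaled gradient norms
    clip_grad_norm_(model.parameters(), 1.0)
    scaler.step(optimizer)  # skips the step if gradients contain inf/nan
    scaler.update()
    return loss.detach()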
|
the-stack_0_16972 | # coding=utf-8
"""Author: Konrad Zemek
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Functions wrapping capabilities of docker binary.
"""
import json
import os
import subprocess
import sys
# noinspection PyDefaultArgument
def run(image, docker_host=None, detach=False, dns_list=[], add_host={},
envs={}, hostname=None, interactive=False, link={}, tty=False, rm=False,
reflect=[], volumes=[], name=None, workdir=None, user=None, group=None,
group_add=[], cpuset_cpus=None, privileged=False, run_params=[], command=None,
output=False, stdin=None, stdout=None, stderr=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.append('run')
if detach:
cmd.append('-d')
for addr in dns_list:
cmd.extend(['--dns', addr])
for key, value in add_host.iteritems():
cmd.extend(['--add-host', '{0}:{1}'.format(key, value)])
for key in envs:
cmd.extend(['-e', '{0}={1}'.format(key, envs[key])])
if hostname:
cmd.extend(['-h', hostname])
if detach or sys.__stdin__.isatty():
if interactive:
cmd.append('-i')
if tty:
cmd.append('-t')
for container, alias in link.items():
cmd.extend(['--link', '{0}:{1}'.format(container, alias)])
if name:
cmd.extend(['--name', name])
if rm:
cmd.append('--rm')
for path, read in reflect:
vol = '{0}:{0}:{1}'.format(os.path.abspath(path), read)
cmd.extend(['-v', vol])
# Volume can be in one of three forms
# 1. 'path_on_docker'
# 2. ('path_on_host', 'path_on_docker', 'ro'/'rw')
# 3. {'volumes_from': 'volume name'}
for entry in volumes:
if isinstance(entry, tuple):
path, bind, readable = entry
vol = '{0}:{1}:{2}'.format(os.path.abspath(path), bind, readable)
cmd.extend(['-v', vol])
elif isinstance(entry, dict):
volume_name = entry['volumes_from']
cmd.extend(['--volumes-from', volume_name])
else:
cmd.extend(['-v', entry])
if workdir:
cmd.extend(['-w', os.path.abspath(workdir)])
if user:
user_group = '{0}:{1}'.format(user, group) if group else user
cmd.extend(['-u', user_group])
for g in group_add:
cmd.extend(['--group-add', g])
if privileged:
cmd.append('--privileged')
if cpuset_cpus:
cmd.extend(['--cpuset-cpus', cpuset_cpus])
cmd.extend(run_params)
cmd.append(image)
if isinstance(command, basestring):
cmd.extend(['sh', '-c', command])
elif isinstance(command, list):
cmd.extend(command)
elif command is not None:
raise ValueError('{0} is not a string nor list'.format(command))
if detach or output:
return subprocess.check_output(cmd, stdin=stdin, stderr=stderr).decode(
'utf-8').strip()
return subprocess.call(cmd, stdin=stdin, stderr=stderr, stdout=stdout)
def exec_(container, command, docker_host=None, user=None, group=None,
detach=False, interactive=False, tty=False, privileged=False,
output=False, stdin=None, stdout=None, stderr=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.append('exec')
if user:
user_group = '{0}:{1}'.format(user, group) if group else user
cmd.extend(['-u', user_group])
if detach:
cmd.append('-d')
if detach or sys.__stdin__.isatty():
if interactive:
cmd.append('-i')
if tty:
cmd.append('-t')
if privileged:
cmd.append('--privileged')
cmd.append(container)
if isinstance(command, basestring):
cmd.extend(['sh', '-c', command])
elif isinstance(command, list):
cmd.extend(command)
else:
raise ValueError('{0} is not a string nor list'.format(command))
if detach or output:
return subprocess.check_output(cmd, stdin=stdin, stderr=stderr).decode(
'utf-8').strip()
return subprocess.call(cmd, stdin=stdin, stderr=stderr, stdout=stdout)
def inspect(container, docker_host=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.extend(['inspect', container])
out = subprocess.check_output(cmd, universal_newlines=True)
return json.loads(out)[0]
def logs(container, docker_host=None):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.extend(['logs', container])
return subprocess.check_output(cmd, universal_newlines=True,
stderr=subprocess.STDOUT)
def remove(containers, docker_host=None, force=False,
link=False, volumes=False):
cmd = ['docker']
if docker_host:
cmd.extend(['-H', docker_host])
cmd.append('rm')
if force:
cmd.append('-f')
if link:
cmd.append('-l')
if volumes:
cmd.append('-v')
cmd.extend(containers)
subprocess.check_call(cmd)
def cp(container, src_path, dest_path, to_container=False):
"""Copying file between docker container and host
:param container: str, docker id or name
:param src_path: str
:param dest_path: str
:param to_container: bool, if True file will be copied from host to
container, otherwise from docker container to host
"""
cmd = ["docker", "cp"]
if to_container:
cmd.extend([src_path, "{0}:{1}".format(container, dest_path)])
else:
cmd.extend(["{0}:{1}".format(container, src_path), dest_path])
subprocess.check_call(cmd)
def login(user, password, repository='hub.docker.com'):
"""Logs into docker repository."""
subprocess.check_call(['docker', 'login', '-u', user, '-p', password,
repository])
def build_image(image, build_args):
"""Builds and tags docker image."""
subprocess.check_call(['docker', 'build', '--no-cache', '--force-rm', '-t',
image] + build_args)
def tag_image(image, tag):
"""Tags docker image."""
subprocess.check_call(['docker', 'tag', image, tag])
def push_image(image):
"""Pushes docker image to the repository."""
subprocess.check_call(['docker', 'push', image])
def pull_image(image):
"""Pulls docker image from the repository."""
subprocess.check_call(['docker', 'pull', image])
def remove_image(image):
"""Removes docker image."""
subprocess.check_call(['docker', 'rmi', '-f', image])
def create_volume(path, name, image, command):
cmd = ['docker']
cmd.append('create')
cmd.append('-v')
cmd.append(path)
cmd.append('--name')
cmd.append(name)
cmd.append(image)
cmd.append(command)
return subprocess.check_output(cmd, universal_newlines=True,
stderr=subprocess.STDOUT)
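if __name__ == '__main__':
    # Hedged usage sketch: exercises the helpers above against a local docker
    # daemon. The image name, container name, and commands are placeholders,
    # not anything this module ships with or requires.
    cid = run('alpine:3.7', detach=True, name='docker_py_example',
              command='sleep 30')
    print(inspect(cid)['State']['Status'])
    print(exec_(cid, ['echo', 'hello from the container'], output=True))
    remove([cid], force=True, volumes=True)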
|
the-stack_0_16973 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.project.volumes.snapshots \
import tables as snapshots_tables
from openstack_dashboard.dashboards.project.volumes.volumes \
import tables as volumes_tables
class UpdateVolumeSnapshotStatus(tables.LinkAction):
name = "update_status"
verbose_name = _("Update Status")
url = "horizon:admin:volumes:snapshots:update_status"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume",
"snapshot_extension:snapshot_actions:"
"update_snapshot_status"),)
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, snapshot_id):
snapshot = cinder.volume_snapshot_get(request, snapshot_id)
snapshot._volume = cinder.volume_get(request, snapshot.volume_id)
snapshot.host_name = getattr(snapshot._volume,
'os-vol-host-attr:host')
tenant_id = getattr(snapshot._volume,
'os-vol-tenant-attr:tenant_id')
try:
tenant = keystone.tenant_get(request, tenant_id)
snapshot.tenant_name = getattr(tenant, "name")
except Exception:
msg = _('Unable to retrieve volume project information.')
exceptions.handle(request, msg)
return snapshot
class VolumeSnapshotsTable(volumes_tables.VolumesTableBase):
name = tables.WrappingColumn("name", verbose_name=_("Name"),
link="horizon:admin:volumes:snapshots:detail")
volume_name = snapshots_tables.SnapshotVolumeNameColumn(
"name", verbose_name=_("Volume Name"),
link="horizon:admin:volumes:volumes:detail")
host = tables.Column("host_name", verbose_name=_("Host"))
tenant = tables.Column("tenant_name", verbose_name=_("Project"))
class Meta(object):
name = "volume_snapshots"
verbose_name = _("Volume Snapshots")
pagination_param = 'snapshot_marker'
prev_pagination_param = 'prev_snapshot_marker'
table_actions = (snapshots_tables.VolumeSnapshotsFilterAction,
snapshots_tables.DeleteVolumeSnapshot,)
row_actions = (snapshots_tables.DeleteVolumeSnapshot,
UpdateVolumeSnapshotStatus,
snapshots_tables.UpdateMetadata)
row_class = UpdateRow
status_columns = ("status",)
columns = ('tenant', 'host', 'name', 'description', 'size', 'status',
'volume_name',)
|
the-stack_0_16975 | import sys
import vlc
import json
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtGui import QPixmap
from PyQt5 import uic
from internationalization import LANGUAGE
class Menu(QMainWindow):
def __init__(self, lang, username):
QMainWindow.__init__(self)
uic.loadUi("windows/Menu.ui", self)
self.lang = lang
self.reload_text()
self.username = username
death_star_image = QPixmap("resources/death_star.png")
self.image.setPixmap(death_star_image)
self.cantina_song = vlc.MediaPlayer("resources/cantina.mp3")
self.cantina_song.play()
self.play_button.clicked.connect(self.go_to_play)
self.leaderboards_button.clicked.connect(self.open_leaderboards)
self.exit_button.clicked.connect(self.close_game)
def showEvent(self, event):
"""Play the game song when the window appears
This is an override method"""
self.cantina_song.play()
def closeEvent(self, event):
"""Stop the game song when the window close
This is an override method"""
self.cantina_song.stop()
def go_to_play(self):
"""Go to create lobby window"""
from chooseSideWindow import ChooseSide
self.choose = ChooseSide(self.lang, self.username)
self.choose.show()
self.close()
def open_leaderboards(self):
"""Show the leaderboards window"""
from leaderboardsWindow import Leaderboards
self.leader = Leaderboards(self.lang)
self.leader.show()
def reload_text(self):
"""Change the language of the window according to the chosen previously"""
self.language = LANGUAGE.get(self.lang)
self.leaderboards_button.setText(self.language["leaderboards"])
self.exit_button.setText(self.language["exit"])
self.setWindowTitle(self.language["menu"])
self.play_button.setText(self.language["play"])
def close_game(self):
"""Close the game window"""
self.close()
|
the-stack_0_16978 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
for k in range(5):
dataFile = "../setup/data/data_set_" + str(k).zfill(3) + ".dat"
x = np.loadtxt(dataFile)
plt.plot(x[:, 0], x[:, 1], '-o')
plt.autoscale(enable=True, axis='x', tight=True)
plt.xlabel('time')
plt.ylabel('noisy logistic data')
plt.title('All data sets')
plt.grid(True)
plt.show()
|
the-stack_0_16979 | from functools import partial
from inspect import isclass
from graphql_relay import from_global_id, to_global_id
from ..types import ID, Field, Interface, ObjectType
from ..types.interface import InterfaceOptions
from ..types.utils import get_type
def is_node(objecttype):
"""
Check if the given objecttype has Node as an interface
"""
if not isclass(objecttype):
return False
if not issubclass(objecttype, ObjectType):
return False
for i in objecttype._meta.interfaces:
if issubclass(i, Node):
return True
return False
class GlobalID(Field):
def __init__(self, node=None, parent_type=None, required=True, *args, **kwargs):
super(GlobalID, self).__init__(ID, required=required, *args, **kwargs)
self.node = node or Node
self.parent_type_name = parent_type._meta.name if parent_type else None
@staticmethod
def id_resolver(parent_resolver, node, root, info, parent_type_name=None, **args):
type_id = parent_resolver(root, info, **args)
parent_type_name = parent_type_name or info.parent_type.name
return node.to_global_id(parent_type_name, type_id) # root._meta.name
def get_resolver(self, parent_resolver):
return partial(
self.id_resolver,
parent_resolver,
self.node,
parent_type_name=self.parent_type_name,
)
class NodeField(Field):
def __init__(self, node, type=False, **kwargs):
assert issubclass(node, Node), "NodeField can only operate in Nodes"
self.node_type = node
self.field_type = type
super(NodeField, self).__init__(
            # If we don't specify a type, the field type will be the node
            # interface
type or node,
id=ID(required=True, description="The ID of the object"),
**kwargs
)
def get_resolver(self, parent_resolver):
return partial(self.node_type.node_resolver, get_type(self.field_type))
class AbstractNode(Interface):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, **options):
_meta = InterfaceOptions(cls)
_meta.fields = {"id": GlobalID(cls, description="The ID of the object")}
super(AbstractNode, cls).__init_subclass_with_meta__(_meta=_meta, **options)
class Node(AbstractNode):
"""An object with an ID"""
@classmethod
def Field(cls, *args, **kwargs): # noqa: N802
return NodeField(cls, *args, **kwargs)
@classmethod
def node_resolver(cls, only_type, root, info, id):
return cls.get_node_from_global_id(info, id, only_type=only_type)
@classmethod
def get_node_from_global_id(cls, info, global_id, only_type=None):
try:
_type, _id = cls.from_global_id(global_id)
graphene_type = info.schema.get_type(_type).graphene_type
except Exception:
return None
if only_type:
assert graphene_type == only_type, ("Must receive a {} id.").format(
only_type._meta.name
)
# We make sure the ObjectType implements the "Node" interface
if cls not in graphene_type._meta.interfaces:
return None
get_node = getattr(graphene_type, "get_node", None)
if get_node:
return get_node(info, _id)
@classmethod
def from_global_id(cls, global_id):
return from_global_id(global_id)
@classmethod
def to_global_id(cls, type, id):
return to_global_id(type, id)
|
the-stack_0_16980 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for model output tensors
defined in :py:class:`monai.transforms.post.array`.
Class names end with 'd' to denote dictionary-based transforms.
"""
from typing import Optional
from monai.config.type_definitions import KeysCollection
from monai.utils.misc import ensure_tuple_rep
from monai.transforms.compose import MapTransform
from monai.transforms.post.array import SplitChannel, Activations, AsDiscrete, KeepLargestConnectedComponent
class SplitChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SplitChannel`.
    All inputs specified by `keys` should be split into the same number of outputs.
"""
def __init__(self, keys: KeysCollection, output_postfixes, to_onehot=False, num_classes=None):
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
            output_postfixes (list, tuple): the postfixes to construct keys to store split data.
for example: if the key of input data is `pred` and split 2 classes, the output
data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1])
to_onehot (bool or list of bool): whether to convert the data to One-Hot format, default is False.
num_classes (int or list of int): the class number used to convert to One-Hot format
if `to_onehot` is True.
"""
super().__init__(keys)
if not isinstance(output_postfixes, (list, tuple)):
raise ValueError("must specify key postfixes to store splitted data.")
self.output_postfixes = output_postfixes
self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))
self.num_classes = ensure_tuple_rep(num_classes, len(self.keys))
self.splitter = SplitChannel()
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
rets = self.splitter(d[key], self.to_onehot[idx], self.num_classes[idx])
            assert len(self.output_postfixes) == len(rets), "count of split results must match output_postfixes."
for i, r in enumerate(rets):
d[f"{key}_{self.output_postfixes[i]}"] = r
return d
class Activationsd(MapTransform):
"""
    Dictionary-based wrapper of :py:class:`monai.transforms.Activations`.
Add activation layers to the input data specified by `keys`.
"""
def __init__(self, keys: KeysCollection, output_postfix: str = "act", sigmoid=False, softmax=False, other=None):
"""
Args:
keys: keys of the corresponding items to model output and label.
See also: :py:class:`monai.transforms.compose.MapTransform`
output_postfix: the postfix string to construct keys to store converted data.
for example: if the keys of input data is `pred` and `label`, output_postfix is `act`,
the output data keys will be: `pred_act`, `label_act`.
if set to None, will replace the original data with the same key.
sigmoid (bool, tuple or list of bool): whether to execute sigmoid function on model
output before transform.
softmax (bool, tuple or list of bool): whether to execute softmax function on model
output before transform.
other (Callable, tuple or list of Callables): callable function to execute other activation layers,
for example: `other = lambda x: torch.tanh(x)`
"""
super().__init__(keys)
if output_postfix is not None and not isinstance(output_postfix, str):
raise ValueError("output_postfix must be a string.")
self.output_postfix = output_postfix
self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))
self.softmax = ensure_tuple_rep(softmax, len(self.keys))
self.other = ensure_tuple_rep(other, len(self.keys))
self.converter = Activations()
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
ret = self.converter(d[key], self.sigmoid[idx], self.softmax[idx], self.other[idx])
output_key = key if self.output_postfix is None else f"{key}_{self.output_postfix}"
d[output_key] = ret
return d
class AsDiscreted(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AsDiscrete`.
"""
def __init__(
self,
keys: KeysCollection,
output_postfix: str = "discreted",
argmax: bool = False,
to_onehot: bool = False,
n_classes: Optional[int] = None,
threshold_values: bool = False,
logit_thresh: float = 0.5,
):
"""
Args:
keys: keys of the corresponding items to model output and label.
See also: :py:class:`monai.transforms.compose.MapTransform`
output_postfix: the postfix string to construct keys to store converted data.
for example: if the keys of input data is `pred` and `label`, output_postfix is `discreted`,
the output data keys will be: `pred_discreted`, `label_discreted`.
if set to None, will replace the original data with the same key.
argmax: whether to execute argmax function on input data before transform.
to_onehot: whether to convert input data into the one-hot format. Defaults to False.
n_classes: the number of classes to convert to One-Hot format.
threshold_values: whether threshold the float value to int number 0 or 1, default is False.
logit_thresh: the threshold value for thresholding operation, default is 0.5.
"""
super().__init__(keys)
if output_postfix is not None and not isinstance(output_postfix, str):
raise ValueError("output_postfix must be a string.")
self.output_postfix = output_postfix
self.argmax = ensure_tuple_rep(argmax, len(self.keys))
self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))
self.n_classes = ensure_tuple_rep(n_classes, len(self.keys))
self.threshold_values = ensure_tuple_rep(threshold_values, len(self.keys))
self.logit_thresh = ensure_tuple_rep(logit_thresh, len(self.keys))
self.converter = AsDiscrete()
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
output_key = key if self.output_postfix is None else f"{key}_{self.output_postfix}"
d[output_key] = self.converter(
d[key],
self.argmax[idx],
self.to_onehot[idx],
self.n_classes[idx],
self.threshold_values[idx],
self.logit_thresh[idx],
)
return d
class KeepLargestConnectedComponentd(MapTransform):
"""
    Dictionary-based wrapper of :py:class:`monai.transforms.KeepLargestConnectedComponent`.
"""
def __init__(
self,
keys: KeysCollection,
applied_labels,
independent: bool = True,
connectivity: Optional[int] = None,
output_postfix: str = "largestcc",
):
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
applied_labels (int, list or tuple of int): Labels for applying the connected component on.
If only one channel. The pixel whose value is not in this list will remain unchanged.
If the data is in one-hot format, this is used to determine what channels to apply.
independent (bool): consider several labels as a whole or independent, default is `True`.
Example use case would be segment label 1 is liver and label 2 is liver tumor, in that case
you want this "independent" to be specified as False.
connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.
Accepted values are ranging from 1 to input.ndim. If ``None``, a full
connectivity of ``input.ndim`` is used.
output_postfix: the postfix string to construct keys to store converted data.
for example: if the keys of input data is `label`, output_postfix is `largestcc`,
the output data keys will be: `label_largestcc`.
if set to None, will replace the original data with the same key.
"""
super().__init__(keys)
if output_postfix is not None and not isinstance(output_postfix, str):
raise ValueError("output_postfix must be a string.")
self.output_postfix = output_postfix
self.converter = KeepLargestConnectedComponent(applied_labels, independent, connectivity)
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
output_key = key if self.output_postfix is None else f"{key}_{self.output_postfix}"
d[output_key] = self.converter(d[key])
return d
SplitChannelD = SplitChannelDict = SplitChanneld
ActivationsD = ActivationsDict = Activationsd
AsDiscreteD = AsDiscreteDict = AsDiscreted
KeepLargestConnectedComponentD = KeepLargestConnectedComponentDict = KeepLargestConnectedComponentd
|
the-stack_0_16981 | '''
Web browser GUI-launched tasks run in a separate process. This module provides
mechanisms for interacting withe the task running in another process, e.g.,
calling functions to start/stop the task, enabling/disabling decoder adaptation, etc.
'''
import os
import sys
import time
import xmlrpc.client
import multiprocessing as mp
import collections
from riglib import experiment
from riglib.mp_proxy import FuncProxy
from . import websocket
from config import config
from .json_param import Parameters
import io
import traceback
log_filename = os.path.join(config.log_path, "tasktrack_log")
def log_error(err, mode='a'):
traceback.print_exc(None, err)
with open(log_filename, mode) as fp:
err.seek(0)
fp.write(err.read())
def log_str(s, mode="a", newline=True):
if newline and not s.endswith("\n"):
s += "\n"
with open(log_filename, mode) as fp:
fp.write(s)
class Track(object):
'''
Tracker for task instantiation running in a separate process. This is a singleton.
'''
def __init__(self, use_websock=True):
# shared memory to store the status of the task in a char array
self.status = mp.Array('c', 256)
self.reset()
self.proc = None
self.init_pipe()
if use_websock:
self.websock = websocket.Server(self.notify)
else:
self.websock = None
def init_pipe(self):
self.tracker_end_of_pipe, self.task_end_of_pipe = mp.Pipe()
def notify(self, msg):
if msg['status'] == "error" or msg['State'] == "stopped":
self.status.value = b""
def runtask(self, **kwargs):
'''
Begin running of task
'''
log_str("Running new task: \n", mode="w")
self.init_pipe()
# initialize task status
# self.status.value = b"testing" if 'saveid' in kwargs else b"running"
self.status.value = b"running" if 'saveid' in kwargs else b"testing"
# create a proxy for interacting with attributes/functions of the task.
# The task runs in a separate process and we cannot directly access python
# attributes of objects in other processes
self.task_proxy = TaskObjProxy(self.tracker_end_of_pipe)
# Spawn the process
args = (self.tracker_end_of_pipe, self.task_end_of_pipe, self.websock)
print("Track.runtask")
print(kwargs)
if 'seq' in kwargs:
kwargs['seq_params'] = kwargs['seq'].params
            kwargs['seq'] = kwargs['seq'].get() ## retrieve the database data on this end of the pipe
print(kwargs['seq'])
self.task_args = args
self.task_kwargs = kwargs
self.proc = mp.Process(target=remote_runtask, args=args, kwargs=kwargs)
log_str("Spawning process...")
log_str(str(kwargs))
self.proc.start()
def __del__(self):
'''
Destructor for Track object. Not sure if this function ever gets called
since Track is a singleton created upon import of the db.tracker.ajax module...
'''
if not self.websock is None:
self.websock.stop()
# def pausetask(self):
# self.status.value = bytes(self.task_proxy.pause())
def stoptask(self):
'''
Terminate the task gracefully by running riglib.experiment.Experiment.end_task
'''
assert self.status.value in [b"testing", b"running"]
try:
self.task_proxy.end_task()
except Exception as e:
traceback.print_exc()
err = io.StringIO()
traceback.print_exc(None, err)
err.seek(0)
return dict(status="error", msg=err.read())
status = self.status.value.decode("utf-8")
self.status.value = b""
self.reset()
return status
def reset(self):
self.task_proxy = None
self.task_kwargs = {}
self.task_args = ()
def get_status(self):
return self.status.value.decode("utf-8")
def update_alive(self):
""" Check if the remote process is still alive, and if dead, reset the task_proxy object """
if (not self.proc is None) and (not self.proc.is_alive()):
print("process died in error, destroying proxy object")
self.reset()
def task_running(self):
print(self.get_status())
return self.get_status() in ["running", "testing"]
def remote_runtask(tracker_end_of_pipe, task_end_of_pipe, websock, **kwargs):
'''
Target function to execute in the spawned process to start the task
'''
log_str("remote_runtask")
print("*************************** STARTING TASK *****************************")
use_websock = not (websock is None)
# Rerout prints to stdout to the websocket
if use_websock:
sys.stdout = websock
# os.nice sets the 'niceness' of the task, i.e. how willing the process is
# to share resources with other OS processes. Zero is neutral
if not sys.platform == "win32":
os.nice(0)
status = "running" if 'saveid' in kwargs else "testing"
# Force all tasks to use the Notify feature defined above.
if use_websock:
kwargs['params']['websock'] = websock
kwargs['feats'].insert(0, websocket.NotifyFeat)
kwargs['params']['tracker_status'] = status
kwargs['params']['tracker_end_of_pipe'] = tracker_end_of_pipe
try:
# Instantiate the task
task_wrapper = TaskWrapper(**kwargs)
print("Created task wrapper..")
cmd = task_end_of_pipe.recv()
log_str("Initial command: " + str(cmd))
# Rerout prints to stdout to the websocket
if use_websock: sys.stdout = websock
while (cmd is not None) and (task_wrapper.task.state is not None):
log_str('remote command received: %s, %s, %s\n' % cmd)
try:
fn_name = cmd[0]
cmd_args = cmd[1]
cmd_kwargs = cmd[2]
# look up the function by name
fn = getattr(task_wrapper, fn_name)
# run the function and save the return value as a single object
# if an exception is thrown, the code will jump to the last 'except' case
ret = fn(*cmd_args, **cmd_kwargs)
log_str("return value: %s\n" % str(ret))
# send the return value back to the remote process
task_end_of_pipe.send(ret)
# hang and wait for the next command to come in
log_str("task state = %s, stop status=%s, waiting for next command...\n" % (task_wrapper.task.state, str(task_wrapper.task.stop)))
cmd = task_end_of_pipe.recv()
except KeyboardInterrupt:
# Handle the KeyboardInterrupt separately. How the hell would
# a keyboard interrupt even get here?
cmd = None
except Exception as e:
err = io.StringIO()
log_error(err, mode='a')
task_end_of_pipe.send(e)
if task_end_of_pipe.poll(60.):
cmd = task_end_of_pipe.recv()
else:
cmd = None
log_str('Done with command: %s\n\n' % fn_name)
except:
task_wrapper = None
err = io.StringIO()
log_error(err, mode='a')
err.seek(0)
if use_websock:
websock.send(dict(status="error", msg=err.read()))
err.seek(0)
print(err.read())
log_str('End of task while loop\n')
# Redirect printing from the websocket back to the shell
if use_websock:
websock.write("Running task cleanup functions....\n")
sys.stdout = sys.__stdout__
print("Running task cleanup functions....\n")
# Initiate task cleanup
if task_wrapper is None:
print("\nERROR: Task was never initialized, cannot run cleanup function!")
print("see %s for error messages" % log_filename)
if 'saveid' in kwargs:
from . import dbq
dbq.hide_task_entry(kwargs['saveid'])
print('hiding task entry!')
cleanup_successful = False
else:
log_str("Starting cleanup...")
cleanup_successful = task_wrapper.cleanup()
# inform the user in the browser that the task is done!
if cleanup_successful == True or cleanup_successful is None:
if use_websock: websock.write("\n\n...done!\n")
else:
if use_websock: websock.write("\n\nError! Check for errors in the terminal!\n")
print("*************************** EXITING TASK *****************************")
class TaskWrapper(object):
'''
Wrapper for Experiment classes launched from the web interface
'''
def __init__(self, subj, base_class, feats, params, seq=None, seq_params=None, saveid=None):
'''
Parameters
----------
subj : tracker.models.Subject instance
Database record for subject performing the task
base_class : a child class of riglib.experiment.Experiment
The base class for the task, without the feature mixins
feats : list
List of features to enable for the task
params : json_param.Parameters, or string representation of JSON object
user input on configurable task parameters
seq : models.Sequence instance, or tuple
Database record of Sequence parameters/static target sequence
If passed in as a tuple, then it's the result of calling 'seq.get' on the models.Sequence instance
seq_params: params from seq (see above)
saveid : int, optional
ID number of db.tracker.models.TaskEntry associated with this task
if None specified, then the data saved will not be linked to the
database entry and will be lost after the program exits
'''
log_str("TaskWrapper constructor")
self.saveid = saveid
self.subj = subj
if isinstance(params, Parameters):
self.params = params
elif isinstance(params, str):
self.params = Parameters(params)
elif isinstance(params, dict):
self.params = Parameters.from_dict(params)
if None in feats:
raise Exception("Features not found properly in database!")
else:
Task = experiment.make(base_class, feats=feats)
# Run commands which must be executed before the experiment class can be instantiated (e.g., starting neural recording)
Task.pre_init(saveid=saveid)
self.params.trait_norm(Task.class_traits())
if issubclass(Task, experiment.Sequence):
# from . import models
            # retrieve the sequence data from the db, or from the input argument if the input arg was a tuple
if isinstance(seq, tuple):
gen_constructor, gen_params = seq
elif hasattr(seq, 'get'): #isinstance(seq, models.Sequence):
gen_constructor, gen_params = seq.get()
# Typically, 'gen_constructor' is the experiment.generate.runseq function (not an element of namelist.generators)
else:
raise ValueError("Unrecognized type for seq")
gen = gen_constructor(Task, **gen_params)
self.params.params['seq_params'] = seq_params
# 'gen' is now a true python generator usable by experiment.Sequence
self.task = Task(gen, **self.params.params)
log_str("instantiating task with a generator\n")
else:
self.task = Task(**self.params.params)
self.task.start()
def report(self):
return experiment.report(self.task)
def pause(self):
self.task.pause = not self.task.pause
return "pause" if self.task.pause else "running"
def end_task(self):
return self.task.end_task()
def enable_clda(self):
self.task.enable_clda()
def disable_clda(self):
self.task.disable_clda()
def get_state(self):
return self.task.state
def __getattr__(self, attr):
""" Redirect attribute access to the task object if the attribute can't be found in the wrapper """
try:
return self.task.__getattribute__(attr)
except:
raise AttributeError("Could not get task attribute: %s" % attr)
def set_task_attr(self, attr, value):
setattr(self.task, attr, value)
def cleanup(self):
self.task.join()
print("Calling saveout/task cleanup code")
if self.saveid is not None:
# get object representing function calls to the remote database
# returns the result of tracker.dbq.rpc_handler
database = xmlrpc.client.ServerProxy("http://localhost:8000/RPC2/", allow_none=True)
# from tracker import dbq as database
cleanup_successful = self.task.cleanup(database, self.saveid, subject=self.subj)
# if not self.task._task_init_complete:
# from tracker import dbq
# dbq.hide_task_entry(self.saveid)
# print 'hiding task entry!'
# else:
# print 'not hiding task entry!'
else:
cleanup_successful = True
self.task.terminate()
return cleanup_successful
class TaskObjProxy(object):
def __init__(self, tracker_end_of_pipe):
self.tracker_end_of_pipe = tracker_end_of_pipe
def __getattr__(self, attr):
log_str("remotely getting attribute: %s\n" % attr)
self.tracker_end_of_pipe.send(("__getattr__", [attr], {}))
ret = self.tracker_end_of_pipe.recv()
if isinstance(ret, Exception):
            # Assume that the attribute can't be retrieved b/c the name refers
            # to a function
ret = FuncProxy(attr, self.tracker_end_of_pipe)
return ret
def end_task(self):
end_task_fn = FuncProxy("end_task", self.tracker_end_of_pipe)
end_task_fn()
self.tracker_end_of_pipe.send(None)
def remote_set_attr(self, attr, value):
log_str('trying to remotely set attribute %s to %s\n' % (attr, value))
ret = FuncProxy('set_task_attr', self.tracker_end_of_pipe)
ret(attr, value)
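if __name__ == "__main__":
    # Hedged, single-process sketch of the proxy protocol used above: one end
    # of a Pipe sends (function_name, args, kwargs) tuples; the other end
    # dispatches with getattr and sends the result back, mirroring FuncProxy
    # and the command loop in remote_runtask. (Importing this module still
    # requires the full rig environment; this only illustrates the protocol.)
    target = "hello"
    caller_end, worker_end = mp.Pipe()
    caller_end.send(("upper", (), {}))
    name, cmd_args, cmd_kwargs = worker_end.recv()
    worker_end.send(getattr(target, name)(*cmd_args, **cmd_kwargs))
    print(caller_end.recv())  # HELLO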
|
the-stack_0_16982 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
python lr_generator.py
"""
import numpy as np
from mindspore import Tensor
from src.config import cfg
def lr_generator(lr_init, total_epochs, steps_per_epoch):
lr_each_step = []
for i in range(total_epochs):
if i in cfg.schedule:
lr_init *= cfg.gamma
for _ in range(steps_per_epoch):
lr_each_step.append(lr_init)
lr_each_step = np.array(lr_each_step).astype(np.float32)
return Tensor(lr_each_step)
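if __name__ == "__main__":
    # Hedged example: build a per-step schedule for 3 epochs of 10 steps each.
    # The decay milestones and factor come from cfg.schedule / cfg.gamma in
    # src/config.py, which this module already imports.
    schedule = lr_generator(0.1, total_epochs=3, steps_per_epoch=10)
    print(schedule.shape)  # (30,)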
|
the-stack_0_16983 | import json
from query.selection import AndClause, Compound, OrClause, ScalarSelector
class TempoIQEncoder(json.JSONEncoder):
def encode_point(self, point):
return {
't': self.encode_datetime(point.timestamp),
'v': point.value}
def encode_datetime(self, dt):
return dt.isoformat()
class WriteEncoder(TempoIQEncoder):
encoders = {
'Device': 'encode_device',
'Sensor': 'encode_sensor',
'Point': 'encode_point',
'datetime': 'encode_datetime',
'Rule': 'encode_rule',
'Trigger': 'encode_trigger',
'Webhook': 'encode_webhook'
}
def default(self, o):
encoder_name = self.encoders.get(o.__class__.__name__)
if encoder_name is None:
super(TempoIQEncoder, self).default(o)
encoder = getattr(self, encoder_name)
return encoder(o)
def encode_condition(self, condition):
return {
'trigger': self.encode_trigger(condition.trigger),
'filter': {
'and': map(self.encode_filter, condition.filters)
}
}
def encode_device(self, device):
return device.key
def encode_filter(self, _filter):
return {
'operation': _filter.inclusion,
'type': _filter.filter_type,
'arguments': _filter.args
}
def encode_rule(self, rule):
read_encoder = ReadEncoder()
j = {
'conditions': map(self.encode_condition, rule.conditions),
'name': rule.name,
'alerts': rule.alert_by,
'actions': [self.default(rule.action)],
'selection': {
'search': {
'filters': {
'devices': read_encoder.default(
rule.selection['devices']),
'sensors': read_encoder.default(
rule.selection['sensors'])
}
}
}
}
if rule.key is not None:
j['key'] = rule.key
return j
def encode_sensor(self, sensor):
return sensor.key
def encode_trigger(self, trigger):
return {
'name': trigger.trigger_type,
'arguments': trigger.args
}
def encode_webhook(self, webhook):
return {
'url': webhook.url
}
class CreateEncoder(TempoIQEncoder):
encoders = {
'Device': 'encode_device',
'Sensor': 'encode_sensor'
}
def default(self, o):
encoder_name = self.encoders.get(o.__class__.__name__)
if encoder_name is None:
super(TempoIQEncoder, self).default(o)
encoder = getattr(self, encoder_name)
return encoder(o)
def encode_device(self, device):
return {
'key': device.key,
'name': device.name,
'attributes': device.attributes,
'sensors': map(self.encode_sensor, device.sensors)
}
def encode_sensor(self, sensor):
return {
'key': sensor.key,
'name': sensor.name,
'attributes': sensor.attributes
}
class ReadEncoder(TempoIQEncoder):
encoders = {
'Point': 'encode_point',
'datetime': 'encode_datetime',
'ScalarSelector': 'encode_scalar_selector',
'AndClause': 'encode_compound_clause',
'OrClause': 'encode_compound_clause',
'QueryBuilder': 'encode_query_builder',
'Selection': 'encode_selection',
'Find': 'encode_function',
'Interpolation': 'encode_function',
'MultiRollup': 'encode_function',
'Rollup': 'encode_function',
'Aggregation': 'encode_function',
'ConvertTZ': 'encode_function'
}
def default(self, o):
encoder_name = self.encoders.get(o.__class__.__name__)
if encoder_name is None:
super(TempoIQEncoder, self).default(o)
encoder = getattr(self, encoder_name)
return encoder(o)
def encode_compound_clause(self, clause):
name = None
if isinstance(clause, AndClause):
name = 'and'
else:
name = 'or'
result = []
for selector in clause.selectors:
if isinstance(selector, (AndClause, OrClause)):
result.append(self.encode_compound_clause(selector))
elif isinstance(selector, ScalarSelector):
result.append(self.encode_scalar_selector(selector))
else:
raise ValueError("invalid selector type")
return {
name: result
}
def encode_function(self, function):
return {
'name': function.name,
'arguments': function.args
}
def encode_query_builder(self, builder):
j = {
'search': {
'select': builder.object_type,
'filters': {
'devices': self.encode_selection(
builder.selection['devices']),
'sensors': self.encode_selection(
builder.selection['sensors'])
}
},
builder.operation.name: builder.operation.args
}
if not j['search']['filters']['devices']:
if not j['search']['filters']['sensors']:
j['search']['filters']['devices'] = 'all'
j['search']['filters']['sensors'] = 'all'
else:
j['search']['filters']['devices'] = 'all'
else:
if not j['search']['filters']['sensors']:
j['search']['filters']['sensors'] = 'all'
if len(builder.pipeline) > 0:
j['fold'] = {
'functions': map(self.encode_function, builder.pipeline)
}
return j
def encode_scalar_selector(self, selector):
return {
selector.key: selector.value
}
def encode_selection(self, selection):
if selection.selection is None:
return {}
if isinstance(selection.selection, Compound):
if len(selection.selection.selectors) == 0:
return {}
else:
return self.default(selection.selection)
return self.encode_scalar_selector(selection.selection)
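if __name__ == "__main__":
    # Hedged usage sketch: the encoders above dispatch json.JSONEncoder.default
    # on the object's class name, so a bare datetime is serialized through
    # encode_datetime() as an ISO-8601 string.
    import datetime
    print(json.dumps({"at": datetime.datetime(2015, 1, 1)}, cls=WriteEncoder))
    # {"at": "2015-01-01T00:00:00"}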
|
the-stack_0_16984 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Boot Interface for iLO drivers and its supporting methods.
"""
import os
import tempfile
from ironic_lib import utils as ironic_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six.moves.urllib.parse as urlparse
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import image_service
from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
REQUIRED_PROPERTIES = {
'ilo_deploy_iso': _("UUID (from Glance) of the deployment ISO. "
"Required.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
def parse_driver_info(node):
"""Gets the driver specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the driver_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
"""
info = node.driver_info
d_info = {}
d_info['ilo_deploy_iso'] = info.get('ilo_deploy_iso')
error_msg = _("Error validating iLO virtual media deploy. Some parameters"
" were missing in node's driver_info")
deploy_utils.check_for_missing_params(d_info, error_msg)
return d_info
def _get_boot_iso_object_name(node):
"""Returns the boot iso object name for a given node.
:param node: the node for which object name is to be provided.
"""
return "boot-%s" % node.uuid
def _get_boot_iso(task, root_uuid):
"""This method returns a boot ISO to boot the node.
It chooses one of the three options in the order as below:
1. Does nothing if 'ilo_boot_iso' is present in node's instance_info and
'boot_iso_created_in_web_server' is not set in 'driver_internal_info'.
2. Image deployed has a meta-property 'boot_iso' in Glance. This should
refer to the UUID of the boot_iso which exists in Glance.
3. Generates a boot ISO on the fly using kernel and ramdisk mentioned in
the image deployed. It uploads the generated boot ISO to Swift.
:param task: a TaskManager instance containing the node to act on.
:param root_uuid: the uuid of the root partition.
:returns: boot ISO URL. Should be either of below:
* A Swift object - It should be of format 'swift:<object-name>'. It is
assumed that the image object is present in
CONF.ilo.swift_ilo_container;
* A Glance image - It should be format 'glance://<glance-image-uuid>'
or just <glance-image-uuid>;
* An HTTP URL.
On error finding the boot iso, it returns None.
:raises: MissingParameterValue, if any of the required parameters are
missing in the node's driver_info or instance_info.
:raises: InvalidParameterValue, if any of the parameters have invalid
value in the node's driver_info or instance_info.
:raises: SwiftOperationError, if operation with Swift fails.
:raises: ImageCreationFailed, if creation of boot ISO failed.
:raises: exception.ImageRefValidationFailed if ilo_boot_iso is not
HTTP(S) URL.
"""
LOG.debug("Trying to get a boot ISO to boot the baremetal node")
# Option 1 - Check if user has provided ilo_boot_iso in node's
# instance_info
driver_internal_info = task.node.driver_internal_info
boot_iso_created_in_web_server = (
driver_internal_info.get('boot_iso_created_in_web_server'))
if (task.node.instance_info.get('ilo_boot_iso')
and not boot_iso_created_in_web_server):
LOG.debug("Using ilo_boot_iso provided in node's instance_info")
boot_iso = task.node.instance_info['ilo_boot_iso']
if not service_utils.is_glance_image(boot_iso):
try:
image_service.HttpImageService().validate_href(boot_iso)
except exception.ImageRefValidationFailed:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Virtual media deploy accepts only Glance "
"images or HTTP(S) URLs as "
"instance_info['ilo_boot_iso']. Either %s "
"is not a valid HTTP(S) URL or is "
"not reachable."), boot_iso)
return task.node.instance_info['ilo_boot_iso']
# Option 2 - Check if user has provided a boot_iso in Glance. If boot_iso
# is a supported non-glance href execution will proceed to option 3.
deploy_info = _parse_deploy_info(task.node)
image_href = deploy_info['image_source']
image_properties = (
images.get_image_properties(
task.context, image_href, ['boot_iso', 'kernel_id', 'ramdisk_id']))
boot_iso_uuid = image_properties.get('boot_iso')
kernel_href = (task.node.instance_info.get('kernel') or
image_properties.get('kernel_id'))
ramdisk_href = (task.node.instance_info.get('ramdisk') or
image_properties.get('ramdisk_id'))
if boot_iso_uuid:
LOG.debug("Found boot_iso %s in Glance", boot_iso_uuid)
return boot_iso_uuid
if not kernel_href or not ramdisk_href:
LOG.error(_LE("Unable to find kernel or ramdisk for "
"image %(image)s to generate boot ISO for %(node)s"),
{'image': image_href, 'node': task.node.uuid})
return
# NOTE(rameshg87): Functionality to share the boot ISOs created for
# similar instances (instances with same deployed image) is
# not implemented as of now. Creation/Deletion of such a shared boot ISO
# will require synchronisation across conductor nodes for the shared boot
# ISO. Such a synchronisation mechanism doesn't exist in ironic as of now.
# Option 3 - Create boot_iso from kernel/ramdisk, upload to Swift
# or web server and provide its name.
deploy_iso_uuid = deploy_info['ilo_deploy_iso']
boot_mode = deploy_utils.get_boot_mode_for_deploy(task.node)
boot_iso_object_name = _get_boot_iso_object_name(task.node)
kernel_params = CONF.pxe.pxe_append_params
with tempfile.NamedTemporaryFile(dir=CONF.tempdir) as fileobj:
boot_iso_tmp_file = fileobj.name
images.create_boot_iso(task.context, boot_iso_tmp_file,
kernel_href, ramdisk_href,
deploy_iso_uuid, root_uuid,
kernel_params, boot_mode)
if CONF.ilo.use_web_server_for_images:
boot_iso_url = (
ilo_common.copy_image_to_web_server(boot_iso_tmp_file,
boot_iso_object_name))
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = True
task.node.driver_internal_info = driver_internal_info
task.node.save()
LOG.debug("Created boot_iso %(boot_iso)s for node %(node)s",
{'boot_iso': boot_iso_url, 'node': task.node.uuid})
return boot_iso_url
else:
container = CONF.ilo.swift_ilo_container
swift_api = swift.SwiftAPI()
swift_api.create_object(container, boot_iso_object_name,
boot_iso_tmp_file)
LOG.debug("Created boot_iso %s in Swift", boot_iso_object_name)
return 'swift:%s' % boot_iso_object_name
def _clean_up_boot_iso_for_instance(node):
"""Deletes the boot ISO if it was created for the instance.
:param node: an ironic node object.
"""
ilo_boot_iso = node.instance_info.get('ilo_boot_iso')
if not ilo_boot_iso:
return
if ilo_boot_iso.startswith('swift'):
swift_api = swift.SwiftAPI()
container = CONF.ilo.swift_ilo_container
boot_iso_object_name = _get_boot_iso_object_name(node)
try:
swift_api.delete_object(container, boot_iso_object_name)
except exception.SwiftOperationError as e:
LOG.exception(_LE("Failed to clean up boot ISO for node "
"%(node)s. Error: %(error)s."),
{'node': node.uuid, 'error': e})
elif CONF.ilo.use_web_server_for_images:
result = urlparse.urlparse(ilo_boot_iso)
ilo_boot_iso_name = os.path.basename(result.path)
boot_iso_path = os.path.join(
CONF.deploy.http_root, ilo_boot_iso_name)
ironic_utils.unlink_without_raise(boot_iso_path)
def _parse_deploy_info(node):
"""Gets the instance and driver specific Node deployment info.
This method validates whether the 'instance_info' and 'driver_info'
property of the supplied node contains the required information for
this driver to deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info and driver_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
info = {}
info.update(deploy_utils.get_image_instance_info(node))
info.update(parse_driver_info(node))
return info
class IloVirtualMediaBoot(base.BootInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate the deployment information for the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue, if some information is invalid.
:raises: MissingParameterValue if 'kernel_id' and 'ramdisk_id' are
missing in the Glance image or 'kernel' and 'ramdisk' not provided
in instance_info for non-Glance image.
"""
node = task.node
d_info = _parse_deploy_info(node)
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
else:
props = ['kernel', 'ramdisk']
deploy_utils.validate_image_properties(task.context, d_info, props)
def prepare_ramdisk(self, task, ramdisk_params):
"""Prepares the boot of deploy ramdisk using virtual media.
This method prepares the boot of the deploy ramdisk after
reading relevant information from the node's driver_info and
instance_info.
:param task: a task from TaskManager.
:param ramdisk_params: the parameters to be passed to the ramdisk.
:returns: None
:raises: MissingParameterValue, if some information is missing in
node's driver_info or instance_info.
:raises: InvalidParameterValue, if some information provided is
invalid.
        :raises: IronicException, if some power or set boot device
operation failed on the node.
:raises: IloOperationError, if some operation on iLO failed.
"""
node = task.node
# NOTE(TheJulia): If this method is being called by something
# aside from deployment and clean, such as conductor takeover, we
# should treat this as a no-op and move on otherwise we would modify
# the state of the node due to virtual media operations.
if (node.provision_state != states.DEPLOYING and
node.provision_state != states.CLEANING):
return
# Clear ilo_boot_iso if it's a glance image to force recreate
# another one again (or use existing one in glance).
# This is mainly for rebuild scenario.
if service_utils.is_glance_image(
node.instance_info.get('image_source')):
instance_info = node.instance_info
instance_info.pop('ilo_boot_iso', None)
node.instance_info = instance_info
node.save()
# Eject all virtual media devices, as we are going to use them
# during deploy.
ilo_common.eject_vmedia_devices(task)
deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)
ramdisk_params['BOOTIF'] = deploy_nic_mac
deploy_iso = node.driver_info['ilo_deploy_iso']
ilo_common.setup_vmedia(task, deploy_iso, ramdisk_params)
def prepare_instance(self, task):
"""Prepares the boot of instance.
This method prepares the boot of the instance after reading
relevant information from the node's instance_info.
It does the following depending on boot_option for deploy:
- If the boot_option requested for this deploy is 'local' or image
is a whole disk image, then it sets the node to boot from disk.
- Otherwise it finds/creates the boot ISO to boot the instance
image, attaches the boot ISO to the bare metal and then sets
the node to boot from CDROM.
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
ilo_common.cleanup_vmedia_boot(task)
# For iscsi_ilo driver, we boot from disk every time if the image
# deployed is a whole disk image.
node = task.node
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if deploy_utils.get_boot_option(node) == "local" or iwdi:
manager_utils.node_set_boot_device(task, boot_devices.DISK,
persistent=True)
else:
drv_int_info = node.driver_internal_info
root_uuid_or_disk_id = drv_int_info.get('root_uuid_or_disk_id')
if root_uuid_or_disk_id:
self._configure_vmedia_boot(task, root_uuid_or_disk_id)
else:
LOG.warning(_LW("The UUID for the root partition could not "
"be found for node %s"), node.uuid)
def clean_up_instance(self, task):
"""Cleans up the boot of instance.
This method cleans up the environment that was setup for booting
the instance. It ejects virtual media
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
_clean_up_boot_iso_for_instance(task.node)
driver_internal_info = task.node.driver_internal_info
driver_internal_info.pop('boot_iso_created_in_web_server', None)
driver_internal_info.pop('root_uuid_or_disk_id', None)
task.node.driver_internal_info = driver_internal_info
task.node.save()
ilo_common.cleanup_vmedia_boot(task)
def clean_up_ramdisk(self, task):
"""Cleans up the boot of ironic ramdisk.
This method cleans up virtual media devices setup for the deploy
ramdisk.
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
ilo_common.cleanup_vmedia_boot(task)
def _configure_vmedia_boot(self, task, root_uuid):
"""Configure vmedia boot for the node.
:param task: a task from TaskManager.
:param root_uuid: uuid of the root partition
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
node = task.node
boot_iso = _get_boot_iso(task, root_uuid)
if not boot_iso:
LOG.error(_LE("Cannot get boot ISO for node %s"), node.uuid)
return
# Upon deploy complete, some distros cloud images reboot the system as
# part of its configuration. Hence boot device should be persistent and
# not one-time.
ilo_common.setup_vmedia_for_boot(task, boot_iso)
manager_utils.node_set_boot_device(task,
boot_devices.CDROM,
persistent=True)
i_info = node.instance_info
i_info['ilo_boot_iso'] = boot_iso
node.instance_info = i_info
node.save()
|
the-stack_0_16985 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch.nn.functional as F
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
import numpy as np
def resize_pos_embed(posemb, posemb_new):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
ntok_new = posemb_new.shape[1]
if True:
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
gs_new = int(math.sqrt(ntok_new))
#_logger.info('Position embedding grid-size from %s to %s', gs_old, gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
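# Example (added for illustration; not part of the original file): resizing a
# ViT-B/32 position embedding from a 7x7 patch grid (49 patches + 1 class
# token) to a 12x12 grid. The shapes below are illustrative only.
if __name__ == "__main__":
    _old_posemb = torch.randn(1, 50, 768)        # 1 class token + 7*7 patches
    _new_posemb = torch.zeros(1, 145, 768)       # 1 class token + 12*12 patches
    print(resize_pos_embed(_old_posemb, _new_posemb).shape)  # torch.Size([1, 145, 768])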
def initialize_clip(VISUAL_CONFIG, num_patches = 240, adapter_config=None):
import clip
clip_model, preprocess = clip.load(VISUAL_CONFIG.clip_model_name, jit=False, adapter_config=adapter_config)
if VISUAL_CONFIG.clip_model_name == "ViT-B/32" and VISUAL_CONFIG.reset_pos_embedding:
#from timm.models.vision_transformer import resize_pos_embed
pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())
pos_embed.weight = resize_pos_embed(clip_model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0))
clip_model.visual.positional_embedding = pos_embed
# model.visual.positional_embedding = model.visual.positional_embedding.to("cuda")
#print(model.visual.positional_embedding.device)
# pass
if VISUAL_CONFIG.freeze_clip:
for parameter in clip_model.parameters():
parameter.requires_grad = False
return clip_model
def initialize_vit(VISUAL_CONFIG, model_type = "ViT-B_32", pretrained_dir = "data/ViT-B_32.npz", img_size = (384, 640), num_patches = 240):
from vit.models.modeling import VisionTransformer, CONFIGS
config = CONFIGS[model_type]
model = VisionTransformer(config, img_size = 224, zero_head=True, num_classes=1)
model.load_from(np.load(pretrained_dir))
pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())
pos_embed.weight = resize_pos_embed(model.transformer.embeddings.position_embeddings, pos_embed.unsqueeze(0))
model.transformer.embeddings.position_embeddings = pos_embed
if VISUAL_CONFIG.freeze_clip:
for parameter in model.parameters():
parameter.requires_grad = False
return model
def initialize_optimizer(visual_model, lr, momentum, weight_decay):
optimizer = torch.optim.SGD(visual_model.parameters(), lr,
momentum=momentum,
weight_decay=weight_decay)
return optimizer
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.sgd_lr
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
from torch.optim import Optimizer
class FusedOptimizer(Optimizer):
def __init__(self, optimizers):
self.optimizers = optimizers
param_groups = []
for optimizer in self.optimizers:
param_groups += optimizer.param_groups
#super(FusedOptimizer, self).__init__([], {})
self.param_groups = param_groups
def step(self):
for optimizer in self.optimizers:
optimizer.step()
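# Usage sketch (added; not in the original module): a FusedOptimizer steps two
# wrapped optimizers with a single call. The tiny modules and hyper-parameters
# below are placeholders.
if __name__ == "__main__":
    _visual = nn.Linear(10, 10)
    _head = nn.Linear(10, 2)
    _opt_a = torch.optim.SGD(_visual.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
    _opt_b = torch.optim.Adam(_head.parameters(), lr=1e-4)
    _fused = FusedOptimizer([_opt_a, _opt_b])
    _loss = (_head(_visual(torch.randn(4, 10))) ** 2).mean()
    _loss.backward()
    _fused.step()  # steps both wrapped optimizers in order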
|
the-stack_0_16986 | # Copyright (c) 2013 NTT DOCOMO, INC.
# Copyright 2014 IBM Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The bare-metal admin extension."""
from oslo_utils import importutils
import webob
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
import jacket.compute.conf
from jacket.i18n import _
ironic_client = importutils.try_import('ironicclient.client')
ironic_exc = importutils.try_import('ironicclient.exc')
ALIAS = "os-baremetal-nodes"
authorize = extensions.os_compute_authorizer(ALIAS)
node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address',
'pm_user', 'service_host', 'terminal_port', 'instance_uuid']
node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']
interface_fields = ['id', 'address', 'datapath_id', 'port_no']
CONF = jacket.compute.conf.CONF
CONF.import_opt('api_version',
'jacket.compute.virt.ironic.driver',
group='ironic')
CONF.import_opt('api_endpoint',
'jacket.compute.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_username',
'jacket.compute.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_password',
'jacket.compute.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_tenant_name',
'jacket.compute.virt.ironic.driver',
group='ironic')
def _check_ironic_client_enabled():
"""Check whether Ironic is installed or not."""
if ironic_client is None:
common.raise_feature_not_supported()
def _get_ironic_client():
"""return an Ironic client."""
# TODO(NobodyCam): Fix insecure setting
kwargs = {'os_username': CONF.ironic.admin_username,
'os_password': CONF.ironic.admin_password,
'os_auth_url': CONF.ironic.admin_url,
'os_tenant_name': CONF.ironic.admin_tenant_name,
'os_service_type': 'baremetal',
'os_endpoint_type': 'public',
'insecure': 'true',
'ironic_url': CONF.ironic.api_endpoint}
icli = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
return icli
def _no_ironic_proxy(cmd):
raise webob.exc.HTTPBadRequest(
explanation=_("Command Not supported. Please use Ironic "
"command %(cmd)s to perform this "
"action.") % {'cmd': cmd})
class BareMetalNodeController(wsgi.Controller):
"""The Bare-Metal Node API controller for the OpenStack API."""
def _node_dict(self, node_ref):
d = {}
for f in node_fields:
d[f] = node_ref.get(f)
for f in node_ext_fields:
d[f] = node_ref.get(f)
return d
@extensions.expected_errors((404, 501))
def index(self, req):
context = req.environ['compute.context']
authorize(context)
nodes = []
# proxy command to Ironic
_check_ironic_client_enabled()
icli = _get_ironic_client()
ironic_nodes = icli.node.list(detail=True)
for inode in ironic_nodes:
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0)}
nodes.append(node)
return {'nodes': nodes}
@extensions.expected_errors((404, 501))
def show(self, req, id):
context = req.environ['compute.context']
authorize(context)
# proxy command to Ironic
_check_ironic_client_enabled()
icli = _get_ironic_client()
try:
inode = icli.node.get(id)
except ironic_exc.NotFound:
msg = _("Node %s could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
iports = icli.node.list_ports(id)
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0),
'instance_uuid': inode.instance_uuid}
for port in iports:
node['interfaces'].append({'address': port.address})
return {'node': node}
@extensions.expected_errors(400)
def create(self, req, body):
_no_ironic_proxy("port-create")
@extensions.expected_errors(400)
def delete(self, req, id):
_no_ironic_proxy("port-create")
@wsgi.action('add_interface')
@extensions.expected_errors(400)
def _add_interface(self, req, id, body):
_no_ironic_proxy("port-create")
@wsgi.action('remove_interface')
@extensions.expected_errors(400)
def _remove_interface(self, req, id, body):
_no_ironic_proxy("port-delete")
class BareMetalNodes(extensions.V21APIExtensionBase):
"""Admin-only bare-metal node administration."""
name = "BareMetalNodes"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
BareMetalNodeController(),
member_actions={"action": "POST"})]
return resource
def get_controller_extensions(self):
"""It's an abstract function V21APIExtensionBase and the extension
will not be loaded without it.
"""
return []
|
the-stack_0_16987 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
from .DCNv2.dcn_v2 import DCN
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
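# Worked example (added for clarity): for a 4x4 transposed-conv kernel,
# f = ceil(4 / 2) = 2 and c = (2*2 - 1 - 0) / 4 = 0.75, so the 1-D profile
# (1 - |i/f - c|) over i = 0..3 is [0.25, 0.75, 0.75, 0.25]; each channel's
# kernel is the outer product of that profile with itself (first row
# 0.0625, 0.1875, 0.1875, 0.0625), i.e. bilinear-upsampling weights that the
# network can then fine-tune.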
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class PoseResNet(nn.Module):
def __init__(self, block, layers, heads, head_conv):
self.inplanes = 64
self.heads = heads
self.deconv_with_bias = False
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
3,
[256, 128, 64],
[4, 4, 4],
)
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
assert num_layers == len(num_filters), \
'ERROR: num_deconv_layers is different len(num_deconv_filters)'
assert num_layers == len(num_kernels), \
'ERROR: num_deconv_layers is different len(num_deconv_filters)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
fc = DCN(self.inplanes, planes,
kernel_size=(3, 3), stride=1,
padding=1, dilation=1, deformable_groups=1)
# fc = nn.Conv2d(self.inplanes, planes,
# kernel_size=3, stride=1,
# padding=1, dilation=1, bias=False)
# fill_fc_weights(fc)
up = nn.ConvTranspose2d(
in_channels=planes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias)
fill_up_weights(up)
layers.append(fc)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
layers.append(up)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
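    # Note (added for clarity): each deconv stage above is DCN -> BN -> ReLU
    # followed by a bilinear-initialized ConvTranspose2d (x2 upsampling) -> BN
    # -> ReLU, so the three stages take the stride-32 ResNet feature map back
    # to stride 4 before the per-task heads are applied.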
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.deconv_layers(x)
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
def init_weights(self, num_layers):
if 1:
url = model_urls['resnet{}'.format(num_layers)]
pretrained_state_dict = model_zoo.load_url(url)
print('=> loading pretrained model {}'.format(url))
self.load_state_dict(pretrained_state_dict, strict=False)
print('=> init deconv weights from normal distribution')
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv=256, opt=None):
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
model.init_weights(num_layers)
return model
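# Usage sketch (added; not part of the original file): building a ResNet-18
# backbone with CenterNet-style heads. Assumes the DCNv2 extension is built
# and that the pretrained ResNet weights can be downloaded; head names and
# sizes below are illustrative.
if __name__ == "__main__":
    heads = {'hm': 80, 'wh': 2, 'reg': 2}  # heatmap, box size, center offset
    net = get_pose_net(num_layers=18, heads=heads, head_conv=64)
    out = net(torch.randn(1, 3, 512, 512))[0]
    print({k: v.shape for k, v in out.items()})  # stride-4 maps, i.e. 128x128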
|
the-stack_0_16988 | import numpy as np
import scipy as sp
import scipy.stats  # explicit import so that sp.stats.linregress is available
import logging
from beatmap import io as io
from beatmap import utils as util
from beatmap import vis as figs
from collections import namedtuple
def bet(iso_df, a_o, info, *args):
"""
Performs BET analysis on isotherm data for all relative pressure ranges.
This function performs BET analysis of any relative pressure range where
the starting relative pressure is less than the ending relative pressure.
Results of the analysis are written to arrays, the indexes of the arrays
correspond to the starting and ending relative pressure.
eg the specific surface area value with the indicies [3,9] is the specific
surface area for the relative pressure range that begins with the 4th data
point and ends with the 10th.
Arrays of results are stored in the bet_results named tuple.
Indexing of named tuple elements is in order of priority, data used by
other function are given priority.
Rather than pass individual parameters, this function can accept
*isotherm_data (where isotherm_data is a named tuple output by
a data import function).
Parameters
----------
iso_df: dataframe
Isotherm data, output by a data import function.
a_o : float
Cross sectional area of adsorbate, in square Angstrom, output by a
data import function.
info : string
Adsorbate-adsorbent information, output by a data import
function.
Returns
-------
bet_results : namedtuple
Contains the results of BET analysis.
Tuple elements are, in order of index:
        - ``bet_results.intercept`` (array) : 2D array of intercept values
          for the BET plot trendline. Indices correspond to first and last
          datapoint used in the analysis.
        - ``bet_results.iso_df`` (dataframe) : Experimental isotherm data.
        - ``bet_results.nm`` (array) : 2D array of monolayer adsorbed
          amounts, in mol/g, indices correspond to first and last datapoint
          used in the analysis.
        - ``bet_results.slope`` (array) : 2D array of slope values for the
          BET plot trendline. Indices correspond to first and last datapoint
          used in the analysis.
        - ``bet_results.ssa`` (array) : 2D array of specific surface area
          values, in m^2/g, indices correspond to first and last datapoint
          used in the analysis.
        - ``bet_results.c`` (array) : 2D array of BET constant values,
          indices correspond to first and last datapoint used in the
          analysis.
        - ``bet_results.err`` (array) : 2D array of average error between
          a datapoint and the theoretical BET isotherm. Indices correspond
          to first and last datapoint used in the analysis.
        - ``bet_results.r`` (array) : 2D array of r values for the BET plot
          trendline. Indices correspond to first and last datapoint used in
          the analysis.
        - ``bet_results.num_pts`` (array) : 2D array of the number of
          experimental data points per relative pressure range.
        - ``bet_results.info`` (string) : string of adsorbate-adsorbent info,
          used by other functions to name files.
"""
ssa_array = np.zeros((len(iso_df), len(iso_df)))
c_array = np.zeros((len(iso_df), len(iso_df)))
nm_array = np.zeros((len(iso_df), len(iso_df)))
err_array = np.zeros((len(iso_df), len(iso_df)))
slope = np.zeros((len(iso_df), len(iso_df)))
intercept = np.zeros((len(iso_df), len(iso_df)))
r = np.zeros((len(iso_df), len(iso_df)))
bet_c = np.zeros(len(iso_df))
number_pts = np.zeros((len(iso_df), len(iso_df)))
for i in range(len(iso_df)):
for j in range(len(iso_df)):
if i > j:
a = iso_df.iloc[j : i + 1]
X = a.relp
y = a.bet
m, b, r_value, p_value, std_err = sp.stats.linregress(X, y)
slope[i, j] = m
intercept[i, j] = b
r[i, j] = r_value
c = 0
nm = 0
bet_c = 0
if b != 0:
c = m / b + 1 # avoiding divide by zero issues
nm = 1 / (b * c)
bet_c = (1 / (nm * c)) + (c - 1) * iso_df.relp / (nm * c)
spec_sa = nm * 6.022 * 10 ** 23 * a_o * 10 ** -20
ssa_array[i, j] = spec_sa
c_array[i, j] = c
nm_array[i, j] = nm
number_pts[i, j] = i - j + 1
errors = np.nan_to_num(abs(bet_c - iso_df.bet) / bet_c)
if i - j == 1:
err_array[i, j] = 0
else:
err_array[i, j] = 100 * sum(errors[j : i + 1]) / (i + 1 - j)
# error is normalized for the interval of relative pressures
# used to compute C, so, min and max error corresponds to the
# best and worst fit over the interval used in BET analysis,
# not the entire isotherm
results = namedtuple(
"results", "intercept iso_df nm slope ssa c err r num_pts info",
)
bet_results = results(
np.nan_to_num(intercept),
iso_df,
nm_array,
slope,
ssa_array,
c_array,
err_array,
r,
number_pts,
info,
)
return bet_results
def single_point_bet(df, a_o):
"""
Performs single point BET analysis on an isotherm data set for all
relative pressure ranges. Can be used to check for agreement between BET
and single point BET.
Parameters
----------
bet_results : namedtuple
Contains all information required for BET analysis. Results of BET
analysis are also stored in this named tuple.
Relevant fields for single point BET anaylsis are:
- ``bet_results.raw_data`` (dataframe) : experimental isotherm data.
        - ``bet_results.a_o`` (float) : the cross sectional area of the
adsorbate molecule, in square angstrom.
Returns
-------
singlept_results : namedtuple
Contains the results of single point BET analysis. Relevant fields are:
        - ``singlept_results.ssa`` (array) : 2D array of specific surface
          area values, in m^2/g, indices correspond to first and last
          datapoint used in the analysis.
        - ``singlept_results.nm`` (array) : 2D array of monolayer adsorbed
          amounts, in mol/g, indices correspond to first and last datapoint
          used in the analysis.
"""
ssa_array = np.zeros((len(df), len(df)))
nm_array = np.zeros((len(df), len(df)))
for i in range(len(df)):
for j in range(len(df)):
if i > j:
n_range = df.n[j:i]
relp_range = df.relp[j:i]
n = np.ma.median(n_range)
relp = np.ma.median(relp_range)
nm_array[i, j] = n * (1 - relp)
ssa_array[i, j] = n * 6.022 * 10 ** 23 * a_o * 10 ** -20
singlept_results = namedtuple("singlept_results", ("ssa", "nm"))
singlept_results.ssa = ssa_array
singlept_results.nm = nm_array
return singlept_results
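# Worked example (added): the single point estimate assumes c >> 1, so that
# nm ~= n * (1 - p/po) and ssa = nm * 6.022e23 * a_o * 1e-20 [m^2/g].
# For example, n = 1.0e-3 mol/g adsorbed at p/po = 0.30 with a_o = 16.2 sq. A
# gives nm = 7.0e-4 mol/g and ssa ~= 7.0e-4 * 6.022e23 * 16.2e-20 ~= 68 m^2/g.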
def check_1(intercept):
"""
Checks that y intercept of the BET plot's linear regression is positive.
Parameters
----------
intercept : array
2D array of y-intercept values.
Returns
-------
check1 : array
Array of 1s and 0s where 0 corresponds to relative pressure ranges
where the y-intercept is negative or zero, ie ranges that fail this
check.
"""
check1 = intercept[:, :] > 0
    if not np.any(check1):
logging.warning("All relative pressure ranges fail check 1.")
return check1
def check_2(df):
"""
Checks that n(p-po) aka check2 is increasing.
This is a necessary condition for linearity of the BET dataset.
Parameters
----------
df : dataframe
Dataframe of imported experimental isothermal adsorption data.
Returns
-------
check2 : array
Array of 1s and 0s where 0 corresponds to relative pressure ranges
where n(p-po) isn't consistently increasing with relative pressure, ie
ranges that fail this check.
"""
df["check2"] = df.n * (1 - df.relp)
check2 = np.ones((len(df), len(df)))
minus1 = np.concatenate(([0], df.check2[:-1]))
test = df.check2 - minus1 >= 0
test = np.tile(test, (len(df), 1))
check2 = check2 * test
check2 = check2.T
    if not np.any(check2):
logging.warning("All relative pressure ranges fail check 2.")
return check2
def check_3(df, nm):
"""
Checks that nm, amount adsorbed in the monolayer, is in the range of
data points used in BET analysis.
Parameters
----------
df : dataframe
Dataframe of imported experimental isothermal adsorption data.
nm : array
2D array of BET specific amount of adsorbate in the monolayer, the
coordinates of the array corresponding to relative pressures, units
[moles / gram].
Returns
-------
check3 : array
        Array of 1s and 0s where 0 corresponds to relative pressure ranges
        where nm is not included in the range of experimental n values, ie
        ranges that fail this check.
"""
check3 = np.zeros((len(df), len(df)))
for i in range(np.shape(check3)[0]):
for j in range(np.shape(check3)[1]):
if df.iloc[j, 1] <= nm[i, j] <= df.iloc[i, 1]:
check3[i, j] = 1
    if not np.any(check3):
logging.warning("All relative pressure ranges fail check 3.")
return check3
def check_4(df, nm, slope, intercept):
"""
Checks that relative pressure is consistent.
The relative pressure corresponding to nm is found from linear
    interpolation of the experimental data.
A second relative pressure is found by setting n to nm in the BET equation
and solving for relative pressure.
The two relative pressures are compared and must agree within 10% to pass
this check.
Parameters
----------
df : dataframe
Dataframe of imported experimental isothermal adsorption data.
nm : array
2D array of BET specific amount of adsorbate in the monolayer,
the coordinates of the array corresponding to relative pressures,
units [moles / gram].
slope : array
2D array of slope values resulting from linear regression applied to
relevant experimental data.
intercept : array
2D array of y-intercept values resulting from linear regression applied
to relevant experimental data.
Returns
-------
check4 : array
Array of 1s and 0s where 0 corresponds to relative pressure values that
do not agree within 10%, ie ranges that fail this check.
"""
check4 = np.zeros((len(df), len(df)))
for i in range(np.shape(check4)[0]):
for j in range(np.shape(check4)[1]):
if nm[i, j] != 0 and i > 0 and j > 0:
# find relp corresponding to nm
relpm = util.lin_interp(df, nm[i, j])
# BET eq solved for relp is a quadratic, coeff = [a, b, c]
coeff = [
-1 * slope[i, j] * nm[i, j],
slope[i, j] * nm[i, j] - 1 - intercept[i, j] * nm[i, j],
intercept[i, j] * nm[i, j],
]
# find roots
# (relp value where nm occurs on theoretical isotherm)
roots = np.roots(coeff) # note: some roots are imaginary
roots = [item.real for item in roots if len(roots) == 2]
# find the difference between
relp_m_1 = roots[0]
diff_1 = abs((relp_m_1 - relpm) / relpm)
relp_m_2 = roots[1]
diff_2 = abs((relp_m_2 - relpm) / relpm)
diff = min(diff_1, diff_2)
if diff < 0.1:
check4[i, j] = 1
    if not np.any(check4):
logging.warning("All relative pressure ranges fail check 4.")
return check4
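# Note (added for clarity): check_4 solves the linearized BET equation for the
# relative pressure at monolayer coverage. With x = p/po and y = x / (n (1 - x)),
# the BET plot is y = m x + b; setting n = nm gives
#     x / (nm (1 - x)) = m x + b
# which rearranges to the quadratic whose coefficients appear above:
#     -m nm x^2 + (m nm - 1 - b nm) x + b nm = 0
# The real root closest to the experimentally interpolated value is kept, and
# the range passes the check if the two values agree within 10%.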
def check_5(df, points=5):
"""
    Checks that relative pressure ranges contain a minimum number of data points.
Parameters
----------
df : dataframe
Dataframe of imported experimental isothermal adsorption data.
points : int
Minimum number of data points required for BET analysis to be
considered valid, default value is 5.
Returns
-------
check5 : array
Array of 1s and 0s where 0 corresponds to ranges of experimental data
that contain less than the minimum number of points.
"""
check5 = np.ones((len(df), len(df)))
for i in range(len(df)):
for j in range(len(df)):
if i - j < points - 1:
check5[i, j] = 0
    if not np.any(check5):
logging.warning("All relative pressure ranges fail check 5.")
return check5
def rouq_mask(
intercept,
iso_df,
nm,
slope,
*args,
check1=True,
check2=True,
check3=True,
check4=True,
check5=True,
points=5
):
"""
Calls all check functions and combines their masks into one "rouqerol mask".
Rather than pass individual parameters, this function can accept
*bet_results (where bet_results is a named tuple output by the bet
function).
Parameters
----------
intercept : array
2D array of intercept values, used in check1.
iso_df : dataframe
Dataframe of isotherm data, used in check2.
nm : array
2D array of amount in the monolayer values, used in check3 and check4.
slope : array
2D array of slope values, used in check4
    check1 : boolean
        True means the check will be evaluated, False means the check will
        not be evaluated.
    check2 : boolean
        True means the check will be evaluated, False means the check will
        not be evaluated.
    check3 : boolean
        True means the check will be evaluated, False means the check will
        not be evaluated.
    check4 : boolean
        True means the check will be evaluated, False means the check will
        not be evaluated.
    check5 : boolean
        True means the check will be evaluated, False means the check will
        not be evaluated.
points : int
The minimum number of experimental data points for a relative pressure
interval to be considered valid.
Returns
-------
rouq_mask : namedtuple
Contains arrays for the result of each check and a masked array that is
the result of all selected checks.
Fields of the named tuple are:
        - ``rouq_mask.mask`` (MaskedArray) : object where invalid BET results
          are masked.
        - ``rouq_mask.check1`` (array) : array of 1s and 0s where 0 corresponds
          to failing check1.
        - ``rouq_mask.check2`` (array) : array of 1s and 0s where 0 corresponds
          to failing check2.
        - ``rouq_mask.check3`` (array) : array of 1s and 0s where 0 corresponds
          to failing check3.
        - ``rouq_mask.check4`` (array) : array of 1s and 0s where 0 corresponds
          to failing check4.
        - ``rouq_mask.check5`` (array) : array of 1s and 0s where 0 corresponds
          to failing check5.
"""
mask = np.ones((len(iso_df), len(iso_df)))
for i in range(len(iso_df)):
for j in range(len(iso_df)):
if j >= i:
mask[i, j] = 0
if check1 is True:
check1 = check_1(intercept)
else:
check1 = np.ones((len(iso_df), len(iso_df)))
if check2 is True:
check2 = check_2(iso_df)
else:
check2 = np.ones((len(iso_df), len(iso_df)))
if check3 is True:
check3 = check_3(iso_df, nm)
else:
check3 = np.ones((len(iso_df), len(iso_df)))
if check4 is True:
check4 = check_4(iso_df, nm, slope, intercept)
else:
check4 = np.ones((len(iso_df), len(iso_df)))
if check5 is True:
check5 = check_5(iso_df, points)
else:
check5 = np.ones((len(iso_df), len(iso_df)))
mask = np.multiply(check1, mask)
mask = np.multiply(check2, mask)
mask = np.multiply(check3, mask)
mask = np.multiply(check4, mask)
mask = np.multiply(check5, mask)
    mask = mask.astype(bool)  # converting mask to boolean
# inverting mask so that 0 = valid, 1 = invalid, to work well with numpy masks
invertedmask = np.logical_not(mask)
rouq_mask = namedtuple("rouq_mask", "mask check1 check2 check3 check4 check5")
mask_results = rouq_mask(invertedmask, check1, check2, check3, check4, check5)
return mask_results
def ssa_answer(bet_results, mask_results, criterion="error"):
"""
Logs a single specific surface area answer from the valid relative
pressure range with the lowest error, most number of points, maximum
specific surface area, or minimum specific surface area.
Parameters
----------
bet_results : named tuple
``bet_results.ssa`` contains the array of specific surface values.
rouq_mask : named tuple
``rouq_mask.mask`` contains the mask used to remove invaid specific
surface area values from consideration.
criterion : string
Used to specify the criterion for a final specific surface area answer,
either 'error', 'points', 'max', or 'min. Defaults to 'error'.
Returns
-------
ssa_ans : float
Specific surface answer corresponding to user defined criteria.
"""
mask = mask_results.mask
if mask.all():
raise ValueError(
"No valid relative pressure ranges. Specific surface"
" area not calculated."
)
ssa = np.ma.array(bet_results.ssa, mask=mask)
if criterion == "points":
pts = np.ma.array(bet_results.num_pts, mask=mask)
max_pts = np.max(pts)
ssa_ans_array = np.ma.masked_where(pts < max_pts, ssa)
try:
ssa_ans = float(ssa_ans_array.compressed())
except ValueError:
            raise Exception(
                "Error, no single specific surface area answer. Multiple "
                "relative pressure ranges have the maximum number of points."
            )
logging.info(
"The specific surface area value, based on %s is %.2f m2/g."
% (criterion, ssa_ans)
)
return ssa_ans
if criterion == "error":
err = np.ma.array(bet_results.err, mask=mask)
errormax, error_max_idx, errormin, error_min_idx = util.max_min(err)
ssa_ans = ssa[int(error_min_idx[0]), int(error_min_idx[1])]
logging.info(
"The specific surface area value, based on %s is %.2f m2/g."
% (criterion, ssa_ans)
)
return ssa_ans
if criterion == "max":
ssa_ans = np.max(ssa)
logging.info(
"The specific surface area value, based on %s is %.2f m2/g."
% (criterion, ssa_ans)
)
return ssa_ans
if criterion == "min":
ssa_ans = np.min(ssa)
logging.info(
"The specific surface area value, based on %s is %.2f m2/g."
% (criterion, ssa_ans)
)
return ssa_ans
else:
raise ValueError("Invalid criterion, must be points, error, min, or max.")
def run_beatmap(
file=None,
info=None,
a_o=None,
check1=True,
check2=True,
check3=True,
check4=True,
check5=True,
points=5,
save_figures=True,
export_data=False,
ssa_criterion="error",
ssa_gradient="Greens",
err_gradient="Greys",
):
"""
A single function that executes all necessary BEaTmap algorithims.
This function is built to be as user friendly as possible. The file
name/path of the isotherm data, information about the isotherm, and the
cross sectional surface area of the adsorbate can be passed using the
    file, info, and a_o parameters respectively. Or, if left empty, the user
    will be prompted to input them.
    eg. ``run_beatmap('myfile.csv', 'nitrogen on carbon', 16.2)`` or
    ``run_beatmap()`` will execute the function. In the latter case the user
    will provide the parameters passed in the former through prompts in their
    console.
    Additional parameters to set which of the Rouquerol criteria are applied,
the minimum number of data points per valid relative pressure range,
the criteria used to select a single specific surface area, and more, are
defined and set to reasonable default values.
Parameters
----------
file : string
File name (if file is in parent directory) or file path.
info : string
Adsorbate-adsorbent information.
a_o : float
Cross sectional area of adsorbate, in square Angstrom.
check1 : boolean
If check1 is True any relative pressure ranges with a negative y
intercept are considered invalid.
check2 : boolean
If check2 is True any relative pressure ranges where n(p-po) is
decreasing are considered invalid.
check3 : boolean
If check3 is True any relative pressure ranges where the monolayer
amount falls outside of the relative pressure range are considered
invalid.
check4 : boolean
If check4 is True any relative pressure range where there is
disagreement of more than 10% between the actual relative pressure
where monolayer coverage occurs and the relative pressure where
monolayer coverage occurs on the theoretical isotherm are considered
invalid.
check5 : boolean
If check5 is True relative pressure ranges that contain fewer points
than specified by the user are considered invalid.
    points : integer
The minimum number of points for a valid relative pressure range.
save_figures : boolean
If save_figures is True any figures created by this function will be
saved as .png files in the parent directory.
export_data : boolean
If export data is True .csv files of the isotherm data and the BEaTmap
results will be created and saved in the parent directory.
ssa_criterion : string
Used to set which criterion is used to provide a single specific
surface area value. 'error' will output the valid ssa answer with the
lowest error, 'points' will output the ssa answer with the most
datapoints.
ssa_gradient : string
        Color gradient for heatmap, must be a valid color gradient name
        in the seaborn package.
    err_gradient : string
        Color gradient for heatmap, must be a valid color gradient name
        in the seaborn package, default is grey.
Returns
-------
"""
# run_beatmap_import_data imports isotherm data from a .csv file and returns
# the results in the isotherm_data namedtuple
isotherm_data = io.import_data(file, info, a_o)
figs.experimental_data_plot(isotherm_data, save_file=save_figures)
# bet_results uses isotherm_data, applies BET analysis and returns the results
# in the bet_results namedtuple
bet_results = bet(isotherm_data.iso_df, isotherm_data.a_o, isotherm_data.info)
    # mask_results uses isotherm_data and bet_results, applies the Rouquerol
# criteria specified by the user, and returns the results in the
# mask_results named tuple
mask_results = rouq_mask(
bet_results.intercept,
bet_results.iso_df,
bet_results.nm,
bet_results.slope,
check1=check1,
check2=check2,
check3=check3,
check4=check4,
check5=check5,
points=points,
)
# mask_results are used to highlight the valid bet_results in the
# following functions
# ssa_ans = ssa_answer(bet_results, mask_results, ssa_criterion)
figs.ssa_heatmap(bet_results, mask_results, save_figures)
figs.err_heatmap(bet_results, mask_results, save_figures)
figs.bet_combo_plot(bet_results, mask_results, save_figures)
figs.iso_combo_plot(bet_results, mask_results, save_figures)
figs.ascii_tables(bet_results, mask_results)
if export_data is True:
io.export_raw_data(isotherm_data)
io.export_processed_data(bet_results, points)
combo_results = namedtuple(
"results",
"ssa c nm err intercept slope r mask"
" check1 check2 check3 check4 check5 num_pts",
)
results = combo_results(
bet_results.ssa,
bet_results.c,
bet_results.nm,
bet_results.err,
bet_results.intercept,
bet_results.slope,
bet_results.r,
mask_results.mask,
mask_results.check1,
mask_results.check2,
mask_results.check3,
mask_results.check4,
mask_results.check5,
bet_results.num_pts,
)
return results
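# Usage sketch (added; not part of the original module): the lower-level API
# that run_beatmap wraps. The file name, adsorbate info, and cross sectional
# area below are placeholders.
if __name__ == "__main__":
    isotherm_data = io.import_data("my_isotherm.csv", "N2 on carbon", 16.2)
    bet_results = bet(isotherm_data.iso_df, isotherm_data.a_o, isotherm_data.info)
    mask_results = rouq_mask(
        bet_results.intercept, bet_results.iso_df, bet_results.nm,
        bet_results.slope, points=5,
    )
    print(ssa_answer(bet_results, mask_results, criterion="error"))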
|
the-stack_0_16989 | # -*- coding: utf-8 -*-
"""Fram framework bootstrap module."""
import argparse
import sys
import six
__author__ = "Shawn Lee"
__email__ = "[email protected]"
def fram_plugins():
"""Go through all the loaded modules and look for fram plugins.
A plugin is defined by a module that defines a FRAM_PLUGIN variable that is
a dict."""
plugins = []
for mod_name in list(sys.modules):
instance = sys.modules[mod_name]
if hasattr(instance, "FRAM_PLUGIN"):
plugins.append(instance.FRAM_PLUGIN)
return plugins
def parser_from_plugins(plugins, description):
"""Go through all the loaded plugins and build out a cli parser."""
parser = argparse.ArgumentParser(description, conflict_handler="resolve")
for plugin in plugins:
if "argparse" in plugin:
for argument, options in six.iteritems(plugin["argparse"]):
kwargs = {}
for option in ["help", "action", "default", "required"]:
val = options.get(option)
if val:
kwargs[option] = options.get(option)
args = [argument] + options.get(
"additional_args", [])
parser.add_argument(*args, **kwargs)
return parser
def decorated_main_from_plugins(plugins, func):
"""Go through all the loaded plugins and build the main decorators."""
for plugin in plugins:
if "main_decorator" in plugin:
func = plugin["main_decorator"](func)
return func
def parser_callbacks(plugins, parser):
"""Go through all the loaded plugins and run the parser callbacks."""
framework = {}
if parser:
try:
framework["argparse"] = parser.parse_args()
except AttributeError:
print (
"ERROR: Did you return parser.parse_args() in your argument\n"
" parser? Just return the parser. Fram framework will\n"
" call parse_args at a later time.")
sys.exit(1)
# Since we have parsed_args, go through all callbacks.
for plugin in plugins:
if "argparse" in plugin:
for argument, options in six.iteritems(plugin["argparse"]):
if "callback" in options:
framework[argument.strip("-")] = (
options["callback"](getattr(
framework["argparse"],
argument.strip("-"))))
return framework
def run(func, description=None, argument_parser=None):
"""Bootstrap up the library."""
plugins = fram_plugins()
parser = parser_from_plugins(plugins, description)
if argument_parser:
try:
parser = argument_parser(parser)
except TypeError:
print (
"ERROR: Did you return parser.parse_args() in your argument\n"
" parser? Just return the parser. Fram framework will\n"
" call parse_args at a later time.")
sys.exit(1)
func = decorated_main_from_plugins(plugins, func)
framework = parser_callbacks(plugins, parser)
return func(framework)
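# Usage sketch (added; not part of the original module). A plugin is simply an
# imported module that exposes a FRAM_PLUGIN dict; the option name and callback
# below are illustrative.
if __name__ == "__main__":
    FRAM_PLUGIN = {
        "argparse": {
            "--verbose": {
                "help": "enable verbose output",
                "action": "store_true",
                "callback": bool,
            },
        },
    }
    def example_main(framework):
        print("verbose =", framework.get("verbose"))
        return 0
    run(example_main, description="fram demo")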
|
the-stack_0_16991 | from mitie import tokenize, named_entity_extractor, text_categorizer
from rasa_nlu import Interpreter
class MITIESklearnInterpreter(Interpreter):
def __init__(self,metadata):
self.extractor = named_entity_extractor(metadata["entity_extractor"])#,metadata["feature_extractor"])
self.classifier = text_categorizer(metadata["intent_classifier"])#,metadata["feature_extractor"])
def get_entities(self,tokens):
d = {}
entities = self.extractor.extract_entities(tokens)
for e in entities:
_range = e[0]
d[e[1]] = " ".join(tokens[i] for i in _range)
return d
def get_intent(self,tokens):
label, _ = self.classifier(tokens) # don't use the score
return label
def parse(self,text):
tokens = tokenize(text)
intent = self.get_intent(tokens)
entities = self.get_entities(tokens)
return {'intent':intent,'entities': entities}
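# Usage sketch (added; not part of the original file): the model file paths in
# `metadata` are placeholders for real, pre-trained MITIE models.
if __name__ == "__main__":
    metadata = {
        "entity_extractor": "models/ner_model.dat",
        "intent_classifier": "models/intent_classifier.dat",
    }
    interpreter = MITIESklearnInterpreter(metadata)
    print(interpreter.parse(u"book a table for two in London"))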
|
the-stack_0_16992 | # Tango import
import tango
from tango import DevFailed, DevState
# Additional import
from ska.base.commands import BaseCommand
from tmc.common.tango_client import TangoClient
from tmc.common.tango_server_helper import TangoServerHelper
from . import const
from .attribute_callbacks import (
CbfHealthStateAttributeUpdator,
PssHealthStateAttributeUpdator,
PstHealthStateAttributeUpdator,
)
class TelescopeOn(BaseCommand):
"""
A class for CspMasterLeafNode's TelescopeOn() command. On command is inherited from BaseCommand.
It Sets the State to On.
"""
def check_allowed(self):
"""
Checks whether this command is allowed to be run in current device state
:return: True if this command is allowed to be run in current device state
:rtype: boolean
:raises: DevFailed if this command is not allowed to be run in current device state
"""
if self.state_model.op_state in [DevState.FAULT, DevState.UNKNOWN]:
tango.Except.throw_exception(
f"Command TelescopeOn is not allowed in current state {self.state_model.op_state}.",
"Failed to invoke On command on CspMasterLeafNode.",
"CspMasterLeafNode.TelescopeOn()",
tango.ErrSeverity.ERR,
)
return True
def telescope_on_cmd_ended_cb(self, event):
"""
Callback function immediately executed when the asynchronous invoked
command returns. Checks whether the Telescope On command has been successfully invoked on CSPMaster.
:param event: a CmdDoneEvent object. This class is used to pass data
to the callback method in asynchronous callback model for command
execution.
:type: CmdDoneEvent object
It has the following members:
- device : (DeviceProxy) The DeviceProxy object on which the call was executed.
- cmd_name : (str) The command name
- argout_raw : (DeviceData) The command argout
- argout : The command argout
- err : (bool) A boolean flag set to true if the command failed. False otherwise
- errors : (sequence<DevError>) The error stack
- ext
:return: none
"""
this_device = TangoServerHelper.get_instance()
if event.err:
log_msg = f"{const.ERR_INVOKING_CMD}{event.cmd_name}\n{event.errors}"
self.logger.error(log_msg)
this_device.write_attr("activityMessage", log_msg, False)
else:
log_msg = f"{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}"
self.logger.info(log_msg)
this_device.write_attr("activityMessage", log_msg, False)
def do(self):
"""
Method to invoke On command on CSP Element.
        :param argin: None
        :raises: DevFailed on communication failure with CspMaster or if
            CspMaster is in an error state.
"""
device_data = self.target
this_device = TangoServerHelper.get_instance()
try:
csp_mln_client_obj = TangoClient(this_device.read_property("CspMasterFQDN")[0])
csp_mln_client_obj.send_command_async(
const.CMD_ON, [], self.telescope_on_cmd_ended_cb
)
self.logger.debug(const.STR_ON_CMD_ISSUED)
this_device.write_attr("activityMessage", const.STR_ON_CMD_ISSUED, False)
device_data.cbf_health_updator = CbfHealthStateAttributeUpdator()
device_data.cbf_health_updator.start()
device_data.pss_health_updator = PssHealthStateAttributeUpdator()
device_data.pss_health_updator.start()
device_data.pst_health_updator = PstHealthStateAttributeUpdator()
device_data.pst_health_updator.start()
except DevFailed as dev_failed:
log_msg = f"{const.ERR_EXE_ON_CMD}{dev_failed}"
self.logger.exception(dev_failed)
this_device.write_attr("activityMessage", const.ERR_EXE_ON_CMD, False)
tango.Except.re_throw_exception(
dev_failed,
const.STR_ON_EXEC,
log_msg,
"CspMasterLeafNode.TelescopeOnCommand",
tango.ErrSeverity.ERR,
)
|
the-stack_0_16994 | #!/usr/bin/env python
""" Tests of Larch Scripts """
import unittest
import time
import ast
import numpy as np
from sys import version_info
from utils import TestCase
from larch import Interpreter
class TestScripts(TestCase):
'''testing of asteval'''
def test01_basic(self):
self.runscript('a.lar', dirname='larch_scripts')
assert(len(self.session.get_errors()) == 0)
self.isTrue("n < 10")
self.isTrue("n > 5")
self.isTrue("x > 3")
def test02_autobk(self):
self.runscript('doc_autobk1.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("cu.e0 > 8950.0")
self.isTrue("len(cu.k) > 200")
self.isTrue("max(abs(cu.chi)) < 2.0")
def test03_autobk2(self):
self.runscript('doc_autobk2.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("dat.e0 > 10000.0")
self.isTrue("len(dat.k) > 200")
def test04_autobk_clamp(self):
self.runscript('doc_autobk3.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("dat.e0 > 11000.0")
self.isTrue("len(dat.k) > 200")
def test05_autobk_with_std(self):
self.runscript('doc_autobk4.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("cu2.e0 > 8950.0")
self.isTrue("len(cu2.k) > 200")
self.isTrue("max(abs(cu2.chi)) < 2.0")
def test06_ftwin1(self):
self.runscript('doc_ftwin1.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("len(hann_win1) == 401")
self.isTrue("hann_win3.sum() > 50.0")
self.runscript('doc_ftwin2.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("len(kai_win1) == 401")
self.isTrue("kai_win1.sum() > 20.0")
def test07_xafsft1(self):
self.runscript('doc_xafsft1.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("len(d2.k) > 200")
self.isTrue("len(d2.kwin) > 200")
self.isTrue("d1.chir_mag.sum() > 30")
self.isTrue("where(d1.chir_mag>1)[0][0] > 60")
def test08_xafsft2(self):
self.runscript('doc_xafsft2.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("len(d3.k) > 200")
self.isTrue("len(d3.kwin) > 200")
self.isTrue("len(d4.k) > 200")
self.isTrue("len(d4.kwin) > 200")
self.isTrue("len(d1.r) > 100")
self.isTrue("len(d1.chir_mag) > 100")
self.isTrue("len(d3.r) > 100")
self.isTrue("len(d3.chir_mag) > 100")
self.isTrue("len(d4.r) > 100")
self.isTrue("len(d4.chir_mag) > 100")
self.isTrue("len(d4.chir_re) > 100")
self.isTrue("len(d4.chir_im) > 100")
def test09_xafsft3(self):
self.runscript('doc_xafsft3.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("len(dat.k) > 200")
self.isTrue("len(dat.kwin) > 200")
def test10_xafsft3(self):
self.runscript('doc_xafsft4.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("len(dat.r) > 200")
self.isTrue("len(dat.rwin) > 200")
self.isTrue("len(dat.q) > 200")
self.isTrue("len(dat.chiq_re) > 200")
def test11_wavelet1(self):
self.runscript('wavelet_example.lar', dirname='../examples/xafs/')
assert(len(self.session.get_errors()) == 0)
self.isTrue("f.wcauchy_im.shape == (326, 318)")
self.isTrue("f.wcauchy_mag.sum() > 300")
def test12_feffit_kws(self):
self.runscript('test_epsk_kws.lar', dirname='../examples/feffit/')
assert(len(self.session.get_errors()) == 0)
out = self.session.run('out')
for row in out:
            amp = row[5]
            amp_err = row[6]
            delr = row[7]
self.assertTrue(amp > 0.5)
self.assertTrue(amp < 2.0)
self.assertTrue(amp_err > 0.0)
self.assertTrue(amp_err < 2.0)
self.assertTrue(abs(delr) < 0.1)
if __name__ == '__main__': # pragma: no cover
for suite in (TestScripts,):
suite = unittest.TestLoader().loadTestsFromTestCase(suite)
unittest.TextTestRunner(verbosity=13).run(suite)
|
the-stack_0_16995 | # -*- coding: utf-8 -*-
"""
Dora Team
Teknofest 2021- Türkçe Doğal Dil İşleme Yarışması
"""
import numpy as np
import pandas as pd
import re
import string
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import classification_report
from sklearn.feature_selection import chi2
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from tkinter import *
from PIL import ImageTk,Image
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
import numpy as np
# GUI window
pencere = Tk()
pencere.geometry("900x600+100+50")
pencere.title("Dora-Şiir Türü Belirleme Programı")
pencere.configure(background="#B7CBF0")
# add the logo
canvas = Canvas(pencere, width = 100, height = 81, bg="#B7CBF0")
canvas.pack()
canvas.place(x=170,y=490)
img = ImageTk.PhotoImage(Image.open("logodoram2.png"))
canvas.create_image(0, 0, anchor=NW, image=img)
# GUI widgets
label=Label(pencere,fg="blue", bg="#B7CBF0",font="bold")
label.config(text="Şiiri yazınız:",font=("Arial",14))
label.place(x=20,y=20)
siirtext=Text(pencere,width=50,height=20)
siirtext.place(x=20,y=50)
siirtext.insert(END,"Korkma, sönmez bu şafaklarda yüzen al sancak;\nSönmeden yurdumun üstünde tüten en son ocak.\nO benim milletimin yıldızıdır, parlayacak;\nO benimdir, o benim milletimindir ancak.\n\nÇatma, kurban olayım çehreni ey nazlı hilâl!\nKahraman ırkıma bir gül… ne bu şiddet bu celâl?\nSana olmaz dökülen kanlarımız sonra helâl,\nHakkıdır, Hakk’a tapan, milletimin istiklâl.")
sonuclabel=Label(pencere,fg="blue",bg="#B7CBF0",font="bold")
sonuclabel.config(text='Şiir Türü=',font=("Arial",14))
sonuclabel.place(x=450,y=50)
sonucsgd=Label(pencere,fg="blue",bg="#B7CBF0",font="bold")
sonucsgd.config(text='SGD Sonuc:',font=("Arial",14))
sonucsvc=Label(pencere,fg="blue",bg="#B7CBF0",font="bold")
sonucsvc.config(text='Linear SVC sonuc:',font=("Arial",14))
sonuc2=Label(pencere,fg="blue",bg="#B7CBF0",font="bold")
sonuc2.config(text='Linear SVC Doğruluk Oranı:',font=("Arial",14))
sonuc3=Label(pencere,fg="blue",bg="#B7CBF0",font="bold")
sonuc3.config(text='SGD Doğruluk Oranı:',font=("Arial",14))
# read the dataset
pd.set_option('display.max_colwidth', None)
df=pd.read_csv('siirdataset.csv', encoding = 'utf-8')
df['siir']=df['siir'].str.lower()
class_ = df['tur'].value_counts().keys()
sum_ = df['tur'].value_counts()
daf = pd.DataFrame(zip(class_,sum_), columns = ['tur', 'Toplam'])
def siir_kontrol():
pd.set_option('display.max_colwidth', None)
df=pd.read_csv('siirdataset.csv', encoding = 'utf-8')
df['siir']=df['siir'].str.lower()
class_ = df['tur'].value_counts().keys()
sum_ = df['tur'].value_counts()
daf = pd.DataFrame(zip(class_,sum_), columns = ['tur', 'Toplam'])
    # stop words: filter out unnecessary words
stop_words=pd.read_csv('stopwords.txt', sep=" ", header=None)
stop_words.columns=['words_list']
pat2 = r'\b(?:{})\b'.format('|'.join(list(stop_words['words_list'].str.lower())))
df['yenisiir'] = df['siir'].str.lower().str.replace(pat2, '')
    # strip punctuation marks
df=df.dropna()
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['siir_son'] = df['yenisiir'].apply(lambda text: remove_punctuation(text))
    # create the train/test split
X_train, X_test, y_train, y_test = train_test_split(df['siir_son'], df['tur'], random_state = 0)
trial = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()), ('clf', SGDClassifier())])
parameters = {'vect__max_df': (0.5, 0.75, 1.0),'vect__max_features': (None, 5000, 10000, 50000),'vect__ngram_range':((1, 1),(1, 2)),
'clf__max_iter': (20,),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
# ‘clf__max_iter’: (10, 50, 80),
}
    # show the best score
grid_search = GridSearchCV(trial, parameters, n_jobs=-1, verbose=1)
grid_search.fit(X_train, y_train)
print("En iyi Skor: %0.3f" % grid_search.best_score_)
print("Best parameters")
best_parameters = grid_search.best_estimator_.get_params()
print(best_parameters)
#SGD
trial = Pipeline([('vect', CountVectorizer(max_df=0.75, ngram_range=(1, 2))),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='modified_huber',alpha=1e-05, max_iter=20, penalty='elasticnet')),])
trial.fit(X_train, y_train)
print("SGDC Doğruluk Oranı: " + str(trial.score(X_test, y_test)))
sonuc3["text"]="SGDC Doğruluk Oranı: "+str(trial.score(X_test, y_test))
#linear SVC
trial2 = Pipeline([('vectorizer',CountVectorizer()),
('tfidf', TfidfTransformer()),
('classifier', LinearSVC())])
trial2.fit(X_train, y_train)
print("Linear SVC Doğruluk Oranı= " + str(trial2.score(X_test, y_test)))
sonuc2["text"]="Linear SVC Doğruluk Oranı= "+str(trial2.score(X_test, y_test))
    # classification report
y_pred=trial.predict(X_test)
print(classification_report(y_test, y_pred))
#test1-SGD
cv = CountVectorizer(ngram_range=(1,2))
siirsoz=siirtext.get(1.0,END)
data = [siirsoz]
n=3
tahmin1 = trial.predict(data)
if tahmin1=='epik':
sonuct1='Epik'
elif tahmin1=='lirik':
sonuct1='Lirik'
elif tahmin1=='didaktik':
sonuct1='Didaktik'
elif tahmin1=='pastoral':
sonuct1='Pastoral'
elif tahmin1=='satirik':
sonuct1='Satirik'
else:
sonuct1='Dramatik'
print(sonuct1)
sonucsvc["text"]="Linear SVC Sonucu="+str(sonuct1)
#test2-linear svc
tahmin2 = trial2.predict(data)
if tahmin2=='epik':
sonuct2='Epik'
elif tahmin2=='lirik':
sonuct2='Lirik'
elif tahmin2=='didaktik':
sonuct2='Didaktik'
elif tahmin2=='pastoral':
sonuct2='Pastoral'
elif tahmin2=='satirik':
sonuct2='Satirik'
else:
sonuct2='Dramatik'
print(sonuct2)
sonucsgd["text"]="SGDC Sonucu="+str(sonuct2)
if str(tahmin1)==str(tahmin2):
sonuclabel["text"]="Şiir Türü="+str(sonuct2)
else:
sonuclabel["text"]=str(sonuct1)+" veya "+str(sonuct2)
# clear button
def temizle():
siirtext.delete('1.0', END)
sonucsgd.place_forget()
sonucsvc.place_forget()
sonuc2.place_forget()
sonuc3.place_forget()
sonuclabel["text"]="Şiir Türü="
# show the linear SVC and SGD accuracy scores
def oran_goster():
sonucsgd.place(x=450,y=150)
sonucsvc.place(x=450,y=190)
sonuc2.place(x=450,y=230)
sonuc3.place(x=450,y=270)
# draw a bar chart of poem counts by genre
def grafik_cizdir():
pencere2 = Tk()
pencere2.geometry("600x300+600+300")
pencere2.title("Türlerine Göre Şiir Sayıları Grafiği")
pd.set_option('display.max_colwidth', None)
df=pd.read_csv('siirdataset.csv', encoding = 'utf-8')
df['siir']=df['siir'].str.lower()
class_ = df['tur'].value_counts().keys()
sum_ = df['tur'].value_counts()
daf = pd.DataFrame(zip(class_,sum_), columns = ['tur', 'Toplam'])
fig,ax=plt.subplots(figsize=(15,5))
ax.bar(daf.tur,daf.Toplam,width=.8)
plt.xlabel('şiir türleri')
plt.ylabel('Şiir Sayıları')
plt.show()
"""
    # pie chart of poem counts by genre
fig, ax = plt.subplots(figsize=(15, 10))
ax.pie(daf.Toplam, labels =daf.tur, autopct = '%1.1f%%', startangle = 90 )
ax.axis('equal')
plt.show()
"""
canvas = FigureCanvasTkAgg(fig,master = pencere2)
canvas.draw()
canvas.get_tk_widget().pack()
toolbar = NavigationToolbar2Tk(canvas,pencere2)
toolbar.update()
canvas.get_tk_widget().pack()
def kelimebulutu_ciz():
    # build a word cloud of the most frequent words - all genres
pd.set_option('display.max_colwidth', None)
df=pd.read_csv('siirdataset.csv', encoding = 'utf-8')
df['siir']=df['siir'].str.lower()
class_ = df['tur'].value_counts().keys()
sum_ = df['tur'].value_counts()
daf = pd.DataFrame(zip(class_,sum_), columns = ['tur', 'Toplam'])
    # stop words: filter out unnecessary words
stop_words=pd.read_csv('stopwords.txt', sep=" ", header=None)
stop_words.columns=['words_list']
pat2 = r'\b(?:{})\b'.format('|'.join(list(stop_words['words_list'].str.lower())))
df['yenisiir'] = df['siir'].str.lower().str.replace(pat2, '')
    # strip punctuation marks
df=df.dropna()
PUNCT_TO_REMOVE = string.punctuation
def remove_punctuation(text):
return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
df['siir_son'] = df['yenisiir'].apply(lambda text: remove_punctuation(text))
pencere3 = Tk()
pencere3.geometry("600x300+600+300")
pencere3.title("Word Cloud")
wordcloud = WordCloud(width=1000, height=500).generate("+".join(df['siir_son']))
fig,ax=plt.subplots(figsize=(15,5))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
canvas = FigureCanvasTkAgg(fig,master = pencere3)
canvas.draw()
canvas.get_tk_widget().pack()
toolbar = NavigationToolbar2Tk(canvas,pencere3)
toolbar.update()
canvas.get_tk_widget().pack()
# buttons
buton=Button(pencere)
buton.config(text="Şiir Türü Bul",bg="blue",fg="white",font="bold",command=siir_kontrol,width=20)
buton.place(x=20,y=400)
butonoran=Button(pencere)
butonoran.config(text="Doğruluk Oranları",bg="blue",fg="white",font="bold",command=oran_goster,width="20")
butonoran.place(x=450,y=100)
butontemizle=Button(pencere)
butontemizle.config(text="Temizle",bg="blue",fg="white",font="bold",command=temizle,width="20")
butontemizle.place(x=235,y=400)
butoncikis=Button(pencere)
butoncikis.config(text="Çıkış",bg="blue",fg="white",command=pencere.destroy,width="17")
butoncikis.place(x=295,y=450)
butongrafik=Button(pencere)
butongrafik.config(text="Şiir Sayıları Grafiği",bg="blue",fg="white",width="17",command=grafik_cizdir)
butongrafik.place(x=20,y=450)
butonbulut=Button(pencere)
butonbulut.config(text="Kelime Bulutu Çiz",bg="blue",fg="white",width="17",command=kelimebulutu_ciz)
butonbulut.place(x=157,y=450)
mainloop()
|
the-stack_0_16996 | from trust_monitor.verifier.structs import *
from trust_monitor.verifier.statistics import *
from suds.client import Client
from trust_monitor.verifier.parser import IRParser, IMAMeasureHandler
from trust_monitor.verifier.parser import ContainerCheckAnalysis
import logging
import gc
import xmltodict
import ssl
# use logging system of django.
logger = logging.getLogger('driver')
class ParsingOAT():
def __init__(self):
logger.info('Parsing OAT Set structures')
Digest.digests_dict = {}
Digest.digests_query_done = False
Digest.packages_query_done = False
Digest.packages_query = set()
Package.pkg_dict = {}
IMARecord.records = []
Subject.subj_label_dict = {}
Object.obj_label_dict = {}
ssl._create_default_https_context = ssl._create_unverified_context
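        # Note: overriding the default HTTPS context disables certificate
        # verification process-wide, presumably so the suds SOAP client used in
        # parsing() can reach attestation endpoints that serve self-signed TLS.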
def parsing(self, analysis, checked_containers,
report_url, report_id, infoDigest):
doCheckContAnalysis = False
containers = {}
if 'cont-check' in analysis:
doCheckContAnalysis = True
logger.info('Understand what kind of analysis to do')
for item in analysis.split(','):
if item.startswith('cont-list'):
logger.info('Analysis include containters')
checked_containers = item.split('=')[1]
break
try:
if report_url is not None and report_id != 0:
client = Client(report_url)
logger.info('report url ' + str(report_url))
logger.info('report id ' + str(report_id))
report_str = client.service.fetchReport(report_id)
logger.info('Start to parser IR %s', str(report_id))
IRParser(report_str, ContainerCheckAnalysis(doCheckContAnalysis,
containers,
checked_containers,
infoDigest))
logger.info('Parsing of IR done.')
try:
data_xml = xmltodict.parse(report_str)
host_name = (data_xml['ns3:Report']['ns3:QuoteData']
['ns3:TpmSignature']['ns3:KeyInfo']['KeyName'])
except Exception:
host_name = (data_xml['ns3:Report']['ns3:QuoteData']
['ns3:TpmSignature']['ns3:KeyInfo']
['ns2:KeyName'])
logger.info(host_name)
infoDigest.host = host_name
gc.collect()
        except Exception as e:
            logger.error('Error opening IR, %s', e)
            # report_str is undefined if fetching the report itself failed
            if 'report_str' in locals():
                del report_str
            gc.collect()
            return 2
return 0
|
the-stack_0_16997 | # Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.urls import re_path
from awx.api.views import JobEventDetail, JobEventChildrenList
urls = [
re_path(r'^(?P<pk>[0-9]+)/$', JobEventDetail.as_view(), name='job_event_detail'),
re_path(r'^(?P<pk>[0-9]+)/children/$', JobEventChildrenList.as_view(), name='job_event_children_list'),
]
__all__ = ['urls']
|
the-stack_0_16998 | """
Typeclass for Player objects
Note that this object is primarily intended to
store OOC information, not game info! This
object represents the actual user (not their
character) and has NO actual precence in the
game world (this is handled by the associated
character object, so you should customize that
instead for most things).
"""
from django.conf import settings
from django.utils import timezone
from evennia.typeclasses.models import TypeclassBase
from evennia.players.manager import PlayerManager
from evennia.players.models import PlayerDB
from evennia.comms.models import ChannelDB
from evennia.commands import cmdhandler
from evennia.utils import logger
from evennia.utils.utils import (lazy_property, to_str,
make_iter, to_unicode,
variable_from_module)
from evennia.typeclasses.attributes import NickHandler
from evennia.scripts.scripthandler import ScriptHandler
from evennia.commands.cmdsethandler import CmdSetHandler
from django.utils.translation import ugettext as _
__all__ = ("DefaultPlayer",)
_SESSIONS = None
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_CMDSET_PLAYER = settings.CMDSET_PLAYER
_CONNECT_CHANNEL = None
class DefaultPlayer(PlayerDB):
"""
This is the base Typeclass for all Players. Players represent
the person playing the game and tracks account info, password
etc. They are OOC entities without presence in-game. A Player
can connect to a Character Object in order to "enter" the
game.
Player Typeclass API:
* Available properties (only available on initiated typeclass objects)
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to
database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object) - game object controlled by player. 'character' can also
be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this
self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not
create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(outgoing_string, from_obj=None, **kwargs)
#swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string)
search(ostring, global_search=False, attribute_name=None,
use_nicks=False, location=None,
ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hook methods
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_access()
at_cmdset_get(**kwargs)
at_first_login()
at_post_login(sessid=None)
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
__metaclass__ = TypeclassBase
objects = PlayerManager()
# properties
@lazy_property
def cmdset(self):
return CmdSetHandler(self, True)
@lazy_property
def scripts(self):
return ScriptHandler(self)
@lazy_property
def nicks(self):
return NickHandler(self)
# session-related methods
def get_session(self, sessid):
"""
Return session with given sessid connected to this player.
note that the sessionhandler also accepts sessid as an iterable.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.session_from_player(self, sessid)
def get_all_sessions(self):
"Return all sessions connected to this player"
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.sessions_from_player(self)
sessions = property(get_all_sessions) # alias shortcut
def disconnect_session_from_player(self, sessid):
"""
Access method for disconnecting a given session from the player
(connection happens automatically in the sessionhandler)
"""
# this should only be one value, loop just to make sure to
# clean everything
sessions = (session for session in self.get_all_sessions()
if session.sessid == sessid)
for session in sessions:
# this will also trigger unpuppeting
session.sessionhandler.disconnect(session)
# puppeting operations
def puppet_object(self, sessid, obj):
"""
Use the given session to control (puppet) the given object (usually
a Character type).
Args:
sessid (int): session id of session to connect
obj (Object): the object to start puppeting
Raises:
            RuntimeError: with a message explaining why puppeting is not possible.
"""
# safety checks
if not obj:
raise RuntimeError("Object not found")
session = self.get_session(sessid)
if not session:
raise RuntimeError("Session not found")
if self.get_puppet(sessid) == obj:
# already puppeting this object
raise RuntimeError("You are already puppeting this object.")
if not obj.access(self, 'puppet'):
# no access
raise RuntimeError("You don't have permission to puppet '%s'." % obj.key)
if obj.player:
# object already puppeted
if obj.player == self:
if obj.sessid.count():
# we may take over another of our sessions
# output messages to the affected sessions
if _MULTISESSION_MODE in (1, 3):
txt1 = "{c%s{n{G is now shared from another of your sessions.{n"
txt2 = "Sharing {c%s{n with another of your sessions."
else:
txt1 = "{c%s{n{R is now acted from another of your sessions.{n"
txt2 = "Taking over {c%s{n from another of your sessions."
self.unpuppet_object(obj.sessid.get())
self.msg(txt1 % obj.name, sessid=obj.sessid.get(), _forced_nomulti=True)
self.msg(txt2 % obj.name, sessid=sessid, _forced_nomulti=True)
elif obj.player.is_connected:
# controlled by another player
raise RuntimeError("{R{c%s{R is already puppeted by another Player.")
# do the puppeting
if session.puppet:
# cleanly unpuppet eventual previous object puppeted by this session
self.unpuppet_object(sessid)
# if we get to this point the character is ready to puppet or it
# was left with a lingering player/sessid reference from an unclean
# server kill or similar
obj.at_pre_puppet(self, sessid=sessid)
# do the connection
obj.sessid.add(sessid)
obj.player = self
session.puid = obj.id
session.puppet = obj
# validate/start persistent scripts on object
obj.scripts.validate()
obj.at_post_puppet()
# re-cache locks to make sure superuser bypass is updated
obj.locks.cache_lock_bypass(obj)
def unpuppet_object(self, sessid):
"""
Disengage control over an object
Args:
sessid(int): the session id to disengage
Raises:
RuntimeError with message about error.
"""
if _MULTISESSION_MODE == 1:
sessions = self.get_all_sessions()
else:
sessions = self.get_session(sessid)
if not sessions:
raise RuntimeError("No session was found.")
for session in make_iter(sessions):
obj = session.puppet or None
if not obj:
raise RuntimeError("No puppet was found to disconnect from.")
elif obj:
# do the disconnect, but only if we are the last session to puppet
obj.at_pre_unpuppet()
obj.sessid.remove(session.sessid)
if not obj.sessid.count():
del obj.player
obj.at_post_unpuppet(self, sessid=sessid)
# Just to be sure we're always clear.
session.puppet = None
session.puid = None
def unpuppet_all(self):
"""
Disconnect all puppets. This is called by server
before a reset/shutdown.
"""
for session in (sess for sess in self.get_all_sessions() if sess.puppet):
self.unpuppet_object(session.sessid)
def get_puppet(self, sessid, return_dbobj=False):
"""
Get an object puppeted by this session through this player. This is
the main method for retrieving the puppeted object from the
player's end.
sessid - return character connected to this sessid,
"""
session = self.get_session(sessid)
if not session:
return None
if return_dbobj:
return session.puppet
return session.puppet and session.puppet or None
def get_all_puppets(self):
"""
Get all currently puppeted objects as a list.
"""
return list(set(session.puppet for session in self.get_all_sessions()
if session.puppet))
def __get_single_puppet(self):
"""
This is a legacy convenience link for users of
MULTISESSION_MODE 0 or 1. It will return
only the first puppet. For mode 2, this returns
a list of all characters.
"""
puppets = self.get_all_puppets()
if _MULTISESSION_MODE in (0, 1):
return puppets and puppets[0] or None
return puppets
character = property(__get_single_puppet)
puppet = property(__get_single_puppet)
# utility methods
def delete(self, *args, **kwargs):
"""
Deletes the player permanently.
"""
for session in self.get_all_sessions():
# unpuppeting all objects and disconnecting the user, if any
# sessions remain (should usually be handled from the
# deleting command)
try:
self.unpuppet_object(session.sessid)
except RuntimeError:
# no puppet to disconnect from
pass
session.sessionhandler.disconnect(session, reason=_("Player being deleted."))
self.scripts.stop()
self.attributes.clear()
self.nicks.clear()
self.aliases.clear()
super(PlayerDB, self).delete(*args, **kwargs)
## methods inherited from database model
def msg(self, text=None, from_obj=None, sessid=None, **kwargs):
"""
Evennia -> User
This is the main route for sending data back to the user from the
server.
Args:
text (str, optional): text data to send
from_obj (Object or Player, optional): object sending. If given,
its at_msg_send() hook will be called.
sessid (int or list, optional): session id or ids to receive this
send. If given, overrules MULTISESSION_MODE.
Notes:
All other keywords are passed on to the protocol.
"""
text = to_str(text, force_string=True) if text else ""
if from_obj:
# call hook
try:
from_obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
pass
# session relay
if sessid:
# this could still be an iterable if sessid is an iterable
sessions = self.get_session(sessid)
if sessions:
# this is a special instruction to ignore MULTISESSION_MODE
# and only relay to this given session.
kwargs["_nomulti"] = True
for session in make_iter(sessions):
session.msg(text=text, **kwargs)
return
# we only send to the first of any connected sessions - the sessionhandler
# will disperse this to the other sessions based on MULTISESSION_MODE.
sessions = self.get_all_sessions()
if sessions:
sessions[0].msg(text=text, **kwargs)
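        # Illustrative calls (names are hypothetical, not defined in this module):
        #   player.msg("Welcome back!")             # relayed per MULTISESSION_MODE
        #   player.msg("Only you", sessid=sessid)   # bypasses the multisession relay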
def execute_cmd(self, raw_string, sessid=None, **kwargs):
"""
Do something as this player. This method is never called normally,
but only when the player object itself is supposed to execute the
command. It takes player nicks into account, but not nicks of
eventual puppets.
raw_string - raw command input coming from the command line.
sessid - the optional session id to be responsible for the command-send
**kwargs - other keyword arguments will be added to the found command
                   object instance as variables before it executes. This is
unused by default Evennia but may be used to set flags and
                   change operating parameters for commands at run-time.
"""
raw_string = to_unicode(raw_string)
raw_string = self.nicks.nickreplace(raw_string,
categories=("inputline", "channel"), include_player=False)
if not sessid and _MULTISESSION_MODE in (0, 1):
# in this case, we should either have only one sessid, or the sessid
# should not matter (since the return goes to all of them we can
# just use the first one as the source)
try:
sessid = self.get_all_sessions()[0].sessid
except IndexError:
# this can happen for bots
sessid = None
return cmdhandler.cmdhandler(self, raw_string,
callertype="player", sessid=sessid, **kwargs)
def search(self, searchdata, return_puppet=False,
nofound_string=None, multimatch_string=None, **kwargs):
"""
This is similar to the ObjectDB search method but will search for
Players only. Errors will be echoed, and None returned if no Player
is found.
searchdata - search criterion, the Player's key or dbref to search for
return_puppet - will try to return the object the player controls
instead of the Player object itself. If no
puppeted object exists (since Player is OOC), None will
be returned.
nofound_string - optional custom string for not-found error message.
multimatch_string - optional custom string for multimatch error header.
Extra keywords are ignored, but are allowed in call in order to make
API more consistent with objects.models.TypedObject.search.
"""
# handle me, self and *me, *self
if isinstance(searchdata, basestring):
# handle wrapping of common terms
if searchdata.lower() in ("me", "*me", "self", "*self",):
return self
matches = self.__class__.objects.player_search(searchdata)
matches = _AT_SEARCH_RESULT(self, searchdata, matches, global_search=True,
nofound_string=nofound_string,
multimatch_string=multimatch_string)
if matches and return_puppet:
try:
return matches.puppet
except AttributeError:
return None
return matches
def access(self, accessing_obj, access_type='read', default=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
Args:
accessing_obj (Object): Object trying to access this one
access_type (str): Type of access sought
default (bool): What to return if no lock of access_type was found
Kwargs:
Passed to the at_access hook along with the result.
"""
result = super(DefaultPlayer, self).access(accessing_obj, access_type=access_type, default=default)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
## player hooks
def basetype_setup(self):
"""
This sets up the basic properties for a player.
Overload this with at_player_creation rather than
changing this method.
"""
# A basic security setup
lockstring = "examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:all()"
self.locks.add(lockstring)
# The ooc player cmdset
self.cmdset.add_default(_CMDSET_PLAYER, permanent=True)
def at_player_creation(self):
"""
This is called once, the very first time
the player is created (i.e. first time they
register with the game). It's a good place
to store attributes all players should have,
like configuration values etc.
"""
# set an (empty) attribute holding the characters this player has
lockstring = "attrread:perm(Admins);attredit:perm(Admins);attrcreate:perm(Admins)"
self.attributes.add("_playable_characters", [], lockstring=lockstring)
def at_init(self):
"""
This is always called whenever this object is initiated --
        that is, whenever its typeclass is cached from memory. This
happens on-demand first time the object is used or activated
in some way after being created but also after each server
restart or reload. In the case of player objects, this usually
happens the moment the player logs in or reconnects after a
reload.
"""
pass
# Note that the hooks below also exist in the character object's
# typeclass. You can often ignore these and rely on the character
# ones instead, unless you are implementing a multi-character game
# and have some things that should be done regardless of which
# character is currently connected to this player.
def at_first_save(self):
"""
This is a generic hook called by Evennia when this object is
saved to the database the very first time. You generally
don't override this method but the hooks called by it.
"""
self.basetype_setup()
self.at_player_creation()
permissions = settings.PERMISSION_PLAYER_DEFAULT
if hasattr(self, "_createdict"):
# this will only be set if the utils.create_player
# function was used to create the object.
cdict = self._createdict
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("permissions"):
permissions = cdict["permissions"]
del self._createdict
self.permissions.add(permissions)
def at_access(self, result, accessing_obj, access_type, **kwargs):
"""
This is called with the result of an access call, along with
any kwargs used for that call. The return of this method does
not affect the result of the lock check. It can be used e.g. to
customize error messages in a central location or other effects
based on the access result.
"""
pass
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this player are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the player currently
have no cmdsets. kwargs are usually not used unless the
cmdset is generated dynamically.
"""
pass
def at_first_login(self):
"""
Called the very first time this player logs into the game.
"""
pass
def at_pre_login(self):
"""
Called every time the user logs in, just before the actual
login-state is set.
"""
pass
def _send_to_connect_channel(self, message):
"Helper method for loading the default comm channel"
global _CONNECT_CHANNEL
if not _CONNECT_CHANNEL:
try:
_CONNECT_CHANNEL = ChannelDB.objects.filter(db_key=settings.DEFAULT_CHANNELS[1]["key"])[0]
except Exception:
logger.log_trace()
now = timezone.now()
now = "%02i-%02i-%02i(%02i:%02i)" % (now.year, now.month,
now.day, now.hour, now.minute)
if _CONNECT_CHANNEL:
_CONNECT_CHANNEL.tempmsg("[%s, %s]: %s" % (_CONNECT_CHANNEL.key, now, message))
else:
logger.log_infomsg("[%s]: %s" % (now, message))
def at_post_login(self, sessid=None):
"""
Called at the end of the login process, just before letting
the player loose. This is called before an eventual Character's
at_post_login hook.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
if _MULTISESSION_MODE == 0:
# in this mode we should have only one character available. We
            # try to auto-connect to our last connected object, if any
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE == 1:
# in this mode all sessions connect to the same puppet.
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE in (2, 3):
# In this mode we by default end up at a character selection
# screen. We execute look on the player.
self.execute_cmd("look", sessid=sessid)
def at_disconnect(self, reason=None):
"""
Called just before user is disconnected.
"""
reason = reason and "(%s)" % reason or ""
self._send_to_connect_channel("{R%s disconnected %s{n" % (self.key, reason))
def at_post_disconnect(self):
"""
This is called after disconnection is complete. No messages
can be relayed to the player from here. After this call, the
player should not be accessed any more, making this a good
spot for deleting it (in the case of a guest player account,
for example).
"""
pass
def at_message_receive(self, message, from_obj=None):
"""
Called when any text is emitted to this
object. If it returns False, no text
will be sent automatically.
"""
return True
def at_message_send(self, message, to_object):
"""
Called whenever this object tries to send text
to another object. Only called if the object supplied
itself as a sender in the msg() call.
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save non-persistent
properties across a restart, this is the place to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
class DefaultGuest(DefaultPlayer):
"""
This class is used for guest logins. Unlike Players, Guests and their
characters are deleted after disconnection.
"""
def at_post_login(self, sessid=None):
"""
In theory, guests only have one character regardless of which
MULTISESSION_MODE we're in. They don't get a choice.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
self.puppet_object(sessid, self.db._last_puppet)
def at_disconnect(self):
"""
A Guest's characters aren't meant to linger on the server. When a
Guest disconnects, we remove its character.
"""
super(DefaultGuest, self).at_disconnect()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_server_shutdown(self):
"""
We repeat at_disconnect() here just to be on the safe side.
"""
super(DefaultGuest, self).at_server_shutdown()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_post_disconnect(self):
"""
Guests aren't meant to linger on the server, either. We need to wait
until after the Guest disconnects to delete it, though.
"""
super(DefaultGuest, self).at_post_disconnect()
self.delete()
|
the-stack_0_17000 | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from django.utils import timezone
from users.models import User
# Daily active user statistics
# GET /meiduo_admin/statistical/day_active/
class UserDayActiveView(APIView):
permission_classes = [IsAdminUser]
def get(self, request):
"""
        Get the daily active user count:
        1. Count the users active today
        2. Return the response
"""
        # 1. Count the users active today
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(last_login__gte=now_date).count()
        # 2. Return the response
response_data = {
'date': now_date.date(),
'count': count
}
return Response(response_data)
# Daily ordering user statistics
# GET /meiduo_admin/statistical/day_orders/
class UserDayOrdersView(APIView):
permission_classes = [IsAdminUser]
def get(self, request):
"""
        Get the number of users who placed orders today:
        1. Count the users with orders created today
        2. Return the response
"""
        # 1. Count the users with orders created today
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(orders__create_time__gte=now_date).distinct().count()
        # 2. Return the response
response_data = {
'date': now_date.date(),
'count': count
}
return Response(response_data)
# New-user statistics for the last 30 days
# GET /meiduo_admin/statistical/month_increment/
class UserMonthCountView(APIView):
permission_classes = [IsAdminUser]
def get(self, request):
"""
        Get the daily new-user counts for the last 30 days:
        1. Compute the number of users registered on each day
        2. Return the response
"""
        # 1. Compute the daily new-user counts
        # end date (today at 00:00)
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
        # start date: now_date - 29 days
begin_date = now_date - timezone.timedelta(days=29)
        # the day currently being counted
current_date = begin_date
        # per-day new-user counts
month_li = []
while current_date <= now_date:
            # next day
next_date = current_date + timezone.timedelta(days=1)
            # count the users who registered on this day
count = User.objects.filter(date_joined__gte=current_date,
date_joined__lt=next_date).count()
month_li.append({
'count': count,
'date': current_date.date()
})
current_date += timezone.timedelta(days=1)
        # 2. Return the response
return Response(month_li)
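        # Illustrative response payload (values are made up; DRF serializes the
        # date objects): [{"count": 12, "date": "2020-06-01"}, ...,
        #                 {"count": 7, "date": "2020-06-30"}]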
|
the-stack_0_17003 | import os
import time
import argparse
import json
import math
import torch
from torch._C import dtype
import torch_geometric
from torch.utils.data import DataLoader, TensorDataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from argparse import Namespace
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import pickle as pk
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from torch_geometric.data import Data, Batch, NeighborSampler, ClusterData, ClusterLoader
from torch_scatter import scatter
from base_task import add_config_to_argparse, BaseConfig, BasePytorchTask, \
LOSS_KEY, BAR_KEY, SCALAR_LOG_KEY, VAL_SCORE_KEY
from dataset import SAINTDataset, SimpleDataset
from data_utils import load_data, load_new_data
from nbeats import NBeatsModel
from hierst import HierSTModel
from graph_optim import TruncateSGD, TruncateAdam
class Config(BaseConfig):
def __init__(self):
super().__init__()
# Reset base variables
self.max_epochs = 1000
self.early_stop_epochs = 15
self.infer = False
# for data loading
self.data_fp = '../data/daily_us_7.csv'
self.start_date = '2020-03-01'
self.min_peak_size = -1 # min peak confirmed cases selected by country level
self.lookback_days = 14 # the number of days before the current day for daily series
self.lookahead_days = 1
self.forecast_date = '2020-06-29'
self.horizon = 7
self.val_days = 1 # the number of days used for validation
self.label = 'confirmed_target'
self.use_mobility = False
self.model_type = 'hierst' # choices: nbeats, hierst
self.rnn_type = 'nbeats'
self.date_emb_dim = 2
self.use_gbm = False
self.use_lr = True
# for krnn
self.cnn_dim = 32
self.cnn_kernel_size = 3
self.rnn_dim = 32
self.rnn_dups = 10
# for transformer
self.tfm_layer_num = 8
self.tfm_head_num = 8
self.tfm_hid_dim = 32
self.tfm_ff_dim = 32
self.tfm_max_pos = 500
self.tfm_node_dim = 5
self.tfm_dropout = 0.1
self.tfm_block_num = -1
self.tfm_cnn_kernel_size = 1
# for n_beats
self.block_size = 3
self.hidden_dim = 32
self.id_emb_dim = 8
# for gcn
self.gcn_dim = 32
self.gcn_type = 'gcn'
self.gcn_aggr = 'max'
self.gcn_norm = 'none'
self.gcn_layer_num = 2
self.gcn_node_dim = 4
self.gcn_edge_dim = 4
self.gcn_dropout = 0.1
# for gov gate
self.use_gov_gate = False
self.gov_id_dim = 32
self.gov_hid_dim = 32
# per-gpu training batch size, real_batch_size = batch_size * num_gpus * grad_accum_steps
self.batch_size = 4
self.lr = 1e-3 # the learning rate
# batch sample type
self.use_saintdataset = True
self.saint_batch_size = 3000
self.saint_sample_type = 'random_walk'
self.saint_walk_length = 2
self.saint_shuffle_order = 'node_first'
# graph optimization (deprecated)
self.optim_graph = False
self.graph_fp = '../data/us_graph.cpt'
self.graph_lr = 1e-4 # learning rate for graph adjacent matrix
self.graph_opt_type = 'TruncateAdam' # TruncateAdam, TruncateSGD, Adam
self.graph_gravity = 0.1 # sparse regularization coefficients
self.graph_eta = 0.01 # \eta * || A - A_{prior} ||_2^2
# consistency loss
# the usage of 'xxxx_loss_node_num'
# -1: use all nodes,
# 0: not use this loss,
# >0: use a certain number of randomly selected nodes
self.topo_loss_node_num = -1
self.topo_loss_weight = 0.01
self.topo_loss_epoch_start = 3
self.pair_loss_node_num = -1
self.pair_loss_weight = 0.0
# temp options
self.use_node_weight = True
self.mape_eps = 10
self.sparse_gate_weight = 0.0
self.sparse_gate_epoch_start = 3
self.prepro_type = 'none'
self.use_popu_norm = True
self.use_logy = False
self.use_fea_zscore = False
self.use_adapt_norm = False
self.use_default_edge = False
self.abla_type = 'none'
self.fea_day_offset = 1
self.data_aug_scales = '1' # a list of scales applied for training data augmentation
class WrapperNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
if self.config.model_type == 'nbeats':
self.net = NBeatsModel(config)
elif self.config.model_type == 'hierst':
self.net = HierSTModel(config)
else:
raise Exception(
'Unsupported model type {}'.format(config.model_type))
if config.use_lr:
self.weight_lr = nn.Parameter(torch.Tensor(self.config.lookback_days, self.config.lookahead_days))
self.b_lr = nn.Parameter(torch.Tensor([0.0] * self.config.lookahead_days))
if config.use_gov_gate:
self.state_emb = nn.Embedding(self.config.num_nodes, self.config.gov_id_dim)
self.gov_gru = nn.GRU(input_size=self.config.day_gov_fea_dim,
hidden_size=self.config.gov_hid_dim,
batch_first=True)
self.state_weight = nn.Parameter(torch.Tensor(self.config.gov_hid_dim, self.config.lookahead_days))
self.gov_weight = nn.Parameter(torch.Tensor(self.config.gov_id_dim, self.config.lookahead_days))
self.reset_parameters()
def gov_map(self, input_day_gov):
sz = input_day_gov.size()
x = input_day_gov.view(-1, sz[2], sz[3])
_, h = self.gov_gru(x)
h = h[0,:,:].view(sz[0],sz[1],-1)
return h
def state_map(self, input_day, g):
sz = input_day.size()
n_id = g['cent_n_id']
id_emb = self.state_emb(n_id.reshape(1,sz[1]).expand(sz[0],sz[1]).long())
return id_emb
def lr(self, input_day):
sz = input_day.size()
label_idx = self.config.label_fea_idx
ts = input_day[:,:,:,label_idx]
if self.config.use_logy:
ts = ts.expm1()
pred = torch.matmul(ts, torch.softmax(self.weight_lr, dim=0)) + self.b_lr
if self.config.use_logy:
pred = torch.log1p(pred)
pred = pred.view(sz[0], sz[1], self.config.lookahead_days)
return pred
def reset_parameters(self):
if self.config.use_lr:
nn.init.xavier_uniform_(self.weight_lr)
if self.config.use_gov_gate:
nn.init.xavier_uniform_(self.gov_weight)
nn.init.xavier_uniform_(self.state_weight)
def forward_ori(self, input_day, g):
out = self.net(input_day, g)
if self.config.use_lr:
out = out + self.lr(input_day)
return out
def forward(self, input_day, input_day_gov, g):
ori_out = self.forward_ori(input_day, g)
if self.config.use_gov_gate:
gov_hid = self.gov_map(input_day_gov)
state_hid = self.state_map(input_day, g)
state_gate = torch.sigmoid(torch.matmul(state_hid, self.state_weight))
gov_gate = torch.tanh(torch.matmul(gov_hid, self.gov_weight))
out = ori_out * (1 + state_gate * gov_gate)
else:
out, state_gate, gov_gate = ori_out, torch.ones_like(ori_out), torch.ones_like(ori_out)
return out, state_gate, gov_gate
class GraphNet(nn.Module):
def __init__(self, config, edge_weight):
super().__init__()
self.config = config
self.net = WrapperNet(config)
if config.optim_graph:
self.edge_weight = nn.Parameter(edge_weight)
else:
self.edge_weight = None
def forward(self, input_day, input_day_gov, g):
return self.net(input_day, input_day_gov, g)
def get_net_parameters(self):
return self.net.parameters()
def get_graph_parameters(self):
yield self.edge_weight
def weighted_mse_loss(input, target, weight):
return torch.mean(weight * (input - target) ** 2)
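# Minimal sketch of how the helper above behaves (values are made up):
#   weighted_mse_loss(torch.tensor([1., 2.]), torch.tensor([0., 0.]),
#                     torch.tensor([1., .5]))  # mean([1*1.0, 4*0.5]) == 1.5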
class Task(BasePytorchTask):
def __init__(self, config):
super().__init__(config)
self.log('Intialize {}'.format(self.__class__))
self.init_data()
self.init_graph()
self.adjust_for_ablation_study()
self.loss_func = nn.MSELoss()
# self.loss_func = nn.L1Loss()
self.log('Config:\n{}'.format(
json.dumps(self.config.to_dict(), ensure_ascii=False, indent=4)
))
def adjust_for_ablation_study(self):
if self.config.abla_type == 'gat':
self.config.gcn_type = 'gat'
elif self.config.abla_type == 'flat':
edge_sid = (self.edge_type == 0).sum().item()
self.edge_index = self.edge_index[:, edge_sid:]
self.edge_weight = self.edge_weight[edge_sid:]
self.edge_type = self.edge_type[edge_sid:]
self.config.num_edges = self.edge_weight.shape[0]
elif self.config.abla_type.startswith('sep'):
cur_node_type = int(self.config.abla_type[3:])
node_sid, node_eid = None, None
for idx, x in enumerate(self.node_type_list):
if node_sid is None:
if x == cur_node_type:
node_sid = idx
node_eid = idx
elif x == cur_node_type:
node_eid = idx
node_eid += 1
edge_sid = (self.edge_type == 0).sum().item()
self.edge_index = self.edge_index[:, edge_sid:]
self.edge_weight = self.edge_weight[edge_sid:]
self.edge_type = self.edge_type[edge_sid:]
sel_edge_mask = (self.edge_index[0] >= node_sid) & (self.edge_index[0] < node_eid)
self.edge_index = self.edge_index[:, sel_edge_mask] - node_sid
self.edge_weight = self.edge_weight[sel_edge_mask]
self.edge_type = self.edge_type[sel_edge_mask]
self.config.num_edges = self.edge_weight.shape[0]
self.node_type = torch.zeros_like(self.node_type[node_sid:node_eid])
self.node_type_list = [0] * (node_eid-node_sid)
self.config.num_node_types = 1
self.node_name = self.node_name[node_sid:node_eid]
self.node_weight = self.node_weight[cur_node_type:cur_node_type+1]
self.use_node_weight = False
if self.node_popu is not None:
self.node_popu = self.node_popu[node_sid:node_eid]
self.nodes = self.nodes[node_sid:node_eid]
self.config.num_nodes = node_eid - node_sid
self.train_day_inputs = self.train_day_inputs[:, node_sid:node_eid]
self.train_day_gov_inputs = self.train_day_gov_inputs[:, node_sid:node_eid]
self.train_gbm_outputs = self.train_gbm_outputs[:, node_sid:node_eid]
self.train_outputs = self.train_outputs[:, node_sid:node_eid]
self.val_day_inputs = self.val_day_inputs[:, node_sid:node_eid]
self.val_day_gov_inputs = self.val_day_gov_inputs[:, node_sid:node_eid]
self.val_gbm_outputs = self.val_gbm_outputs[:, node_sid:node_eid]
self.val_outputs = self.val_outputs[:, node_sid:node_eid]
self.test_day_inputs = self.test_day_inputs[:, node_sid:node_eid]
self.test_day_gov_inputs = self.test_day_gov_inputs[:, node_sid:node_eid]
self.test_gbm_outputs = self.test_gbm_outputs[:, node_sid:node_eid]
self.test_outputs = self.test_outputs[:, node_sid:node_eid]
else:
pass
def init_data(self, data_fp=None):
if data_fp is None:
data_fp = self.config.data_fp
# load data
self.config.label_fea_name = f'{self.config.label[:-7]}.rolling({self.config.horizon}).sum()'
day_inputs, day_gov_inputs, outputs, dates, nodes, \
self.main_feas, self.gov_feas, self.node_popu, self.fea_scaler = \
load_new_data(data_fp, self.config, logger=self.log)
self.config.adapt_norm_eps = 1
self.config.label_fea_idx = dict(zip(self.main_feas, range(len(self.main_feas))))[self.config.label_fea_name]
if self.node_popu is not None:
self.node_popu = self.node_popu.to(self.device)
gbm_outputs = outputs
# numpy default dtype is float64, but torch default dtype is float32
self.day_inputs = day_inputs
self.day_gov_inputs = day_gov_inputs
self.outputs = outputs
self.gbm_outputs = gbm_outputs
self.dates = dates # share index with sample id
self.nodes = nodes # share index with node id
# fulfill config
self.config.num_nodes = self.day_inputs.shape[1]
self.config.day_seq_len = self.day_inputs.shape[2]
self.config.day_fea_dim = self.day_inputs.shape[3]
self.config.day_gov_fea_dim = self.day_gov_inputs.shape[3]
# self.config.edge_fea_dim = self.edge_attr.shape[1]
# Filter by label dates
use_dates = [
pd.to_datetime(item) for item in dates
if pd.to_datetime(item) <= pd.to_datetime(self.config.forecast_date)
]
test_divi = len(use_dates) - 1
val_divi = test_divi - self.config.horizon
train_divi = val_divi - self.config.val_days
if self.config.infer:
# use all achieved train data
train_divi = val_divi + 1
print(dates[train_divi],dates[val_divi],dates[test_divi])
self.train_day_inputs = self.day_inputs[:train_divi+1]
self.train_day_gov_inputs = self.day_gov_inputs[:train_divi+1]
self.train_gbm_outputs = self.gbm_outputs[:train_divi+1]
self.train_outputs = self.outputs[:train_divi+1]
self.train_dates = self.dates[:train_divi+1]
if self.config.data_aug_scales != '1':
data_aug_scales = [float(s) for s in self.config.data_aug_scales.split(',')]
scale_fea_end = -1
print(f'Data Augmentation Scaling {data_aug_scales} for {self.main_feas[:scale_fea_end]}')
def aug_scale(day_input, is_label=False):
if is_label:
aug_inputs = [day_input * s for s in data_aug_scales]
else:
scale_part = day_input[:, :, :, :scale_fea_end]
invar_part = day_input[:, :, :, scale_fea_end:]
aug_inputs = []
for s in data_aug_scales:
aug_part = scale_part * s
aug_part = torch.cat([aug_part, invar_part], dim=-1)
aug_inputs.append(aug_part)
aug_input = torch.cat(aug_inputs, dim=0)
return aug_input
self.train_day_inputs = aug_scale(self.train_day_inputs)
self.train_day_gov_inputs = aug_scale(self.train_day_gov_inputs)
self.train_gbm_outputs = aug_scale(self.train_gbm_outputs, is_label=True)
self.train_outputs = aug_scale(self.train_outputs, is_label=True)
self.train_dates = self.train_dates * len(data_aug_scales)
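            # Augmentation sketch: with data_aug_scales = '1,0.5' the training
            # tensors are concatenated at both scales along the sample axis
            # (only the last feature column stays unscaled), doubling the number
            # of training rows, while train_dates is repeated to stay aligned.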
if self.config.infer:
self.val_day_inputs = self.day_inputs[:train_divi+1]
self.val_day_gov_inputs = self.day_gov_inputs[:train_divi+1]
self.val_gbm_outputs = self.gbm_outputs[:train_divi+1]
self.val_outputs = self.outputs[:train_divi+1]
self.val_dates = self.dates[:train_divi+1]
else:
self.val_day_inputs = self.day_inputs[val_divi:val_divi+1]
self.val_day_gov_inputs = self.day_gov_inputs[val_divi:val_divi+1]
self.val_gbm_outputs = self.gbm_outputs[val_divi:val_divi+1]
self.val_outputs = self.outputs[val_divi:val_divi+1]
self.val_dates = self.dates[val_divi:val_divi+1]
self.test_day_inputs = self.day_inputs[test_divi:test_divi+1]
self.test_day_gov_inputs = self.day_gov_inputs[test_divi:test_divi+1]
self.test_gbm_outputs = self.gbm_outputs[test_divi:test_divi+1]
self.test_outputs = self.outputs[test_divi:test_divi+1]
self.test_dates = self.dates[test_divi:test_divi+1]
def init_graph(self, graph_fp=None):
if graph_fp is None:
graph_fp = self.config.graph_fp
graph_dict = torch.load(graph_fp)
self.edge_index = graph_dict['edge_index']
self.edge_weight = graph_dict['edge_weight']
self.edge_type = graph_dict['edge_type'].to(self.device)
self.node_type = graph_dict['node_type'].to(self.device)
self.node_type_list = list(graph_dict['node_type'].numpy())
self.node_name = graph_dict['node_name']
if self.config.num_nodes != len(self.node_name):
data_node_set = set(self.nodes)
graph_node_set = set(self.node_name)
print('New nodes in data', data_node_set - graph_node_set)
print('Missing nodes in data', graph_node_set - data_node_set)
raise Exception('Please regenerate GNN topo before running')
self.config.num_edges = self.edge_weight.shape[0]
self.config.num_node_types = int(graph_dict['node_type'].max()) + 1
self.config.num_edge_types = int(graph_dict['edge_type'].max()) + 1
base_ones = torch.ones_like(self.node_type, dtype=torch.float)
node_type_count = scatter(base_ones, self.node_type, dim_size=self.config.num_node_types, reduce='sum')
# the weight of the bottom nodes is equal to 1
self.node_weight = 1.0 / node_type_count * node_type_count.max()
def make_sample_dataloader(self, day_inputs, day_gov_inputs, gbm_outputs, outputs, shuffle=False):
if self.config.use_saintdataset:
dataset = SAINTDataset(
[day_inputs, day_gov_inputs, gbm_outputs, outputs],
self.edge_index, self.edge_weight, self.config.num_nodes,
self.config.batch_size, shuffle=shuffle,
shuffle_order=self.config.saint_shuffle_order,
saint_sample_type=self.config.saint_sample_type,
saint_batch_size=self.config.saint_batch_size,
saint_walk_length=self.config.saint_walk_length,
)
return DataLoader(dataset, batch_size=None)
else:
dataset = SimpleDataset([day_inputs, day_gov_inputs, gbm_outputs, outputs])
def collate_fn(samples):
day_inputs = torch.cat([item[0][0] for item in samples]).unsqueeze(0) # [1,bs,seq_length,feature_dim]
day_gov_inputs = torch.cat([item[0][1] for item in samples]).unsqueeze(0) # [1,bs,seq_length,feature_dim]
gbm_outputs = torch.cat([item[0][-2] for item in samples]).unsqueeze(0)
outputs = torch.cat([item[0][-1] for item in samples]).unsqueeze(0)
node_ids = torch.LongTensor([item[1] for item in samples]) # [bs]
date_ids = torch.LongTensor([item[2] for item in samples]) # [bs]
return [[day_inputs, day_gov_inputs, gbm_outputs, outputs], {'cent_n_id':node_ids,'type':'random'}, date_ids]
return DataLoader(dataset, batch_size=self.config.batch_size, shuffle=shuffle, collate_fn=collate_fn)
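        # Both branches are consumed by train_step/eval_step as [inputs, g, rows],
        # where inputs = [day_inputs, day_gov_inputs, gbm_outputs, outputs] and g
        # holds the sampled (sub)graph metadata such as 'cent_n_id' and edge info.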
def build_train_dataloader(self):
return self.make_sample_dataloader(
self.train_day_inputs, self.train_day_gov_inputs, self.train_gbm_outputs, self.train_outputs, shuffle=True
)
def build_val_dataloader(self):
return self.make_sample_dataloader(
self.val_day_inputs, self.val_day_gov_inputs, self.val_gbm_outputs, self.val_outputs, shuffle=False
)
def build_test_dataloader(self):
return self.make_sample_dataloader(
self.test_day_inputs, self.test_day_gov_inputs, self.test_gbm_outputs, self.test_outputs, shuffle=False
)
def build_optimizer(self, model):
model_opt = torch.optim.Adam(self.model.get_net_parameters(), lr=self.config.lr)
if self.config.optim_graph:
kwargs = {
'lr': self.config.graph_lr,
}
if self.config.graph_opt_type == 'Adam':
opt_class = torch.optim.Adam
elif self.config.graph_opt_type == 'TruncateSGD':
kwargs['gravity'] = self.config.graph_gravity
opt_class = TruncateSGD
elif self.config.graph_opt_type == 'TruncateAdam':
kwargs['gravity'] = self.config.graph_gravity
kwargs['lr_truncate'] = self.config.graph_lr
opt_class = TruncateAdam
else:
raise Exception("Unsupported graph optimizer '{}'".format(self.config.graph_opt_type))
graph_opt = opt_class(self.model.get_graph_parameters(), **kwargs)
return model_opt, graph_opt
else:
return model_opt
def train_step(self, batch, batch_idx):
inputs, g, _ = batch
# prepare inputs, outputs
input_day, input_day_gov, y_gbm, y = inputs
if self.config.use_gbm: # deprecated
y = y - y_gbm
if self.config.use_adapt_norm:
norm_eps = self.config.adapt_norm_eps
input_norm = input_day.mean(dim=-2, keepdim=True) + norm_eps
y_norm = input_norm[:, :, :, self.config.label_fea_idx] + norm_eps
input_day = (input_day+norm_eps) / input_norm
y = (y+norm_eps) / y_norm
else:
norm_eps = 0
input_norm = 1
y_norm = 1
# prepare graph
g['edge_type'] = self.edge_type[g['e_id']]
g['node_type'] = self.node_type[g['cent_n_id']]
if self.config.optim_graph:
g['edge_attr_prior'] = g['edge_attr']
g['edge_attr'] = self.model.edge_weight[g['e_id']]
y_hat, _, _ = self.model(input_day, input_day_gov, g)
assert(y.size() == y_hat.size())
if self.config.use_node_weight:
node_weight = self.node_weight[g['node_type']]\
.reshape(1, y.shape[1], 1)
loss = weighted_mse_loss(y_hat, y, node_weight)
else:
node_weight = None
loss = self.loss_func(y_hat, y)
y_loss_i = loss.item()
if self.config.optim_graph:
graph_loss = self.loss_func(g['edge_attr'], g['edge_attr_prior'])
loss += self.config.graph_eta * graph_loss
if self.config.topo_loss_weight > 0 and \
self._current_epoch >= self.config.topo_loss_epoch_start:
# get topo_edge_index
edge_index = g['edge_index']
node_type = g['node_type']
i, j = 1, 0
node_type_j = node_type[edge_index[j]]
node_type_i = node_type[edge_index[i]]
topo_edge_index = edge_index[:, node_type_i == node_type_j-1]
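            # node_type grows with depth (its max marks the bottom level, see the
            # agg mask below), so keeping edges with node_type_i == node_type_j - 1
            # selects parent <- child pairs; the scatter-sum below then compares
            # each parent's prediction with the sum of its children's predictions.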
# calculate aggregated y
if self.config.use_adapt_norm:
y = y * y_norm - norm_eps
y_hat = y_hat * y_norm - norm_eps
if self.config.use_logy:
y = y.expm1() # exp(y)-1, where y = log(1+label)
y_hat = y_hat.expm1() # exp(y_hat)-1, where y = log(1+label_hat)
if self.config.use_popu_norm:
popu = self.node_popu[g['cent_n_id']]\
.reshape(1, g['cent_n_id'].shape[0], 1)
y = y * popu / 10**5
y_hat = y_hat * popu / 10**5
y_j = y[:, topo_edge_index[j], :]
y_hat_j = y_hat[:, topo_edge_index[j], :]
y_agg = scatter(y_j, topo_edge_index[i], dim=-2, dim_size=y.shape[-2], reduce='sum')
y_hat_agg = scatter(y_hat_j, topo_edge_index[i], dim=-2, dim_size=y_hat.shape[-2], reduce='sum')
# use agg mask to ignore bottom node
bottom_node_type = node_type.max()
agg_mask = node_type < bottom_node_type
ym = y[:, agg_mask]
ym_hat = y_hat[:, agg_mask]
ym_agg = y_agg[:, agg_mask]
ym_hat_agg = y_hat_agg[:, agg_mask]
eps = self.config.mape_eps
topo_loss = self.loss_func((ym_hat_agg+eps)/(ym_agg+eps), torch.ones_like(ym_agg)) + \
self.loss_func((ym_hat_agg+eps)/(ym_agg+eps), (ym_hat+eps)/(ym+eps),)
loss += self.config.topo_loss_weight * topo_loss
topo_loss_i = topo_loss.item()
else:
topo_loss_i = 0
# judge to avoid useless computation
if self.config.pair_loss_node_num != 0 and self.config.pair_loss_weight > 0:
pair_edge_index = g['edge_index'] # consider every pair in the graph
if self.config.pair_loss_node_num > 0:
num_edges = pair_edge_index.shape[1]
rand_eids = torch.randperm(num_edges, device=loss.device)[:self.config.pair_loss_node_num]
pair_edge_index = pair_edge_index[:, rand_eids]
i, j = 1, 0
logy_j = y[:, pair_edge_index[j], :]
logy_i = y[:, pair_edge_index[i], :]
logy_j_hat = y_hat[:, pair_edge_index[j], :]
logy_i_hat = y_hat[:, pair_edge_index[i], :]
pair_loss = weighted_mse_loss(
(logy_j_hat - logy_j).exp(), # (y_j_hat+1) / (y_j+1)
(logy_i_hat - logy_i).exp(), # (y_i_hat+1) / (y_i+1)
0.5*(logy_j + logy_i), # pay more attention to large nodes
)
loss += self.config.pair_loss_weight * pair_loss
pair_loss_i = pair_loss.item()
else:
pair_loss_i = 0
if self.config.sparse_gate_weight > 0:
gate_loss = self.model.net.net.gcn_coef.mean()
if self._current_epoch >= self.config.sparse_gate_epoch_start:
loss += self.config.sparse_gate_weight * gate_loss
gate_loss_i = gate_loss.item()
else:
gate_loss_i = 0
loss_i = loss.item() # scalar loss
# log all kinds of losses for debug
loss_info = {
'loss': loss_i,
'y_loss': y_loss_i,
'topo_loss': topo_loss_i,
'pair_loss': pair_loss_i,
'gate_loss': gate_loss_i,
}
return {
LOSS_KEY: loss,
BAR_KEY: loss_info,
SCALAR_LOG_KEY: loss_info,
}
def eval_step(self, batch, batch_idx, tag):
inputs, g, rows = batch
input_day, input_day_gov, y_gbm, y = inputs
if self.config.use_adapt_norm:
norm_eps = self.config.adapt_norm_eps
input_norm = input_day.mean(dim=-2, keepdim=True) + norm_eps
y_norm = input_norm[:, :, :, self.config.label_fea_idx] + norm_eps
input_day = (input_day + norm_eps) / input_norm
else:
norm_eps = 0
input_norm = 1
y_norm = 1
forecast_length = y.size()[-1]
g['edge_type'] = self.edge_type[g['e_id']]
g['node_type'] = self.node_type[g['cent_n_id']]
if self.config.optim_graph:
g['edge_attr_prior'] = g['edge_attr']
g['edge_attr'] = self.model.edge_weight[g['e_id']]
y_hat, state_gate, gov_gate = self.model(input_day, input_day_gov, g)
if self.config.use_gbm:
y_hat += y_gbm
assert(y.size() == y_hat.size())
if self.config.use_adapt_norm:
y_hat = y_hat * y_norm - norm_eps
if self.config.use_logy:
y = y.expm1() # exp(y)-1, where y = log(1+label)
y_hat = y_hat.expm1() # exp(y_hat)-1, where y = log(1+label_hat)
if self.config.use_popu_norm:
popu = self.node_popu[g['cent_n_id']]\
.reshape(1, g['cent_n_id'].shape[0], 1)
y = y * popu / 10**5
y_hat = y_hat * popu / 10**5
if g['type'] == 'subgraph' and 'res_n_id' in g: # if using SAINT sampler
cent_n_id = g['cent_n_id']
res_n_id = g['res_n_id']
# Note: we only evaluate predictions on those initial nodes (per random walk)
# to avoid duplicated computations
y = y[:, res_n_id]
y_hat = y_hat[:, res_n_id]
cent_n_id = cent_n_id[res_n_id]
else:
cent_n_id = g['cent_n_id']
if self.config.use_saintdataset:
index_ptr = torch.cartesian_prod(
torch.arange(rows.size(0)),
torch.arange(cent_n_id.size(0)),
torch.arange(forecast_length)
)
label = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 1]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,2].data.cpu().numpy(),
'val': y.flatten().data.cpu().numpy()
})
pred = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 1]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,2].data.cpu().numpy(),
'val': y_hat.flatten().data.cpu().numpy()
})
else:
index_ptr = torch.cartesian_prod(
torch.arange(rows.size(0)),
torch.arange(forecast_length)
)
label = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 0]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,1].data.cpu().numpy(),
'val': y.flatten().data.cpu().numpy()
})
pred = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 0]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,1].data.cpu().numpy(),
'val': y_hat.flatten().data.cpu().numpy()
})
pred = pred.groupby(['row_idx', 'node_idx', 'forecast_idx']).mean()
label = label.groupby(['row_idx', 'node_idx', 'forecast_idx']).mean()
return {
'label': label,
'pred': pred,
'info': [state_gate, gov_gate]
# 'atten': atten_context
}
def eval_epoch_end(self, outputs, tag, dates):
pred = pd.concat([x['pred'] for x in outputs], axis=0)
label = pd.concat([x['label'] for x in outputs], axis=0)
pred = pred.groupby(['row_idx', 'node_idx','forecast_idx']).mean()
label = label.groupby(['row_idx', 'node_idx', 'forecast_idx']).mean()
info = [x['info'] for x in outputs]
# atten_context = [x['atten'] for x in outputs]
align_nodes = label.reset_index().node_idx.map(lambda x: self.nodes[x]).values
align_dates = label.reset_index().row_idx.map(lambda x: dates[x]).values
loss = np.mean(np.abs(pred['val'].values - label['val'].values))
scores = self.produce_score(pred, label, dates)
log_dict = {
'{}_loss'.format(tag): loss,
'{}_mae'.format(tag): scores['mean_mistakes'],
'{}_mape'.format(tag): scores['mape'],
# '{}_mean_mistakes'.format(tag): scores['mean_mistakes'],
# '{}_mean_label'.format(tag): scores['mean_label'],
# '{}_mean_predict'.format(tag): scores['mean_predict']
}
type_mae_sum = 0
type_mape_sum = 0
for type_id in range(self.config.num_node_types):
cur_pred = pred[
pred.index.get_level_values(1).map(lambda x: self.node_type_list[x]) == type_id
]
cur_label = label[
label.index.get_level_values(1).map(lambda x: self.node_type_list[x]) == type_id
]
cur_scores = self.produce_score(cur_pred, cur_label, dates)
log_dict[f'{tag}_type-{type_id}_mae'] = cur_scores['mean_mistakes']
log_dict[f'{tag}_type-{type_id}_mape'] = cur_scores['mape']
type_mae_sum += cur_scores['mean_mistakes']
type_mape_sum += cur_scores['mape']
log_dict[f'{tag}_type-mean_mae'] = type_mae_sum / self.config.num_node_types
log_dict[f'{tag}_type-mean_mape'] = type_mape_sum / self.config.num_node_types
out = {
BAR_KEY: log_dict,
SCALAR_LOG_KEY: log_dict,
VAL_SCORE_KEY: - type_mape_sum,
'pred': pred,
'label': label,
'scores': scores,
'dates':align_dates,
'nodes':align_nodes,
'info': info,
'y_scale': 'linear',
'epoch': self._passed_epoch,
# 'atten': atten_context
}
return out
def produce_score(self, pred, label, dates=None):
# y_hat = pred.apply(lambda x: np.expm1(x))
# y = label.apply(lambda x: np.expm1(x))
y_hat = pred
y = label
mape_eps = self.config.mape_eps
mape_df = np.abs((y_hat+mape_eps)/(y+mape_eps)-1).reset_index(drop=False)
mape_val = np.abs((y_hat.values+1)/(y.values+1)-1).mean()
mean_mistakes = np.abs(y_hat.values - y.values).mean()
mean_label = np.abs(y.values).mean()
        mean_predict = np.abs(y_hat.values).mean()
eval_df = pd.concat([y_hat.rename(columns={'val': 'pred'}),
y.rename(columns={'val': 'label'})],
axis=1).reset_index(drop=False)
eval_df['mape'] = mape_df['val']
if dates is not None:
eval_df['date'] = eval_df.row_idx.map(lambda x: dates[x])
eval_df['nodes'] = eval_df.node_idx.map(lambda x: self.nodes[x])
def produce_percent_count(m_df):
res = pd.Series()
res['pred'] = m_df['pred'].mean()
res['label'] = m_df['label'].mean()
res['mistake'] = np.abs(m_df['pred'] - m_df['label']).mean()
return res
scores = {
'mape': mape_val,
'mean_mistakes': mean_mistakes,
'mean_label': mean_label,
'mean_predict': mean_predict
}
for name, metric in [
('mistakes', eval_df),
]:
scores[name] = metric.groupby(
'row_idx').apply(produce_percent_count)
if dates is not None:
scores[name]['date'] = dates
return scores
def val_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, 'val')
def val_epoch_end(self, outputs):
val_out = self.eval_epoch_end(outputs, 'val', self.val_dates)
return val_out
def test_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, 'test')
def test_epoch_end(self, outputs):
test_out = self.eval_epoch_end(outputs, 'test', self.test_dates)
return test_out
if __name__ == '__main__':
start_time = time.time()
# build argument parser and config
config = Config()
parser = argparse.ArgumentParser(description='COVID-19 Forecasting Task')
add_config_to_argparse(config, parser)
# parse arguments to config
args = parser.parse_args()
config.update_by_dict(args.__dict__)
# build task
task = Task(config)
# Set random seed before the initialization of network parameters
# Necessary for distributed training
task.set_random_seed()
net = GraphNet(task.config, task.edge_weight)
task.init_model_and_optimizer(net)
task.log('Build Neural Nets')
# select epoch with best validation accuracy
best_epochs = 50
if not task.config.skip_train:
task.fit()
best_epochs = task._best_val_epoch
print('Best validation epochs: {}'.format(best_epochs))
# Resume the best checkpoint for evaluation
task.resume_best_checkpoint()
val_eval_out = task.val_eval()
test_eval_out = task.test_eval()
# dump evaluation results of the best checkpoint to val out
task.dump(val_out=val_eval_out,
test_out=test_eval_out,
epoch_idx=-1,
is_best=True,
dump_option=1)
task.log('Best checkpoint (epoch={}, {}, {})'.format(
task._passed_epoch, val_eval_out[BAR_KEY], test_eval_out[BAR_KEY]))
if task.is_master_node:
for tag, eval_out in [
('val', val_eval_out),
('test', test_eval_out),
]:
print('-'*15, tag)
scores = eval_out['scores']['mistakes']
print('-'*5, 'mistakes')
print('Average:')
print(scores.mean().to_frame('mistakes'))
print('Daily:')
print(scores)
task.log('Training time {}s'.format(time.time() - start_time))
|
the-stack_0_17006 |
score = float(input("Enter Score: "))
if 0.0 <= score <= 1.0:
if score >= 0.9:
print('A')
elif score >= 0.8:
print('B')
elif score >= 0.7:
print('C')
elif score >= 0.6:
print('D')
else:
print('F')
else:
print('Value of score is out of range.')
largest = None
smallest = None
while True:
num = input("Enter a number: ")
if num == "done" : break
try:
num = int(num)
except:
print('Invalid input')
continue
    if largest is None or num > largest:
        largest = num
    if smallest is None or num < smallest:
        smallest = num
#print(num)
print("Maximum is", largest)
print('Minimum is', smallest)
def computepay(h,r):
if h <= 40:
pay = h * r
else:
h1 = h - 40
pay = 40 * r + h1 *(r * 1.5)
return pay
hrs = input("Enter Hours:")
rate = input('Enter Rate:')
h = float(hrs)
r = float(rate)
p = computepay(h,r)
print("Pay",p)
|
the-stack_0_17007 | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class auditsyslogpolicy_csvserver_binding(base_resource) :
""" Binding class showing the csvserver that can be bound to auditsyslogpolicy.
"""
def __init__(self) :
self._boundto = None
self._priority = None
self._activepolicy = None
self._name = None
self.___count = None
@property
def name(self) :
r"""Name of the policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
r"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
r"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(auditsyslogpolicy_csvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.auditsyslogpolicy_csvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch auditsyslogpolicy_csvserver_binding resources.
"""
try :
if not name :
obj = auditsyslogpolicy_csvserver_binding()
response = obj.get_resources(service, option_)
else :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of auditsyslogpolicy_csvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count auditsyslogpolicy_csvserver_binding resources configued on NetScaler.
"""
try :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of auditsyslogpolicy_csvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class auditsyslogpolicy_csvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.auditsyslogpolicy_csvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.auditsyslogpolicy_csvserver_binding = [auditsyslogpolicy_csvserver_binding() for _ in range(length)]
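# A minimal usage sketch, not part of the original SDK module. It assumes the
# nitro_service client shipped with this SDK; the NetScaler address, the
# credentials and the policy name below are placeholders.
if __name__ == '__main__':
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

    client = nitro_service("10.0.0.1", "http")
    client.login("nsroot", "nsroot")
    # Fetch the content switching vservers bound to one syslog policy.
    bindings = auditsyslogpolicy_csvserver_binding.get(client, name="syslog_pol1")
    for binding in (bindings or []):
        print(binding.boundto, binding.priority)
    client.logout()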
|
the-stack_0_17011 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Network is a composition of Layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import weakref
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import ops
from tensorflow.python.layers import base
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
# pylint: disable=protected-access
# Explanation for protected-access disable: Network has lots of same-class and
# parent-class references across different objects, and some to private
# functions in base.py which should be reused.
_DeferredRestoration = collections.namedtuple(
"_DeferredRestoration",
[
# The map_func to use (either user-specified or the default).
"map_func",
# Boolean, True if the user specified an explicit map_func, for error
# messages.
"map_func_is_user",
# A mapping from checkpoint names to initial values of not-yet-created
# variables which should be restored. These values come from parsing a
# checkpoint.
"checkpointed_variables_to_restore",
# A mapping from checkpoint name to variable objects of variables which
# have already been restored, for error checking.
"restored_variables",
# The session to restore with (if in graph mode).
"session",
# Names of the Network where the restore was requested, for error
# messages.
"network_name",
"network_scope_name"
])
def _default_naming_conflict_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The default checkpoint variable name mapping strategy for Network "
"'%s' resulted in a naming conflict. We attempted to strip off the "
"variable prefix for the Network ('%s'), but this resulted in two "
"variables named '%s' (originally '%s' and '%s'). This should only "
"happen when using variable sharing (i.e. the Network contains Networks "
"or Layers which were first added to another Network, and therefore "
"have that Network's variable prefix). One solution is to pass "
"`map_func=lambda n: n` to Network.save and Network.restore to use "
"fully qualified variable names in the checkpoint, although this will "
"require that the variable prefix of the Network being restored into "
"is also '%s'. You may alternatively write an arbitrary mapping.")
% (
network_name, network_scope_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name, network_scope_name
))
def _restore_custom_map_func_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The map_func passed to Network.restore for the Network '%s' "
"resulted in two variables named '%s' (originally '%s' and '%s'). Since "
"this is also an error on Network.save, this Network was "
"probably not saved with this map_func. Note that map_func "
"always maps from full variable names to checkpoint names; "
"there is no need to specify an inverse mapping.\n\n"
"Try stripping less from the variable names, or renaming parts "
"of the Network. For reference, variables created by sub-Layers "
"of this Network are prefixed with '%s', but if they are "
"re-used after being added to another Network they will have "
"that Network's full variable prefix instead.") % (
network_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name,
network_scope_name))
def _make_custom_getter_for_deferred_restorations():
"""Returns a custom getter which searches `deferred_restorations`.
Returns: A tuple of (_custom_getter, deferred_restorations)
_custom_getter: The getter which should be added to variable_scopes where
variables will be created.
deferred_restorations: A list for _DeferredRestoration objects. Typically
empty when the getter is set, and expanded as deferred restorations are
requested. All new deferred restorations should be appended to the end of
the list, where they will have priority over older deferred restorations.
"""
deferred_restorations = []
def _custom_getter(getter, name, shape=None, dtype=None,
initializer=None,
*args, **kwargs):
"""A custom getter which processes deferred restorations."""
# Iterate over restorations, newest first (newer restorations will take
# precedence over older restorations, just like with immediate restorations
# into existing variables).
delayed_restoration = None
found_value = False
value_to_restore = None
for delayed_restoration in reversed(
deferred_restorations):
checkpoint_name = delayed_restoration.map_func(name)
if (checkpoint_name
in delayed_restoration.checkpointed_variables_to_restore):
found_value = True
value_to_restore = (
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name])
if found_value:
break
# value_to_restore may be False because this variable is not in any
# checkpoint we are restoring, or None because we have explicitly set it to
# None when it was previously fetched. In either case, we don't need to
# set an initializer.
if found_value and value_to_restore is not None:
initializer = value_to_restore
shape = None
variable = getter(name, shape=shape, dtype=dtype, initializer=initializer,
*args, **kwargs)
if found_value and value_to_restore is not None:
# Mark as already restored from this checkpoint.
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name] = None
if context.in_graph_mode():
delayed_restoration.session.run(variable.initializer)
if found_value:
# Error checking should run even if we've already restored a value.
if delayed_restoration.restored_variables.setdefault(
checkpoint_name, variable) is not variable:
# Naming conflict. We've tried to initialize two variables with the
# same value from the checkpoint.
if delayed_restoration.map_func_is_user:
raise ValueError(
_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration.restored_variables[
checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
else:
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration.restored_variables[
checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
return variable
return _custom_getter, deferred_restorations
def _make_prefix_stripping_map_fn(scope_name):
"""Closure for stripping the scope name of a Network.
Implemented as a closure rather than a member function to avoid reference
cycles in deferred restorations (this function should not have a reference to
the Network which created it).
Args:
scope_name: The Network.scope_name to strip from variables.
Returns:
A scope_name-stripping default `map_fn` for the Network.
"""
def _strip_variable_prefix(original_variable_name):
"""The default map_func for saving or restoring variables.
Strips the variable prefix for the Network on which save/restore was called,
and leaves other variable names fully qualified in the checkpoint.
Args:
original_variable_name: The _shared_name of the variable (no :0
suffix) to map.
Returns:
The checkpoint name of the variable.
"""
scope_name_with_slash = scope_name + "/"
if original_variable_name.startswith(scope_name_with_slash):
return original_variable_name[len(scope_name_with_slash):]
else:
return original_variable_name
return _strip_variable_prefix
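# Illustration (not in the original source): with scope_name "my_network_1",
# the returned map_fn renames only variables under that prefix, e.g.
#   "my_network_1/dense/kernel" -> "dense/kernel"
# while names from other scopes such as "shared/dense/kernel" pass through
# unchanged, so shared variables keep fully qualified checkpoint names.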
class Network(base.Layer):
"""Represents the composition of a set of Layers.
TODO(josh11b,ashankar):
- Should "trainable" be changeable on the Network object?
- Do we allow add_variable in Network?
- Detect layers used in __call__ that weren't registered with track_layer.
- Convert inputs to __call__ to tensors.
- Prevent variables from being created after the first __call__?
(Think about restoring from a checkpoint).
"""
def __init__(self, name=None):
if isinstance(name, variable_scope.VariableScope):
raise ValueError("VariableScopes are not valid Network names.")
if name is not None and "/" in name:
raise ValueError(
"Forward slashes ('/') are not allowed in Network names.")
super(Network, self).__init__(name=name)
self._layers = []
self._sub_layer_name_uids = collections.defaultdict(int)
# Initially None, but set to False for networks which are first built as
# top-level.
self._first_parent = None # A weak reference to our first parent.
self._non_network_sublayers = []
self._owned_layers = {}
# The scope to use if we end up without a parent.
self._default_parent_variable_scope = variable_scope.get_variable_scope()
# Hold on to the variable scope counts from init to check whether a scope
# with the name we want was ever created in our parent scope. Without this
# check we might have name collisions if the parent scope on init gets
# closed before build is called.
self._variable_scope_counts_on_init = (
variable_scope._get_default_variable_store().variable_scopes_count)
self._custom_getter, self._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
def _init_set_name(self, name):
# Anonymous Networks (name=None) defer setting a final name until they are
# (1) added to another Network, or (2) built/called (where (2) is only used
# for a "top level" network).
#
# However, if we were provided an explicit name (name is not None), that
# will always be the final name of the Network; if it turns out not to be
# unique or if variable names can't be prefixed by it we will throw an
# error.
self._name = name
self._base_name = None
def _finalize_name(self, parent_network):
if not self._name:
      # We were not passed a name explicitly (or it was blank), so this is an
# anonymous Network. We make up a unique name.
if parent_network:
avoid_names = parent_network._owned_layers
name_uid_map = parent_network._sub_layer_name_uids
else:
name_uid_map = base._get_default_graph_uid_map()
# Figure out which names we have to avoid based on which variable scope
# we're nested in.
strip_name = self._default_parent_variable_scope.name
if strip_name:
strip_name += "/"
def _strip_on_init_scope(name):
if name.startswith(strip_name):
return name[len(strip_name):]
else:
return None
avoid_names = set(
_strip_on_init_scope(name)
for name in self._variable_scope_counts_on_init.keys() if name)
self._name, self._base_name = self._make_unique_name(
name_uid_map=name_uid_map, avoid_names=avoid_names,
namespace=self._default_parent_variable_scope.name)
if self._first_parent is None or (self._first_parent # False = no parent
and self._first_parent() is None):
# Save a pointer to the parent Network so that we can later check that the
# scope name we get is correct.
if not parent_network:
self._first_parent = parent_network
else:
self._first_parent = weakref.ref(parent_network)
def _set_scope(self, scope=None):
if self._scope is None:
if not self._first_parent:
first_parent = self._first_parent
else:
first_parent = self._first_parent()
if first_parent is None:
        # If we were never added to another Network, or that Network has been
# garbage collected before being called, then we're a top-level Network.
self._finalize_name(
# Use False to make sure the value sticks and we don't inherit a
# parent if we're added to a network later.
parent_network=False)
if scope is not None:
raise ValueError("Networks may not be created with explicit scopes.")
if first_parent:
first_parent._set_scope()
parent_scope = first_parent._scope
else:
parent_scope = self._default_parent_variable_scope
with variable_scope.variable_scope(parent_scope) as parent_vs:
expected_scope_name = parent_vs.name + "/" + self._name
if expected_scope_name in self._variable_scope_counts_on_init:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") % (
self._name,))
# Make sure variables with this prefix will be unique.
with variable_scope.variable_scope(
None, use_resource=True, default_name=self._name) as scope:
self._scope = scope
scope_name = scope.name
suffix_start = scope_name.rfind("/") + 1
# rfind is -1 if there is no slash in the string, in which case the
# suffix starts at the beginning of the string (there is no prefix).
scope_suffix = scope_name[suffix_start:]
scope_prefix = scope_name[:suffix_start]
if scope_suffix != self._name:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") % (
self._name,))
if (first_parent
and scope_prefix[:-1] != first_parent.scope_name):
raise ValueError(
("Network variable names must match a nesting of sub-Network "
"names. Expected prefix '%s' from parent network, but got "
"'%s' when attempting to create a variable_scope for Network "
"'%s'. Likely an explicit variable_scope was inserted into "
"the nesting.") % (
first_parent.scope_name,
scope_prefix[:-1],
self._name))
elif not first_parent and scope_prefix:
# For the case when this Network is not nested inside any other
# Network, but is in a variable_scope. This Network's name takes on
# the full variable scope prefix.
self._name = scope_name
for non_network_sublayer in self._non_network_sublayers:
self._set_scope_for_nonnetwork_sublayer(non_network_sublayer)
def _set_scope_for_nonnetwork_sublayer(self, sublayer):
if sublayer._scope is None:
if sublayer._first_parent is None:
constituent_first_parent = None
else:
constituent_first_parent = sublayer._first_parent()
if constituent_first_parent:
constituent_first_parent._set_scope()
parent_scope = constituent_first_parent._scope
else:
self._finalize_name(False)
raise ValueError(
("The parent of a Layer added to Network %s was garbage collected "
"before the Layer was built. If this limitation bothers you "
"please file a feature request.") %
(self.name,))
with variable_scope.variable_scope(parent_scope):
# Horrid hack to make Layer variable names which are direct
# sub-layers of Networks conform to the Network variable naming
# conventions.
with variable_scope.variable_scope(
None, use_resource=True,
default_name=sublayer.name) as sub_scope:
sublayer._scope = sub_scope
@base.Layer.name.getter
def name(self):
if self._name is None:
raise ValueError(
"The network does not yet have a final name, but a name was "
"requested for it. Networks get a name when they are added to "
"another Network via track_layer, or when they are first "
"called/built.")
return self._name
def track_layer(self, layer):
"""Track a Layer in this Network.
`Network` requires that all `Layer`s used in `call()` be tracked so that the
`Network` can export a complete list of variables.
Args:
layer: A `tf.layers.Layer` object.
Returns:
The passed in `layer`.
Raises:
RuntimeError: If __init__ has not been called.
TypeError: If `layer` is the wrong type.
ValueError: If a `Layer` with the same name has already been added.
"""
if not hasattr(self, "_layers"):
raise RuntimeError("Need to call Network.__init__ before adding layers")
if not isinstance(layer, base.Layer):
raise TypeError(
"Network.track_layer() passed type %s, not a tf.layers.Layer" %
(type(layer),))
if isinstance(layer, Network):
layer._finalize_name(parent_network=self)
else:
# `layer` is a non-Network, so it hasn't been named to follow Network
# conventions for contained Layers (i.e. the same conventions as for
# sub-Networks). This renaming is necessary to isolate Network variable
# naming from Layers constructed outside the Network and never added to it
# (because Layers are named globally).
if not layer.built:
if not hasattr(layer, "_first_parent"):
dereferenced_layer_first_parent = None
else:
dereferenced_layer_first_parent = layer._first_parent()
if dereferenced_layer_first_parent is None:
if layer._name != layer._base_name:
# If name and base_name do not match, then this Layer used anonymous
# naming and we have to rename it. Otherwise there's an explicit
# name, and we should respect it (subject to error checking).
layer._name, layer._base_name = layer._make_unique_name(
name_uid_map=self._sub_layer_name_uids,
avoid_names=self._owned_layers
# No namespace required, since we've specified our own UID map.
)
layer._first_parent = weakref.ref(self)
self._non_network_sublayers.append(layer)
if (not layer.built
and layer._first_parent
and self is layer._first_parent()):
if layer.name in self._owned_layers:
if self._owned_layers[layer.name] is layer:
return layer
raise ValueError(
"Attempt to add two Layers with the name '%s' to the same Network."
% (layer.name))
self._owned_layers[layer.name] = layer
self._layers.append(layer)
return layer
def get_layer(self, name=None, index=None):
"""Get a contained `tf.layers.Layer` either by name or index.
Args:
name: String matching one of the names of a contained `Layer`. Note that
the names of `Layer`s added to `Network`s may not be unique when doing
layer sharing (i.e. adding a `Layer` to this `Network` which was already
added to another `Network`). The lowest index `Layer` with a matching
name will be returned.
index: Integer in [0, number of layers). Layers are assigned an index
by the order they are added.
Returns:
A `tf.layers.Layer` object.
Raises:
ValueError: If neither or both of 'index' or 'name' is specified, or the
lookup failed.
"""
if index is not None:
if name is not None:
raise ValueError("Exactly one of 'index' or 'name' must be provided")
if len(self._layers) <= index:
raise ValueError("Was asked to retrieve layer at index " + str(index) +
" but model only has " + str(len(self._layers)) +
" layers.")
else:
return self._layers[index]
else:
if not name:
raise ValueError("Provide either a layer name or layer index.")
for layer in self._layers:
if layer.name == name:
return layer
raise ValueError("No such layer: " + name)
# The following methods are for implementing the Layer interface.
@property
def weights(self):
# TODO(josh11b): Should this return a set or perform de-duplication of
# variables in the case of shared layers/variables that appear in
# multiple places in the Network?
weights = []
for layer in self._layers:
weights += layer.weights
return weights
@property
def trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.non_trainable_weights
return weights
@property
def trainable(self):
return True
@trainable.setter
def trainable(self, value):
if not value:
# We believe it better to decide which layers & networks are trainable
# at the Trainer level than here. Otherwise you can run into trouble if a
# layer/network is shared between two models, but is trainable in one
# but not the other (like with adversarial networks).
raise AttributeError("cannot mark Network as not trainable")
@property
def layers(self):
return self._layers
def add_variable(self, name, shape, dtype=None, initializer=None,
regularizer=None, trainable=True, constraint=None):
raise RuntimeError(
"add_variable not supported in Network class yet. Please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new if this is "
"important to you")
def save(self, save_path, global_step=None, map_func=None):
"""Save variables from the Network to a checkpoint.
Args:
save_path: Either a checkpoint prefix or the name of a directory to save
the checkpoint in (in which case the checkpoint will be named based on
the Network name).
global_step: The global step to use when naming the checkpoint. If None
(default), we will first try to get the default global step. If that
fails because no default global step exists, then the checkpoint is
created without a global step suffix.
map_func: A function mapping fully qualified variable names
(e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
default (if `map_func=None`), the variable prefix for the network being
restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
and all other variable names (shared with other Networks) are left
unchanged.
Returns:
The checkpoint prefix for the saved checkpoint, which may be passed to
`Network.restore`.
Raises:
ValueError: If the Network has not yet been called, or if map_func results
in a name collision.
"""
if not self.built:
raise ValueError(
"Attempt to save the Network before it was first called. This means "
"variables have not yet been created, so there is nothing to save.")
self._set_scope() # scope_name should be available to map_funcs
if global_step is None:
global_step = training_util.get_global_step()
if os.path.isdir(save_path):
# If we were passed a directory, default to naming based on the Network
# name.
save_path = os.path.join(save_path, self.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(self.scope_name)
variable_map = {}
for variable in self.variables:
mapped_name = map_func(variable._shared_name)
if variable_map.setdefault(mapped_name, variable) is not variable:
if user_map_func is None:
# Instead of erroring out, we could just re-try and silently use the
# full variable names in the checkpoint. This could be odd for deeply
# nested sub-Networks (since the full prefix from the nesting would
# get added), so for now we'll let the user deal with this case.
raise ValueError(_default_naming_conflict_error_message(
mapped_name=mapped_name,
first_variable=variable_map[mapped_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
else:
# The user passed their own problematic map_func.
raise ValueError(
("The map_func passed to Network.save for the Network '%s' "
"resulted in two variables named '%s' ('%s' and '%s'). Try "
"stripping less from the variable names, or renaming parts of "
"the Network. For reference, variables created by sub-Layers of "
"this Network are prefixed with '%s', but if they are re-used "
"after being added to another Network, they will have that "
"Network's full variable prefix instead.") % (
self.name, mapped_name,
variable_map[mapped_name]._shared_name,
variable._shared_name,
self.scope_name))
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
return saver_lib.Saver(variable_map).save(
sess=sess, save_path=save_path, write_meta_graph=False,
global_step=global_step)
def _restore_existing_variables(self, save_path, map_func, user_map_func):
"""Use a standard Saver to restore existing variables from a checkpoint.
Args:
save_path: The checkpoint prefix or directory to read from.
map_func: The function to use when mapping from variable names to
checkpoint names.
user_map_func: The original map_func passed by the user, for error
checking.
Returns:
A dictionary mapping from checkpoint names to variable objects which have
been restored (for bookkeeping to avoid deferred restorations on these
variables).
Raises:
ValueError: If there is a name collision.
"""
existing_variables_by_checkpoint_name = {}
for variable in self.variables:
checkpoint_name = map_func(variable._shared_name)
if existing_variables_by_checkpoint_name.setdefault(
checkpoint_name, variable) is not variable:
if user_map_func is None:
raise ValueError(_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
else:
raise ValueError(_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
if existing_variables_by_checkpoint_name:
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore(
sess=sess, save_path=save_path)
return existing_variables_by_checkpoint_name
def _set_restore_on_create(self, save_path, map_func, user_map_func,
existing_variables_by_checkpoint_name):
"""If necessary, request deferred restorations of variables."""
checkpoint_reader = checkpoint_utils.load_checkpoint(save_path)
checkpointed_variables_to_restore = {}
for checkpoint_name, _ in checkpoint_utils.list_variables(save_path):
if checkpoint_name in existing_variables_by_checkpoint_name:
# This variable was already created and restored.
continue
# Save the variable for later restoration in a custom getter.
checkpointed_variables_to_restore[checkpoint_name] = (
checkpoint_reader.get_tensor(checkpoint_name))
# Only set a deferred restoration if there are checkpoint variables which
# have not been assigned to existing variables. Note that this loses out on
# some opportunity for error checking, but avoids creating
# _DeferredRestoration objects once a Network has been built (so that
# restoring in a loop does not take increasing amounts of memory).
if checkpointed_variables_to_restore:
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
# We need a name for error messages. If we haven't been added to another
# Network yet, we're top-level.
self._finalize_name(False)
self._set_scope()
# Save a record of this restoration for use in the custom getter.
deferred_restoration = _DeferredRestoration(
map_func=map_func,
map_func_is_user=(user_map_func is not None),
checkpointed_variables_to_restore=checkpointed_variables_to_restore,
restored_variables={},
session=sess,
network_name=self.name,
network_scope_name=self.scope_name)
self._deferred_restorations.append(deferred_restoration)
# Add the deferred registration to non-Network children, and request that
# Networks propagate the request to their children.
self._add_deferred_restoration(deferred_restoration)
def _add_deferred_restoration(self, deferred_restoration):
"""Add a deferred restoration to this Network and all children.
Restorations which are requested later have higher priority, and the highest
priority matching restoration is applied to a variable when it is created.
Args:
deferred_restoration: A _DeferredRestoration object.
"""
# Networks don't create variables at the moment, so this append isn't
# strictly necessary. We could get by with only adding deferred restorations
# to non-Network Layers.
self._set_scope()
# We use set_custom_getter because it avoids recursively calling up the
# variable_scope tree. We've done the tree traversal ourselves and have
# added the request to each Layer which needs it.
self._scope.set_custom_getter(self._custom_getter)
self._deferred_restorations.append(deferred_restoration)
for layer in self.layers:
if isinstance(layer, Network):
# For Networks, request that they propagate this deferred restoration
# to all of their children recursively.
layer._add_deferred_restoration(deferred_restoration)
else:
# For non-Network Layers, make sure they have a deferred restoration
# queue and a custom getter, then add our request to it.
if not hasattr(layer, "_custom_getter"):
assert not hasattr(layer, "_deferred_restorations")
layer._custom_getter, layer._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
self._set_scope_for_nonnetwork_sublayer(layer)
layer._scope.set_custom_getter(layer._custom_getter)
layer._deferred_restorations.append(deferred_restoration)
def restore(self, save_path, map_func=None):
"""Restore the Network from a checkpoint.
If variables have already been created (typically when some or all of the
`Network` is built), they are assigned values from the checkpoint
immediately, overwriting any existing values (in graph mode the default
session is used for the assignments).
If there are checkpoint entries which do not correspond to any existing
variables in the `Network`, these values are saved for deferred restoration;
their initial values will be the checkpointed values once they are
created. Requests for multiple deferred restorations behave the same way as
immediate restorations, in that later requests will take priority over
earlier requests relevant to the same variable.
If this `Network` shares `Layer`s with another network, those `Layer`s will
also have their variables restored from the checkpoint.
Args:
save_path: The return value of `Network.save`, or a directory to search
for a checkpoint.
map_func: A function mapping fully qualified variable names
(e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
default (if `map_func=None`), the variable prefix for the network being
restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
and all other variable names (shared with other Networks) are left
unchanged. Note that this is the _same_ map_func as `Network.save`, not
an inverse mapping.
"""
self._finalize_name(parent_network=False)
self._set_scope() # scope_name should be available to map_funcs
if os.path.isdir(save_path):
      # If we were passed a directory, default to naming based on the Network name.
save_path = os.path.join(save_path, self.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(self.scope_name)
# Step one is to restore any existing variables from the checkpoint.
existing_variables_by_checkpoint_name = self._restore_existing_variables(
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func)
# Step two is to set a custom getter which restores variables on creation,
# for those variables which have not been added to sub-Layers yet.
self._set_restore_on_create(
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func,
existing_variables_by_checkpoint_name=(
existing_variables_by_checkpoint_name))
# TODO(josh11b): Support other Layer methods needed for graph mode, such as for
# losses and updates
class Sequential(Network):
"""Represents a linear sequence of Layers or functions.
The output of each layer/function is provided as the input to the next.
The inputs passed to `__call__` are passed to the inputs of the first
Layer, and it returns the outputs of the last Layer.
Args:
layers_funcs: An optional sequence where each element is either a
tf.layers.Layer object or a callable.
name: An optional string name to use for this Network.
"""
def __init__(self, layers_funcs=None, name=None):
super(Sequential, self).__init__(name=name)
self._layers_funcs = []
if layers_funcs:
for l in layers_funcs:
self.add(l)
def add(self, layer_func):
if isinstance(layer_func, base.Layer):
args = estimator_util.fn_args(layer_func.call)
self.track_layer(layer_func)
elif callable(layer_func):
args = estimator_util.fn_args(layer_func)
else:
raise TypeError(
"Sequential.add() takes only tf.layers.Layer objects or callables; "
"not '%s' of type '%s'." % (layer_func, type(layer_func)))
self._layers_funcs.append((("training" in args), layer_func))
def call(self, inputs, training=None):
"""Call each Layer in the order they were added."""
# TODO(josh11b): Support "mode" and maybe other arguments
if training is None:
for _, l in self._layers_funcs:
inputs = l(inputs)
else:
for has_training_arg, l in self._layers_funcs:
if has_training_arg:
inputs = l(inputs, training)
else:
inputs = l(inputs)
return inputs
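# A minimal usage sketch, not part of the original module, assuming the
# contrib-era eager Network API defined above together with tf.layers.Dense;
# the layer sizes and the constant input are made up for illustration.
if __name__ == "__main__":
  from tensorflow.python.framework import constant_op
  from tensorflow.python.layers import core

  class TwoLayerNetwork(Network):
    """Example Network: sub-layers registered via track_layer."""

    def __init__(self, name=None):
      super(TwoLayerNetwork, self).__init__(name=name)
      # track_layer registers the sub-layers so their variables show up in
      # self.variables and are included in save()/restore().
      self.layer_one = self.track_layer(core.Dense(4))
      self.layer_two = self.track_layer(core.Dense(1))

    def call(self, inputs):
      return self.layer_two(self.layer_one(inputs))

  net = TwoLayerNetwork(name="example_network")
  outputs = net(constant_op.constant([[1.0, 2.0, 3.0]]))
  print("Output shape:", outputs.shape)
  print("Tracked variables:", [v.name for v in net.variables])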
|
the-stack_0_17012 | """
Support for per-request messages to be shown to the user.
These utilities are based upon the Django message framework, and allow
code to register messages to be shown to the user on their next page
view. These messages are shown in a page banner which is supported on
all pages that utilize the main.html template.
There are two common use cases:
- register a message before rendering a view, in which case the message
will be shown on the resulting page
- register a message before posting or redirecting. In these situations
the message will be shown on the subsequent page. This is typically
   used to show a success message to the user.
"""
from abc import abstractmethod
from enum import Enum
from django.contrib import messages
from django.utils.translation import ugettext as _
from openedx.core.djangolib.markup import HTML, Text
class UserMessageType(Enum):
"""
An enumeration of the types of user messages.
"""
INFO = messages.constants.INFO
SUCCESS = messages.constants.SUCCESS
WARNING = messages.constants.WARNING
ERROR = messages.constants.ERROR
CSS_CLASSES = {
UserMessageType.INFO: 'alert-info',
UserMessageType.SUCCESS: 'alert-success',
UserMessageType.WARNING: 'alert-warning',
UserMessageType.ERROR: 'alert-danger',
}
ICON_CLASSES = {
UserMessageType.INFO: 'fa fa-bullhorn',
UserMessageType.SUCCESS: 'fa fa-check-circle',
UserMessageType.WARNING: 'fa fa-warning',
UserMessageType.ERROR: 'fa fa-warning',
}
class UserMessage():
"""
Representation of a message to be shown to a user.
"""
def __init__(self, type, message_html): # lint-amnesty, pylint: disable=redefined-builtin
assert isinstance(type, UserMessageType)
self.type = type
self.message_html = message_html
@property
def css_class(self):
"""
Returns the CSS class to be used on the message element.
"""
return CSS_CLASSES[self.type]
@property
def icon_class(self):
"""
Returns the CSS icon class representing the message type.
"""
return ICON_CLASSES[self.type]
class UserMessageCollection():
"""
A collection of messages to be shown to a user.
"""
@classmethod
@abstractmethod
def get_namespace(self): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Returns the namespace of the message collection.
The name is used to namespace the subset of django messages.
For example, return 'course_home_messages'.
"""
raise NotImplementedError('Subclasses must define a namespace for messages.')
@classmethod
def get_message_html(cls, body_html, title=None, dismissable=False, **kwargs): # pylint: disable=unused-argument
"""
Returns the entire HTML snippet for the message.
Classes that extend this base class can override the message styling
by implementing their own version of this function. Messages that do
not use a title can just pass the body_html.
"""
if title:
return Text(_('{header_open}{title}{header_close}{body}')).format(
header_open=HTML('<div class="message-header">'),
title=title,
body=body_html,
header_close=HTML('</div>')
)
return body_html
@classmethod
def register_user_message(cls, request, message_type, body_html, once_only=False, **kwargs):
"""
Register a message to be shown to the user in the next page.
Arguments:
message_type (UserMessageType): the user message type
body_html (str): body of the message in html format
title (str): optional title for the message as plain text
dismissable (bool): shows a dismiss button (defaults to no button)
once_only (bool): show the message only once per request
"""
assert isinstance(message_type, UserMessageType)
message = Text(cls.get_message_html(body_html, **kwargs))
if not once_only or message not in [m.message for m in messages.get_messages(request)]:
messages.add_message(request, message_type.value, Text(message), extra_tags=cls.get_namespace())
@classmethod
def register_info_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers an information message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.INFO, message, **kwargs)
@classmethod
def register_success_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers a success message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.SUCCESS, message, **kwargs)
@classmethod
def register_warning_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers a warning message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.WARNING, message, **kwargs)
@classmethod
def register_error_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers an error message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.ERROR, message, **kwargs)
@classmethod
def user_messages(self, request): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Returns any outstanding user messages.
Note: this function also marks these messages as being complete
so they won't be returned in the next request.
"""
def _get_message_type_for_level(level):
"""
Returns the user message type associated with a level.
"""
for __, type in UserMessageType.__members__.items(): # lint-amnesty, pylint: disable=redefined-builtin, no-member
if type.value is level:
return type
raise Exception(f'Unable to find UserMessageType for level {level}')
def _create_user_message(message):
"""
Creates a user message from a Django message.
"""
return UserMessage(
type=_get_message_type_for_level(message.level),
message_html=str(message.message),
)
django_messages = messages.get_messages(request)
return (_create_user_message(message) for message in django_messages if self.get_namespace() in message.tags)
class PageLevelMessages(UserMessageCollection):
"""
This set of messages appears as top page level messages.
"""
NAMESPACE = 'page_level_messages'
@classmethod
def get_message_html(cls, body_html, title=None, dismissable=False, **kwargs):
"""
Returns the entire HTML snippet for the message.
"""
if title:
title_area = Text(_('{header_open}{title}{header_close}')).format(
header_open=HTML('<div class="message-header">'),
title=title,
header_close=HTML('</div>')
)
else:
title_area = ''
if dismissable:
dismiss_button = HTML(
'<div class="message-actions">'
'<button class="btn-link action-dismiss">'
'<span class="sr">{dismiss_text}</span>'
'<span class="icon fa fa-times" aria-hidden="true"></span></button>'
'</div>'
).format(
dismiss_text=Text(_("Dismiss"))
)
else:
dismiss_button = ''
return Text('{title_area}{body_area}{dismiss_button}').format(
title_area=title_area,
body_area=HTML('<div class="message-content">{body_html}</div>').format(
body_html=body_html,
),
dismiss_button=dismiss_button,
)
@classmethod
def get_namespace(self): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Returns the namespace of the message collection.
"""
return self.NAMESPACE
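# A minimal usage sketch, not part of the original module. `request` is the
# HttpRequest seen inside a Django view; the banner text below is made up.
def _example_view_usage(request):
    """Queue a dismissable success banner and read back pending messages."""
    PageLevelMessages.register_success_message(
        request,
        Text(_('Your changes have been saved.')),
        title=Text(_('Success')),
        dismissable=True,
    )
    # Templates (e.g. main.html) later render whatever is pending; reading the
    # messages also marks them as consumed for this request cycle.
    return list(PageLevelMessages.user_messages(request))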
|
the-stack_0_17014 | from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_CONNECTION = 'connections/%s'
API_CONNECTIONS = 'connections'
class Connection(ManagementHandler):
def get(self, connection):
"""Get Connection details.
:param str connection: Connection name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_CONNECTION % connection)
def list(self):
"""Get Connections.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
return self.http_client.get(API_CONNECTIONS)
def close(self, connection, reason='Closed via management api'):
"""Close Connection.
:param str connection: Connection name
:param str reason: Reason for closing connection.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
close_payload = json.dumps({
'name': connection,
'reason': reason
})
connection = quote(connection, '')
return self.http_client.delete(API_CONNECTION % connection,
payload=close_payload,
headers={
'X-Reason': reason
})
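# A minimal usage sketch, not part of the original module. It assumes the
# ManagementApi client from this package, which exposes this handler as
# `.connection`; the URL and credentials below are placeholders.
if __name__ == '__main__':
    from amqpstorm.management import ManagementApi

    client = ManagementApi('http://localhost:15672', 'guest', 'guest')
    for conn in client.connection.list():
        print(conn.get('name'))
        # client.connection.close(conn['name'], reason='maintenance window')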
|
the-stack_0_17015 | """Headers Support
This module implements support for parsing and handling headers.
"""
import re
from circuits.six import b, iteritems, u
# Regular expression that matches `special' characters in parameters, the
# existance of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
q_separator = re.compile(r'; *q *=')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
def header_elements(fieldname, fieldvalue):
"""Return a sorted HeaderElement list.
Returns a sorted HeaderElement list
from a comma-separated header string.
"""
if not fieldvalue:
return []
result = []
for element in fieldvalue.split(","):
if fieldname.startswith("Accept") or fieldname == 'TE':
hv = AcceptElement.from_str(element)
else:
hv = HeaderElement.from_str(element)
result.append(hv)
return list(reversed(sorted(result)))
class HeaderElement(object):
"""An element (with parameters) from an HTTP header's element list."""
def __init__(self, value, params=None):
self.value = value
if params is None:
params = {}
self.params = params
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
return self.value < other.value
def __str__(self):
p = [";%s=%s" % (k, v) for k, v in iteritems(self.params)]
return "%s%s" % (self.value, "".join(p))
def __bytes__(self):
return b(self.__str__())
def __unicode__(self):
return u(self.__str__())
def parse(elementstr):
"""Transform 'token;key=val' to ('token', {'key': 'val'})."""
# Split the element into a value and parameters. The 'value' may
# be of the form, "token=token", but we don't split that here.
atoms = [x.strip() for x in elementstr.split(";") if x.strip()]
if not atoms:
initial_value = ''
else:
initial_value = atoms.pop(0).strip()
params = {}
for atom in atoms:
atom = [x.strip() for x in atom.split("=", 1) if x.strip()]
key = atom.pop(0)
if atom:
val = atom[0]
else:
val = ""
params[key] = val
return initial_value, params
parse = staticmethod(parse)
@classmethod
def from_str(cls, elementstr):
"""Construct an instance from a string of the form 'token;key=val'."""
ival, params = cls.parse(elementstr)
return cls(ival, params)
class AcceptElement(HeaderElement):
"""An element (with parameters) from an Accept* header's element list.
AcceptElement objects are comparable; the more-preferred object will be
"less than" the less-preferred object. They are also therefore sortable;
if you sort a list of AcceptElement objects, they will be listed in
priority order; the most preferred value will be first. Yes, it should
have been the other way around, but it's too late to fix now.
"""
@classmethod
def from_str(cls, elementstr):
qvalue = None
# The first "q" parameter (if any) separates the initial
# media-range parameter(s) (if any) from the accept-params.
atoms = q_separator.split(elementstr, 1)
media_range = atoms.pop(0).strip()
if atoms:
# The qvalue for an Accept header can have extensions. The other
# headers cannot, but it's easier to parse them as if they did.
qvalue = HeaderElement.from_str(atoms[0].strip())
media_type, params = cls.parse(media_range)
if qvalue is not None:
params["q"] = qvalue
return cls(media_type, params)
def qvalue(self):
val = self.params.get("q", "1")
if isinstance(val, HeaderElement):
val = val.value
return float(val)
qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")
def __eq__(self, other):
return self.qvalue == other.qvalue
def __lt__(self, other):
if self.qvalue == other.qvalue:
return str(self) < str(other)
else:
return self.qvalue < other.qvalue
class CaseInsensitiveDict(dict):
"""A case-insensitive dict subclass.
Each key is changed on entry to str(key).title().
"""
def __init__(self, *args, **kwargs):
d = dict(*args, **kwargs)
for key, value in iteritems(d):
dict.__setitem__(self, str(key).title(), value)
dict.__init__(self)
def __getitem__(self, key):
return dict.__getitem__(self, str(key).title())
def __setitem__(self, key, value):
dict.__setitem__(self, str(key).title(), value)
def __delitem__(self, key):
dict.__delitem__(self, str(key).title())
def __contains__(self, key):
return dict.__contains__(self, str(key).title())
def get(self, key, default=None):
return dict.get(self, str(key).title(), default)
def update(self, E):
for k in E.keys():
self[str(k).title()] = E[k]
@classmethod
def fromkeys(cls, seq, value=None):
newdict = cls()
for k in seq:
newdict[k] = value
return newdict
def setdefault(self, key, x=None):
key = str(key).title()
try:
return dict.__getitem__(self, key)
except KeyError:
self[key] = x
return x
def pop(self, key, default=None):
return dict.pop(self, str(key).title(), default)
class Headers(CaseInsensitiveDict):
"""
This class implements a storage for headers as key value pairs.
The underlying model of a case insensitive dict matches the requirements
for headers quite well, because usually header keys are unique. If
several values may be associated with a header key, most HTTP headers
represent the values as an enumeration using a comma as item separator.
    There is, however, one exception (currently) to this rule. In order to
set several cookies, there should be multiple headers with the same
key, each setting one cookie ("Set-Cookie: some_cookie").
This is modeled by having either a string (common case) or a list
(cookie case) as value in the underlying dict. In order to allow
easy iteration over all headers as they appear in the HTTP request,
the items() method expands associated lists of values. So if you have
{ "Set-Cookie": [ "cookie1", "cookie2" ] }, the items() method returns
the two pairs ("Set-Cookie", "cookie1") and ("Set-Cookie", "cookie2").
This is convenient for most use cases. The only drawback is that
len(keys()) is not equal to len(items()) for this specialized dict.
"""
def elements(self, key):
"""Return a sorted list of HeaderElements for the given header."""
return header_elements(key, self.get(key))
def get_all(self, name):
"""Return a list of all the values for the named field."""
value = self.get(name, '')
if isinstance(value, list):
return value
return [val.strip() for val in value.split(',')]
def __repr__(self):
return "Headers(%s)" % repr(list(self.items()))
def __str__(self):
headers = ["%s: %s\r\n" % (k, v) for k, v in self.items()]
return "".join(headers) + '\r\n'
def items(self):
for k, v in super(Headers, self).items():
if isinstance(v, list):
for vv in v:
yield (k, vv)
else:
yield (k, v)
def __bytes__(self):
return str(self).encode("latin1")
def append(self, key, value):
"""
If a header with the given name already exists, the value is
normally appended to the existing value separated by a comma.
If, however, the already existing entry associated key with a
value of type list (as is the case for "Set-Cookie"),
the new value is appended to that list.
"""
if key not in self:
if key.lower() == "set-cookie":
self[key] = [value]
else:
self[key] = value
else:
if isinstance(self[key], list):
self[key].append(value)
else:
self[key] = ", ".join([self[key], value])
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in list(_params.items()):
k = k.replace('_', '-')
if v is None:
parts.append(k)
else:
parts.append(_formatparam(k, v))
self.append(_name, "; ".join(parts))
|
the-stack_0_17016 | import json
import logging
import requests
from django.conf import settings
NAMESPACES = {"dcim": "dcim"}
class NetBox(object):
"""
Class used to interact with the NetBox API.
"""
logger = logging.getLogger("peering.manager.netbox")
def lookup(self, namespace, search):
"""
Sends a get request to the API given a namespace and some parameters.
"""
# Enforce trailing slash and add namespace
api_url = settings.NETBOX_API.strip("/") + "/" + namespace
# Set token in the headers
headers = {
"accept": "application/json",
"authorization": "Token {}".format(settings.NETBOX_API_TOKEN),
}
# Make the request
self.logger.debug("calling api: %s | %s", api_url, search)
response = requests.get(api_url, headers=headers, params=search)
return response.json() if response.status_code == 200 else None
def get_devices(self):
"""
Return all devices found with the NetBox API.
"""
result = self.lookup(NAMESPACES["dcim"] + "/devices", {})
if not result or result["count"] == 0:
return None
return [
device
for device in result["results"]
if device["device_role"]["slug"] in settings.NETBOX_DEVICE_ROLES
]
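# A minimal usage sketch, not part of the original module. It assumes Django
# settings providing NETBOX_API, NETBOX_API_TOKEN and NETBOX_DEVICE_ROLES.
if __name__ == "__main__":
    netbox = NetBox()
    for device in netbox.get_devices() or []:
        print(device["name"], device["device_role"]["slug"])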
|
the-stack_0_17018 | """Support for Modbus Register sensors."""
import logging
import struct
from typing import Any, Union
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_OFFSET,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_COUNT = "count"
CONF_DATA_TYPE = "data_type"
CONF_PRECISION = "precision"
CONF_REGISTER = "register"
CONF_REGISTER_TYPE = "register_type"
CONF_REGISTERS = "registers"
CONF_REVERSE_ORDER = "reverse_order"
CONF_SCALE = "scale"
DATA_TYPE_CUSTOM = "custom"
DATA_TYPE_FLOAT = "float"
DATA_TYPE_INT = "int"
DATA_TYPE_UINT = "uint"
REGISTER_TYPE_HOLDING = "holding"
REGISTER_TYPE_INPUT = "input"
def number(value: Any) -> Union[int, float]:
"""Coerce a value to number without losing precision."""
if isinstance(value, int):
return value
if isinstance(value, str):
try:
value = int(value)
return value
except (TypeError, ValueError):
pass
try:
value = float(value)
return value
except (TypeError, ValueError):
raise vol.Invalid(f"invalid number {value}")
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_REGISTERS): [
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Optional(CONF_COUNT, default=1): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_INT): vol.In(
[DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT, DATA_TYPE_CUSTOM]
),
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_OFFSET, default=0): number,
vol.Optional(CONF_PRECISION, default=0): cv.positive_int,
vol.Optional(CONF_REGISTER_TYPE, default=REGISTER_TYPE_HOLDING): vol.In(
[REGISTER_TYPE_HOLDING, REGISTER_TYPE_INPUT]
),
vol.Optional(CONF_REVERSE_ORDER, default=False): cv.boolean,
vol.Optional(CONF_SCALE, default=1): number,
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_STRUCTURE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
]
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus sensors."""
sensors = []
data_types = {DATA_TYPE_INT: {1: "h", 2: "i", 4: "q"}}
data_types[DATA_TYPE_UINT] = {1: "H", 2: "I", 4: "Q"}
data_types[DATA_TYPE_FLOAT] = {1: "e", 2: "f", 4: "d"}
for register in config.get(CONF_REGISTERS):
structure = ">i"
if register.get(CONF_DATA_TYPE) != DATA_TYPE_CUSTOM:
try:
structure = ">{}".format(
data_types[register.get(CONF_DATA_TYPE)][register.get(CONF_COUNT)]
)
except KeyError:
_LOGGER.error(
"Unable to detect data type for %s sensor, " "try a custom type",
register.get(CONF_NAME),
)
continue
else:
structure = register.get(CONF_STRUCTURE)
try:
size = struct.calcsize(structure)
except struct.error as err:
_LOGGER.error(
"Error in sensor %s structure: %s", register.get(CONF_NAME), err
)
continue
if register.get(CONF_COUNT) * 2 != size:
_LOGGER.error(
"Structure size (%d bytes) mismatch registers count " "(%d words)",
size,
register.get(CONF_COUNT),
)
continue
hub_name = register.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
sensors.append(
ModbusRegisterSensor(
hub,
register.get(CONF_NAME),
register.get(CONF_SLAVE),
register.get(CONF_REGISTER),
register.get(CONF_REGISTER_TYPE),
register.get(CONF_UNIT_OF_MEASUREMENT),
register.get(CONF_COUNT),
register.get(CONF_REVERSE_ORDER),
register.get(CONF_SCALE),
register.get(CONF_OFFSET),
structure,
register.get(CONF_PRECISION),
)
)
if not sensors:
return False
add_entities(sensors)
class ModbusRegisterSensor(RestoreEntity):
"""Modbus register sensor."""
def __init__(
self,
hub,
name,
slave,
register,
register_type,
unit_of_measurement,
count,
reverse_order,
scale,
offset,
structure,
precision,
):
"""Initialize the modbus register sensor."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._register = int(register)
self._register_type = register_type
self._unit_of_measurement = unit_of_measurement
self._count = int(count)
self._reverse_order = reverse_order
self._scale = scale
self._offset = offset
self._precision = precision
self._structure = structure
self._value = None
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._value = state.state
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
def update(self):
"""Update the state of the sensor."""
if self._register_type == REGISTER_TYPE_INPUT:
result = self._hub.read_input_registers(
self._slave, self._register, self._count
)
else:
result = self._hub.read_holding_registers(
self._slave, self._register, self._count
)
val = 0
try:
registers = result.registers
if self._reverse_order:
registers.reverse()
except AttributeError:
_LOGGER.error(
"No response from hub %s, slave %s, register %s",
self._hub.name,
self._slave,
self._register,
)
return
byte_string = b"".join([x.to_bytes(2, byteorder="big") for x in registers])
val = struct.unpack(self._structure, byte_string)[0]
val = self._scale * val + self._offset
if isinstance(val, int):
self._value = str(val)
if self._precision > 0:
self._value += "." + "0" * self._precision
else:
self._value = f"{val:.{self._precision}f}"
|
the-stack_0_17019 | """
Calculates real time burn rate for AWS
"""
import pandas as pd
import pprint
from decisionengine.framework.modules import Transform
import logging
"""
IMPORTANT: Please do not change order of these keys and always
append new keys rather than pre-pend or insert.
"""
CONSUMES = ["provisioner_resource_spot_prices",
"AWS_Occupancy"]
PRODUCES = ["AWS_Burn_Rate"]
class AwsBurnRate(Transform.Transform):
def __init__(self, config):
super(AwsBurnRate, self).__init__(config)
self.logger = logging.getLogger()
def transform(self, data_block):
spot_prices = data_block[CONSUMES[0]].fillna(0)
occupancy = data_block[CONSUMES[1]].fillna(0)
burn_df = pd.DataFrame([{"BurnRate": 0.}])
if not occupancy.empty:
df = pd.merge(occupancy,
spot_prices,
how="inner",
on=["AccountName", "AvailabilityZone", "InstanceType"])
if not df.empty:
df["BurnRate"] = pd.to_numeric(
df["RunningVms"]) * pd.to_numeric(df["SpotPrice"])
burn_df = pd.DataFrame([{"BurnRate": df["BurnRate"].sum()}])
return {PRODUCES[0]: burn_df}
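    # A minimal sketch of the inputs this transform expects (the column values
    # below are illustrative, not taken from this module):
    #
    #   occupancy   = pd.DataFrame([{"AccountName": "acct", "AvailabilityZone": "us-east-1a",
    #                                "InstanceType": "m5.large", "RunningVms": 3}])
    #   spot_prices = pd.DataFrame([{"AccountName": "acct", "AvailabilityZone": "us-east-1a",
    #                                "InstanceType": "m5.large", "SpotPrice": 0.035}])
    #
    # The inner merge pairs rows by account/zone/instance type, so the burn rate
    # here would be 3 * 0.035 = 0.105 USD per hour.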
def consumes(self, name_list=None):
return CONSUMES
def produces(self, name_schema_id_list=None):
return PRODUCES
def module_config_template():
"""
print a template for this module configuration data
"""
d = {
"AwsBurnRate": {
"module": "modules.AWS.transforms.AwsBurnRate",
"name": "AwsBurnRate",
"parameters": {
}
}
}
print("Entry in channel cofiguration")
pprint.pprint(d)
print("where")
print("\t name - name of the class to be instantiated by task manager")
def module_config_info():
"""
print this module configuration information
"""
print("consumes", CONSUMES)
print("produces", PRODUCES)
module_config_template()
def main():
"""
    Call this as a test unit or use as the CLI of this module
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--configtemplate",
action="store_true",
help="prints the expected module configuration")
parser.add_argument("--configinfo",
action="store_true",
help="prints config template along with produces and consumes info")
args = parser.parse_args()
if args.configtemplate:
module_config_template()
elif args.configinfo:
module_config_info()
if __name__ == "__main__":
main()
|
the-stack_0_17021 | # -*- coding: utf-8 -*-
from __future__ import print_function
import itertools as it, operator as op, functools as ft
from os.path import join, exists
import os, sys, logging, glob, time, select
class OnDemandLogger(object):
log = None
def __getattr__(self, k):
if not self.log: self.log = logging.getLogger('sht.gpio')
return getattr(self.log, k)
log = OnDemandLogger()
path_gpio = '/sys/class/gpio'
class GPIOAccessFailure(Exception): pass
def gpio_access_wrap(func, checks=12, timeout=1.0):
for n in xrange(checks, -1, -1):
try: return func()
except (IOError, OSError): pass
if checks <= 0: break
if n: time.sleep(timeout / checks)
else:
raise GPIOAccessFailure(func, timeout)
# log.warn('gpio access failed (func: %s, timeout: %s)', func, timeout)
def get_pin_path(n, sub=None, _cache=dict()):
n = int(n)
if n not in _cache:
for try_export in [True, False]:
try:
path = join(path_gpio, 'gpio{}'.format(n))
if not exists(path): path, = glob.glob(path + '_*')
except:
if not try_export:
raise OSError('Failed to find sysfs control path for pin: {}'.format(n))
else: break
log.debug('Exporting pin: %s', n)
with open(join(path_gpio, 'export'), 'wb', 0) as dst:
gpio_access_wrap(ft.partial(dst.write, bytes(n)))
_cache[n] = path
else: path = _cache[n]
return path if not sub else os.path.join(path, sub)
def get_pin_value(n, k='value'):
with gpio_access_wrap(
ft.partial(open, get_pin_path(n, k), 'rb', 0) ) as src:
val = src.read().strip()
if k == 'value':
try: val = int(val)
except ValueError as err:
log.warn('Failed to read/decode pin (n: %s) value %r: %s', n, val, err)
val = None
return val
def set_pin_value(n, v, k='value', force=False, _pin_state=dict()):
if k == 'value' and isinstance(v, bool): v = int(v)
if not force and _pin_state.get(n) == v: return
# log.debug('Setting parameter of pin-%s: %s = %r ', n, k, v)
with gpio_access_wrap(
ft.partial(open, get_pin_path(n, k), 'wb', 0) ) as dst:
gpio_access_wrap(ft.partial(dst.write, bytes(v)))
_pin_state[n] = v
class PollTimeout(Exception): pass
def poll_pin(n, timeout=1.0, edge='both', _poller_cache=dict()):
if edge: set_pin_value(n, k='edge', v=edge)
try:
if n not in _poller_cache:
_poller_cache[n] = select.poll()
poller = _poller_cache[n]
with gpio_access_wrap(
ft.partial(open, get_pin_path(n, 'value'), 'rb', 0) ) as src:
poller.register(src.fileno(), select.POLLPRI | select.POLLERR)
res = poller.poll(timeout * 1000)
if not res or res[0][1] & select.POLLERR == select.POLLERR:
raise PollTimeout(n, timeout, edge, res)
return get_pin_value(n)
finally:
if edge: set_pin_value(n, k='edge', v='none')
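# Example round-trip (a sketch; pin 17 and the one-second timeout are
# illustrative and assume the sysfs GPIO interface is present and writable):
#
#   set_pin_value(17, k='direction', v='in')
#   try:
#       level = poll_pin(17, timeout=1.0, edge='both')
#   except PollTimeout:
#       level = get_pin_value(17)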
|
the-stack_0_17022 | from datetime import datetime
from airflow.models import DAG
from airtunnel import PandasDataAsset
from airtunnel.operators.archival import DataAssetArchiveOperator, IngestArchiveOperator
from airtunnel.operators.ingestion import IngestOperator
from airtunnel.operators.loading import StagingToReadyOperator
from airtunnel.operators.transformation import PandasTransformationOperator
from airtunnel.sensors.ingestion import SourceFileIsReadySensor
student = PandasDataAsset("student")
programme = PandasDataAsset("programme")
enrollment = PandasDataAsset("enrollment")
enrollment_summary = PandasDataAsset("enrollment_summary")
with DAG(
dag_id="university",
schedule_interval=None,
start_date=datetime(year=2019, month=9, day=1),
) as dag:
ingested_ready_tasks = set()
# a common stream of tasks for all ingested assets:
for ingested_asset in (student, programme, enrollment):
source_is_ready = SourceFileIsReadySensor(
# we reduce the poke interval to only 3 seconds so that our test runs complete faster
# do not do in production!! :)
asset=ingested_asset,
poke_interval=3,
no_of_required_static_pokes=2,
)
ingest = IngestOperator(asset=ingested_asset)
transform = PandasTransformationOperator(asset=ingested_asset)
archive = DataAssetArchiveOperator(asset=ingested_asset)
staging_to_ready = StagingToReadyOperator(asset=ingested_asset)
ingest_archival = IngestArchiveOperator(asset=ingested_asset)
dag >> source_is_ready >> ingest >> transform >> archive >> staging_to_ready >> ingest_archival
ingested_ready_tasks.add(staging_to_ready)
# upon having loaded the three ingested assets, connect the aggregation downstream to them:
build_enrollment_summary = PandasTransformationOperator(asset=enrollment_summary)
build_enrollment_summary.set_upstream(ingested_ready_tasks)
staging_to_ready = StagingToReadyOperator(asset=enrollment_summary)
dag >> build_enrollment_summary >> staging_to_ready
|
the-stack_0_17024 | #!/usr/bin/env python
from collections import defaultdict
import sys
from hunmisc.corpustools.tsv_tools import sentence_iterator, get_dependencies
from common import sanitize_word
def get_node_id_and_word(token):
word, i = token
word = sanitize_word(word)
node_id = "{0}_{1}".format(word, i)
return node_id, word
def deps_to_sen_dict(deps):
root_token = None
sen_dict = defaultdict(dict)
for dep in deps:
gov = (dep['gov']['word'], dep['gov']['id'])
ddep = (dep['dep']['word'], dep['dep']['id'])
dtype = dep['type']
sen_dict[gov][ddep] = dtype
if dtype == 'root':
root_token = gov
return sen_dict, root_token
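# For example (a sketch; token ids follow whatever get_dependencies returns),
# a dependency such as
#   {'type': 'nsubj', 'gov': {'word': 'runs', 'id': 2}, 'dep': {'word': 'dog', 'id': 1}}
# would give sen_dict[('runs', 2)][('dog', 1)] == 'nsubj', and the token that
# carries the 'root' relation is returned as root_token.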
def dict_to_graph(sen_dict, token):
# print dictionary
global SEEN
global GRAPH_STRING
if token in SEEN:
node_id = SEEN[token]
GRAPH_STRING += node_id
else:
node_id, word = get_node_id_and_word(token)
SEEN[token] = node_id
GRAPH_STRING += "({0} / {1}".format(node_id, word)
for neighbor, edge in sen_dict[token].iteritems():
GRAPH_STRING += ' :{0} '.format(edge.replace(':', '_'))
dict_to_graph(sen_dict, neighbor)
GRAPH_STRING += ')'
HEADER = (
'# IRTG unannotated corpus file, v1.0\n' +
'# interpretation graph: de.up.ling.irtg.algebra.graph.GraphAlgebra')
def main():
print(HEADER)
id_field, word_field, lemma_field, msd_field, gov_field, dep_field = (
0, 1, None, None, -4, -3)
global SEEN
global GRAPH_STRING
with open(sys.argv[1]) as stream:
for sentence in sentence_iterator(stream, comment_tag='#'):
deps = get_dependencies(
sentence, id_field, word_field, lemma_field, msd_field,
gov_field, dep_field)
sentence_dict, root_token = deps_to_sen_dict(deps)
# root token will be the first token if ROOT doesn't exist
if root_token is None:
root_token = sentence_dict.keys()[0]
SEEN = {}
GRAPH_STRING = ''
dict_to_graph(sentence_dict, root_token)
print(GRAPH_STRING)
if __name__ == "__main__":
main()
|
the-stack_0_17026 | import numpy as np
import requests
import os
from PIL import Image, ImageOps
from pathlib import Path
from retrofy.configs import Filter_Configs
import retrofy.utils as utils
CONFIGS = Filter_Configs()
class Filter():
MAX_SIZE = CONFIGS.MAXS["size"]
def __init__(self, img_src):
if isinstance(img_src, (str, Image.Image)) == False:
raise TypeError("Parameter 'img_src' must be a string or a Pillow Image object.")
self.__img_src = img_src
self.__last_modifications = [] #list for all modifications that wasnt undoed
self.__last_undos = []
self.__load_image()
@property
def modified_img(self):
return self.__modified_img
@modified_img.setter
def modified_img(self, img):
if isinstance(img, Image.Image) == False:
raise TypeError("Parameter 'modified_img' must be a Pillow Image object.")
self.__modified_img = img
self.__last_modifications.append(self.__modified_img)
@property
def original_img(self):
return self.__original_img
@property
def last_modifications(self):
return self.__last_modifications
def __load_image(self):
# if img_src alredy is a PIL Image object
if isinstance(self.__img_src, Image.Image) == True:
self.__original_img = self.__img_src
else:
if utils.is_url(self.__img_src) == True:
try:
self.__img_src = requests.get(self.__img_src, stream=True).raw
self.__original_img = Image.open(self.__img_src).convert("RGB")
except:
raise ValueError("Could not download image from URL '{}'.".format(self.__img_src))
else:
try:
self.__original_img = Image.open(self.__img_src).convert("RGB")
except:
raise ValueError("Could not access image on file '{}'.".format(self.__img_src))
#resize large images (with same aspect ratio)
self.__original_img.thumbnail(self.MAX_SIZE)
self.__modified_img = self.__original_img
def undo(self, times=1):
if isinstance(times, int) == False:
raise TypeError("Parameter 'times' must be an integer.")
if len(self.__last_modifications) > 0:
for i in range(times):
if len(self.__last_modifications) > 0:
self.__last_undos.append(self.__last_modifications[-1])
self.__last_modifications.pop(-1)
if len(self.__last_modifications) == 0:
self.reset()
else:
self.__modified_img = self.__last_modifications[-1]
def redo(self, times=1):
if isinstance(times, int) == False:
raise TypeError("Parameter 'times' must be an integer.")
if len(self.__last_undos) > 0:
for i in range(times):
if len(self.__last_undos) > 0:
self.modified_img = self.__last_undos[-1]
self.__last_undos.pop(-1)
def reset(self):
self.__modified_img = self.__original_img
def show(self, original=False):
if isinstance(original, bool) == False:
raise TypeError("Parameter 'original' must be a boolean.")
if original == False:
self.__modified_img.show()
else:
self.__original_img.show()
def save(self, path, original=False):
if isinstance(original, bool) == False:
raise TypeError("Parameter 'original' must be a boolean.")
        if isinstance(path, (str, Path)) == False:
raise TypeError("Parameter 'path' must be a string or a Path object.")
path = Path(path)
if path.suffix == "":
path = path.parent / Path(path.stem + ".png")
if self.__modified_img.mode == "RGBA" and path.suffix != ".png":
raise ValueError("RGBA Image must have 'png' file extension.")
try:
if original == False:
self.__modified_img.save(path)
else:
self.__original_img.save(path)
except:
raise ValueError("Could not save image on especified path.")
|
the-stack_0_17027 | # coding: utf-8
"""
OpenAPI Petstore */ ' \" =end -- \\r\\n \\n \\r
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ */ ' \" =end -- # noqa: E501
OpenAPI spec version: 1.0.0 */ ' \" =end -- \\r\\n \\n \\r
Contact: [email protected] */ ' \" =end -- \\r\\n \\n \\r
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from petstore_api.api_client import ApiClient
class FakeApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def test_code_inject____end__rn_n_r(self, **kwargs): # noqa: E501
"""To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_code_inject____end__rn_n_r(async_req=True)
>>> result = thread.get()
:param async_req bool
:param UNKNOWN_BASE_TYPE unknown_base_type:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_code_inject____end__rn_n_r_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.test_code_inject____end__rn_n_r_with_http_info(**kwargs) # noqa: E501
return data
def test_code_inject____end__rn_n_r_with_http_info(self, **kwargs): # noqa: E501
"""To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_code_inject____end__rn_n_r_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param UNKNOWN_BASE_TYPE unknown_base_type:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['unknown_base_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_code_inject____end__rn_n_r" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'unknown_base_type' in local_var_params:
body_params = local_var_params['unknown_base_type']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', '*/ \" =end -- ']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
the-stack_0_17028 | """Test the rig module."""
import numpy as np
from opensfm import pygeometry, rig, types
def test_create_instances_with_patterns() -> None:
# A first rig model defined as left/right/top/bottom
instance1 = [
"12345_left.jpg",
"12345_bottom.jpg",
"12345_top.jpg",
"12345_right.jpg",
]
instance2 = [
"1234567_left.jpg",
"1234567_bottom.jpg",
"1234567_top.jpg",
"1234567_right.jpg",
]
patterns_12 = {
"camera_left": "(left)",
"camera_right": "(right)",
"camera_top": "(top)",
"camera_bottom": "(bottom)",
}
# A second one as RED/GREEN/BLUE
instance3 = [
"RED_SENSOR_001-12345678.jpg",
"GREEN_SENSOR_002-12345678.jpg",
"BLUE_SENSOR_003-12345678.jpg",
]
patterns_3 = {
"red": "(RED_SENSOR_001)",
"green": "(GREEN_SENSOR_002)",
"blue": "(BLUE_SENSOR_003)",
}
# Run detection with these two rig model patterns
rig_patterns = patterns_12
rig_patterns.update(patterns_3)
instances = rig.create_instances_with_patterns(
instance1 + instance2 + instance3, rig_patterns
)
# Ensure we have 2 instance for the first rig, and 1 for the second
assert len(instances) == 3
recovered_instance1 = instances["12345_.jpg"]
assert [x[0] for x in recovered_instance1] == instance1
recovered_instance2 = instances["1234567_.jpg"]
assert [x[0] for x in recovered_instance2] == instance2
recovered_instance3 = instances["-12345678.jpg"]
assert [x[0] for x in recovered_instance3] == instance3
def test_compute_relative_pose() -> None:
# 4-cameras rig
camera1 = pygeometry.Camera.create_spherical()
camera1.id = "camera1"
camera2 = pygeometry.Camera.create_spherical()
camera2.id = "camera2"
camera3 = pygeometry.Camera.create_spherical()
camera3.id = "camera3"
camera4 = pygeometry.Camera.create_spherical()
camera4.id = "camera4"
# a bit cumbersome that we need to have some reconstruction
rec = types.Reconstruction()
rec.add_camera(camera1)
rec.add_camera(camera2)
rec.add_camera(camera3)
rec.add_camera(camera4)
# First rig instance
rec.create_shot("shot1", "camera1", pygeometry.Pose([0, 0, 0], [-2, -2, 0]))
rec.create_shot("shot2", "camera2", pygeometry.Pose([0, 0, 0], [-3, -3, 0]))
rec.create_shot("shot3", "camera3", pygeometry.Pose([0, 0, 0], [-1, -3, 0]))
rec.create_shot("shot4", "camera4", pygeometry.Pose([0, 0, 0], [-2, -4, 0]))
# Second rig instance (rotated by pi/2 around Z)
pose_instance = pygeometry.Pose([0, 0, -1.5707963])
pose_instance.set_origin([-6, 0, 0])
rec.create_shot("shot5", "camera1", pose_instance)
pose_instance.set_origin([-7, 1, 0])
rec.create_shot("shot6", "camera2", pose_instance)
pose_instance.set_origin([-7, -1, 0])
rec.create_shot("shot7", "camera3", pose_instance)
pose_instance.set_origin([-8, 0, 0])
rec.create_shot("shot8", "camera4", pose_instance)
pose_instances = [
[
(
rec.shots["shot1"],
"camera_id_1",
),
(
rec.shots["shot2"],
"camera_id_2",
),
(
rec.shots["shot3"],
"camera_id_3",
),
(
rec.shots["shot4"],
"camera_id_4",
),
],
[
(
rec.shots["shot5"],
"camera_id_1",
),
(
rec.shots["shot6"],
"camera_id_2",
),
(
rec.shots["shot7"],
"camera_id_3",
),
(
rec.shots["shot8"],
"camera_id_4",
),
],
]
# Compute rig cameras poses
rig_cameras = rig.compute_relative_pose(pose_instances)
assert np.allclose(
[0, -1, 0], rig_cameras["camera_id_1"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[1, 0, 0], rig_cameras["camera_id_2"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[-1, 0, 0], rig_cameras["camera_id_3"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[0, 1, 0], rig_cameras["camera_id_4"].pose.get_origin(), atol=1e-7
)
|
the-stack_0_17031 | '''
MIT License
Copyright (c) 2020 Futurewei Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import configparser
class Runnable:
def __init__(self):
self.name = ""
self.host = ""
self.image = ""
self.docker_args = ""
self.program_args = ""
def __str__(self):
return "Name: " + self.name + \
"\nHost: " + self.host + \
"\nImage: " + self.image + \
"\nDocker args: " + self.docker_args + \
"\nProgram args: " + self.program_args
def getDockerPull(self):
return "sudo docker pull " + self.image
def getDockerRun(self):
return "sudo docker run " + self.docker_args + " " + self.program_args
def getDockerStop(self):
return "sudo docker stop -t 30 " + self.name
def getDockerRemove(self):
return "sudo docker container rm " + self.name
def getDockerLogs(self):
return "sudo docker logs --tail 5000 " + self.name
def parseRunnableConfig(locals_filename, runnable, config_files, cpus, cpus_base):
binary = ""
parsed_args = []
for filename in config_files.split(' '):
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read([locals_filename]+[filename])
if "binary" in config["deployment"]:
binary = config["deployment"]["binary"]
if "image" in config["deployment"]:
runnable.image = config["deployment"]["image"]
if "docker_args" in config["deployment"]:
runnable.docker_args += config["deployment"]["docker_args"] + " "
for arg in config["program_args"]:
if arg in parsed_args:
continue
parsed_args.append(arg)
value = config["program_args"][arg]
if value == "$cpus":
value = str(cpus)
if value == "$cpus_expand":
value = str(cpus_base) + "-" + str(cpus_base+cpus-1)
runnable.program_args += "--" + arg + " " + value + " "
runnable.docker_args += "--name " + runnable.name + " " + runnable.image + " " + binary
def parseConfig(locals_filename, config_filename):
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read([locals_filename, config_filename])
all_to_run = []
cpuset_base = int(config["LocalConfig"]["cpuset_base"])
for section in config.sections():
if (section == "LocalConfig"):
continue
hosts = config[section]["hosts"].split(' ')
for host in hosts:
runnable = Runnable()
runnable.name = section
runnable.host = host
parseRunnableConfig(locals_filename, runnable, config[section]["configs"], int(config[section]["cpus"]), cpuset_base)
all_to_run.append(runnable)
return all_to_run
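# Example driver (a sketch; the configuration file names are illustrative):
#
#   for runnable in parseConfig("locals.ini", "cluster.ini"):
#       print(runnable.host)
#       print(runnable.getDockerRun())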
|
the-stack_0_17032 | #!/usr/bin/env python
# coding: utf-8
# # Data distribution check
# In[14]:
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
#import umap as umap
"""
# In[24]:
# Read the old and new data
old = pd.read_csv('data_x_old.csv', header=None,sep=' ',dtype='float')
old.info()
old = old.values
new = pd.read_csv('data_x.csv', header=None,sep=' ',dtype='float')
new.info()
new = new.values
# ## Histogram
# In[29]:
# Plot the histogram of data
def histogram_plot(data, dim):
f = plt.figure()
# Determine if this is a new data
if np.shape(data)[0] == 17500:
new_flag = True
name = 'new'
else:
new_flag = False
name = 'old'
# Plot the histogram
plt.hist(data[:, dim],bins=100)
plt.title('histogram of axim {} of {} data '.format(dim, name))
plt.ylabel('cnt')
plt.xlabel('axis {}'.format(dim))
plt.savefig('histogram of axim {} of {} data.png'.format(dim, name))
# In[30]:
for i in range(8):
histogram_plot(new, i)
histogram_plot(old, i)
# ## Clustering
# In[31]:
data_all = np.concatenate([old, new])
reducer = umap.UMAP()
embedding = reducer.fit_transform(data_all)
embedding.shape
# In[37]:
# Plot the umap graph
lo = len(old)
ln = len(new)
label_all = np.zeros([lo + ln, ])
label_all[lo:] = 1
f = plt.figure()
plt.scatter(embedding[:lo, 0], embedding[:lo, 1], label='old',s=1)
plt.legend()
plt.xlabel('u1')
plt.ylabel('u2')
plt.title('umap plot for old data')
plt.savefig('umap plot for old data.png')
f = plt.figure()
plt.scatter(embedding[lo:, 0], embedding[lo:, 1], label='new',s=1)
plt.legend()
plt.xlabel('u1')
plt.ylabel('u2')
plt.title('umap plot for new data')
plt.savefig('umap plot for new data.png')
f = plt.figure()
plt.scatter(embedding[:lo, 0], embedding[:lo, 1], label='old',s=1)
plt.scatter(embedding[lo:, 0], embedding[lo:, 1], label='new',s=1)
plt.legend()
plt.xlabel('u1')
plt.ylabel('u2')
plt.title('umap plot for old data and new data')
plt.savefig('umap plot for old data and new data.png')
# ## Visualization
#
# In[12]:
def plot_scatter(old, new, dim1, dim2):
f = plt.figure()
plt.scatter(old[:, dim1], old[:, dim2], label='old',marker='x')#,s=10)
plt.scatter(new[:, dim1], new[:, dim2], label='new',marker='.')#,s=5)
plt.legend()
plt.xlabel('dim {}'.format(dim1))
plt.ylabel('dim {}'.format(dim2))
plt.title('scatter plot of dim{},{} of old and new data'.format(dim1, dim2))
plt.savefig('scatter plot of dim{},{} of old and new data.png'.format(dim1, dim2))
# In[15]:
for i in range(8):
for j in range(8):
if i == j:
continue
plot_scatter(old, new, i, j)
plt.close('all')
# ## Pair-wise scatter plot
# In[19]:
df_old = pd.DataFrame(old)
df_new = pd.DataFrame(new)
psm = pd.plotting.scatter_matrix(df_old, figsize=(15, 15), s=10)
# ## Find the same and plot spectra
# In[38]:
i = 0
for i in range(len(old)):
#print(old[i,:])
new_minus = np.sum(np.square(new - old[i,:]),axis=1)
#print(np.shape(new_minus))
match = np.where(new_minus==0)
#print(match)
if np.shape(match)[1] != 0: #There is a match
print('we found a match! new index {} and old index {} match'.format(match, i))
# In[39]:
print('old index ', old[11819,:])
print('new index ', new[5444,:])
# In[35]:
np.shape(match)
# ### Plot the matched spectra
# In[6]:
y_old = pd.read_csv('data_y_old.csv',header=None,sep=' ')
# In[42]:
y_new = pd.read_csv('data_y_new.csv',header=None,sep=' ')
# In[7]:
y_old = y_old.values
y_new = y_new.values
# In[45]:
# plot the spectra
old_index = 11819
new_index = 5444
f = plt.figure()
plt.plot(y_old[old_index,:],label='old geometry {}'.format(old[old_index, :]))
plt.plot(y_new[new_index,:],label='new geometry {}'.format(new[new_index, :]))
plt.legend()
plt.ylabel('transmission')
plt.xlabel('THz')
plt.savefig('Spectra plot for identicle point')
# # Conclusion, this simulation is not the same as before ...
# ### See what percentage are still within range
# In[36]:
#print(old)
#print(new)
hmax = np.max(old[:,0])
hmin = np.min(old[:,1])
rmax = np.max(old[:,4])
rmin = np.min(old[:,4])
print(hmax, hmin, rmax, rmin)
#hmax = np.max(new[:,0])
#hmin = np.min(new[:,1])
#rmax = np.max(new[:,4])
#rmin = np.min(new[:,4])
#print(hmax, hmin, rmax, rmin)
within_range = np.ones([len(new)])
new_minus = np.copy(new)
new_minus[:,:4] -= hmin
new_minus[:,4:] -= rmin
new_plus = np.copy(new)
new_plus[:, :4] -= hmax
new_plus[:, 4:] -= rmax
small_flag = np.min(new_minus, axis=1) < 0
big_flag = np.max(new_plus, axis=1) > 0
within_range[small_flag] = 0
within_range[big_flag] = 0
print(np.sum(within_range) / len(within_range))
print(type(within_range))
print(np.shape(within_range))
print(within_range)
print(new[np.arange(len(within_range))[within_range.astype('bool')],:])
print(np.sum(within_range))
# # Data augmentation
# ## Since the geometry is symmetric, we can augment the data with permutations
# In[13]:
# Check the assumption that the permutation does indeed give you the same spectra
# Check if there is same spectra
i = 0
for i in range(len(y_old)):
#print(old[i,:])
new_minus = np.sum(np.square(y_old - y_old[i,:]),axis=1)
#print(np.shape(new_minus))
match = np.where(new_minus==0)
#print(match)
#print(np.shape(match))
#print(len(match))
#if match[0]
if len(match) != 1:#np.shape(match)[1] != 0: #There is a match
print('we found a match! new index {} and old index {} match'.format(match, i))
# ### Due to physical periodic boundary condition, we can augment the data by doing permutations
# In[39]:
"""
def permutate_periodicity(geometry_in, spectra_in):
"""
:param: geometry_in: numpy array of geometry [n x 8] dim
:param: spectra_in: spectra of the geometry_in [n x k] dim
:return: output of the augmented geometry, spectra [4n x 8], [4n x k]
"""
# Get the dimension parameters
(n, k) = np.shape(spectra_in)
# Initialize the output
spectra_out = np.zeros([4*n, k])
geometry_out = np.zeros([4*n, 8])
#################################################
# start permutation of geometry (case: 1 - 0123)#
#################################################
# case:2 -- 1032
geometry_c2 = geometry_in[:, [1,0,3,2,5,4,7,6]]
# case:3 -- 2301
geometry_c3 = geometry_in[:, [2,3,0,1,6,7,4,5]]
# case:4 -- 3210
geometry_c4 = geometry_in[:, [3,2,1,0,7,6,5,4]]
geometry_out[0*n:1*n, :] = geometry_in
geometry_out[1*n:2*n, :] = geometry_c2
geometry_out[2*n:3*n, :] = geometry_c3
geometry_out[3*n:4*n, :] = geometry_c4
for i in range(4):
spectra_out[i*n:(i+1)*n,:] = spectra_in
return geometry_out, spectra_out
# In[40]:
data_folder = '/work/sr365/Christian_data/dataIn'
data_out_folder = '/work/sr365/Christian_data_augmented'
for file in os.listdir(data_folder):
data = pd.read_csv(os.path.join(data_folder, file),header=None,sep=',').values
(l, w) = np.shape(data)
g = data[:,2:10]
s = data[:,10:]
g_aug, s_aug = permutate_periodicity(g, s)
output = np.zeros([l*4, w])
output[:, 2:10] = g_aug
output[:, 10:] = s_aug
np.savetxt(os.path.join(data_out_folder, file+'_augmented.csv'),output,delimiter=',')
# In[41]:
#print(np.shape(g))
# In[ ]:
|
the-stack_0_17035 | # inter-node communication
from collections import defaultdict
from enum import IntEnum, unique
from plenum.common.plenum_protocol_version import PlenumProtocolVersion
from plenum.common.roles import Roles
from plenum.common.transactions import PlenumTransactions
NOMINATE = "NOMINATE"
REELECTION = "REELECTION"
PRIMARY = "PRIMARY"
PRIMDEC = "PRIMARYDECIDED"
BATCH = "BATCH"
REQACK = "REQACK"
REQNACK = "REQNACK"
REJECT = "REJECT"
POOL_LEDGER_TXNS = "POOL_LEDGER_TXNS"
PROPAGATE = "PROPAGATE"
PREPREPARE = "PREPREPARE"
OLD_VIEW_PREPREPARE_REQ = "OLD_VIEW_PREPREPARE_REQ"
OLD_VIEW_PREPREPARE_REP = "OLD_VIEW_PREPREPARE_REP"
PREPARE = "PREPARE"
COMMIT = "COMMIT"
CHECKPOINT = "CHECKPOINT"
CHECKPOINT_STATE = "CHECKPOINT_STATE"
THREE_PC_STATE = "THREE_PC_STATE"
UPDATE_BLS_MULTI_SIG = "UPDATE_BLS_MULTI_SIG"
REPLY = "REPLY"
ORDERED = "ORDERED"
REQKEY = "REQKEY"
INSTANCE_CHANGE = "INSTANCE_CHANGE"
BACKUP_INSTANCE_FAULTY = "BACKUP_INSTANCE_FAULTY"
VIEW_CHANGE_DONE = "VIEW_CHANGE_DONE"
CURRENT_STATE = "CURRENT_STATE"
VIEW_CHANGE = "VIEW_CHANGE"
VIEW_CHANGE_ACK = "VIEW_CHANGE_ACK"
NEW_VIEW = "NEW_VIEW"
LEDGER_STATUS = "LEDGER_STATUS"
CONSISTENCY_PROOF = "CONSISTENCY_PROOF"
CATCHUP_REQ = "CATCHUP_REQ"
CATCHUP_REP = "CATCHUP_REP"
MESSAGE_REQUEST = 'MESSAGE_REQUEST'
MESSAGE_RESPONSE = 'MESSAGE_RESPONSE'
OBSERVED_DATA = 'OBSERVED_DATA'
BATCH_COMMITTED = 'BATCH_COMMITTED'
VIEW_CHANGE_START = 'ViewChangeStart'
VIEW_CHANGE_CONTINUE = 'ViewChangeContinue'
BLACKLIST = "BLACKLIST"
THREE_PC_PREFIX = "3PC: "
MONITORING_PREFIX = "MONITORING: "
VIEW_CHANGE_PREFIX = "VIEW CHANGE: "
CATCH_UP_PREFIX = "CATCH-UP: "
PRIMARY_SELECTION_PREFIX = "PRIMARY SELECTION: "
BLS_PREFIX = "BLS: "
OBSERVER_PREFIX = "OBSERVER: "
PROPOSED_VIEW_NO = "proposed_view_no"
NAME = "name"
VERSION = "version"
IP = "ip"
PORT = "port"
KEYS = "keys"
TYPE = "type"
TXN_TYPE = "type"
TXN_ID = "txnId"
ORIGIN = "origin"
# Use f.IDENTIFIER.nm
IDENTIFIER = "identifier"
TARGET_NYM = "dest"
DATA = "data"
RAW = "raw"
ENC = "enc"
HASH = "hash"
ALIAS = "alias"
PUBKEY = "pubkey"
VERKEY = "verkey"
BLS_KEY = "blskey"
BLS_KEY_PROOF = "blskey_pop"
NYM_KEY = "NYM"
NODE_IP = "node_ip"
NODE_PORT = "node_port"
CLIENT_IP = "client_ip"
CLIENT_PORT = "client_port"
# CHANGE_HA = "CHANGE_HA"
# CHANGE_KEYS = "CHANGE_KEYS"
SERVICES = "services"
VALIDATOR = "VALIDATOR"
CLIENT = "CLIENT"
ROLE = 'role'
NONCE = 'nonce'
ATTRIBUTES = 'attributes'
VERIFIABLE_ATTRIBUTES = 'verifiableAttributes'
PREDICATES = 'predicates'
TXN_TIME = 'txnTime'
TXN_DATA = "txnData"
LAST_TXN = "lastTxn"
TXNS = "Txns"
BY = "by"
FORCE = 'force'
AML_VERSION = 'version'
AML = 'aml'
AML_CONTEXT = 'amlContext'
AUDIT_TXN_VIEW_NO = "viewNo"
AUDIT_TXN_PP_SEQ_NO = "ppSeqNo"
AUDIT_TXN_LEDGERS_SIZE = "ledgerSize"
AUDIT_TXN_LEDGER_ROOT = "ledgerRoot"
AUDIT_TXN_STATE_ROOT = "stateRoot"
AUDIT_TXN_PRIMARIES = "primaries"
AUDIT_TXN_DIGEST = "digest"
AUDIT_TXN_NODE_REG = "nodeReg"
# State proof fields
STATE_PROOF = 'state_proof'
ROOT_HASH = "root_hash"
MULTI_SIGNATURE = "multi_signature"
PROOF_NODES = "proof_nodes"
VALUE = 'value'
MULTI_SIGNATURE_SIGNATURE = 'signature'
MULTI_SIGNATURE_PARTICIPANTS = 'participants'
MULTI_SIGNATURE_VALUE = 'value'
MULTI_SIGNATURE_VALUE_LEDGER_ID = 'ledger_id'
MULTI_SIGNATURE_VALUE_STATE_ROOT = 'state_root_hash'
MULTI_SIGNATURE_VALUE_TXN_ROOT = 'txn_root_hash'
MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT = 'pool_state_root_hash'
MULTI_SIGNATURE_VALUE_TIMESTAMP = 'timestamp'
# ROLES
IDENTITY_OWNER = Roles.IDENTITY_OWNER.value
STEWARD = Roles.STEWARD.value
TRUSTEE = Roles.TRUSTEE.value
IDENTITY_OWNER_STRING = None
STEWARD_STRING = 'STEWARD'
TRUSTEE_STRING = 'TRUSTEE'
# TXNs
NODE = PlenumTransactions.NODE.value
NYM = PlenumTransactions.NYM.value
AUDIT = PlenumTransactions.AUDIT.value
GET_TXN = PlenumTransactions.GET_TXN.value
TXN_AUTHOR_AGREEMENT = PlenumTransactions.TXN_AUTHOR_AGREEMENT.value
TXN_AUTHOR_AGREEMENT_AML = PlenumTransactions.TXN_AUTHOR_AGREEMENT_AML.value
TXN_AUTHOR_AGREEMENT_DISABLE = PlenumTransactions.TXN_AUTHOR_AGREEMENT_DISABLE.value
GET_TXN_AUTHOR_AGREEMENT = PlenumTransactions.GET_TXN_AUTHOR_AGREEMENT.value
GET_TXN_AUTHOR_AGREEMENT_AML = PlenumTransactions.GET_TXN_AUTHOR_AGREEMENT_AML.value
CURRENT_TXN_PAYLOAD_VERSIONS = defaultdict(lambda: "1")
CURRENT_TXN_PAYLOAD_VERSIONS[TXN_AUTHOR_AGREEMENT] = "2"
CURRENT_TXN_VERSION = "1"
# TXN
# TODO: many of these constants will be replaced
# by constants from Request after Request refactoring
TXN_PAYLOAD = "txn"
TXN_PAYLOAD_TYPE = "type"
TXN_PAYLOAD_PROTOCOL_VERSION = "protocolVersion"
TXN_PAYLOAD_DATA = "data"
TXN_PAYLOAD_VERSION = "ver"
TXN_PAYLOAD_METADATA = "metadata"
TXN_PAYLOAD_METADATA_FROM = "from"
TXN_PAYLOAD_METADATA_ENDORSER = "endorser"
TXN_PAYLOAD_METADATA_REQ_ID = "reqId"
TXN_PAYLOAD_METADATA_DIGEST = "digest"
TXN_PAYLOAD_METADATA_PAYLOAD_DIGEST = "payloadDigest"
TXN_PAYLOAD_METADATA_TAA_ACCEPTANCE = "taaAcceptance"
TXN_METADATA = "txnMetadata"
TXN_METADATA_TIME = "txnTime"
TXN_METADATA_ID = "txnId"
TXN_METADATA_SEQ_NO = "seqNo"
TXN_SIGNATURE = "reqSignature"
TXN_VERSION = "ver"
TXN_SIGNATURE_TYPE = "type"
ED25519 = "ED25519"
TXN_SIGNATURE_VALUES = "values"
TXN_SIGNATURE_FROM = "from"
TXN_SIGNATURE_VALUE = "value"
TXN_AUTHOR_AGREEMENT_TEXT = "text"
TXN_AUTHOR_AGREEMENT_VERSION = "version"
TXN_AUTHOR_AGREEMENT_DIGEST = "digest"
TXN_AUTHOR_AGREEMENT_RETIREMENT_TS = "retirement_ts"
TXN_AUTHOR_AGREEMENT_RATIFICATION_TS = "ratification_ts"
GET_TXN_AUTHOR_AGREEMENT_VERSION = "version"
GET_TXN_AUTHOR_AGREEMENT_DIGEST = "digest"
GET_TXN_AUTHOR_AGREEMENT_TIMESTAMP = "timestamp"
GET_TXN_AUTHOR_AGREEMENT_AML_VERSION = "version"
GET_TXN_AUTHOR_AGREEMENT_AML_TIMESTAMP = "timestamp"
class ClientBootStrategy(IntEnum):
Simple = 1
PoolTxn = 2
Custom = 3
class StorageType(IntEnum):
File = 1
Ledger = 2
class KeyValueStorageType(IntEnum):
Leveldb = 1
Memory = 2
Rocksdb = 3
ChunkedBinaryFile = 4
BinaryFile = 5
class PreVCStrategies(IntEnum):
VC_START_MSG_STRATEGY = 1
@unique
class LedgerState(IntEnum):
not_synced = 1 # Still gathering consistency proofs
syncing = 2 # Got sufficient consistency proofs, will be sending catchup
# requests and waiting for their replies
synced = 3 # Got replies for all catchup requests, indicating catchup
# complete for the ledger
OP_FIELD_NAME = "op"
CLIENT_STACK_SUFFIX = "C"
CLIENT_BLACKLISTER_SUFFIX = "BLC"
NODE_BLACKLISTER_SUFFIX = "BLN"
NODE_PRIMARY_STORAGE_SUFFIX = "PS"
NODE_TXN_STORE_SUFFIX = "TS"
NODE_HASH_STORE_SUFFIX = "HS"
HS_FILE = "file"
HS_MEMORY = "memory"
HS_LEVELDB = 'leveldb'
HS_ROCKSDB = 'rocksdb'
LAST_SENT_PRE_PREPARE = 'lastSentPrePrepare'
PLUGIN_BASE_DIR_PATH = "PluginBaseDirPath"
POOL_LEDGER_ID = 0
DOMAIN_LEDGER_ID = 1
CONFIG_LEDGER_ID = 2
AUDIT_LEDGER_ID = 3
# Store labels
BLS_LABEL = 'bls'
TS_LABEL = 'ts'
IDR_CACHE_LABEL = 'idr'
ATTRIB_LABEL = 'attrib'
SEQ_NO_DB_LABEL = 'seq_no_db'
NODE_STATUS_DB_LABEL = 'node_status_db'
LAST_SENT_PP_STORE_LABEL = 'last_sent_pp_store'
VALID_LEDGER_IDS = (POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID, AUDIT_LEDGER_ID)
CURRENT_PROTOCOL_VERSION = PlenumProtocolVersion.TXN_FORMAT_1_0_SUPPORT.value
OPERATION_SCHEMA_IS_STRICT = False
SCHEMA_IS_STRICT = False
GENERAL_LIMIT_SIZE = 256
|
the-stack_0_17036 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import idl_schema
import unittest
def getFunction(schema, name):
for item in schema['functions']:
if item['name'] == name:
return item
raise KeyError('Missing function %s' % name)
def getParams(schema, name):
function = getFunction(schema, name)
return function['parameters']
def getReturns(schema, name):
function = getFunction(schema, name)
return function['returns']
def getType(schema, id):
for item in schema['types']:
if item['id'] == id:
return item
class IdlSchemaTest(unittest.TestCase):
def setUp(self):
loaded = idl_schema.Load('test/idl_basics.idl')
self.assertEquals(1, len(loaded))
self.assertEquals('idl_basics', loaded[0]['namespace'])
self.idl_basics = loaded[0]
def testSimpleCallbacks(self):
schema = self.idl_basics
expected = [{'type':'function', 'name':'cb', 'parameters':[]}]
self.assertEquals(expected, getParams(schema, 'function4'))
expected = [{'type':'function', 'name':'cb',
'parameters':[{'name':'x', 'type':'integer'}]}]
self.assertEquals(expected, getParams(schema, 'function5'))
expected = [{'type':'function', 'name':'cb',
'parameters':[{'name':'arg', '$ref':'MyType1'}]}]
self.assertEquals(expected, getParams(schema, 'function6'))
def testCallbackWithArrayArgument(self):
schema = self.idl_basics
expected = [{'type':'function', 'name':'cb',
'parameters':[{'name':'arg', 'type':'array',
'items':{'$ref':'MyType2'}}]}]
self.assertEquals(expected, getParams(schema, 'function12'))
def testArrayOfCallbacks(self):
schema = idl_schema.Load('test/idl_callback_arrays.idl')[0]
expected = [{'type':'array', 'name':'callbacks',
'items':{'type':'function', 'name':'MyCallback',
'parameters':[{'type':'integer', 'name':'x'}]}}]
self.assertEquals(expected, getParams(schema, 'whatever'))
def testLegalValues(self):
self.assertEquals({
'x': {'name': 'x', 'type': 'integer', 'enum': [1,2],
'description': 'This comment tests "double-quotes".'},
'y': {'name': 'y', 'type': 'string'},
'z': {'name': 'z', 'type': 'string'},
'a': {'name': 'a', 'type': 'string'},
'b': {'name': 'b', 'type': 'string'},
'c': {'name': 'c', 'type': 'string'}},
getType(self.idl_basics, 'MyType1')['properties'])
def testMemberOrdering(self):
self.assertEquals(
['x', 'y', 'z', 'a', 'b', 'c'],
getType(self.idl_basics, 'MyType1')['properties'].keys())
def testEnum(self):
schema = self.idl_basics
expected = {'enum': [{'name': 'name1', 'description': 'comment1'},
{'name': 'name2'}],
'description': 'Enum description',
'type': 'string', 'id': 'EnumType'}
self.assertEquals(expected, getType(schema, expected['id']))
expected = [{'name':'type', '$ref':'EnumType'},
{'type':'function', 'name':'cb',
'parameters':[{'name':'type', '$ref':'EnumType'}]}]
self.assertEquals(expected, getParams(schema, 'function13'))
expected = [{'items': {'$ref': 'EnumType'}, 'name': 'types',
'type': 'array'}]
self.assertEquals(expected, getParams(schema, 'function14'))
def testNoCompile(self):
schema = self.idl_basics
func = getFunction(schema, 'function15')
self.assertTrue(func is not None)
self.assertTrue(func['nocompile'])
def testNoDocOnEnum(self):
schema = self.idl_basics
enum_with_nodoc = getType(schema, 'EnumTypeWithNoDoc')
self.assertTrue(enum_with_nodoc is not None)
self.assertTrue(enum_with_nodoc['nodoc'])
def testInternalNamespace(self):
idl_basics = self.idl_basics
self.assertEquals('idl_basics', idl_basics['namespace'])
self.assertTrue(idl_basics['internal'])
self.assertFalse(idl_basics['nodoc'])
def testReturnTypes(self):
schema = self.idl_basics
self.assertEquals({'name': 'function19', 'type': 'integer'},
getReturns(schema, 'function19'))
self.assertEquals({'name': 'function20', '$ref': 'MyType1',
'optional': True},
getReturns(schema, 'function20'))
self.assertEquals({'name': 'function21', 'type': 'array',
'items': {'$ref': 'MyType1'}},
getReturns(schema, 'function21'))
self.assertEquals({'name': 'function22', '$ref': 'EnumType',
'optional': True},
getReturns(schema, 'function22'))
self.assertEquals({'name': 'function23', 'type': 'array',
'items': {'$ref': 'EnumType'}},
getReturns(schema, 'function23'))
def testChromeOSPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_chromeos.idl')[0]
self.assertEquals('idl_namespace_chromeos', schema['namespace'])
expected = ['chromeos']
self.assertEquals(expected, schema['platforms'])
def testAllPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_all_platforms.idl')[0]
self.assertEquals('idl_namespace_all_platforms', schema['namespace'])
expected = ['chromeos', 'chromeos_touch', 'linux', 'mac', 'win']
self.assertEquals(expected, schema['platforms'])
def testNonSpecificPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_non_specific_platforms.idl')[0]
self.assertEquals('idl_namespace_non_specific_platforms',
schema['namespace'])
expected = None
self.assertEquals(expected, schema['platforms'])
def testSpecificImplementNamespace(self):
schema = idl_schema.Load('test/idl_namespace_specific_implement.idl')[0]
self.assertEquals('idl_namespace_specific_implement',
schema['namespace'])
expected = 'idl_namespace_specific_implement.idl'
self.assertEquals(expected, schema['compiler_options']['implemented_in'])
def testSpecificImplementOnChromeOSNamespace(self):
schema = idl_schema.Load(
'test/idl_namespace_specific_implement_chromeos.idl')[0]
self.assertEquals('idl_namespace_specific_implement_chromeos',
schema['namespace'])
expected_implemented_path = 'idl_namespace_specific_implement_chromeos.idl'
expected_platform = ['chromeos']
self.assertEquals(expected_implemented_path,
schema['compiler_options']['implemented_in'])
self.assertEquals(expected_platform, schema['platforms'])
def testCallbackComment(self):
schema = self.idl_basics
self.assertEquals('A comment on a callback.',
getParams(schema, 'function16')[0]['description'])
self.assertEquals(
'A parameter.',
getParams(schema, 'function16')[0]['parameters'][0]['description'])
self.assertEquals(
'Just a parameter comment, with no comment on the callback.',
getParams(schema, 'function17')[0]['parameters'][0]['description'])
self.assertEquals(
'Override callback comment.',
getParams(schema, 'function18')[0]['description'])
def testFunctionComment(self):
schema = self.idl_basics
func = getFunction(schema, 'function3')
self.assertEquals(('This comment should appear in the documentation, '
'despite occupying multiple lines.'),
func['description'])
self.assertEquals(
[{'description': ('So should this comment about the argument. '
'<em>HTML</em> is fine too.'),
'name': 'arg',
'$ref': 'MyType1'}],
func['parameters'])
func = getFunction(schema, 'function4')
self.assertEquals(('This tests if "double-quotes" are escaped correctly.'
'<br/><br/> It also tests a comment with two newlines.'),
func['description'])
def testReservedWords(self):
schema = idl_schema.Load('test/idl_reserved_words.idl')[0]
foo_type = getType(schema, 'Foo')
self.assertEquals([{'name': 'float'}, {'name': 'DOMString'}],
foo_type['enum'])
enum_type = getType(schema, 'enum')
self.assertEquals([{'name': 'callback'}, {'name': 'namespace'}],
enum_type['enum'])
dictionary = getType(schema, 'dictionary')
self.assertEquals('integer', dictionary['properties']['long']['type'])
mytype = getType(schema, 'MyType')
self.assertEquals('string', mytype['properties']['interface']['type'])
params = getParams(schema, 'static')
self.assertEquals('Foo', params[0]['$ref'])
self.assertEquals('enum', params[1]['$ref'])
def testObjectTypes(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
foo_type = getType(schema, 'FooType')
self.assertEquals('object', foo_type['type'])
self.assertEquals('integer', foo_type['properties']['x']['type'])
self.assertEquals('object', foo_type['properties']['y']['type'])
self.assertEquals(
'any',
foo_type['properties']['y']['additionalProperties']['type'])
self.assertEquals('object', foo_type['properties']['z']['type'])
self.assertEquals(
'any',
foo_type['properties']['z']['additionalProperties']['type'])
self.assertEquals('Window', foo_type['properties']['z']['isInstanceOf'])
bar_type = getType(schema, 'BarType')
self.assertEquals('object', bar_type['type'])
self.assertEquals('any', bar_type['properties']['x']['type'])
def testObjectTypesInFunctions(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
params = getParams(schema, 'objectFunction1')
self.assertEquals('object', params[0]['type'])
self.assertEquals('any', params[0]['additionalProperties']['type'])
self.assertEquals('ImageData', params[0]['isInstanceOf'])
params = getParams(schema, 'objectFunction2')
self.assertEquals('any', params[0]['type'])
def testObjectTypesWithOptionalFields(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
baz_type = getType(schema, 'BazType')
self.assertEquals(True, baz_type['properties']['x']['optional'])
self.assertEquals('integer', baz_type['properties']['x']['type'])
self.assertEquals(True, baz_type['properties']['foo']['optional'])
self.assertEquals('FooType', baz_type['properties']['foo']['$ref'])
if __name__ == '__main__':
unittest.main()
|
the-stack_0_17037 | from django.contrib.auth import get_user_model
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
from django.urls import reverse
from meta.models import ModelMeta
class Club(ModelMeta, models.Model):
'''
Club
'''
name = models.CharField(
'nombre', max_length=64
)
slug = models.SlugField(
'slug', max_length=64, unique=True
)
description = models.TextField(
'descripción', max_length=300
)
document = models.FileField(
'documento', upload_to='clubs/docs/', blank=True,
help_text='Útil para bases de concursos o información de actividades.'
)
document_name = models.CharField(
'nombre del documento', max_length=120, default='', blank=True,
help_text='Texto que aparecerá en el enlace del documento.'
)
image = models.ImageField(
'imagen', upload_to='clubs/', blank=True,
help_text='Imagen para mostrar en la lista de clubes'
)
telegram_group = models.CharField(
'grupo de telegram', max_length=64, blank=True, default=''
)
telegram_group_link = models.CharField(
'enlace al grupo de telegram', max_length=64, blank=True, default=''
)
managers = models.ManyToManyField(
get_user_model(), 'managed_clubs', verbose_name='gestores'
)
members = models.ManyToManyField(
get_user_model(), 'clubs', verbose_name='miembros'
)
_metadata = {
'title': 'name',
'description': 'description',
'image': 'get_image',
}
class Meta:
verbose_name = 'club'
verbose_name_plural = 'clubes'
permissions = [
('can_link_club', 'Puede vincular un grupo de Telegram con un club')
]
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('clubs:detail', args=[self.slug])
def get_image(self):
return self.image.url if self.image else static('images/favicon.png')
class ClubMeeting(models.Model):
'''
Club meeting
'''
club = models.ForeignKey(
Club, models.CASCADE, 'meetings', verbose_name='club'
)
title = models.CharField(
'título', max_length=200, blank=True
)
place = models.CharField(
'lugar', max_length=120
)
moment = models.DateTimeField('fecha')
class Meta:
verbose_name = 'quedada'
ordering = ['moment']
def __str__(self):
return '{} en {} ({})'.format(
self.club.name, self.place, self.moment.strftime('%d %b %Y %H:%M')
)
|
the-stack_0_17038 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "cone.legendgrouptitle"
_path_str = "cone.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.cone.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.cone.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.cone.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
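# Example (a sketch):
#
#   font = Font(family="Arial", size=14, color="gray")
#   # or, equivalently, from a dict: Font({"family": "Arial", "size": 14, "color": "gray"})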
|
the-stack_0_17039 | '''
Utility functions.
'''
from datetime import datetime, timezone
def timezone_aware(date: datetime) -> datetime:
'''
Convert naive to timezone-aware datetime (UTC timezone).
Parameters:
date (datetime): Datetime object.
Returns:
datetime: A timezone-aware datetime.
'''
return date.replace(tzinfo=timezone.utc) if date.tzinfo is None else date
__all__ = [
'timezone_aware'
]
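# Minimal self-check (illustrative addition, not part of the original module):
# a naive datetime gains tzinfo=UTC, an already-aware one is returned unchanged.
if __name__ == '__main__':
    naive = datetime(2023, 1, 1, 12, 0)
    aware = timezone_aware(naive)
    assert aware.tzinfo is timezone.utc
    assert timezone_aware(aware) is aware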
|
the-stack_0_17040 | import pandas as pd
import trackpy as tp
def remove_short_tracks(tracks, threshold_length=7):
"""Filter les tracks qui font plus de X pixels.
Parameters
----------
tracks: pd.DataFrame
contient les traces brutes des nanoparticle
threshold_length: float
le nombre de pixels minimum pour avoir une trace valide
Returns
-------
t_neurons: pd.DataFrame
seulement une partie des traces obtenues en entréee.
"""
print("[filter.py] Je filtre les tracks qui sont trop courtes")
# t_neurons will contain the trajectories of interest
t_neurons = pd.DataFrame()
Ntraj = 0
for item in set(tracks.particle):
sub = tracks[tracks.particle == item] # selection of the item-th particle trajectory
distance = tp.motion.diagonal_size(sub)
# distance is an estimation of the particle displacement if the displacement
# is roughly linear
if distance > threshold_length:
Ntraj += 1
            # DataFrame.append was removed in pandas 2.x; concat is the equivalent.
            t_neurons = pd.concat([t_neurons, sub])
    print(str(Ntraj) + ' trajectories kept')
return t_neurons
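# Minimal self-check on synthetic data (illustrative addition; assumes trackpy
# is installed and that tracks carry the usual 'x', 'y', 'frame', 'particle'
# columns produced by trackpy linking):
if __name__ == '__main__':
    demo = pd.DataFrame({
        'x': list(range(20)) + [50] * 20,
        'y': list(range(20)) + [50] * 20,
        'frame': list(range(20)) * 2,
        'particle': [0] * 20 + [1] * 20,
    })
    kept = remove_short_tracks(demo, threshold_length=7)
    print(sorted(set(kept.particle)))  # expected: [0], only the long track survives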
|
the-stack_0_17041 | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of tvOS rules."""
load(
"@build_bazel_rules_apple//apple/internal:apple_product_type.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/internal:linking_support.bzl",
"linking_support",
)
load(
"@build_bazel_rules_apple//apple/internal:outputs.bzl",
"outputs",
)
load(
"@build_bazel_rules_apple//apple/internal:partials.bzl",
"partials",
)
load(
"@build_bazel_rules_apple//apple/internal:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple/internal:rule_factory.bzl",
"rule_factory",
)
load(
"@build_bazel_rules_apple//apple/internal:run_support.bzl",
"run_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"TvosApplicationBundleInfo",
"TvosExtensionBundleInfo",
"TvosFrameworkBundleInfo",
)
def _tvos_application_impl(ctx):
"""Experimental implementation of tvos_application."""
top_level_attrs = [
"app_icons",
"launch_images",
"strings",
]
binary_descriptor = linking_support.register_linking_action(ctx)
binary_artifact = binary_descriptor.artifact
debug_outputs_provider = binary_descriptor.debug_outputs_provider
bundle_id = ctx.attr.bundle_id
embeddable_targets = ctx.attr.extensions + ctx.attr.frameworks
swift_dylib_dependencies = ctx.attr.extensions + ctx.attr.frameworks
processor_partials = [
partials.app_assets_validation_partial(
app_icons = ctx.files.app_icons,
launch_images = ctx.files.launch_images,
),
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.bitcode_symbols_partial(
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = embeddable_targets,
package_bitcode = True,
),
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_dependencies = embeddable_targets,
debug_outputs_provider = debug_outputs_provider,
),
partials.embedded_bundles_partial(
bundle_embedded_bundles = True,
embeddable_targets = embeddable_targets,
),
partials.framework_import_partial(
targets = ctx.attr.deps + embeddable_targets,
),
partials.resources_partial(
bundle_id = bundle_id,
bundle_verification_targets = [struct(target = ext) for ext in ctx.attr.extensions],
plist_attrs = ["infoplists"],
targets_to_avoid = ctx.attr.frameworks,
top_level_attrs = top_level_attrs,
),
partials.settings_bundle_partial(),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
dependency_targets = swift_dylib_dependencies,
bundle_dylibs = True,
package_swift_support_if_needed = True,
),
]
if platform_support.is_device_build(ctx):
processor_partials.append(
partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
)
processor_result = processor.process(ctx, processor_partials)
executable = outputs.executable(ctx)
run_support.register_simulator_executable(ctx, executable)
return [
DefaultInfo(
executable = executable,
files = processor_result.output_files,
runfiles = ctx.runfiles(
files = [
outputs.archive(ctx),
ctx.file._std_redirect_dylib,
],
),
),
TvosApplicationBundleInfo(),
# Propagate the binary provider so that this target can be used as bundle_loader in test
# rules.
binary_descriptor.provider,
] + processor_result.providers
def _tvos_framework_impl(ctx):
"""Experimental implementation of tvos_framework."""
binary_descriptor = linking_support.register_linking_action(ctx)
binary_artifact = binary_descriptor.artifact
binary_provider = binary_descriptor.provider
debug_outputs_provider = binary_descriptor.debug_outputs_provider
bundle_id = ctx.attr.bundle_id
processor_partials = [
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.bitcode_symbols_partial(
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.frameworks,
),
# TODO(kaipi): Check if clang_rt dylibs are needed in Frameworks, or if
        # they can be skipped.
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_dependencies = ctx.attr.frameworks,
debug_outputs_provider = debug_outputs_provider,
),
partials.embedded_bundles_partial(
frameworks = [outputs.archive(ctx)],
embeddable_targets = ctx.attr.frameworks,
),
partials.extension_safe_validation_partial(is_extension_safe = ctx.attr.extension_safe),
partials.framework_headers_partial(hdrs = ctx.files.hdrs),
partials.framework_provider_partial(binary_provider = binary_provider),
partials.resources_partial(
bundle_id = bundle_id,
plist_attrs = ["infoplists"],
targets_to_avoid = ctx.attr.frameworks,
version_keys_required = False,
),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
dependency_targets = ctx.attr.frameworks,
),
]
processor_result = processor.process(ctx, processor_partials)
return [
DefaultInfo(files = processor_result.output_files),
TvosFrameworkBundleInfo(),
] + processor_result.providers
def _tvos_extension_impl(ctx):
"""Experimental implementation of tvos_extension."""
top_level_attrs = [
"app_icons",
"strings",
]
binary_descriptor = linking_support.register_linking_action(ctx)
binary_artifact = binary_descriptor.artifact
debug_outputs_provider = binary_descriptor.debug_outputs_provider
bundle_id = ctx.attr.bundle_id
processor_partials = [
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.bitcode_symbols_partial(
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.frameworks,
),
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_dependencies = ctx.attr.frameworks,
debug_outputs_provider = debug_outputs_provider,
),
partials.embedded_bundles_partial(
plugins = [outputs.archive(ctx)],
embeddable_targets = ctx.attr.frameworks,
),
partials.extension_safe_validation_partial(is_extension_safe = True),
partials.resources_partial(
bundle_id = bundle_id,
plist_attrs = ["infoplists"],
targets_to_avoid = ctx.attr.frameworks,
top_level_attrs = top_level_attrs,
),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
dependency_targets = ctx.attr.frameworks,
),
]
if platform_support.is_device_build(ctx):
processor_partials.append(
partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
)
processor_result = processor.process(ctx, processor_partials)
return [
DefaultInfo(
files = processor_result.output_files,
),
TvosExtensionBundleInfo(),
] + processor_result.providers
tvos_application = rule_factory.create_apple_bundling_rule(
implementation = _tvos_application_impl,
platform_type = "tvos",
product_type = apple_product_type.application,
doc = "Builds and bundles a tvOS Application.",
)
tvos_extension = rule_factory.create_apple_bundling_rule(
implementation = _tvos_extension_impl,
platform_type = "tvos",
product_type = apple_product_type.app_extension,
doc = "Builds and bundles a tvOS Extension.",
)
tvos_framework = rule_factory.create_apple_bundling_rule(
implementation = _tvos_framework_impl,
platform_type = "tvos",
product_type = apple_product_type.framework,
doc = "Builds and bundles a tvOS Dynamic Framework.",
)
|
the-stack_0_17045 | from lib import *
def get_tick_labels(bins, ticks):
ticklabels = []
for i in ticks:
if i < len(bins):
ticklabels.append('%.2f'%(bins[int(i)]))
else:
ticklabels.append('%.2f'%(bins[-1])+'+')
return ticklabels
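# Worked example (illustrative, not from the original module): tick positions
# past the end of `bins` fall into an overflow bin marked with a trailing '+'.
#   get_tick_labels([0.0, 0.5, 1.0, 1.5], [0, 2, 5])
#   -> ['0.00', '1.00', '1.50+']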
class Visualizer:
def __init__(self, action_labels):
self.n_action = len(action_labels)
self.action_labels = action_labels
def plot_a_episode(self,
env, model,
explored_cum_rewards, explored_actions,
safe_cum_rewards, safe_actions,
fig_path):
f, axs = plt.subplots(3,1,sharex=True, figsize=(14,14))
ax_price, ax_action, ax_Q = axs
ls = ['-','--']
for i in range(min(2,env.prices.shape[1])):
p = env.prices[:,i]/env.prices[0,i]*100 - 100
ax_price.plot(p, 'k'+ls[i], label='input%i - 100'%i)
ax_price.plot(explored_cum_rewards, 'b', label='explored P&L')
ax_price.plot(safe_cum_rewards, 'r', label='safe P&L')
ax_price.legend(loc='best', frameon=False)
ax_price.set_title(env.title+', ideal: %.1f, safe: %.1f, explored: %1.f'%(
env.max_profit, safe_cum_rewards[-1], explored_cum_rewards[-1]))
ax_action.plot(explored_actions, 'b', label='explored')
ax_action.plot(safe_actions, 'r', label='safe', linewidth=2)
ax_action.set_ylim(-0.4, self.n_action-0.6)
ax_action.set_ylabel('action')
ax_action.set_yticks(range(self.n_action))
ax_action.legend(loc='best', frameon=False)
style = ['k','r','b']
qq = []
        # xrange is Python 2 only; range keeps this runnable on Python 3 as well.
        for t in range(env.t0):
            qq.append([np.nan] * self.n_action)
        for t in range(env.t0, env.t_max):
            qq.append(model.predict(env.get_state(t)))
        for i in range(self.n_action):
            ax_Q.plot([float(qq[t][i]) for t in range(len(qq))],
style[i], label=self.action_labels[i])
ax_Q.set_ylabel('Q')
ax_Q.legend(loc='best', frameon=False)
ax_Q.set_xlabel('t')
plt.subplots_adjust(wspace=0.4)
plt.savefig(fig_path)
plt.close()
def plot_episodes(self,
explored_total_rewards, safe_total_rewards, explorations,
fig_path, MA_window=100):
f = plt.figure(figsize=(14,10)) # width, height in inch (100 pixel)
if explored_total_rewards is None:
f, ax_reward = plt.subplots()
else:
figshape = (3,1)
ax_reward = plt.subplot2grid(figshape, (0, 0), rowspan=2)
ax_exploration = plt.subplot2grid(figshape, (2, 0), sharex=ax_reward)
tt = range(len(safe_total_rewards))
if explored_total_rewards is not None:
            # pd.rolling_median/rolling_std were removed from pandas; use the rolling accessor.
            ma = pd.Series(explored_total_rewards).rolling(window=MA_window, min_periods=1).median().values
            std = pd.Series(explored_total_rewards).rolling(window=MA_window, min_periods=3).std().values
ax_reward.plot(tt, explored_total_rewards,'bv', fillstyle='none')
ax_reward.plot(tt, ma, 'b', label='explored ma', linewidth=2)
ax_reward.plot(tt, std, 'b--', label='explored std', linewidth=2)
        ma = pd.Series(safe_total_rewards).rolling(window=MA_window, min_periods=1).median().values
        std = pd.Series(safe_total_rewards).rolling(window=MA_window, min_periods=3).std().values
ax_reward.plot(tt, safe_total_rewards,'ro', fillstyle='none')
ax_reward.plot(tt, ma,'r', label='safe ma', linewidth=2)
ax_reward.plot(tt, std,'r--', label='safe std', linewidth=2)
ax_reward.axhline(y=0, color='k', linestyle=':')
#ax_reward.axhline(y=60, color='k', linestyle=':')
ax_reward.set_ylabel('total reward')
ax_reward.legend(loc='best', frameon=False)
ax_reward.yaxis.tick_right()
ylim = ax_reward.get_ylim()
ax_reward.set_ylim((max(-100,ylim[0]), min(100,ylim[1])))
if explored_total_rewards is not None:
ax_exploration.plot(tt, np.array(explorations)*100., 'k')
ax_exploration.set_ylabel('exploration')
ax_exploration.set_xlabel('episode')
plt.savefig(fig_path)
plt.close()
def test_visualizer():
f = plt.figure()#figsize=(5,8))
axs_action = []
ncol = 3
nrow = 2
clim = (0,1)
ax = plt.subplot2grid((nrow, ncol), (0,ncol-1))
ax.matshow(np.random.random((2,2)), cmap='RdYlBu_r', clim=clim)
for action in range(3):
        row = 1 + action // ncol  # integer division: subplot2grid needs int indices
        col = action % ncol
ax = plt.subplot2grid((nrow, ncol), (row,col))
cax = ax.matshow(np.random.random((2,2)), cmap='RdYlBu_r', clim=clim)
ax = plt.subplot2grid((nrow, ncol), (0,0), colspan=ncol-1)
cbar = f.colorbar(cax, ax=ax)
plt.show()
class VisualizerSequential:
def config(self):
pass
def __init__(self, model):
self.model = model
self.layers = []
for layer in self.model.layers:
self.layers.append(str(layer.name))
self.inter_models = dict()
model_input = self.model.input
for layer in self.layers:
self.inter_models[layer] = keras.models.Model(
inputs=model_input,
outputs=self.model.get_layer(layer).output)
self.config()
class VisualizerConv1D(VisualizerSequential):
def config(self):
self.n_channel = self.model.input.shape[2]
n_col = self.n_channel
for layer in self.layers:
shape = self.inter_models[layer].output.shape
if len(shape) == 3:
n_col = max(n_col, shape[2])
self.figshape = (len(self.layers)+1, int(n_col))
def plot(self, x):
f = plt.figure(figsize=(30,30))
for i in range(self.n_channel):
ax = plt.subplot2grid(self.figshape, (0,i))
ax.plot(x[0,:,i], '.-')
ax.set_title('input, channel %i'%i)
for i_layer in range(len(self.layers)):
layer = self.layers[i_layer]
z = self.inter_models[layer].predict(x)
print('plotting '+layer)
if len(z.shape) == 3:
for i in range(z.shape[2]):
ax = plt.subplot2grid(self.figshape, (i_layer+1, i))
ax.plot(z[0,:,i], '.-')
ax.set_title(layer+' filter %i'%i)
else:
ax = plt.subplot2grid(self.figshape, (i_layer+1, 0))
ax.plot(z[0,:], '.-')
ax.set_title(layer)
ax.set_ylim(-100,100)
def print_w(self):
layer = self.layers[0]
ww = self.inter_models[layer].get_weights()
for w in ww:
print(w.shape)
print(w)
"""
def test_VisualizerConv1D():
from agents import *
from sampler import *
fld = os.path.join('models','SinSampler(2, 5, 60) large')
qmodel = QModelConv(None, None)
qmodel.load(fld)
vis_conv = VisualizerConv1D(qmodel.model)
print vis_conv.layers
#return
vis_conv.print_w()
return
sampler = SinSampler(2, 5, 60)
x_all, title = sampler.sample(['fake'])
fld_fig = os.path.join(fld,'vis_conv', title.replace('/','|'))
makedirs(fld_fig)
for t in range(20, 60):
state = x_all[t-20:t,:]
state = (state.copy()/state[-1]-1.)*100
vis_conv.plot(np.reshape(state, (1, 20, 1)))
plt.savefig(os.path.join(fld_fig, 't%i.pdf'%t))
plt.close()
"""
if __name__ == '__main__':
#test()
#print np.isnan(float('nan'))
#test_VisualizerConv1D()
#histgram()
plot_price()
|
the-stack_0_17046 | import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
"Randomly choose one of the actions from the vacuum environment."
p = ag.RandomAgentProgram(['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp'])
return ag.Agent(p)
def TableDrivenVacuumAgent():
"[Figure 2.3]"
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
p = ag.TableDrivenAgentProgram(table)
    return ag.Agent(p)  # pass the table-driven program to the agent
def ReflexVacuumAgent():
"A reflex agent for the two-state vacuum environment. [Figure 2.8]"
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
def ModelBasedVacuumAgent() -> object:
"An agent that keeps track of what locations are clean or dirty."
model = {loc_A: None, loc_B: None}
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
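# Illustrative check (added, mirroring the doctest style used below): once the
# internal model has seen both squares clean, the agent idles instead of
# oscillating.
# >>> a = ModelBasedVacuumAgent()
# >>> a.program((loc_A, 'Clean'))   # -> 'Right'
# >>> a.program((loc_B, 'Clean'))   # -> 'NoOp'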
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super(TrivialVacuumEnvironment, self).__init__()
choice = random.randint(0, 3)
if choice % 2: # 1 or 3
self.add_thing(Dirt(), loc_A)
if choice > 1: # 2 or 3
self.add_thing(Dirt(), loc_B)
def percept(self, agent):
"Returns the agent's location, and the location status (Dirty/Clean)."
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
return (agent.location, status)
#
# def execute_action(self, agent, action):
# """Change agent's location and/or location's status; track performance.
# Score 10 for each dirt cleaned; -1 for each move."""
# if action == 'Right':
# agent.location = loc_B
# agent.performance -= 1
# elif action == 'Left':
# agent.location = loc_A
# agent.performance -= 1
# elif action == 'Suck':
# if self.status[agent.location] == 'Dirty':
# agent.performance += 10
# self.status[agent.location] = 'Clean'
#
def add_agent(self, a):
"Agents start in either location at random."
super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vacuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vacuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'submissions/Zemgulys/myFace.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
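# A headless variant (illustrative sketch, left commented out so it does not
# execute after the GUI main loop; it reuses only the API exercised above):
# v2 = VacuumEnvironment(5, 4)
# a2 = ModelBasedVacuumAgent()
# v2.add_thing(a2, location=v2.random_location_inbounds())
# v2.scatter_things(Dirt)
# v2.run(50)
# print('performance:', a2.performance)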
|