max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---
stringlengths 4-245 | stringlengths 7-115 | int64 101-368k | stringlengths 2-8 | stringlengths 6-1.03M
deprecated/examples/simnet_bow/py_reader_generator.py | hutuxian/FleetX | 170 | 12661457 | #!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There are 13 integer features and 26 categorical features
import random
def combination(x, y):
res = [[[xi, yi] for yi in y] for xi in x]
return res[0]
def get_one_data(file_list, sample_rate):
for file in file_list:
contents = []
with open(file, "r") as fin:
for q in fin.readlines():
"""query_ids, pos_title_ids, neg_title_ids, label"""
one_data = q.split(";")[:-1]
if len(one_data) < 4:
print("data format error, please check!", q)
continue
label = int(one_data[0])
pos_title_num, neg_title_num = int(one_data[1].split(" ")[0]), int(one_data[1].split(" ")[1])
query_ids = [int(x) for x in one_data[2].split(" ")]
if pos_title_num + neg_title_num != len(one_data) - 3:
print("data format error, pos_title_num={}, neg_title_num={}, len(one_data)={}"
.format(pos_title_num, neg_title_num, len(one_data)))
continue
for x in range(pos_title_num):
pos_title_ids = [ int(i) for i in one_data[3+x].split(" ")]
for y in range(neg_title_num):
if random.random() > sample_rate:
continue
neg_title_ids = [int(i) for i in one_data[3+pos_title_num+y].split(" ")]
yield [query_ids, pos_title_ids, neg_title_ids, [label]]
fin.close()
def get_batch_reader(file_list, batch_size=128, sample_rate=0.02, trainer_id=1):
def batch_reader():
res = []
idx = 0
for i in get_one_data(file_list, sample_rate):
res.append(i)
idx += 1
if len(res) >= batch_size:
yield res
res = []
return batch_reader
def get_infer_data(file_list, sample_rate):
for file in file_list:
contents = []
with open(file, "r") as fin:
for q in fin.readlines():
"""query_ids, pos_title_ids, neg_title_ids, label"""
one_data = q.split(";")[:-1]
if len(one_data) < 4:
print("data format error, please check!", q)
continue
label = int(one_data[0])
pos_title_num, neg_title_num = int(one_data[1].split(" ")[0]), int(one_data[1].split(" ")[1])
query_ids = [int(x) for x in one_data[2].split(" ")]
if pos_title_num + neg_title_num != len(one_data) - 3:
print("data format error, pos_title_num={}, neg_title_num={}, len(one_data)={}"
.format(pos_title_num, neg_title_num, len(one_data)))
continue
for x in range(pos_title_num):
pos_title_ids = [int(i) for i in one_data[3 + x].split(" ")]
for y in range(neg_title_num):
if random.random() > sample_rate:
continue
neg_title_ids = [int(i) for i in one_data[3 + pos_title_num + y].split(" ")]
yield [query_ids, pos_title_ids, neg_title_ids]
fin.close()
def get_infer_batch_reader(file_list, batch_size=128, sample_rate=0.02, trainer_id=1):
def batch_reader():
res = []
idx = 0
for i in get_infer_data(file_list, sample_rate):
res.append(i)
idx += 1
if len(res) >= batch_size:
yield res
res = []
return batch_reader
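# A minimal usage sketch (hypothetical: "part-0" stands in for a real data file in
# the ";"-separated format parsed above; shapes depend on the actual data):
#
#     reader = get_batch_reader(["part-0"], batch_size=4, sample_rate=1.0)
#     for batch in reader():
#         # each sample is [query_ids, pos_title_ids, neg_title_ids, [label]]
#         print(len(batch), batch[0])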
|
flocker/control/test/test_diffing.py | stackriot/flocker | 2,690 | 12661458 | <gh_stars>1000+
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Tests for ``flocker.control._diffing``.
"""
from json import dumps
from uuid import uuid4
from eliot.testing import capture_logging, assertHasMessage
from hypothesis import given
import hypothesis.strategies as st
from pyrsistent import PClass, field, pmap, pset, InvariantException
from twisted.python.monkey import MonkeyPatcher
from .._diffing import (
create_diff,
compose_diffs,
DIFF_COMMIT_ERROR,
_TransformProxy,
)
from .._persistence import wire_encode, wire_decode
from .._model import Node, Port
from ..testtools import (
application_strategy,
deployment_strategy,
node_strategy,
related_deployments_strategy
)
from ...testtools import TestCase
from testtools.matchers import Equals, LessThan
class DiffTestObj(PClass):
"""
Simple pyrsistent object for testing.
"""
a = field()
class DeploymentDiffTest(TestCase):
"""
Tests for creating and applying diffs between deployments.
"""
@given(
related_deployments_strategy(2)
)
def test_deployment_diffing(self, deployments):
"""
Diffing two arbitrary deployments, then applying the diff to the first
deployment yields the second even after the diff has been serialized
and re-created.
"""
deployment_a, deployment_b = deployments
diff = create_diff(deployment_a, deployment_b)
serialized_diff = wire_encode(diff)
newdiff = wire_decode(serialized_diff)
should_b_b = newdiff.apply(deployment_a)
self.assertThat(
should_b_b,
Equals(deployment_b)
)
@given(
st.lists(deployment_strategy(), min_size=3, max_size=10)
)
def test_deployment_diffing_composable(self, deployments):
"""
Diffs should compose to create an aggregate diff.
Create a bunch of deployments and compute the incremental diffs from
one to the next. Compose all diffs together and apply the resulting
diff to the first deployment. Verify that the final deployment is the
result.
"""
reserialize = lambda x: wire_decode(wire_encode(x))
deployment_diffs = list(
reserialize(create_diff(a, b))
for a, b in zip(deployments[:-1], deployments[1:])
)
full_diff = reserialize(compose_diffs(deployment_diffs))
self.assertThat(
full_diff.apply(deployments[0]),
Equals(deployments[-1])
)
def test_deployment_diffing_smart(self):
"""
Small modifications to a deployment have diffs that are small. Their
reverse is also small.
"""
# Any large deployment will do, just use hypothesis for convenience of
# generating a large deployment.
deployment = deployment_strategy(min_number_of_nodes=90).example()
new_nodes = list(Node(uuid=uuid4()) for _ in xrange(4))
d = reduce(lambda x, y: x.update_node(y), new_nodes, deployment)
encoded_deployment = wire_encode(deployment)
diff = create_diff(deployment, d)
encoded_diff = wire_encode(diff)
self.assertThat(
len(encoded_diff),
LessThan(len(encoded_deployment)/2)
)
self.assertThat(
wire_decode(encoded_diff).apply(deployment),
Equals(d)
)
removal_diff = create_diff(d, deployment)
encoded_removal_diff = wire_encode(removal_diff)
self.assertThat(
len(encoded_removal_diff),
LessThan(len(encoded_deployment)/2)
)
self.assertThat(
wire_decode(encoded_removal_diff).apply(d),
Equals(deployment)
)
def test_set_diffing_smart(self):
"""
Small modifications to sets have diffs that are small. Their reverse
is also small.
"""
# Any Application with a large set of ports will do, just use
# hypothesis for convenience of generating a large number of ports on
# an application.
application = application_strategy(min_number_of_ports=1000).example()
new_ports = list(
Port(internal_port=i, external_port=i) for i in xrange(4)
)
a = reduce(
lambda x, y: x.transform(['ports'], lambda x: x.add(y)),
new_ports,
application
)
encoded_application = wire_encode(application)
diff = create_diff(application, a)
encoded_diff = wire_encode(diff)
self.assertThat(
len(encoded_diff),
LessThan(len(encoded_application)/2)
)
self.assertThat(
wire_decode(encoded_diff).apply(application),
Equals(a)
)
removal_diff = create_diff(a, application)
encoded_removal_diff = wire_encode(removal_diff)
self.assertThat(
len(encoded_removal_diff),
LessThan(len(encoded_application)/2)
)
self.assertThat(
wire_decode(encoded_removal_diff).apply(a),
Equals(application)
)
def test_equal_objects(self):
"""
Diffing objects that are equal results in an object that is smaller
than the object.
"""
baseobj = frozenset(xrange(1000))
object_a = DiffTestObj(a=baseobj)
object_b = DiffTestObj(a=baseobj)
diff = create_diff(object_a, object_b)
serialized_diff = wire_encode(diff)
self.assertThat(
len(serialized_diff),
LessThan(len(dumps(list(baseobj))))
)
self.assertThat(
wire_decode(serialized_diff).apply(object_a),
Equals(object_b)
)
def test_different_objects(self):
"""
Diffing objects that are entirely different results in a diff that can
be applied.
"""
object_a = DiffTestObj(a=pset(xrange(1000)))
object_b = pmap({'1': 34})
diff = create_diff(object_a, object_b)
self.assertThat(
wire_decode(wire_encode(diff)).apply(object_a),
Equals(object_b)
)
def test_different_uuids(self):
"""
Diffing objects that have parts that are simply not equal can be
applied to turn the first object into the second.
"""
object_a = DiffTestObj(a=uuid4())
object_b = DiffTestObj(a=uuid4())
diff = create_diff(object_a, object_b)
self.assertThat(
wire_decode(wire_encode(diff)).apply(object_a),
Equals(object_b)
)
class DiffTestObjInvariant(PClass):
"""
Simple pyrsistent object with an invariant that spans multiple fields.
Diffs which swap the values of the fields will trigger ``InvariantException``
unless ``_perform_invariant_check`` is set to ``False`` or the diff is
applied to an evolver object.
"""
_perform_invariant_check = True
a = field()
b = field()
def __invariant__(self):
if self._perform_invariant_check and self.a == self.b:
return (False, "a must not equal b")
else:
return (True, "")
class InvariantDiffTests(TestCase):
"""
Tests for creating and applying diffs to objects with invariant checks.
"""
def test_straight_swap(self):
"""
A diff composed of two separate ``set`` operations can be applied to an
object without triggering an invariant exception.
"""
o1 = DiffTestObjInvariant(
a=1,
b=2,
)
o2 = DiffTestObjInvariant(
a=2,
b=1,
)
diff = create_diff(o1, o2)
self.expectThat(len(diff.changes), Equals(2))
self.assertEqual(
o2,
diff.apply(o1)
)
def test_deep_swap(self):
"""
A diff composed of two separate ``set`` operations can be applied to a
nested object without triggering an invariant exception.
"""
a = DiffTestObjInvariant(
a=1,
b=2,
)
b = DiffTestObjInvariant(
a=3,
b=4,
)
o1 = DiffTestObjInvariant(
a=a,
b=b,
)
o2 = o1.transform(
['a'],
DiffTestObjInvariant(
a=2,
b=1,
)
)
diff = create_diff(o1, o2)
self.expectThat(len(diff.changes), Equals(2))
self.assertEqual(
o2,
diff.apply(o1)
)
@capture_logging(assertHasMessage, DIFF_COMMIT_ERROR)
def test_error_logging(self, logger):
"""
Failures while applying a diff emit a log message containing the full
diff.
"""
o1 = DiffTestObjInvariant(
a=1,
b=2,
)
patcher = MonkeyPatcher()
patcher.addPatch(
DiffTestObjInvariant,
'_perform_invariant_check',
False
)
patcher.patch()
try:
o2 = o1.set('b', 1)
finally:
patcher.restore()
diff = create_diff(o1, o2)
self.assertRaises(
InvariantException,
diff.apply,
o1,
)
def test_application_add(self):
"""
A diff on a Node, which *adds* and application with a volume *and* the
manifestation for the volume, can be applied without triggering an
invariant error on the Node.
"""
node2 = node_strategy(
min_number_of_applications=1,
stateful_applications=True,
).example()
application = node2.applications.values()[0]
node1 = node2.transform(
['applications'],
lambda o: o.remove(application.name)
).transform(
['manifestations'],
lambda o: o.remove(application.volume.manifestation.dataset_id)
)
diff = create_diff(node1, node2)
self.assertEqual(
node2,
diff.apply(node1),
)
def test_application_modify(self):
"""
A diff on a Node, which adds a volume to an *existing* application
volume *and* the manifestation for the volume, can be applied without
triggering an invariant error on the Node.
"""
node2 = node_strategy(
min_number_of_applications=1,
stateful_applications=True,
).example()
application = node2.applications.values()[0]
volume = application.volume
node1 = node2.transform(
['applications', application.name],
lambda o: o.set('volume', None)
).transform(
['manifestations'],
lambda o: o.remove(volume.manifestation.dataset_id)
)
diff = create_diff(node1, node2)
self.assertEqual(
node2,
diff.apply(node1),
)
class TransformProxyTests(TestCase):
"""
Tests for ``_TransformProxy``.
"""
def test_type_error(self):
"""
The wrapped object must provide _IEvolvable.
"""
e = self.assertRaises(
TypeError,
_TransformProxy,
1
)
self.assertEqual(
'1 does not provide _IEvolvable',
e.message,
)
def test_commit_no_change(self):
"""
``commit`` returns the original object if no changes have been
performed.
"""
original = pmap()
self.assertIs(original, _TransformProxy(original).commit())
def test_transform_keyerror(self):
"""
``transform`` raises ``KeyError`` if the supplied ``path`` is not
found.
"""
e = self.assertRaises(
KeyError,
_TransformProxy(pmap()).transform,
['a'], 1
)
self.assertEqual(
"Attribute or key 'a' not found in pmap({})",
e.message,
)
def test_transform_typeerror(self):
"""
``transform`` raises ``TypeError`` if the object at the supplied
``path`` does not provide ``_IEvolvable``.
"""
proxy = _TransformProxy(pmap({'a': 1}))
e = self.assertRaises(
TypeError,
proxy.transform,
['a'], 2,
)
self.assertEqual(
"1 does not provide _IEvolvable",
e.message
)
def test_transform_empty_path(self):
"""
If ``transform`` is supplied with an empty path, the operation is
performed on the root object.
"""
proxy = _TransformProxy(pmap({'a': 1}))
proxy.transform([], lambda o: o.set('a', 2))
self.assertEqual(
pmap({'a': 2}),
proxy.commit(),
)
def test_transform_deep_path(self):
"""
If ``transform`` is supplied with a path containing multiple segments,
the operation is performed on the object corresponding to the last
segment.
"""
proxy = _TransformProxy(
pmap({
'a': pmap({
'b': pmap({
'c': 1
})
})
})
)
proxy.transform(['a', 'b'], lambda o: o.set('c', 2))
self.assertEqual(
pmap({
'a': pmap({
'b': pmap({
'c': 2
})
})
}),
proxy.commit(),
)
def test_transform_deep_evolver(self):
"""
``transform`` can perform operations on nested objects that have
invariant constraints, without triggering the InvariantException.
"""
proxy = _TransformProxy(
pmap({
'a': pmap({
'b': pmap({
'c': DiffTestObjInvariant(
a=1, b=2
)
})
})
})
)
# If these operations were performed directly on the Pyrsistent
# structure it'd trigger InvariantException.
proxy.transform(['a', 'b', 'c'], lambda o: o.set('a', 2))
proxy.transform(['a', 'b', 'c'], lambda o: o.set('b', 1))
self.assertEqual(
pmap({
'a': pmap({
'b': pmap({
'c': DiffTestObjInvariant(
a=2, b=1
)
})
})
}),
proxy.commit(),
)
|
torchsparse/nn/utils/kernel.py | collector-m/torchsparse | 428 | 12661463 | from typing import Tuple, Union
import numpy as np
import torch
from torchsparse.utils import make_ntuple
__all__ = ['get_kernel_offsets']
def get_kernel_offsets(size: Union[int, Tuple[int, ...]],
stride: Union[int, Tuple[int, ...]] = 1,
dilation: Union[int, Tuple[int, ...]] = 1,
device: str = 'cpu') -> torch.Tensor:
size = make_ntuple(size, ndim=3)
stride = make_ntuple(stride, ndim=3)
dilation = make_ntuple(dilation, ndim=3)
offsets = [(np.arange(-size[k] // 2 + 1, size[k] // 2 + 1) * stride[k]
* dilation[k]) for k in range(3)]
# This condition check is only to make sure that our weight layout is
# compatible with `MinkowskiEngine`.
if np.prod(size) % 2 == 1:
offsets = [[x, y, z] for z in offsets[2] for y in offsets[1]
for x in offsets[0]]
else:
offsets = [[x, y, z] for x in offsets[0] for y in offsets[1]
for z in offsets[2]]
offsets = torch.tensor(offsets, dtype=torch.int, device=device)
return offsets
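# Usage sketch: with default stride and dilation, a cubic kernel of size 3 yields
# all 27 integer offsets in [-1, 1]^3 as an int tensor of shape (27, 3).
#
#     offsets = get_kernel_offsets(3)          # torch.Size([27, 3])
#     offsets = get_kernel_offsets((3, 1, 3))  # 9 offsets spanning only two axes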
|
tensorflow_toolkit/action_detection/action_detection/nn/parameters/common.py | morkovka1337/openvino_training_extensions | 256 | 12661509 | <gh_stars>100-1000
# Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import yaml
class AttributedDict(dict):
"""Class to simplify the access to dictionary fields.
"""
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
def load_config(config_path):
"""Loads parameters into the dict from the specified path.
:param config_path: Path to config file
:return: Dictionary with parameters
"""
with open(config_path, 'r') as config_file:
config_values = AttributedDict(yaml.load(config_file, Loader=yaml.FullLoader))
return config_values
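# Usage sketch (the config path and keys below are hypothetical):
#
#     # params.yml contains, e.g.:
#     #   batch_size: 32
#     #   lr: 0.001
#     config = load_config('params.yml')
#     print(config.batch_size, config['lr'])  # attribute and key access are equivalent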
|
tools/perf/contrib/vr_benchmarks/vr_benchmarks.py | zipated/src | 2,151 | 12661525 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from benchmarks import memory
from core import perf_benchmark
from measurements import smoothness
from telemetry import benchmark
from telemetry import story
from telemetry.timeline import chrome_trace_category_filter
from telemetry.timeline import chrome_trace_config
from telemetry.web_perf import timeline_based_measurement
from contrib.vr_benchmarks import vr_browsing_mode_pages
from contrib.vr_benchmarks import webvr_sample_pages
from contrib.vr_benchmarks import webvr_wpr_pages
from contrib.vr_benchmarks import webxr_sample_pages
class _BaseVRBenchmark(perf_benchmark.PerfBenchmark):
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
parser.add_option(
'--shared-prefs-file',
help='The path relative to the Chromium source root '
'to a file containing a JSON list of shared '
'preference files to edit and how to do so. '
'See examples in //chrome/android/'
'shared_preference_files/test/')
parser.add_option(
'--disable-screen-reset',
action='store_true',
default=False,
help='Disables turning screen off and on after each story. '
'This is useful for local testing when turning off the '
'screen leads to locking the phone, which makes Telemetry '
'not produce valid results.')
parser.add_option(
'--recording-wpr',
action='store_true',
default=False,
help='Modifies benchmark behavior slightly while recording WPR files '
'for it. This largely boils down to adding waits/sleeps in order '
'to ensure that enough streaming data is recorded for the '
'benchmark to run without issues.')
class _BaseWebVRWebXRBenchmark(_BaseVRBenchmark):
SUPPORTED_PLATFORMS = [story.expectations.ALL_ANDROID]
def CreateCoreTimelineBasedMeasurementOptions(self):
memory_categories = ['blink.console', 'disabled-by-default-memory-infra']
gpu_categories = ['gpu']
debug_categories = ['toplevel', 'viz']
category_filter = chrome_trace_category_filter.ChromeTraceCategoryFilter(
','.join(['-*'] + memory_categories + gpu_categories
+ debug_categories))
options = timeline_based_measurement.Options(category_filter)
options.config.enable_android_graphics_memtrack = True
options.config.enable_platform_display_trace = True
options.SetTimelineBasedMetrics(['memoryMetric', 'webvrMetric'])
options.config.chrome_trace_config.SetMemoryDumpConfig(
chrome_trace_config.MemoryDumpConfig())
return options
@classmethod
def ShouldAddValue(cls, name, from_first_story_run):
del from_first_story_run # unused
return memory.DefaultShouldAddValueForMemoryMeasurement(name)
class _BaseWebVRBenchmark(_BaseWebVRWebXRBenchmark):
def SetExtraBrowserOptions(self, options):
memory.SetExtraBrowserOptionsForMemoryMeasurement(options)
options.AppendExtraBrowserArgs([
'--enable-webvr',
])
class _BaseWebXRBenchmark(_BaseWebVRWebXRBenchmark):
def SetExtraBrowserOptions(self, options):
memory.SetExtraBrowserOptionsForMemoryMeasurement(options)
options.AppendExtraBrowserArgs([
'--enable-features=WebXR',
])
@benchmark.Owner(emails=['<EMAIL>', '<EMAIL>'])
# pylint: disable=too-many-ancestors
class XrWebVrStatic(_BaseWebVRBenchmark):
"""Measures WebVR performance with synthetic sample pages."""
def CreateStorySet(self, options):
return webvr_sample_pages.WebVrSamplePageSet()
@classmethod
def Name(cls):
return 'xr.webvr.static'
@benchmark.Owner(emails=['<EMAIL>', '<EMAIL>'])
# pylint: disable=too-many-ancestors
class XrWebXrStatic(_BaseWebXRBenchmark):
"""Measures WebXR performance with synthetic sample pages."""
def CreateStorySet(self, options):
return webxr_sample_pages.WebXrSamplePageSet()
@classmethod
def Name(cls):
return 'xr.webxr.static'
@benchmark.Owner(emails=['<EMAIL>', '<EMAIL>'])
# pylint: disable=too-many-ancestors
class XrWebVrWprStatic(_BaseWebVRBenchmark):
"""Measures WebVR performance with WPR copies of live websites."""
def CreateStorySet(self, options):
return webvr_wpr_pages.WebVrWprPageSet()
@classmethod
def Name(cls):
return 'xr.webvr.wpr.static'
@benchmark.Owner(emails=['<EMAIL>', '<EMAIL>'])
# pylint: disable=too-many-ancestors
class XrWebVrLiveStatic(_BaseWebVRBenchmark):
"""Measures WebVR performance with live websites.
This is a superset of xr.webvr.wpr.static, containing all the pages that it
uses plus some that we would like to test with WPR but that behave differently
under WPR than their live versions do.
"""
def CreateStorySet(self, options):
if not hasattr(options, 'use_live_sites') or not options.use_live_sites:
# We log an error instead of raising an exception here because the
# Telemetry presubmit unittests fail if we raise.
logging.error('Running the live sites benchmark without using live '
'sites. Results will likely be incorrect for some sites.')
return webvr_wpr_pages.WebVrLivePageSet()
@classmethod
def Name(cls):
return 'xr.webvr.live.static'
class _BaseBrowsingBenchmark(_BaseVRBenchmark):
SUPPORTED_PLATFORMS = [story.expectations.ALL_ANDROID]
def CreateTimelineBasedMeasurementOptions(self):
memory_categories = ['blink.console', 'disabled-by-default-memory-infra']
gpu_categories = ['gpu']
debug_categories = ['toplevel', 'viz']
category_filter = chrome_trace_category_filter.ChromeTraceCategoryFilter(
','.join(['-*'] + memory_categories + gpu_categories
+ debug_categories))
options = timeline_based_measurement.Options(category_filter)
options.config.enable_android_graphics_memtrack = True
options.config.enable_platform_display_trace = True
options.SetTimelineBasedMetrics(['frameCycleDurationMetric',
'memoryMetric'])
options.config.chrome_trace_config.SetMemoryDumpConfig(
chrome_trace_config.MemoryDumpConfig())
return options
def SetExtraBrowserOptions(self, options):
options.clear_sytem_cache_for_browser_and_profile_on_start = True
options.AppendExtraBrowserArgs([
'--enable-gpu-benchmarking',
'--touch-events=enabled',
'--enable-vr-shell',
])
@benchmark.Owner(emails=['<EMAIL>'])
class XrBrowsingStatic(_BaseBrowsingBenchmark):
"""Benchmark for testing the VR Browsing Mode performance on sample pages."""
def CreateStorySet(self, options):
return vr_browsing_mode_pages.VrBrowsingModePageSet()
@classmethod
def Name(cls):
return 'xr.browsing.static'
@benchmark.Owner(emails=['<EMAIL>', '<EMAIL>'])
class XrBrowsingWprStatic(_BaseBrowsingBenchmark):
"""Benchmark for testing the VR Browsing Mode performance on WPR pages."""
def CreateStorySet(self, options):
return vr_browsing_mode_pages.VrBrowsingModeWprPageSet()
@classmethod
def Name(cls):
return 'xr.browsing.wpr.static'
@benchmark.Owner(emails=['<EMAIL>', '<EMAIL>'])
class XrBrowsingWprSmoothness(_BaseBrowsingBenchmark):
"""Benchmark for testing VR browser scrolling smoothness and throughput."""
test = smoothness.Smoothness
def CreateStorySet(self, options):
return vr_browsing_mode_pages.VrBrowsingModeWprSmoothnessPageSet()
@classmethod
def Name(cls):
return 'xr.browsing.wpr.smoothness'
|
packages/pyright-internal/src/tests/samples/final2.py | Microsoft/pyright | 3,934 | 12661529 | <filename>packages/pyright-internal/src/tests/samples/final2.py
# This sample tests the handling of the @final method decorator.
from typing import final
class ClassA:
def func1(self):
pass
@classmethod
def func2(cls):
pass
@final
def func3(self):
pass
@final
@classmethod
def func4(cls):
pass
@final
def _func5(self):
pass
@final
def __func6(self):
pass
# This should generate an error because func3 is final.
ClassA.func3 = lambda self: None
# This should generate an error because func4 is final.
ClassA.func4 = lambda cls: None
# This should generate an error because _func5 is final.
ClassA._func5 = lambda self: None
class ClassB(ClassA):
def func1(self):
pass
@classmethod
def func2(cls):
pass
# This should generate an error because func3 is
# defined as final.
def func3(self):
pass
# This should generate an error because func4 is
# defined as final.
@classmethod
def func4(cls):
pass
# This should generate an error because _func5 is
# defined as final.
def _func5(self):
pass
# This should not generate an error because double
# underscore symbols are exempt from this check.
def __func6(self):
pass
class Base4:
...
class Base5:
@final
def __init__(self, v: int) -> None:
...
class C(Base4, Base5):
# This should generate an error because it overrides Base5,
# and __init__ is marked final there.
def __init__(self) -> None:
...
|
Binary_Search/Python/jiang42/binary_search.py | Mynogs/Algorithm-Implementations | 1,184 | 12661540 | def binary_search(arr, target):
low, high = 0, len(arr)-1
while low < high:
mid = (low + high) // 2
if arr[mid] == target:
return mid
elif arr[mid] > target:
high = mid - 1
else:
low = mid + 1
try:
if arr[high] == target:
return high
else:
return -1
except IndexError as e:
return -1
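# A few quick sanity checks for the search above:
#     binary_search([1, 3, 5, 7], 5)  # -> 2 (target found, index returned)
#     binary_search([1, 3, 5, 7], 4)  # -> -1 (target absent)
#     binary_search([], 1)            # -> -1 (empty input handled by the except branch)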
|
settings.py | 123456pop00/Traffic_sign_Test | 533 | 12661545 | '''
Global settings
'''
import tensorflow as tf
# Default boxes
# DEFAULT_BOXES = ((x1_offset, y1_offset, x2_offset, y2_offset), (...), ...)
# Offset is relative to upper-left-corner and lower-right-corner of the feature map cell
DEFAULT_BOXES = ((-0.5, -0.5, 0.5, 0.5), (0.2, 0.2, -0.2, -0.2), (-0.8, -0.2, 0.8, 0.2), (-0.2, -0.8, 0.2, 0.8))
NUM_DEFAULT_BOXES = len(DEFAULT_BOXES)
# Constants (TODO: Keep this updated as we go along)
NUM_CLASSES = 3 # 2 signs + 1 background class
NUM_CHANNELS = 1 # grayscale->1, RGB->3
NUM_PRED_CONF = NUM_DEFAULT_BOXES * NUM_CLASSES # number of class predictions per feature map cell
NUM_PRED_LOC = NUM_DEFAULT_BOXES * 4 # number of localization regression predictions per feature map cell
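# Worked example with the values above: 4 default boxes and 3 classes give
# NUM_PRED_CONF = 4 * 3 = 12 class scores and NUM_PRED_LOC = 4 * 4 = 16
# box-offset regressions per feature map cell.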
# Bounding box parameters
IOU_THRESH = 0.5 # match ground-truth box to default boxes exceeding this IOU threshold, during data prep
NMS_IOU_THRESH = 0.2 # IOU threshold for non-max suppression
# Negatives-to-positives ratio used to filter training data
NEG_POS_RATIO = 5 # negative:positive = NEG_POS_RATIO:1
# Class confidence threshold to count as detection
CONF_THRESH = 0.9
# Model selection and dependent parameters
MODEL = 'AlexNet' # AlexNet/VGG16/ResNet50
if MODEL == 'AlexNet':
#IMG_H, IMG_W = 300, 300
#FM_SIZES = [[36, 36], [17, 17], [9, 9], [5, 5]] # feature map sizes for SSD hooks via TensorBoard visualization (HxW)
IMG_H, IMG_W = 260, 400
FM_SIZES = [[31, 48], [15, 23], [8, 12], [4, 6]]
else:
raise NotImplementedError('Model not implemented')
# Model hyper-parameters
OPT = tf.train.AdadeltaOptimizer()
REG_SCALE = 1e-2 # L2 regularization strength
LOC_LOSS_WEIGHT = 1. # weight of localization loss: loss = conf_loss + LOC_LOSS_WEIGHT * loc_loss
# Training process
RESUME = False # resume training from previously saved model?
NUM_EPOCH = 200
BATCH_SIZE = 32 # batch size for training (relatively small)
VALIDATION_SIZE = 0.05 # fraction of total training set to use as validation set
SAVE_MODEL = True # save trained model to disk?
MODEL_SAVE_PATH = './model.ckpt' # where to save trained model
|
pyqubo/integer/log_encoded_integer.py | dmiracle/pyqubo | 124 | 12661589 | # Copyright 2018 Recruit Communications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cpp_pyqubo import SubH
from pyqubo.array import Array
from pyqubo.integer import Integer
import numpy as np
class LogEncInteger(Integer):
"""Log encoded integer. A value in :math:`[0, n]` is represented by
:math:`\\sum_{i=0}^{d-1}2^{i}x_{i}+(n-2^{d}+1)x_{d}` with :math:`d=\\lfloor\\log_{2}n\\rfloor`, without any additional constraint.
Args:
label (str): Label of the integer.
lower (int): Lower value of the integer.
upper (int): Upper value of the integer.
Examples:
This example finds the value `a`, `b` such that :math:`a+b=5` and :math:`2a-b=1`.
>>> from pyqubo import LogEncInteger
>>> import dimod
>>> a = LogEncInteger("a", (0, 4))
>>> b = LogEncInteger("b", (0, 4))
>>> M=2.0
>>> H = (2*a-b-1)**2 + M*(a+b-5)**2
>>> model = H.compile()
>>> bqm = model.to_bqm()
>>> import dimod
>>> sampleset = dimod.ExactSolver().sample(bqm)
>>> decoded_samples = model.decode_sampleset(sampleset)
>>> best_sample = min(decoded_samples, key=lambda s: s.energy)
>>> print(best_sample.subh['a'])
2.0
>>> print(best_sample.subh['b'])
3.0
"""
def __init__(self, label, value_range):
lower, upper = value_range
assert upper > lower, "upper value should be larger than lower value"
assert isinstance(lower, int)
assert isinstance(upper, int)
span = upper - lower
self._num_variables = int(np.log2(span)) + 1
self.array = Array.create(label, shape=self._num_variables, vartype='BINARY')
d = self._num_variables - 1
express = lower + sum(self.array[i] * 2 ** i for i in range(self._num_variables - 1))
express += (span - (2**d - 1)) * self.array[-1]
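# The last variable carries the residual weight so the encoding reaches
# exactly `span`: (2**d - 1) + (span - (2**d - 1)) == span.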
express = SubH(express, label)
super().__init__(
label=label,
value_range=value_range,
express=express)
|
dataprep/eda/staged.py | Waterpine/dataprep-1 | 1,229 | 12661688 | <filename>dataprep/eda/staged.py<gh_stars>1000+
"""Decorator to make it cope with two staged computation easily."""
from typing import Any, Callable, Generator, Tuple, Union, cast
import dask
from .intermediate import Intermediate
Decoratee = Callable[..., Generator[Any, Any, Intermediate]]
Completion = Callable[[Any], Intermediate]
def staged(
func: Decoratee,
) -> Callable[..., Union[Tuple[Any, Completion], Intermediate]]:
"""Transform a two stage computation into a result and a completion function."""
def staged_imp(
*args: Any, _staged: bool = False, **kwargs: Any
) -> Union[Tuple[Any, Completion], Intermediate]:
gen = func(*args, **kwargs)
def completion(computed: Any) -> Intermediate:
try:
gen.send(computed)
raise RuntimeError("Computation didn't stop.")
except StopIteration as stop:
return cast(Intermediate, stop.value)
if _staged:
return next(gen), completion
else:
(computed,) = dask.compute(next(gen))
return completion(computed)
return staged_imp
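# A minimal usage sketch (hypothetical; the decorated function just has to yield one
# dask-computable object and then return an Intermediate):
#
#     @staged
#     def heavy(df):
#         computed = yield df.shape[0]   # stage 1: a lazy value handed to dask.compute
#         return Intermediate(...)       # stage 2: wrap the finished result
#
#     heavy(ddf)                               # computes eagerly, returns the Intermediate
#     lazy, finish = heavy(ddf, _staged=True)  # caller computes `lazy`, then calls finish(computed)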
|
dojo/components/urls.py | mtcolman/django-DefectDojo | 1,772 | 12661701 | from django.conf.urls import url
from dojo.components import views
urlpatterns = [
url(r'^components$', views.components,
name='components'),
]
|
lib/asn1/test/asn1_SUITE_data/XSeqOf.py | jjhoo/otp | 8,238 | 12661707 | XSeqOf DEFINITIONS ::=
BEGIN
-- F.2.10.1
-- Use a sequence-of type to model a collection of variables whose
-- types are the same,
-- whose number is large or unpredictable, and whose order is significant.
-- EXAMPLE
NamesOfMemberNations ::= SEQUENCE OF VisibleString
-- in alphabetical order
firstTwo NamesOfMemberNations ::= {"Australia", "Austria"}
DayNames1 ::= SEQUENCE SIZE(7) OF VisibleString
DayNames2 ::= SEQUENCE SIZE(1..7) OF VisibleString
DayNames3 ::= SEQUENCE (SIZE(7)) OF VisibleString
DayNames4 ::= SEQUENCE (SIZE(1..7)) OF VisibleString
END
|
tests/integrate_test/robustness_test.py | baajur/cita | 930 | 12661722 | #! /usr/bin/env python3
# coding=utf-8
import os
import subprocess
import time
import toml
from jsonrpcclient.http_client import HTTPClient
def block_number(host="127.0.0.1", port=1337):
"""
host: str
port: int
"""
url = "http://" + host + ":" + str(port)
try:
response = HTTPClient(url).request("blockNumber", [])
return int(response, 16)
except:
return None
def run_subprocess(cmd, shell=True):
"""
cmd: str, style like "ls -al"
"""
return subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE)
def start(node_number, log_level=""):
"""
node_number: int
log_level: str
"""
for i in range(node_number + 1):
p = run_subprocess(f'bin/cita bebop setup node/{i}')
p.wait()
run_subprocess(f'bin/cita bebop start node/{i} {log_level}')
def stop(node_number):
"""
node_number: int
"""
for i in range(node_number + 1):
p = run_subprocess(f'bin/cita bebop stop node/{i}')
p.wait()
def clean():
run_subprocess("rm node/ -rf")
def modify_forever(node_number):
"""
node_number: int
"""
for i in range(node_number + 1):
with open(f"./node/{i}/forever.toml", "r") as file:
forever_conf = toml.load(file)
forever_conf["process"][-1]["respawn"] = 10000
forever_conf["process"][-2]["respawn"] = 10000
with open(f"./node/{i}/forever.toml", "w") as file:
toml.dump(forever_conf, file)
def remove_statedb(node_number):
"""
node_number: int
"""
for i in range(node_number + 1):
run_subprocess(f'rm ./node/{i}/data/statedb/ -rf')
def kill_process(always, occasionally):
"""
:param always: path, str
:param occasionally: path, str
:return: None
"""
for i in range(50):
if os.path.exists(always):
with open(always, "r") as file:
always_kill = file.read()
run_subprocess(f"kill -9 {always_kill}")
if i % 4 == 0 and os.path.exists(occasionally):
with open(occasionally, "r") as file:
occasionally_kill = file.read()
run_subprocess(f"kill -9 {occasionally_kill}")
time.sleep(0.3)
def prepare():
p = run_subprocess(
"python3 ./scripts/create_cita_config.py create --super_admin '<PASSWORD>' --nodes '127.0.0.1:4000,127.0.0.1:4001,127.0.0.1:4002,127.0.0.1:4003' --chain_name node > /dev/null"
)
p.wait()
modify_forever(3)
start(3)
time.sleep(30)
def test_chain_higher_than_executor():
for i in range(10):
point_number = block_number()
print(f"point height is {point_number}")
stop(0)
remove_statedb(0)
start(0)
start_time = time.time()
while True:
new_node_block_height = block_number()
if new_node_block_height and new_node_block_height > point_number + 2:
print(f"Current height is {new_node_block_height}, finish {i}")
break
else:
print(f"Current height is {new_node_block_height}, wait...")
time.sleep(3)
duration_time = time.time() - start_time
if duration_time > 60:
raise Exception("robustness test failure")
def test_executor_higher_than_chain():
kill_process('./node/0/.cita-executor.pid', "./node/0/.cita-chain.pid")
kill_process("./node/0/.cita-chain.pid", './node/0/.cita-executor.pid')
time.sleep(6)
point_number = block_number(port=1339)
print(f"point height is {point_number}")
start_time = time.time()
while True:
new_node_block_height = block_number()
if new_node_block_height and new_node_block_height > point_number + 10:
print(f"Current height is {new_node_block_height}, finish")
break
else:
print(f"Current height is {new_node_block_height}, wait...")
time.sleep(3)
duration_time = time.time() - start_time
if duration_time > 60:
raise Exception("robustness test failure")
if __name__ == "__main__":
pwd = os.getcwd()
os.chdir(f'{pwd}/target/install')
print("step 0: prepare")
clean()
prepare()
print("step 1: Chain higher than Executor")
test_chain_higher_than_executor()
print("step 2: Executor higher than Chain")
test_executor_higher_than_chain()
print("step 3: stop")
stop(3)
print("step 4: clean up")
clean()
|
aries_cloudagent/protocols/coordinate_mediation/v1_0/handlers/mediation_request_handler.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 12661729 | <filename>aries_cloudagent/protocols/coordinate_mediation/v1_0/handlers/mediation_request_handler.py
"""Handler for mediate-request message."""
from .....messaging.base_handler import BaseHandler, HandlerException
from .....messaging.request_context import RequestContext
from .....messaging.responder import BaseResponder
from ..manager import MediationManager, MediationAlreadyExists
from ..messages.mediate_request import MediationRequest
from ..messages.problem_report import CMProblemReport, ProblemReportReason
class MediationRequestHandler(BaseHandler):
"""Handler for mediate-request message."""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle mediate-request message."""
self._logger.debug(
"%s called with context %s", self.__class__.__name__, context
)
assert isinstance(context.message, MediationRequest)
if not context.connection_ready:
raise HandlerException("Invalid mediation request: no active connection")
mgr = MediationManager(context.profile)
try:
record = await mgr.receive_request(
context.connection_record.connection_id, context.message
)
if context.settings.get("mediation.open", False):
record, grant = await mgr.grant_request(record.mediation_id)
await responder.send_reply(grant)
except MediationAlreadyExists:
reply = CMProblemReport(
description={
"en": "Mediation request already exists from this connection",
"code": ProblemReportReason.MEDIATION_REQUEST_REPEAT.value,
}
)
reply.assign_thread_from(context.message)
await responder.send_reply(reply)
|
myia/operations/prim_extract_kwarg.py | strint/myia | 222 | 12661741 | """Definitions for the primitive `extract_kwarg`."""
from ..lib import Inferrer, standard_prim
from . import primitives as P
@standard_prim(P.extract_kwarg)
class _ExtractKwArgInferrer(Inferrer):
"""Infer the return type of primitive `extract_kwarg`."""
async def normalize_args(self, args):
return args
async def infer(self, engine, key, kwarg):
assert key.xvalue() is kwarg.key
return kwarg.argument
__operation_defaults__ = {
"name": "extract_kwarg",
"registered_name": "extract_kwarg",
"mapping": P.extract_kwarg,
"python_implementation": None,
}
__primitive_defaults__ = {
"name": "extract_kwarg",
"registered_name": "extract_kwarg",
"type": "inference",
"python_implementation": None,
"inferrer_constructor": _ExtractKwArgInferrer,
"grad_transform": None,
}
|
savevariables.py | dav009/nextitnet | 112 | 12661768 | # encoding:utf-8
import tensorflow as tf
import os
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def save_mode_pb(pb_file_path):
x = tf.placeholder(tf.int32, name='x')
y = tf.placeholder(tf.int32, name='y')
b = tf.Variable(2, name='b')
xy = tf.multiply(x, y)
# the output op here must be given an explicit name ("name" attribute)
op = tf.add(xy, b, name='op_to_store')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
path = os.path.dirname(os.path.abspath(pb_file_path))
if os.path.isdir(path) is False:
os.makedirs(path)
# convert_variables_to_constants requires output_node_names as a list; multiple node names may be given
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['op_to_store'])
with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
f.write(constant_graph.SerializeToString())
# test
feed_dict = {x: 2, y: 4}
print(sess.run(op, feed_dict))
def restore_mode_pb(pb_file_path):
sess = tf.Session()
with gfile.FastGFile(pb_file_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
print(sess.run('b:0'))
input_x = sess.graph.get_tensor_by_name('x:0')
input_y = sess.graph.get_tensor_by_name('y:0')
op = sess.graph.get_tensor_by_name('op_to_store:0')
ret = sess.run(op, {input_x: 5, input_y: 5})
print(ret)
if __name__ == '__main__':
# save_mode_pb("Data/Models/generation_model/fajietest.ckpt")
restore_mode_pb("Data/Models/generation_model/fajietest.ckpt")
|
tests/basic/class2.py | MoonStarCZW/py2rb | 124 | 12661778 | class Class1(object):
def __init__(self):
pass
def test1(self):
return 5
class Class2(object):
def test1(self):
return 6
class Class3(object):
def test1(self, x):
return self.test2(x)-1
def test2(self, x):
return 2*x
a = Class1()
print(a.test1())
a = Class2()
print(a.test1())
a = Class3()
print(a.test1(3))
print(a.test2(3))
|
python/ctranslate2/specs/__init__.py | funboarder13920/CTranslate2 | 259 | 12661779 | from ctranslate2.specs.model_spec import LayerSpec
from ctranslate2.specs.model_spec import ModelSpec
from ctranslate2.specs.transformer_spec import TransformerSpec
|
mayan/apps/dynamic_search/__init__.py | eshbeata/open-paperless | 2,743 | 12661812 | from __future__ import unicode_literals
default_app_config = 'dynamic_search.apps.DynamicSearchApp'
|
nodes/1.x/python/InternalUnit.ToDisplayUnit.py | jdehotin/Clockworkfordynamo | 147 | 12661863 | <reponame>jdehotin/Clockworkfordynamo<filename>nodes/1.x/python/InternalUnit.ToDisplayUnit.py
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
vals = IN[0]
dispunit = IN[1]
elementlist = []
for val in vals:
elementlist.append(UnitUtils.ConvertFromInternalUnits(val,dispunit))
OUT = elementlist
|
integration_tests/projects/005_functions_and_variables/fal_scripts/write_to_source_twice.py | emekdahl/fal | 360 | 12661902 | <gh_stars>100-1000
import pandas as pd
from functools import reduce
import os
model_name = context.current_model.name
output = ""
df: pd.DataFrame = ref(model_name)
df.columns = df.columns.str.lower() # Snowflake has uppercase columns
output += f"my_float {df.my_float[0]}\n"
write_to_source(df, "results", "some_source", mode="overwrite")
source_size = len(source("results", "some_source"))
output += f"source size {source_size}\n"
write_to_source(df, "results", "some_source", mode="append")
source_size = len(source("results", "some_source"))
output += f"source size {source_size}\n"
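# At this point the source should hold the model's rows twice: the first
# write_to_source call replaced the table and the second appended to it.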
path = reduce(
os.path.join, [os.environ["temp_dir"], model_name + ".write_to_source_twice.txt"]
)
with open(path, "w") as file:
file.write(output)
|
tests/test_basic_normal.py | ahartikainen/pystan | 1,030 | 12661937 | <filename>tests/test_basic_normal.py<gh_stars>1000+
import pytest
import stan
program_code = "parameters {real y;} model {y ~ normal(0,1);}"
@pytest.fixture(scope="module")
def normal_posterior():
return stan.build(program_code)
def test_normal_stepsize(normal_posterior):
fit = normal_posterior.sample(stepsize=0.001)
assert fit is not None
|
pysymoro/screw6.py | songhongxiang/symoro | 109 | 12661949 | # -*- coding: utf-8 -*-
"""
This module contains the Screw6 data structure.
"""
from sympy import zeros
from sympy import ShapeError
class Screw6(object):
"""
Data structure:
Represents (as a base class) a 6x6 matrix composed of
four 3x3 sub-matrices.
"""
def __init__(self, *args, **kwargs):
"""
Constructor.
Usage:
>>> # initialise to 0 by default
Screw6()
>>> # initialise to a given 6x6 matrix
Screw6(<value>)
>>> # intiialise each of the 4 sub-matrices individually
Screw6(<top-left>, <top-right>, <bottom-left>, <bottom-right>)
>>> # initialise using keywords
Screw6(value=<value>)
Screw6(
tl=<top-left>, tr=<top-right>,
bl=<bottom-left>, br=<bottom-right>
)
"""
self._val = zeros(6, 6)
if len(args) == 1:
self.val = args[0]
elif len(args) == 4:
self.topleft = args[0]
self.topright = args[1]
self.botleft = args[2]
self.botright = args[3]
elif len(args) > 0:
raise NotImplementedError(
"""Screw6 Constructor does not accept %s positional
arguments. See Usage.""" % (str(len(args)))
)
if len(kwargs) == 4:
self.topleft = kwargs['tl']
self.topright = kwargs['tr']
self.botleft = kwargs['bl']
self.botright = kwargs['br']
elif len(kwargs) == 1:
self.val = kwargs['value']
elif len(kwargs) > 0:
raise NotImplementedError(
"""Screw6 Constructor does not accept %s keyword
arguments. See Usage.""" % (str(len(kwargs)))
)
def __str__(self):
row_format = '[' + ((('{},' * 6) + ';') * 6) + ']'
elements = list()
for i in range(self._val.rows):
for j in range(self._val.cols):
elements.append(str(self._val[i, j]))
str_format = row_format.format(*elements)
return str_format
def __repr__(self):
repr_format = 'Screw6()'
return repr_format
@property
def val(self):
"""
Get current value.
Returns:
A 6x6 Matrix with the current value
"""
return self._val
@val.setter
def val(self, value):
"""
Set the current value.
Args:
value: A 6x6 Matrix
"""
if value.rows != 6 or value.cols != 6:
raise ShapeError("Matrix size has to be 6x6.")
self._val = value
@property
def topleft(self):
"""
Get the top-left part of the 6x6 matrix.
Returns:
A 3x3 Matrix.
"""
return self._val[0:3, 0:3]
@property
def topright(self):
"""
Get the top-right part of the 6x6 matrix.
Returns:
A 3x3 Matrix.
"""
return self._val[0:3, 3:6]
@property
def botleft(self):
"""
Get the bottom-left part of the 6x6 matrix.
Returns:
A 3x3 Matrix.
"""
return self._val[3:6, 0:3]
@property
def botright(self):
"""
Get the bottom-right part of the 6x6 matrix.
Returns:
A 3x3 Matrix.
"""
return self._val[3:6, 3:6]
@topleft.setter
def topleft(self, value):
"""
Set the top-left part of the 6x6 matrix.
Args:
value: A 3x3 Matrix - top-left value.
"""
if value.rows != 3 or value.cols != 3:
raise ShapeError("Top-left value size has to be 3x3.")
self._val[0:3, 0:3] = value
@topright.setter
def topright(self, value):
"""
Set the top-right part of the 6x6 matrix.
Args:
value: A 3x3 Matrix - top-right value.
"""
if value.rows != 3 or value.cols != 3:
raise ShapeError("Top-right value size has to be 3x3.")
self._val[0:3, 3:6] = value
@botleft.setter
def botleft(self, value):
"""
Set the bottom-left part of the 6x6 matrix.
Args:
value: A 3x3 Matrix - bottom-left value.
"""
if value.rows != 3 or value.cols != 3:
raise ShapeError("Bottom-left value size has to be 3x3.")
self._val[3:6, 0:3] = value
@botright.setter
def botright(self, value):
"""
Set the bottom-right part of the 6x6 matrix.
Args:
value: A 3x3 Matrix - bottom-right value.
"""
if value.rows != 3 or value.cols != 3:
raise ShapeError("Bottom-right value size has to be 3x3.")
self._val[3:6, 3:6] = value
def __eq__(self, other):
"""Check equality between two instances of Screw6."""
if type(self) != type(other):
raise ValueError(
"Unable to compare %s with Screw6 type." % str(type(other))
)
return self.val == other.val
def __ne__(self, other):
"""Check non-equality between two instances of Screw6."""
return not self == other
|
src/python/twitter/common/python/marshaller.py | zhouyijiaren/commons | 1,143 | 12661961 | from __future__ import absolute_import
from pex.marshaller import *
|
route/recent_app_submit.py | k0000k/openNAMU | 126 | 12662010 | <reponame>k0000k/openNAMU<gh_stars>100-1000
from .tool.func import *
def recent_app_submit_2(conn):
curs = conn.cursor()
div = ''
curs.execute(db_change('select data from other where name = "requires_approval"'))
requires_approval = curs.fetchall()
if requires_approval and requires_approval[0][0] != 'on':
div += load_lang('approval_requirement_disabled')
if flask.request.method == 'GET':
curs.execute(db_change(
'select data from user_set where name = "application"'
))
db_data = curs.fetchall()
if db_data:
div += '' + \
load_lang('all_register_num') + ' : ' + str(len(db_data)) + \
'<hr class="main_hr">' + \
''
div += '''
<table id="main_table_set">
<tr id="main_table_top_tr">
<td id="main_table_width_half">''' + load_lang('id') + '''</td>
<td id="main_table_width_half">''' + load_lang('email') + '''</td>
</tr>
<tr id="main_table_top_tr">
<td>''' + load_lang('approval_question') + '''</td>
<td>''' + load_lang('answer') + '''</td>
</tr>
'''
for application in db_data:
application = json.loads(application[0])
if 'question' in application:
question = html.escape(application['question'])
question = question if question != '' else '<br>'
else:
question = '<br>'
if 'answer' in application:
answer = html.escape(application['answer'])
answer = answer if answer != '' else '<br>'
else:
answer = '<br>'
if 'email' in application:
email = html.escape(application['email'])
email = email if email != '' else '<br>'
else:
email = '<br>'
div += '''
<form method="post">
<tr>
<td>''' + application['id'] + '''</td>
<td>''' + email + '''</td>
</tr>
<tr>
<td>''' + question + '''</td>
<td>''' + answer + '''</td>
</tr>
<tr>
<td colspan="3">
<button type="submit"
id="save"
name="approve"
value="''' + application['id'] + '''">
''' + load_lang('approve') + '''
</button>
<button type="submit"
name="decline"
value="''' + application['id'] + '''">
''' + load_lang('decline') + '''
</button>
</td>
</tr>
</form>
'''
div += '</table>'
else:
div += load_lang('no_applications_now')
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('application_list'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = div,
menu = [['other', load_lang('return')]]
))
else:
if admin_check(None, 'app submit') != 1:
return re_error('/ban')
if flask.request.form.get('approve', '') != '':
curs.execute(db_change(
'select data from user_set where id = ? and name = "application"'
), [
flask.request.form.get('approve', '')
])
application = curs.fetchall()
if not application:
return re_error('/error/26')
else:
application = json.loads(application[0][0])
add_user(
application['id'],
application['pw'],
application['email'],
application['encode']
)
curs.execute(db_change(
"insert into user_set (name, id, data) values ('approval_question', ?, ?)"
), [
application['id'],
application['question']
])
curs.execute(db_change(
"insert into user_set (name, id, data) values ('approval_question_answer', ?, ?)"
), [
application['id'],
application['answer']
])
curs.execute(db_change(
'delete from user_set where id = ? and name = "application"'
), [
application['id']
])
conn.commit()
elif flask.request.form.get('decline', '') != '':
curs.execute(db_change(
'delete from user_set where id = ? and name = "application"'
), [
flask.request.form.get('decline', '')
])
conn.commit()
return redirect('/app_submit')
|
awkward0/arrow.py | kgizdov/awkward-0.x | 224 | 12662018 | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-0.x/blob/master/LICENSE
import codecs
import json
import numpy
import awkward0.array.base
import awkward0.array.chunked
import awkward0.array.indexed
import awkward0.array.jagged
import awkward0.array.masked
import awkward0.array.objects
import awkward0.array.table
import awkward0.array.virtual
import awkward0.type
import awkward0.util
################################################################################ type conversions
def schema2type(schema):
import pyarrow
def recurse(tpe, nullable):
if isinstance(tpe, pyarrow.lib.DictionaryType):
out = recurse(tpe.dictionary.type, nullable)
if nullable:
return awkward0.type.OptionType(out)
else:
return out
elif isinstance(tpe, pyarrow.lib.StructType):
out = None
for i in range(tpe.num_children):
x = awkward0.type.ArrayType(tpe[i].name, recurse(tpe[i].type, tpe[i].nullable))
if out is None:
out = x
else:
out = out & x
if nullable:
return awkward0.type.OptionType(out)
else:
return out
elif isinstance(tpe, pyarrow.lib.ListType):
out = awkward0.type.ArrayType(float("inf"), recurse(tpe.value_type, nullable))
if nullable:
return awkward0.type.OptionType(out)
else:
return out
elif isinstance(tpe, pyarrow.lib.UnionType):
out = None
for i in range(tpe.num_children):
x = recurse(tpe[i].type, nullable)
if out is None:
out = x
else:
out = out | x
if nullable:
return awkward0.type.OptionType(out)
else:
return out
elif tpe == pyarrow.string():
if nullable:
return awkward0.type.OptionType(str)
else:
return str
elif tpe == pyarrow.binary():
if nullable:
return awkward0.type.OptionType(bytes)
else:
return bytes
elif tpe == pyarrow.bool_():
out = awkward0.numpy.dtype(bool)
if nullable:
return awkward0.type.OptionType(out)
else:
return out
elif isinstance(tpe, pyarrow.lib.DataType):
if nullable:
return awkward0.type.OptionType(tpe.to_pandas_dtype())
else:
return tpe.to_pandas_dtype()
else:
raise NotImplementedError(repr(tpe))
out = None
for name in schema.names:
field = schema.field(name)
mytype = awkward0.type.ArrayType(name, recurse(field.type, field.nullable))
if out is None:
out = mytype
else:
out = out & mytype
return out
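# Example (sketch, assuming pyarrow is available): a schema with a nullable int64
# column "x" and a nullable list<double> column "y" maps to option-wrapped awkward
# types, roughly `x -> ?int64` and `y -> ?(jagged ?float64)`.
#
#     schema = pyarrow.schema([("x", pyarrow.int64()),
#                              ("y", pyarrow.list_(pyarrow.float64()))])
#     print(schema2type(schema))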
################################################################################ value conversions
# we need an opt-out of the large indices in certain cases, otherwise use by default
def toarrow(obj):
import pyarrow
def recurse(obj, mask):
if isinstance(obj, numpy.ndarray):
return pyarrow.array(obj, mask=mask)
elif isinstance(obj, awkward0.array.chunked.ChunkedArray): # includes AppendableArray
raise TypeError("only top-level ChunkedArrays can be converted to Arrow (as RecordBatches)")
elif isinstance(obj, awkward0.array.indexed.IndexedArray):
if mask is None:
return pyarrow.DictionaryArray.from_arrays(obj.index, recurse(obj.content, mask))
else:
return recurse(obj.content[obj.index], mask)
elif isinstance(obj, awkward0.array.indexed.SparseArray):
return recurse(obj.dense, mask)
elif isinstance(obj, awkward0.array.jagged.JaggedArray):
obj = obj.compact()
if mask is not None:
mask = obj.tojagged(mask).flatten()
arrow_type = pyarrow.ListArray
# 64bit offsets not yet completely golden in arrow
# if hasattr(pyarrow, 'LargeListArray') and obj.starts.itemsize > 4:
# arrow_type = pyarrow.LargeListArray
return arrow_type.from_arrays(obj.offsets, recurse(obj.content, mask))
elif isinstance(obj, awkward0.array.masked.IndexedMaskedArray):
thismask = obj.boolmask(maskedwhen=True)
if mask is not None:
thismask = mask | thismask
if len(obj.content) == 0:
content = obj.numpy.empty(len(obj.mask), dtype=obj.DEFAULTTYPE)
else:
content = obj.content[obj.mask]
return recurse(content, thismask)
elif isinstance(obj, awkward0.array.masked.MaskedArray): # includes BitMaskedArray
thismask = obj.boolmask(maskedwhen=True)
if mask is not None:
thismask = mask | thismask
return recurse(obj.content, thismask)
elif isinstance(obj, awkward0.array.objects.StringArray):
if obj.encoding is None and hasattr(pyarrow.BinaryArray, 'from_buffers'):
arrow_type = pyarrow.BinaryArray
arrow_offset_type = pyarrow.binary()
# 64bit offsets not yet completely golden in arrow
# if hasattr(pyarrow, 'LargeBinaryArray') and obj.starts.itemsize > 4:
# arrow_type = pyarrow.LargeBinaryArray
# arrow_offset_type = pyarrow.large_binary()
convert = lambda length, offsets, content: arrow_type.from_buffers(arrow_offset_type, length, [None, offsets, content])
elif codecs.lookup(obj.encoding) is codecs.lookup("utf-8") or obj.encoding is None:
arrow_type = pyarrow.StringArray
# if hasattr(pyarrow, 'LargeStringArray') and obj.starts.itemsize > 4:
# arrow_type = pyarrow.LargeStringArray
convert = lambda length, offsets, content: arrow_type.from_buffers(length, offsets, content)
else:
raise ValueError("only encoding=None or encoding='utf-8' can be converted to Arrow")
obj = obj.compact()
offsets = obj.offsets
if offsets.dtype != numpy.dtype(numpy.int32):
offsets = offsets.astype(numpy.int32)
return convert(len(offsets) - 1, pyarrow.py_buffer(offsets), pyarrow.py_buffer(obj.content))
elif isinstance(obj, awkward0.array.objects.ObjectArray):
# throw away Python object interpretation, which Arrow can't handle while being multilingual
return recurse(obj.content, mask)
elif isinstance(obj, awkward0.array.table.Table):
return pyarrow.StructArray.from_arrays([recurse(x, mask) for x in obj.contents.values()], list(obj.contents))
elif isinstance(obj, awkward0.array.union.UnionArray):
contents = []
for i, x in enumerate(obj.contents):
if mask is None:
thismask = None
else:
thistags = (obj.tags == i)
thismask = obj.numpy.empty(len(x), dtype=obj.MASKTYPE)
thismask[obj.index[thistags]] = mask[thistags] # hmm... obj.index could have repeats; the Arrow mask in that case would not be well-defined...
contents.append(recurse(x, thismask))
return pyarrow.UnionArray.from_dense(pyarrow.array(obj.tags.astype(numpy.int8)), pyarrow.array(obj.index.astype(numpy.int32)), contents)
elif isinstance(obj, awkward0.array.virtual.VirtualArray):
return recurse(obj.array, mask)
else:
raise TypeError("cannot convert type {0} to Arrow".format(type(obj)))
if isinstance(obj, awkward0.array.chunked.ChunkedArray): # includes AppendableArray
batches = []
for chunk in obj.chunks:
arr = toarrow(chunk)
if isinstance(arr, pyarrow.Table):
batches.extend(arr.to_batches())
else:
batches.append(pyarrow.RecordBatch.from_arrays([arr], [""]))
return pyarrow.Table.from_batches(batches)
elif isinstance(obj, awkward0.array.masked.IndexedMaskedArray) and isinstance(obj.content, awkward0.array.table.Table):
mask = obj.boolmask(maskedwhen=True)
if len(obj.content) == 0:
content = obj.numpy.empty(len(obj.mask), dtype=obj.DEFAULTTYPE)
else:
content = obj.content[obj.mask]
return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([recurse(x, mask) for x in obj.content.contents.values()], list(obj.content.contents))])
elif isinstance(obj, awkward0.array.masked.MaskedArray) and isinstance(obj.content, awkward0.array.table.Table): # includes BitMaskedArray
mask = obj.boolmask(maskedwhen=True)
return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([recurse(x, mask) for x in obj.content.contents.values()], list(obj.content.contents))])
elif isinstance(obj, awkward0.array.table.Table):
return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([recurse(x, None) for x in obj.contents.values()], list(obj.contents))])
else:
return recurse(obj, None)
def fromarrow(obj):
import pyarrow
awkwardlib = awkward0
ARROW_BITMASKTYPE = awkwardlib.numpy.uint8
ARROW_INDEXTYPE = awkwardlib.numpy.int32
ARROW_LARGEINDEXTYPE = awkwardlib.numpy.int64
ARROW_TAGTYPE = awkwardlib.numpy.uint8
ARROW_CHARTYPE = awkwardlib.numpy.uint8
def popbuffers(array, tpe, buffers, length):
if isinstance(tpe, pyarrow.lib.DictionaryType):
index = popbuffers(None if array is None else array.indices, tpe.index_type, buffers, length)
if hasattr(tpe, "dictionary"):
content = fromarrow(tpe.dictionary)
elif array is not None:
content = fromarrow(array.dictionary)
else:
raise NotImplementedError("no way to access Arrow dictionary inside of UnionArray")
if isinstance(index, awkwardlib.BitMaskedArray):
return awkwardlib.BitMaskedArray(index.mask, awkwardlib.IndexedArray(index.content, content), maskedwhen=index.maskedwhen, lsborder=index.lsborder)
else:
return awkwardlib.IndexedArray(index, content)
elif isinstance(tpe, pyarrow.lib.StructType):
assert getattr(tpe, "num_buffers", 1) == 1
mask = buffers.pop(0)
pairs = []
for i in range(tpe.num_children):
pairs.append((tpe[i].name, popbuffers(None if array is None else array.field(tpe[i].name), tpe[i].type, buffers, length)))
out = awkwardlib.Table.frompairs(pairs, 0) # FIXME: better rowstart
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif isinstance(tpe, pyarrow.lib.ListType):
assert getattr(tpe, "num_buffers", 2) == 2
mask = buffers.pop(0)
offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length + 1]
content = popbuffers(None if array is None else array.flatten(), tpe.value_type, buffers, offsets[-1])
out = awkwardlib.JaggedArray.fromoffsets(offsets, content)
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif hasattr(pyarrow.lib, 'LargeListType') and isinstance(tpe, pyarrow.lib.LargeListType):
assert getattr(tpe, "num_buffers", 2) == 2
mask = buffers.pop(0)
offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_LARGEINDEXTYPE)[:length + 1]
content = popbuffers(None if array is None else array.flatten(), tpe.value_type, buffers, offsets[-1])
out = awkwardlib.JaggedArray.fromoffsets(offsets, content)
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif isinstance(tpe, pyarrow.lib.UnionType) and tpe.mode == "sparse":
assert getattr(tpe, "num_buffers", 3) == 3
mask = buffers.pop(0)
tags = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_TAGTYPE)[:length]
assert buffers.pop(0) is None
index = awkwardlib.numpy.arange(len(tags), dtype=ARROW_INDEXTYPE)
contents = []
for i in range(tpe.num_children):
try:
sublength = index[tags == i][-1] + 1
except IndexError:
sublength = 0
contents.append(popbuffers(None, tpe[i].type, buffers, sublength))
for i in range(len(contents)):
these = index[tags == i]
if len(these) == 0:
contents[i] = contents[i][0:0]
else:
contents[i] = contents[i][: these[-1] + 1]
out = awkwardlib.UnionArray(tags, index, contents)
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif isinstance(tpe, pyarrow.lib.UnionType) and tpe.mode == "dense":
assert getattr(tpe, "num_buffers", 3) == 3
mask = buffers.pop(0)
tags = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_TAGTYPE)[:length]
index = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length]
contents = []
for i in range(tpe.num_children):
try:
sublength = index[tags == i].max() + 1
except ValueError:
sublength = 0
contents.append(popbuffers(None, tpe[i].type, buffers, sublength))
for i in range(len(contents)):
these = index[tags == i]
if len(these) == 0:
contents[i] = contents[i][0:0]
else:
contents[i] = contents[i][: these.max() + 1]
out = awkwardlib.UnionArray(tags, index, contents)
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif tpe == pyarrow.string():
assert getattr(tpe, "num_buffers", 3) == 3
mask = buffers.pop(0)
offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length + 1]
content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding="utf-8")
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif tpe == pyarrow.large_string():
assert getattr(tpe, "num_buffers", 3) == 3
mask = buffers.pop(0)
offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_LARGEINDEXTYPE)[:length + 1]
content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding="utf-8")
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif tpe == pyarrow.binary():
assert getattr(tpe, "num_buffers", 3) == 3
mask = buffers.pop(0)
offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_INDEXTYPE)[:length + 1]
content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding=None)
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif tpe == pyarrow.large_binary():
assert getattr(tpe, "num_buffers", 3) == 3
mask = buffers.pop(0)
offsets = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_LARGEINDEXTYPE)[:length + 1]
content = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)[:offsets[-1]]
out = awkwardlib.StringArray.fromoffsets(offsets, content[:offsets[-1]], encoding=None)
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif tpe == pyarrow.bool_():
assert getattr(tpe, "num_buffers", 2) == 2
mask = buffers.pop(0)
out = awkwardlib.numpy.unpackbits(awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=ARROW_CHARTYPE)).view(awkwardlib.MaskedArray.BOOLTYPE)
out = out.reshape(-1, 8)[:,::-1].reshape(-1)[:length] # lsborder=True
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
elif isinstance(tpe, pyarrow.lib.DataType):
assert getattr(tpe, "num_buffers", 2) == 2
mask = buffers.pop(0)
out = awkwardlib.numpy.frombuffer(buffers.pop(0), dtype=tpe.to_pandas_dtype())[:length]
if mask is not None:
mask = awkwardlib.numpy.frombuffer(mask, dtype=ARROW_BITMASKTYPE)
return awkwardlib.BitMaskedArray(mask, out, maskedwhen=False, lsborder=True)
else:
return out
else:
raise NotImplementedError(repr(tpe))
if isinstance(obj, pyarrow.lib.Array):
buffers = obj.buffers()
out = popbuffers(obj, obj.type, buffers, len(obj))
assert len(buffers) == 0
return out
elif isinstance(obj, pyarrow.lib.ChunkedArray):
chunks = [x for x in obj.chunks if len(x) > 0]
if len(chunks) == 1:
return fromarrow(chunks[0])
else:
return awkwardlib.ChunkedArray([fromarrow(x) for x in chunks], chunksizes=[len(x) for x in chunks])
elif isinstance(obj, pyarrow.lib.RecordBatch):
out = awkwardlib.Table()
for n, x in zip(obj.schema.names, obj.columns):
out[n] = fromarrow(x)
return out
elif isinstance(obj, pyarrow.lib.Table):
chunks = []
chunksizes = []
for batch in obj.to_batches():
chunk = fromarrow(batch)
if len(chunk) > 0:
chunks.append(chunk)
chunksizes.append(len(chunk))
if len(chunks) == 1:
return chunks[0]
else:
return awkwardlib.ChunkedArray(chunks, chunksizes=chunksizes)
else:
raise NotImplementedError(type(obj))
################################################################################ Parquet file handling
def toparquet(where, obj, **options):
import pyarrow.parquet
options["where"] = where
def convert(obj, message):
if isinstance(obj, (awkward0.array.base.AwkwardArray, numpy.ndarray)):
out = toarrow(obj)
if isinstance(out, pyarrow.Table):
return out
else:
return pyarrow.Table.from_batches([pyarrow.RecordBatch.from_arrays([out], [""])])
else:
raise TypeError(message)
if isinstance(obj, awkward0.array.chunked.ChunkedArray):
obj = iter(obj.chunks)
try:
awkitem = next(obj)
except StopIteration:
raise ValueError("iterable is empty")
arritem = convert(awkitem, None)
if "schema" not in options:
options["schema"] = arritem.schema
writer = pyarrow.parquet.ParquetWriter(**options)
writer.write_table(arritem)
try:
while True:
try:
awkitem = next(obj)
except StopIteration:
break
else:
writer.write_table(convert(awkitem, None))
finally:
writer.close()
elif isinstance(obj, (awkward0.array.base.AwkwardArray, numpy.ndarray)):
arritem = convert(obj, None)
options["schema"] = arritem.schema
writer = pyarrow.parquet.ParquetWriter(**options)
writer.write_table(arritem)
writer.close()
else:
try:
obj = iter(obj)
except TypeError:
raise TypeError("cannot write {0} to Parquet file".format(type(obj)))
try:
awkitem = next(obj)
except StopIteration:
raise ValueError("iterable is empty")
arritem = convert(awkitem, "cannot write iterator of {0} to Parquet file".format(type(awkitem)))
if "schema" not in options:
options["schema"] = arritem.schema
writer = pyarrow.parquet.ParquetWriter(**options)
writer.write_table(arritem)
try:
while True:
try:
awkitem = next(obj)
except StopIteration:
break
else:
writer.write_table(convert(awkitem, "cannot write iterator of {0} to Parquet file".format(type(awkitem))))
finally:
writer.close()
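# Usage sketch (illustrative; assumes pyarrow is installed):
#   toparquet("out.parquet", awkward0.fromiter([[1.1, 2.2], [], [3.3]]))
# A ChunkedArray is written chunk by chunk, each chunk via its own write_table
# call; other arrays are converted with toarrow() and written as a single table.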
class _ParquetFile(object):
def __init__(self, file, metadata=None, common_metadata=None):
self.file = file
self.metadata = metadata
self.common_metadata = common_metadata
self._init()
def _init(self):
import pyarrow.parquet
self.parquetfile = pyarrow.parquet.ParquetFile(self.file, metadata=self.metadata, common_metadata=self.common_metadata)
self.type = schema2type(self.parquetfile.schema.to_arrow_schema())
def __getstate__(self):
return {"file": self.file, "metadata": self.metadata, "common_metadata": self.common_metadata}
def __setstate__(self, state):
self.file = state["file"]
self.metadata = state["metadata"]
self.common_metadata = state["common_metadata"]
self._init()
def __call__(self, rowgroup, column):
return fromarrow(self.parquetfile.read_row_group(rowgroup, columns=[column]))[column]
def tojson(self):
json.dumps([self.file, self.metadata, self.common_metadata])
return {"file": self.file, "metadata": self.metadata, "common_metadata": self.common_metadata}
@classmethod
def fromjson(cls, state):
return cls(state["file"], metadata=state["metadata"], common_metadata=state["common_metadata"])
def fromparquet(file, cache=None, persistvirtual=False, metadata=None, common_metadata=None):
awkwardlib = awkward0
parquetfile = _ParquetFile(file, metadata=metadata, common_metadata=common_metadata)
columns = parquetfile.type.columns
chunks = []
chunksizes = []
for i in range(parquetfile.parquetfile.num_row_groups):
numrows = parquetfile.parquetfile.metadata.row_group(i).num_rows
if numrows > 0:
if columns == [""]:
chunk = awkwardlib.VirtualArray(parquetfile, (i, ""), cache=cache, type=awkwardlib.type.ArrayType(numrows, parquetfile.type[""]), persistvirtual=persistvirtual)
else:
chunk = awkwardlib.Table()
for n in columns:
q = awkwardlib.VirtualArray(parquetfile, (i, n), cache=cache, type=awkwardlib.type.ArrayType(numrows, parquetfile.type[n]), persistvirtual=persistvirtual)
chunk.contents[n] = q
chunks.append(chunk)
chunksizes.append(numrows)
return awkwardlib.ChunkedArray(chunks, chunksizes)
|
tests/pyboard.py | learnforpractice/micropython-cpp | 692 | 12662023 | <reponame>learnforpractice/micropython-cpp<filename>tests/pyboard.py
../tools/pyboard.py |
scripts/reports/old_logs.py | shatadru99/archai | 344 | 12662035 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
from typing import Dict, Type
import glob
import os
import pathlib
from runstats import Statistics
def main():
parser = argparse.ArgumentParser(description='NAS E2E Runs')
parser.add_argument('--logdir', type=str, default='D:\\logdir\\azure\\random_cifar_test',
help='folder with logs')
args, extra_args = parser.parse_known_args()
lines = []
top1s=[]
for filepath in pathlib.Path(args.logdir).rglob('logs.log'):
epoch = 0
for line in pathlib.Path(filepath).read_text().splitlines():
if '[eval_test] Epoch: [ 1/1] ' in line:
top1s.append(Statistics())
top1 = float(line.strip().split('(')[-1].split(',')[0].split('%')[0].strip())/100.0
lines.append(f'{epoch}\t{top1}\t{str(filepath)}')
top1s[epoch].push(top1)
epoch += 1
pathlib.Path(os.path.join(args.logdir, 'summary.tsv')).write_text('\n'.join(lines))
stat_lines = ['epoch\tmean\tstddev\tcount']
for i,top1 in enumerate(top1s):
stat_lines.append(f'{i}\t{top1.mean()}\t{top1.stddev() if len(top1)>1 else float("NaN")}\t{len(top1)}')
pathlib.Path(os.path.join(args.logdir, 'summary_stats.tsv')).write_text('\n'.join(stat_lines))
if __name__ == '__main__':
main()
|
uxy/uxy_w.py | sustrik/uxy | 735 | 12662053 | <gh_stars>100-1000
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import re
import sys
from uxy import base
def _linux_args(args):
parser = argparse.ArgumentParser("__main__.py w", add_help=False)
parser.add_argument("-h", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("--no-header", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("-s", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("--short", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("-f", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("--from", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("-o", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("--old-style", action="store_true", default=argparse.SUPPRESS)
parser.add_argument("--help", action="store_true", default=argparse.SUPPRESS)
base.check_args(args, parser)
return args + []
def _osx_args(args):
parser = argparse.ArgumentParser("__main__.py w", add_help=False)
parser.add_argument("-h", action="store_true", default=argparse.SUPPRESS)
base.check_args(args, parser)
return args + []
def _bsd_args(args):
return args + []
def w(args, uxy_args):
# Launch the underlying binary.
if uxy_args.platform.startswith("linux"):
args = _linux_args(args)
elif uxy_args.platform.startswith("darwin"):
args = _osx_args(args)
else:
args = _bsd_args(args)
proc = base.launch(uxy_args, ['w'] + args[1:])
# Ignore status line.
proc.readline()
# Process the header line.
hdr = proc.readline()
parser = base.FmtParser(hdr)
fmt = base.Format(hdr)
base.writeline(fmt.render())
# Process data lines.
for ln in proc:
base.writeline(fmt.render(parser.extract(ln)))
return proc.wait()
|
2012/plugins_python/htmlize_main.py | mikiec84/code-for-blog | 1,199 | 12662064 | <reponame>mikiec84/code-for-blog
#-------------------------------------------------------------------------------
# htmlize: htmlize_main.py
#
# Main user-facing program. Usage: pipe some input text to its stdin.
#
# <NAME> (<EMAIL>)
# This code is in the public domain
#-------------------------------------------------------------------------------
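# Example invocation (illustrative; assumes the htmlize package is importable
# from the working directory or PYTHONPATH):
#
#   echo "Some post contents" | python htmlize_main.py
#
# The rendered HTML for the newly created post is printed to stdout.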
from datetime import datetime
import os, sys
from htmlize.core import htmlize
from htmlize.db import DB, Post
from htmlize.iplugin import discover_plugins
if __name__ == '__main__':
# Look for plugins in the plugins/ directory which lives in the same
# place with this program.
mydir = os.path.dirname(sys.argv[0])
plugins = discover_plugins([os.path.join(mydir, 'plugins')])
contents = sys.stdin.read()
db = DB()
post = db.create_new_post(
author='eliben',
date=datetime.today(),
title='Hello world',
contents=contents)
print(htmlize(post, db, plugins))
|
cpmpy/remainders.py | tias/hakank | 279 | 12662071 | """
Remainder problem in cpmpy.
'''
11. Is there a number which when divided by 3 gives a remainder of 1;
when divided by 4, gives a remainder of 2; when divided by 5, gives a
remainder of 3; and when divided by 6, gives a remainder of 4?
(Kordemsky)
'''
Model created by <NAME>, <EMAIL>
See also my CPMpy page: http://www.hakank.org/cpmpy/
"""
from cpmpy import *
import numpy as np
from cpmpy_hakank import *
def remainder_problem():
Max = 10000
v = intvar(1,Max,shape=5,name="v")
X,A,B,C,D = v
model = Model([
X == A*3 + 1,
X == B*4 + 2,
X == C*5 + 3,
X == D*6 + 4,
])
ss = CPM_ortools(model)
num_solutions = 0
xs = []
while ss.solve():
num_solutions += 1
# print(v.value())
xs.append(v[0].value())
get_different_solution(ss,v)
print(xs)
print("len:",len(xs))
# Another approach
def remainder_problem2():
Max = 10000
v = intvar(1,Max,shape=5,name="v")
X,A,B,C,D = v
model = Model()
for (i,k) in zip(range(1,4+1),[A,B,C,D]):
model += (X == k*(i+2) + i)
ss = CPM_ortools(model)
num_solutions = 0
xs = []
while ss.solve():
num_solutions += 1
# print(v.value())
xs.append(v[0].value())
get_different_solution(ss,v)
print(xs)
print("len:",len(xs))
remainder_problem()
print("Another approach")
remainder_problem2()
|
calvin/actorstore/systemactors/text/WordCount.py | gabrielcercel/calvin-base | 334 | 12662072 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from calvin.actor.actor import Actor, manage, condition, stateguard
from calvin.runtime.north.calvin_token import EOSToken
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class WordCount(Actor):
"""
Count occurrences of words in a stream of words.
Inputs:
in : a word
Outputs:
out : count for each word
"""
@manage([])
def init(self):
self.word_counts = defaultdict(int)
self.finished = False
def exception_handler(self, action, args):
self.finished = True
@condition(['in'], [])
def count_word(self, word):
self.word_counts[word] = self.word_counts[word] + 1
@stateguard(lambda self: self.finished is True)
@condition(action_output=['out'])
def output_counts(self):
self.finished = False
return (self.word_counts,)
action_priority = (count_word, output_counts)
test_set = [
{
'inports': {'in': ['a', 'b', 'a', EOSToken()]},
'outports': {'out': [{'a': 2, 'b': 1}]}
}
]
|
tests/pytests/unit/modules/file/test_file_basics.py | tomdoherty/salt | 9,425 | 12662078 | import logging
import os
import shutil
import pytest
import salt.config
import salt.loader
import salt.modules.cmdmod as cmdmod
import salt.modules.config as configmod
import salt.modules.file as filemod
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from tests.support.mock import MagicMock, call, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
}
}
@pytest.fixture
def tmp_sub_dir(tmp_path):
directory = tmp_path / "file-basics-test-dir"
directory.mkdir()
yield directory
shutil.rmtree(str(directory))
@pytest.fixture
def tfile(tmp_sub_dir):
filename = str(tmp_sub_dir / "file-basics-test-file")
with salt.utils.files.fopen(filename, "w+") as fp:
fp.write("Hi hello! I am a file.")
yield filename
os.remove(filename)
@pytest.fixture
def myfile(tmp_sub_dir):
filename = str(tmp_sub_dir / "myfile")
with salt.utils.files.fopen(filename, "w+") as fp:
fp.write(salt.utils.stringutils.to_str("Hello\n"))
yield filename
os.remove(filename)
@pytest.fixture
def a_link(tmp_sub_dir):
path = tmp_sub_dir / "a_link"
linkname = str(path)
yield linkname
if path.exists():
os.remove(linkname)
@pytest.fixture
def a_hardlink(tmp_sub_dir):
path = tmp_sub_dir / "a_hardlink"
linkname = str(path)
yield linkname
if path.exists():
os.remove(linkname)
@pytest.mark.skip_on_windows(reason="os.symlink is not available on Windows")
def test_symlink_already_in_desired_state(tfile, a_link):
os.symlink(tfile, a_link)
result = filemod.symlink(tfile, a_link)
assert result
@pytest.mark.skip_on_windows(reason="os.link is not available on Windows")
def test_hardlink_sanity(tfile, a_hardlink):
target = a_hardlink
result = filemod.link(tfile, target)
assert result
@pytest.mark.skip_on_windows(reason="os.link is not available on Windows")
def test_hardlink_numlinks(tfile, a_hardlink):
target = a_hardlink
result = filemod.link(tfile, target)
name_i = os.stat(tfile).st_nlink
assert name_i > 1
@pytest.mark.skip_on_windows(reason="os.link is not available on Windows")
def test_hardlink_working(tfile, a_hardlink):
target = a_hardlink
result = filemod.link(tfile, target)
name_i = os.stat(tfile).st_ino
target_i = os.stat(target).st_ino
assert name_i == target_i
def test_source_list_for_list_returns_file_from_dict_via_http():
with patch("salt.modules.file.os.remove") as remove:
remove.return_value = None
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
"cp.cache_file": MagicMock(return_value="/tmp/http.conf"),
},
):
with patch("salt.utils.http.query") as http_query:
http_query.return_value = {}
ret = filemod.source_list(
[{"http://t.est.com/http/httpd.conf": "filehash"}], "", "base"
)
assert list(ret) == ["http://t.est.com/http/httpd.conf", "filehash"]
def test_source_list_use_requests():
with patch("salt.modules.file.os.remove") as remove:
remove.return_value = None
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
"cp.cache_file": MagicMock(return_value="/tmp/http.conf"),
},
):
expected_call = call(
"http://t.est.com/http/file1",
decode_body=False,
method="HEAD",
)
with patch(
"salt.utils.http.query", MagicMock(return_value={})
) as http_query:
ret = filemod.source_list(
[{"http://t.est.com/http/file1": "filehash"}], "", "base"
)
assert list(ret) == ["http://t.est.com/http/file1", "filehash"]
assert expected_call in http_query.mock_calls
def test_source_list_for_list_returns_existing_file():
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=["http/httpd.conf.fallback"]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
["salt://http/httpd.conf", "salt://http/httpd.conf.fallback"],
"filehash",
"base",
)
assert list(ret) == ["salt://http/httpd.conf.fallback", "filehash"]
def test_source_list_for_list_returns_file_from_other_env():
def list_master(env):
dct = {"base": [], "dev": ["http/httpd.conf"]}
return dct[env]
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(side_effect=list_master),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
[
"salt://http/httpd.conf?saltenv=dev",
"salt://http/httpd.conf.fallback",
],
"filehash",
"base",
)
assert list(ret) == ["salt://http/httpd.conf?saltenv=dev", "filehash"]
def test_source_list_for_list_returns_file_from_dict():
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=["http/httpd.conf"]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([{"salt://http/httpd.conf": ""}], "filehash", "base")
assert list(ret) == ["salt://http/httpd.conf", "filehash"]
def test_source_list_for_list_returns_existing_local_file_slash(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([myfile + "-foo", myfile], "filehash", "base")
assert list(ret) == [myfile, "filehash"]
def test_source_list_for_list_returns_existing_local_file_proto(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
["file://" + myfile + "-foo", "file://" + myfile],
"filehash",
"base",
)
assert list(ret) == ["file://" + myfile, "filehash"]
def test_source_list_for_list_returns_local_file_slash_from_dict(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([{myfile: ""}], "filehash", "base")
assert list(ret) == [myfile, "filehash"]
def test_source_list_for_list_returns_local_file_proto_from_dict(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([{"file://" + myfile: ""}], "filehash", "base")
assert list(ret) == ["file://" + myfile, "filehash"]
|
scripts/test_GRSIMatch.py | Ni-Chen/replicability.optics | 156 | 12662079 | <gh_stars>100-1000
from slugify import slugify
filepath = 'GRSI.dat'
GRSI=[]
with open(filepath) as fp:
GRSI = fp.readlines()
print(GRSI)
|
aslam_offline_calibration/kalibr/python/kalibr_imu_camera_calibration/__init__.py | PushyamiKaveti/kalibr | 2,690 | 12662086 | <filename>aslam_offline_calibration/kalibr/python/kalibr_imu_camera_calibration/__init__.py
from IccCalibrator import *
import IccUtil as util
import IccPlots as plots
import IccSensors as sens
|
tests/data/mfoo.py | snakers4/pyarmor | 1,463 | 12662115 | import multiprocessing as mp
import pub_foo
def hello(q):
print('module name: %s' % __name__)
q.put('hello')
if __name__ == '__main__':
try:
ctx = mp.get_context('spawn')
except Exception:
ctx = mp
q = ctx.Queue()
p = ctx.Process(target=pub_foo.proxy_hello, args=(q,))
p.start()
print(q.get())
p.join()
|
tests/test_application.py | boroviksergey/web-app-from-scratch | 120 | 12662120 | <gh_stars>100-1000
from io import BytesIO
from scratch.application import Application
from scratch.headers import Headers
from scratch.request import Request
from scratch.response import Response
app = Application()
@app.route("/")
def static_handler(request):
return Response(content="static")
@app.route("/people/{name}/{age}")
def dynamic_handler(request, name, age):
return Response(content=f"{name} is {age} years old!")
def test_applications_can_route_requests():
# Given that I have an application
# When I request the static_handler
response = app(Request(method="GET", path="/", headers=Headers(), body=BytesIO()))
# Then I should get back a valid response
assert response.body.read() == b"static"
def test_applications_can_route_requests_to_dynamic_paths():
# Given that I have an application
# When I request the dynamic_handler
response = app(Request(method="GET", path="/people/Jim/32", headers=Headers(), body=BytesIO()))
# Then I should get back a valid response
assert response.body.read() == b"Jim is 32 years old!"
def test_applications_can_fail_to_route_invalid_paths():
# Given that I have an application
# When I request a path that isn't registered
response = app(Request(method="GET", path="/invalid", headers=Headers(), body=BytesIO()))
# Then I should get back a 404 response
assert response.status == b"404 Not Found"
|
omnizart/patch_cnn/__init__.py | nicolasanjoran/omnizart | 1,145 | 12662121 | """Vocal pitch contour transcription PatchCNN ver.
Transcribes the monophonic vocal pitch contour in the given polyphonic audio
using the PatchCNN approach.
Re-implementation of the repository `VocalMelodyExtPatchCNN <https://github.com/leo-so/VocalMelodyExtPatchCNN>`_.
Feature Storage Format
----------------------
Processed feature and label will be stored in ``.hdf`` format, one file per piece.
Columns contained in each file are:
* **feature**: Patch CFP feature.
* **label**: Binary classes of each patch.
* **Z**: The original CFP feature.
* **mapping**: Records the original frequency and time indexes of each patch.
References
##########
Publication of this module can be found in [1]_.
.. [1] <NAME>, "Vocal Melody Extraction Using Patch-based CNN," in IEEE International Conference on Acoustics,
Speech, and Signal Processing (ICASSP), 2018.
"""
from omnizart.patch_cnn.app import PatchCNNTranscription
app = PatchCNNTranscription()
|
absl/testing/tests/absltest_env.py | em10100/abseil-py | 1,969 | 12662131 | <filename>absl/testing/tests/absltest_env.py
"""Helper library to get environment variables for absltest helper binaries."""
import os
_INHERITED_ENV_KEYS = frozenset({
# This is needed to correctly use the Python interpreter determined by
# bazel.
'PATH',
# This is used by the random module on Windows to locate crypto
# libraries.
'SYSTEMROOT',
})
def inherited_env():
"""Returns the environment variables that should be inherited from parent.
Reason why using an explict list of environment variables instead of
inheriting all from parent: the absltest module itself interprets a list of
environment variables set by bazel, e.g. XML_OUTPUT_FILE,
TESTBRIDGE_TEST_ONLY. While testing absltest's own behavior, we should
remove them when invoking the helper subprocess. Using an explicit list is
safer.
"""
env = {}
for key in _INHERITED_ENV_KEYS:
if key in os.environ:
env[key] = os.environ[key]
return env
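# A minimal usage sketch (illustrative; the helper binary path is a
# placeholder, not part of this module):
#
#   import subprocess
#   env = inherited_env()
#   env['TESTBRIDGE_TEST_ONLY'] = 'SomeTest'  # add test-specific variables on top
#   subprocess.run(['/path/to/absltest_helper'], env=env, check=True)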
|
utils/tracing/python/test_tracing.py | tom-kuchler/vhive | 138 | 12662137 | # MIT License
#
# Copyright (c) 2021 <NAME> and EASE lab
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
import tracing
import os
import time
class MyTest(unittest.TestCase):
def test(self):
os.system("docker run -d --name zipkin-test -p 9411:9411 openzipkin/zipkin")
time.sleep(5)
tracing.initTracer("test tracer", debug=True)
with tracing.Span("test parent span"):
with tracing.Span("test child span"):
self.assertTrue(True)
time.sleep(5)
os.system("docker rm -f zipkin-test") |
am4/rom/tools/plm.py | yshestakov/cpu11 | 118 | 12662179 | #!/usr/bin/python3
#
# M4 processor PDP-11 instruction decoding PLM Analyzer
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import argparse
def get_x16(mset, mclr, x=16):
s = ''
for i in range(x):
if mset & (1 << (x - 1 - i)):
if mclr & (1 << (x - 1 - i)):
s += '.'
else:
s += '1'
elif mclr & (1 << (x - 1 - i)):
s += '0'
else:
s += 'x'
return s
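# For example, get_x16(0b1010, 0b0100, 4) returns '101x': a bit set only in
# mset prints as '1', only in mclr as '0', in neither as 'x', and in both as '.'.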
def createParser():
p = argparse.ArgumentParser(
description='M4 processor PLM analyzer, '
'Version 20.06a, (c) 1801BM1')
p.add_argument('src', nargs=1, type=str,
help='input binary file(s)', metavar='file')
p.add_argument ('-b', type=lambda x: int(x,8), nargs='?',
default=0, metavar='begin',
help='initial address of scan range')
p.add_argument ('-e', type=lambda x: int(x,8), nargs='?',
default=0xFFFF, metavar='end',
help='end address of scan range')
return p
def main():
# Parse the command line
parser = createParser()
params = parser.parse_args()
ba = params.b
ea = params.e
# Read the entire binary file
with open(params.src[0], "rb") as bin_file:
data = bytes(bin_file.read())
if (len(data) != 0x10000):
print('Error: invalid file length %d' % len(data))
return 1
# Scan the specified range for byte flag (upper 8)
mset = 0xFFFF
mclr = 0xFFFF
mcnt = 0
for a in range(ba, ea + 1):
if data[a] & 0x80:
mset &= a
mclr &= ~a
mcnt += 1
print("Cnt: %05X, Mask: %s" % (mcnt, get_x16(mset, mclr)))
# Scan the specified range for address field
ls = {}
for a in range(ba, ea + 1):
v = data[a] & 0x7F
if v in ls:
ls[v][0] += 1
ls[v][1] &= a
ls[v][2] &= ~a
else:
ls[v] = [1, a, ~a]
for v in ls:
print('%02X: %04X %s' % (v, ls[v][0], get_x16(ls[v][1], ls[v][2])))
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
|
accelerator/examples/build_dsexample-chain.py | eBay/accelerator | 143 | 12662199 | from os.path import dirname, join
from .printer import prt
description = "Dataset: Create a chained dataset."
# Files are stored in same directory as this python file,
# see comment below.
path = dirname(__file__)
def main(urd):
prt.source(__file__)
prt()
prt('Create a chain of datasets using csvimport.')
imp = None
for filename in ('data.csv', 'data2.csv', 'data3.csv'):
# Ideally, you'd set "input_directory" to the location of the
# input files in "accelerator.conf" to avoid an absolute path
# in the input filename.
filename = join(path, filename)
imp = urd.build('csvimport', filename=filename, previous=imp)
prt()
prt('Try this to investigate the chain.')
prt.command('ax ds -c -S', imp)
prt()
prt('To go back in chain and investigate datasets, try')
prt.command('ax ds %s' % (imp,))
prt.command('ax ds %s~' % (imp,))
prt.command('ax ds %s~~' % (imp,))
prt('Note that ~~ can also be written ~2 etc.')
prt()
prt('This method will iterate over the whole chain.')
job = urd.build('dsexample_iteratechain', source=imp)
prt()
prt('To see its output, try')
prt.command('ax job -O', job)
|
release/stubs.min/Autodesk/Revit/UI/__init___parts/DockablePane.py | htlcnn/ironpython-stubs | 182 | 12662204 | <filename>release/stubs.min/Autodesk/Revit/UI/__init___parts/DockablePane.py
class DockablePane(object,IDisposable):
"""
A user interface pane that participates in Revit's docking window system.
DockablePane(other: DockablePane)
DockablePane(id: DockablePaneId)
"""
def Dispose(self):
""" Dispose(self: DockablePane) """
pass
def GetTitle(self):
"""
GetTitle(self: DockablePane) -> str
Returns the current title (a.k.a. window caption) of the dockable pane.
"""
pass
def Hide(self):
"""
Hide(self: DockablePane)
If the pane is on screen, hide it. Has no effect on built-in Revit dockable
panes.
"""
pass
def IsShown(self):
"""
IsShown(self: DockablePane) -> bool
Identify whether the pane is currently visible or in a tab.
"""
pass
@staticmethod
def PaneExists(id):
"""
PaneExists(id: DockablePaneId) -> bool
Returns true if %id% refers to a dockable pane window that currently exists in
the Revit user interface,whether it's hidden or shown.
"""
pass
@staticmethod
def PaneIsBuiltIn(id):
"""
PaneIsBuiltIn(id: DockablePaneId) -> bool
Returns true if %id% refers to a built-in Revit dockable pane, rather than one
created by an add-in.
"""
pass
@staticmethod
def PaneIsRegistered(id):
"""
PaneIsRegistered(id: DockablePaneId) -> bool
Returns true if %id% refers to a built-in Revit dockable pane, or an add-in
pane that has been properly registered with
%Autodesk.Revit.UI.UIApplication.RegisterDockablePane%.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: DockablePane,disposing: bool) """
pass
def Show(self):
"""
Show(self: DockablePane)
If the pane is not currently visible or in a tab, display the pane in the Revit
user interface at its last docked location.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: DockablePane)
__new__(cls: type,id: DockablePaneId)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Id=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The unique identifier for this dockable pane.
Get: Id(self: DockablePane) -> DockablePaneId
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: DockablePane) -> bool
"""
|
pywick/models/segmentation/testnets/mixnet/__init__.py | achaiah/pywick | 408 | 12662205 | """
Source: https://github.com/zsef123/MixNet-PyTorch
""" |
alipay/aop/api/response/AlipaySocialAntforestPlantConsultResponse.py | antopen/alipay-sdk-python-all | 213 | 12662267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipaySocialAntforestPlantConsultResponse(AlipayResponse):
def __init__(self):
super(AlipaySocialAntforestPlantConsultResponse, self).__init__()
self._current_energy = None
self._project_alliable = None
@property
def current_energy(self):
return self._current_energy
@current_energy.setter
def current_energy(self, value):
self._current_energy = value
@property
def project_alliable(self):
return self._project_alliable
@project_alliable.setter
def project_alliable(self, value):
self._project_alliable = value
def parse_response_content(self, response_content):
response = super(AlipaySocialAntforestPlantConsultResponse, self).parse_response_content(response_content)
if 'current_energy' in response:
self.current_energy = response['current_energy']
if 'project_alliable' in response:
self.project_alliable = response['project_alliable']
|
src/algo/export_tx.py | wengzilla/staketaxcsv | 140 | 12662284 |
from algo.asset import Algo, Asset
from algo.config_algo import localconfig
from common.make_tx import (
make_borrow_tx,
make_deposit_collateral_tx,
make_excluded_tx,
make_income_tx,
make_liquidate_tx,
make_lp_deposit_tx,
make_lp_stake_tx,
make_lp_unstake_tx,
make_lp_withdraw_tx,
make_repay_tx,
make_reward_tx,
make_spend_tx,
make_stake_tx,
make_swap_tx,
make_transfer_in_tx,
make_transfer_out_tx,
make_unstake_tx,
make_withdraw_collateral_tx,
)
lp_tickers = {}
def _ingest_row(exporter, row, fee_amount=0, comment=None):
if fee_amount:
fee = Algo(fee_amount)
row.fee = fee.amount
if comment:
row.comment = comment
exporter.ingest_row(row)
def _should_exclude_tx(asset_list):
for asset in asset_list:
ticker = asset.ticker if isinstance(asset, Asset) else asset
if ticker.lower() in localconfig.exclude_asas:
return True
return False
def export_exclude_tx(exporter, txinfo):
row = make_excluded_tx(txinfo)
_ingest_row(exporter, row)
def exclude_tx(func):
def inner(*args, **kwargs):
asset_list = [arg for arg in args[2:] if isinstance(arg, Asset)]
exporter = args[0]
txinfo = args[1]
if _should_exclude_tx(asset_list):
return export_exclude_tx(exporter, txinfo)
return func(*args, **kwargs)
return inner
def exclude_lp_tx(func):
def inner(*args, **kwargs):
exporter = args[0]
txinfo = args[1]
asset = args[2]
asset_currency = lp_tickers.get(asset.id, asset.ticker)
if asset_currency.startswith("LP_"):
tokens = asset_currency.split("_")
if _should_exclude_tx(tokens[2:]):
return export_exclude_tx(exporter, txinfo)
return func(*args, **kwargs)
return inner
@exclude_tx
def export_send_tx(exporter, txinfo, send_asset, fee_amount=0, dest_address=None, comment=None, z_index=0):
if not send_asset.zero():
row = make_transfer_out_tx(txinfo, send_asset.amount, send_asset.ticker, dest_address, z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_receive_tx(exporter, txinfo, receive_asset, fee_amount=0, comment=None, z_index=0):
if not receive_asset.zero():
row = make_transfer_in_tx(txinfo, receive_asset.amount, receive_asset.ticker, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_reward_tx(exporter, txinfo, reward_asset, fee_amount=0, comment=None, z_index=0):
if not reward_asset.zero():
row = make_reward_tx(txinfo, reward_asset.amount, reward_asset.ticker, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_spend_tx(exporter, txinfo, send_asset, fee_amount=0, comment=None, z_index=0):
row = make_spend_tx(txinfo, send_asset.amount, send_asset.ticker, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
@exclude_lp_tx
def export_income_tx(exporter, txinfo, receive_asset, fee_amount=0, comment=None, z_index=0):
receive_asset_currency = lp_tickers.get(receive_asset.id, receive_asset.ticker)
row = make_income_tx(txinfo, receive_asset.amount, receive_asset_currency, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_swap_tx(exporter, txinfo, send_asset, receive_asset, fee_amount=0, comment=None, z_index=0):
row = make_swap_tx(
txinfo,
send_asset.amount, send_asset.ticker,
receive_asset.amount, receive_asset.ticker,
z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
def export_lp_deposit_tx(
exporter, txinfo, amm_symbol, send_asset_1, send_asset_2, lp_asset,
fee_amount=0, comment=None, z_index=0):
lp_asset_currency = f"LP_{amm_symbol}_{send_asset_1.ticker}_{send_asset_2.ticker}"
lp_tickers[lp_asset.id] = lp_asset_currency
if _should_exclude_tx([send_asset_1, send_asset_2, lp_asset]):
return export_exclude_tx(exporter, txinfo)
row = make_lp_deposit_tx(
txinfo,
send_asset_1.amount, send_asset_1.ticker,
lp_asset.amount / 2, lp_asset_currency,
z_index=z_index)
_ingest_row(exporter, row, fee_amount / 2, comment)
row = make_lp_deposit_tx(
txinfo,
send_asset_2.amount, send_asset_2.ticker,
lp_asset.amount / 2, lp_asset_currency,
z_index=z_index + 1)
_ingest_row(exporter, row, fee_amount / 2, comment)
def export_lp_withdraw_tx(
exporter, txinfo, amm_symbol, lp_asset, receive_asset_1, receive_asset_2,
fee_amount=0, comment=None, z_index=0):
lp_asset_currency = f"LP_{amm_symbol}_{receive_asset_1.ticker}_{receive_asset_2.ticker}"
lp_tickers[lp_asset.id] = lp_asset_currency
if _should_exclude_tx([receive_asset_1, receive_asset_2, lp_asset]):
return export_exclude_tx(exporter, txinfo)
row = make_lp_withdraw_tx(
txinfo,
lp_asset.amount / 2, lp_asset_currency,
receive_asset_1.amount, receive_asset_1.ticker,
z_index=z_index)
_ingest_row(exporter, row, fee_amount / 2, comment)
row = make_lp_withdraw_tx(
txinfo,
lp_asset.amount / 2, lp_asset_currency,
receive_asset_2.amount, receive_asset_2.ticker,
z_index=z_index + 1)
_ingest_row(exporter, row, fee_amount / 2, comment)
@exclude_tx
@exclude_lp_tx
def export_lp_stake_tx(exporter, txinfo, send_asset, fee_amount=0, comment=None, z_index=0):
send_asset_currency = lp_tickers.get(send_asset.id, send_asset.ticker)
row = make_lp_stake_tx(txinfo, send_asset.amount, send_asset_currency, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
@exclude_lp_tx
def export_lp_unstake_tx(exporter, txinfo, receive_asset, fee_amount=0, comment=None, z_index=0):
receive_asset_currency = lp_tickers.get(receive_asset.id, receive_asset.ticker)
row = make_lp_unstake_tx(txinfo, receive_asset.amount, receive_asset_currency, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_borrow_tx(exporter, txinfo, receive_asset, fee_amount=0, comment=None, z_index=0):
row = make_borrow_tx(txinfo, receive_asset.amount, receive_asset.ticker, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_repay_tx(exporter, txinfo, send_asset, fee_amount=0, comment=None, z_index=0):
row = make_repay_tx(txinfo, send_asset.amount, send_asset.ticker, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_liquidate_tx(exporter, txinfo, send_asset, receive_asset, fee_amount=0, comment=None, z_index=0):
row = make_liquidate_tx(
txinfo,
send_asset.amount, send_asset.ticker,
receive_asset.amount, receive_asset.ticker,
z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_deposit_collateral_tx(exporter, txinfo, send_asset, fee_amount=0, comment=None, z_index=0):
row = make_deposit_collateral_tx(txinfo, send_asset.amount, send_asset.ticker, z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_withdraw_collateral_tx(exporter, txinfo, receive_asset, fee_amount=0, comment=None, z_index=0):
row = make_withdraw_collateral_tx(txinfo, receive_asset.amount, receive_asset.ticker, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_stake_tx(exporter, txinfo, send_asset, fee_amount=0, comment=None, z_index=0):
send_asset_currency = lp_tickers.get(send_asset.id, send_asset.ticker)
row = make_stake_tx(txinfo, send_asset.amount, send_asset_currency, z_index)
_ingest_row(exporter, row, fee_amount, comment)
@exclude_tx
def export_unstake_tx(exporter, txinfo, receive_asset, fee_amount=0, comment=None, z_index=0):
receive_asset_currency = lp_tickers.get(receive_asset.id, receive_asset.ticker)
row = make_unstake_tx(txinfo, receive_asset.amount, receive_asset_currency, z_index=z_index)
_ingest_row(exporter, row, fee_amount, comment)
|
supar/utils/common.py | zysite/biaffine-parser | 102 | 12662289 | <filename>supar/utils/common.py
# -*- coding: utf-8 -*-
import os
PAD = '<pad>'
UNK = '<unk>'
BOS = '<bos>'
EOS = '<eos>'
MIN = -1e32
CACHE = os.path.expanduser('~/.cache/supar')
|
iot_hunter/util.py | byamao1/HaboMalHunter | 727 | 12662314 | #
# Tencent is pleased to support the open source community by making IoTHunter available.
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys
import os
import hashlib
import json
import struct
import re
import idc
import idautils
import idaapi
class NetUtil():
@staticmethod
def ip_to_long(ip):
result = 0
while True:
if type(ip) != str:
break
ip_list = ip.split('.')
if len(ip_list) != 4:
break
for i in range( 4 ):
result = result + int(ip_list[i]) * 256 ** (3 - i)
break
break
return result
@staticmethod
def long_to_ip(value):
if type(value) != long:
return ""
floor_list = []
yushu = value
for i in reversed(range(4)):
res = divmod(yushu, 256 ** i)
floor_list.append(str(res[0]))
yushu = res[1]
return '.'.join(floor_list)
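# For example, ip_to_long("192.168.1.1") returns 3232235777, and
# long_to_ip(long(3232235777)) maps it back to "192.168.1.1" (long_to_ip
# requires a Python 2 long because of the type check above).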
@staticmethod
def check_domain(domain):
pass
@staticmethod
def check_ip(string):
ret = False
while True:
if type(string) != str:
break
compile_ip = re.compile('^(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|[1-9])\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)\.(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)$')
if compile_ip.match(string):
ret = True
break
break
return ret
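# For example, check_ip("10.0.0.1") returns True, while check_ip("256.1.1.1")
# returns False (each octet pattern tops out at 255) and non-str input is
# rejected by the type check.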
class IdaUtil():
@staticmethod
def is_packed_upx():
strings = idautils.Strings()
count = 0
for s in strings:
if "upx.sf.net" in str(s):
return True
if count >= 2:
break
count += 1
return False
@staticmethod
def match_binary(addr, search_flag, pattern_list):
ret_addr = idc.BADADDR
for pattern in pattern_list:
ret_addr = idc.FindBinary(addr, search_flag, pattern)
if ret_addr != idc.BADADDR:
break
return ret_addr
@staticmethod
def get_to_xrefs(ea):
xref_set = set([])
for xref in idautils.XrefsTo(ea, 1):
xref_set.add(xref.frm)
return xref_set
@staticmethod
def get_frm_xrefs(ea):
xref_set = set([])
for xref in idautils.XrefsFrom(ea, 1):
xref_set.add(xref.to)
return xref_set
@staticmethod
def get_string(addr):
"""
idc.GetString may be return wrong length.
For example: 00096d10f7872706af8155d40ddc4dab address 0x0001A7D4 string length 8, but idc.GetString returns 3.
"""
string = ""
while True:
if idc.Byte(addr) != 0:
string += chr(idc.Byte(addr))
else:
break
addr += 1
return string
class StringUtil():
@staticmethod
def format_data_to_string(data, len):
"""
Replace invisible characters with 16 hexadecimal.
"""
string = ""
for i in data:
if isinstance(i, int):
if i in range(0, 0x20) + range(0x7F, 0xFF):
string += r"\x%02x" % i
else:
string += chr(i)
elif isinstance(i, str):
if ord(i) in range(0, 0x20) + range(0x7F, 0xFF):
string += r"\x%02x" % ord(i)
else:
string += i
return string
|
miner.py | Aareon/tinycoin | 105 | 12662334 | <filename>miner.py
from base import *
from node import GET_WORK, NEW_BLOCK
from block import BlockHeader
msg = str({"type": GET_WORK, "hops": 0})
resp = Queue(10)
@gen.coroutine
def response(message):
if message is None:
print None
else:
bh, b = ast.literal_eval(message)
bh = BlockHeader.deserialize(bh)
while not pow(bh.hash(), BOUND):
b.nonce +=1
yield resp.put(str((bh.serialize(), b ) ))
@gen.coroutine
def OutGoingPeer():
conn = yield tornado.websocket.websocket_connect("ws://"+sys.argv[1], on_message_callback=response)
conn.write_message(msg)
b = yield resp.get()
conn.write_message(str({"type": NEW_BLOCK, "hops":0, "data": b}))
if __name__ == "__main__":
tornado.ioloop.IOLoop.current().run_sync(OutGoingPeer)
|
src/dataloaders/audio.py | dumpmemory/state-spaces | 513 | 12662358 | <reponame>dumpmemory/state-spaces
import torch
import torchaudio
import numpy as np
import os
from os import listdir
from os.path import join
def minmax_scale(tensor, range_min=0, range_max=1):
"""
Min-max scaling to [0, 1].
"""
min_val = torch.amin(tensor, dim=(1, 2), keepdim=True)
max_val = torch.amax(tensor, dim=(1, 2), keepdim=True)
return range_min + (range_max - range_min) * (tensor - min_val) / (max_val - min_val + 1e-6)
def quantize(samples, bits=8, epsilon=0.01):
"""
Linearly quantize a signal in [0, 1] to a signal in [0, q_levels - 1].
"""
q_levels = 1 << bits
samples *= q_levels - epsilon
samples += epsilon / 2
return samples.long()
def dequantize(samples, bits=8):
"""
Dequantize a signal in [0, q_levels - 1].
"""
q_levels = 1 << bits
return samples.float() / (q_levels / 2) - 1
def mu_law_encode(audio, bits=8):
"""
Perform mu-law companding transformation.
"""
mu = torch.tensor((1 << bits) - 1)
# Audio must be min-max scaled between -1 and 1
audio = minmax_scale(audio, range_min=-1, range_max=1)
# Perform mu-law companding transformation.
numerator = torch.log1p(mu * torch.abs(audio + 1e-8))
denominator = torch.log1p(mu)
encoded = torch.sign(audio) * (numerator / denominator)
# Shift signal to [0, 1]
encoded = (encoded + 1) / 2
# Quantize signal to the specified number of levels.
return quantize(encoded, bits=bits)
def mu_law_decode(encoded, bits=8):
"""
Perform inverse mu-law transformation.
"""
mu = (1 << bits) - 1
# Invert the quantization
x = dequantize(encoded, bits=bits)
# Invert the mu-law transformation
x = torch.sign(x) * ((1 + mu)**(torch.abs(x)) - 1) / mu
# Returned values in range [-1, 1]
return x
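# Round-trip note: mu_law_encode min-max scales its input to [-1, 1] before
# companding, so mu_law_decode(mu_law_encode(x, bits=8), bits=8) approximates
# the scaled signal (not the raw x) up to 8-bit quantization error.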
def linear_encode(samples, bits=8):
"""
Perform scaling and linear quantization.
"""
samples = samples.clone()
samples = minmax_scale(samples)
return quantize(samples, bits=bits)
def linear_decode(samples, bits=8):
"""
Invert the linear quantization.
"""
return dequantize(samples, bits=bits)
def q_zero(bits=8):
"""
The quantized level of the 0.0 value.
"""
return 1 << (bits - 1)
class AbstractAudioDataset(torch.utils.data.Dataset):
def __init__(
self,
bits=8,
sample_len=None,
quantization='linear',
return_type='autoregressive',
drop_last=True,
target_sr=None,
context_len=None,
pad_len=None,
**kwargs,
) -> None:
super().__init__()
self.bits = bits
self.sample_len = sample_len
self.quantization = quantization
self.return_type = return_type
self.drop_last = drop_last
self.target_sr = target_sr
self.zero = q_zero(bits)
self.context_len = context_len
self.pad_len = pad_len
for key, value in kwargs.items():
setattr(self, key, value)
self.file_names = NotImplementedError("Must be assigned in setup().")
self.transforms = {}
self.setup()
self.create_quantizer(self.quantization)
self.create_examples(self.sample_len)
def setup(self):
raise NotImplementedError("Must assign a list of filepaths to self.file_names.")
def __getitem__(self, index):
# Load signal
if self.sample_len is not None:
file_name, start_frame, num_frames = self.examples[index]
seq, sr = torchaudio.load(file_name, frame_offset=start_frame, num_frames=num_frames)
else:
seq, sr = torchaudio.load(self.examples[index])
# Average non-mono signals across channels
if seq.shape[0] > 1:
seq = seq.mean(dim=0, keepdim=True)
# Resample signal if required
if self.target_sr is not None and sr != self.target_sr:
if sr not in self.transforms:
self.transforms[sr] = torchaudio.transforms.Resample(orig_freq=sr, new_freq=self.target_sr)
seq = self.transforms[sr](seq)
# Transpose the signal to get (L, 1)
seq = seq.transpose(0, 1)
# Unsqueeze to (1, L, 1)
seq = seq.unsqueeze(0)
# Quantized signal
qseq = self.quantizer(seq, self.bits)
# Squeeze back to (L, 1)
qseq = qseq.squeeze(0)
# Return the signal
if self.return_type == 'autoregressive':
# Autoregressive training
# x is [0, qseq[0], qseq[1], ..., qseq[-2]]
# y is [qseq[0], qseq[1], ..., qseq[-1]]
y = qseq
x = torch.roll(qseq, 1, 0) # Roll the signal 1 step
x[0] = self.zero # Fill the first element with q_0
x = x.squeeze(1) # Squeeze to (L, )
if self.context_len is not None:
y = y[self.context_len:] # Trim the signal
if self.pad_len is not None:
x = torch.cat((torch.zeros(self.pad_len, dtype=torch.long) + self.zero, x)) # Pad the signal
return x, y
elif self.return_type is None:
return qseq
else:
raise NotImplementedError(f'Invalid return type {self.return_type}')
def __len__(self):
return len(self.examples)
def create_examples(self, sample_len: int):
# Get metadata for all files
self.metadata = [
torchaudio.info(file_name) for file_name in self.file_names
]
if sample_len is not None:
# Reorganize files into a flat list of (file_name, start_frame) pairs
# so that consecutive items are separated by sample_len
self.examples = []
for file_name, metadata in zip(self.file_names, self.metadata):
# Update the sample_len if resampling to target_sr is required
# This is because the resampling will change the length of the signal
# so we need to adjust the sample_len accordingly (e.g. if downsampling
# the sample_len will need to be increased)
sample_len_i = sample_len
if self.target_sr is not None and metadata.sample_rate != self.target_sr:
sample_len_i = int(sample_len * metadata.sample_rate / self.target_sr)
margin = metadata.num_frames % sample_len_i
for start_frame in range(0, metadata.num_frames - margin, sample_len_i):
self.examples.append((file_name, start_frame, sample_len_i))
if margin > 0 and not self.drop_last:
# Last (leftover) example is shorter than sample_len, and equal to the margin
# (must be padded in collate_fn)
self.examples.append((file_name, metadata.num_frames - margin, margin))
else:
self.examples = self.file_names
def create_quantizer(self, quantization: str):
if quantization == 'linear':
self.quantizer = linear_encode
self.dequantizer = linear_decode
elif quantization == 'mu-law':
self.quantizer = mu_law_encode
self.dequantizer = mu_law_decode
else:
raise ValueError('Invalid quantization type')
class QuantizedAudioDataset(AbstractAudioDataset):
"""
Adapted from https://github.com/deepsound-project/samplernn-pytorch/blob/master/dataset.py
"""
def __init__(
self,
path,
bits=8,
ratio_min=0,
ratio_max=1,
sample_len=None,
quantization='linear', # [linear, mu-law]
return_type='autoregressive', # [autoregressive, None]
drop_last=False,
target_sr=None,
context_len=None,
pad_len=None,
**kwargs,
):
super().__init__(
bits=bits,
sample_len=sample_len,
quantization=quantization,
return_type=return_type,
drop_last=drop_last,
target_sr=target_sr,
path=path,
ratio_min=ratio_min,
ratio_max=ratio_max,
context_len=context_len,
pad_len=pad_len,
**kwargs,
)
def setup(self):
from natsort import natsorted
file_names = natsorted(
[join(self.path, file_name) for file_name in listdir(self.path)]
)
self.file_names = file_names[
int(self.ratio_min * len(file_names)) : int(self.ratio_max * len(file_names))
]
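# Illustrative usage (hypothetical path and values, not taken from the original repo):
#   dataset = QuantizedAudioDataset(
#       path="/data/audio", bits=8, sample_len=16000,
#       quantization="mu-law", ratio_min=0.0, ratio_max=0.9,
#   )
#   x, y = dataset[0]  # x: (L,) input tokens shifted by one step, y: (L, 1) targets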
class SpeechCommands09(AbstractAudioDataset):
CLASSES = [
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
]
CLASS_TO_IDX = dict(zip(CLASSES, range(len(CLASSES))))
def __init__(
self,
path,
bits=8,
split='train',
sample_len=16000,
quantization='linear', # [linear, mu-law]
return_type='autoregressive', # [autoregressive, None]
drop_last=False,
target_sr=None,
dequantize=False,
pad_len=None,
**kwargs,
):
super().__init__(
bits=bits,
sample_len=sample_len,
quantization=quantization,
return_type=return_type,
split=split,
drop_last=drop_last,
target_sr=target_sr,
path=path,
dequantize=dequantize,
pad_len=pad_len,
**kwargs,
)
def setup(self):
with open(join(self.path, 'validation_list.txt')) as f:
validation_files = set([line.rstrip() for line in f.readlines()])
with open(join(self.path, 'testing_list.txt')) as f:
test_files = set([line.rstrip() for line in f.readlines()])
# Get all files in the paths named after CLASSES
self.file_names = []
for class_name in self.CLASSES:
self.file_names += [
(class_name, file_name)
for file_name in listdir(join(self.path, class_name))
if file_name.endswith('.wav')
]
# Keep files based on the split
if self.split == 'train':
self.file_names = [
join(self.path, class_name, file_name)
for class_name, file_name in self.file_names
if join(class_name, file_name) not in validation_files
and join(class_name, file_name) not in test_files
]
elif self.split == 'validation':
self.file_names = [
join(self.path, class_name, file_name)
for class_name, file_name in self.file_names
if join(class_name, file_name) in validation_files
]
elif self.split == 'test':
self.file_names = [
join(self.path, class_name, file_name)
for class_name, file_name in self.file_names
if join(class_name, file_name) in test_files
]
def __getitem__(self, index):
item = super().__getitem__(index)
x, y, *z = item
if self.dequantize:
x = self.dequantizer(x).unsqueeze(1)
return x, y, *z
|
language/nqg/model/parser/inference/eval_model.py | urikz/language | 1,199 | 12662361 | <gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to evaluate model.
This binary can also be configured to run alongside a training job
and poll for new model checkpoints, writing eval metrics (e.g. for TensorBoard).
This binary also supports evaluations for settings such as NQG-T5, where
predictions from T5 are used when NQG does not produce an output. Such
'fallback' predictions can be supplied via the `--fallback_predictions` flag.
"""
import os
import time
from absl import app
from absl import flags
from language.nqg.model.parser import config_utils
from language.nqg.model.parser.data import tokenization_utils
from language.nqg.model.parser.inference import inference_wrapper
from language.nqg.model.parser.inference.targets import target_grammar
from language.nqg.model.qcfg import qcfg_file
from language.nqg.tasks import tsv_utils
import tensorflow as tf
from official.nlp.bert import configs
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_integer("limit", 0,
"Index of example to begin processing (Ignored if 0).")
flags.DEFINE_integer("offset", 0,
"Index of example to end processing (Ignored if 0).")
flags.DEFINE_bool("verbose", True, "Whether to print debug output.")
flags.DEFINE_string("model_dir", "", "Model directory.")
flags.DEFINE_bool("poll", False, "Whether to poll.")
flags.DEFINE_bool("write", False, "Whether to write metrics to model_dir.")
flags.DEFINE_string("subdir", "eval_test",
"Sub-directory of model_dir for writing metrics.")
flags.DEFINE_string("checkpoint", "", "Checkpoint prefix, or None for latest.")
flags.DEFINE_string("config", "", "Config file.")
flags.DEFINE_string("bert_dir", "",
"Directory for BERT, including vocab and config.")
flags.DEFINE_string("rules", "", "QCFG rules txt file.")
flags.DEFINE_string("fallback_predictions", "",
"Optional fallback predictions txt file.")
flags.DEFINE_string("target_grammar", "", "Optional target CFG.")
def compute_metrics(wrapper, examples):
"""Compute accuracy on examples."""
# Initialize stats.
num_examples = 0
num_nqg_correct = 0
num_nqg_predictions = 0
num_fallback_correct = 0
num_hybrid_correct = 0
fallback_predictions = None
if FLAGS.fallback_predictions:
fallback_predictions = []
with tf.io.gfile.GFile(FLAGS.fallback_predictions, "r") as predictions_file:
for line in predictions_file:
fallback_predictions.append(line.rstrip())
for idx, example in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
if FLAGS.verbose:
print("Processing example %s: %s" % (idx, example[0]))
num_examples += 1
source = example[0]
gold_target = example[1]
nqg_prediction, _ = wrapper.get_output(source)
if nqg_prediction:
num_nqg_predictions += 1
if nqg_prediction == gold_target:
num_nqg_correct += 1
else:
if FLAGS.verbose:
print("nqg incorrect (gold vs. predicted):\n%s\n%s\n" %
(gold_target, nqg_prediction))
fallback_prediction = (
fallback_predictions[idx] if fallback_predictions else None)
if fallback_prediction == gold_target:
num_fallback_correct += 1
else:
if FLAGS.verbose:
print("fallback incorrect (gold vs. predicted):\n%s\n%s\n" %
(gold_target, fallback_prediction))
hybrid_prediction = nqg_prediction or fallback_prediction
if hybrid_prediction == gold_target:
num_hybrid_correct += 1
if FLAGS.verbose:
print("hybrid correct.")
else:
if FLAGS.verbose:
print("hybrid incorrect.")
metrics_dict = {
"nqg_accuracy": float(num_nqg_correct) / float(num_examples),
"fallback_accuracy": float(num_fallback_correct) / float(num_examples),
"hybrid_accuracy": float(num_hybrid_correct) / float(num_examples),
"nqg_coverage": float(num_nqg_predictions) / float(num_examples),
"nqg_precision": float(num_nqg_correct) / float(num_nqg_predictions),
}
if FLAGS.verbose:
print("num_examples: %s" % num_examples)
print("num_nqg_correct: %s" % num_nqg_correct)
print("num_nqg_predictions: %s" % num_nqg_predictions)
print("num_fallback_correct: %s" % num_fallback_correct)
print("num_hybrid_correct: %s" % num_hybrid_correct)
print("metrics_dict: %s" % metrics_dict)
return metrics_dict
def get_summary_writer():
if not FLAGS.write:
return None
return tf.summary.create_file_writer(
os.path.join(FLAGS.model_dir, FLAGS.subdir))
def write_metric(writer, name, metric, step):
with writer.as_default():
tf.summary.scalar(name, metric, step=step)
def get_checkpoint():
"""Return checkpoint path and step, or (None, None)."""
if FLAGS.checkpoint:
checkpoint = os.path.join(FLAGS.model_dir, FLAGS.checkpoint)
else:
checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
# TODO(petershaw): Consider less hacky way to get current step.
step = None
if checkpoint is not None:
    step = int(checkpoint.split("-")[-1])
print("Using checkpoint %s at step %s" % (checkpoint, step))
return checkpoint, step
def get_inference_wrapper(config):
"""Construct and return InferenceWrapper."""
rules = qcfg_file.read_rules(FLAGS.rules)
tokenizer = tokenization_utils.get_tokenizer(
os.path.join(FLAGS.bert_dir, "vocab.txt"))
bert_config = configs.BertConfig.from_json_file(
os.path.join(FLAGS.bert_dir, "bert_config.json"))
target_grammar_rules = None
if FLAGS.target_grammar:
target_grammar_rules = target_grammar.load_rules_from_file(
FLAGS.target_grammar)
wrapper = inference_wrapper.InferenceWrapper(tokenizer, rules, config,
bert_config,
target_grammar_rules)
return wrapper
def run_inference(writer, wrapper, examples, checkpoint, step=None):
"""Run inference."""
wrapper.restore_checkpoint(checkpoint)
metrics_dict = compute_metrics(wrapper, examples)
for metric_name, metric_value in metrics_dict.items():
print("%s at %s: %s" % (metric_name, step, metric_value))
if FLAGS.write:
write_metric(writer, metric_name, metric_value, step)
def main(unused_argv):
config = config_utils.json_file_to_dict(FLAGS.config)
wrapper = get_inference_wrapper(config)
examples = tsv_utils.read_tsv(FLAGS.input)
writer = get_summary_writer()
if FLAGS.poll:
last_checkpoint = None
while True:
checkpoint, step = get_checkpoint()
if checkpoint == last_checkpoint:
print("Waiting for new checkpoint...\nLast checkpoint: %s" %
last_checkpoint)
else:
run_inference(writer, wrapper, examples, checkpoint, step=step)
last_checkpoint = checkpoint
if step and step >= config["training_steps"]:
# Stop eval job after completing eval for last training step.
break
time.sleep(10)
else:
checkpoint, _ = get_checkpoint()
run_inference(writer, wrapper, examples, checkpoint)
if __name__ == "__main__":
app.run(main)
|
StonePaperScissor - GUI/StonePaperScissors.py | avinashkranjan/PraticalPythonProjects | 930 | 12662403 | # -*- coding: utf-8 -*-
import random
from tkinter import *
# variables and dictionary
# All possible outcomes; an if/else chain could also be used, but it is a pain to implement
schema = {
"rock": {"rock": 1, "paper": 0, "scissors": 2},
"paper": {"rock": 2, "paper": 1, "scissors": 0},
"scissors": {"rock": 0, "paper": 2, "scissors": 1}
}
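# e.g. schema["rock"]["scissors"] == 2 (player wins), schema["rock"]["paper"] == 0
# (computer wins) and schema["rock"]["rock"] == 1 (draw)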
comp_score = 0
player_score = 0
# functions
def outcome_handler(user_choice):
global comp_score
global player_score
outcomes = ["rock", "paper", "scissors"]
num = random.randint(0, 2)
computer_choice = outcomes[num]
result = schema[user_choice][computer_choice]
    # now configure the labels according to the choices
Player_Choice_Label.config(
fg="green", text="Player choice : "+str(user_choice))
Computer_Choice_Label.config(
fg="red", text="Computer choice : "+str(computer_choice))
if result == 2:
player_score += 2
Player_Score_Label.config(text="Player : "+str(player_score))
Outcome_Label.config(fg="blue", bg="skyblue", text="Player-Won")
elif result == 1:
player_score += 1
comp_score += 1
Player_Score_Label.config(text="Player : "+str(player_score))
Outcome_Label.config(fg="blue", bg="skyblue", text="Draw")
Computer_Score_Label.config(text="Computer : "+str(comp_score))
elif result == 0:
comp_score += 2
Outcome_Label.config(fg="blue", bg="skyblue", text="Computer-Won")
Computer_Score_Label.config(text="Computer : "+str(comp_score))
# main Screen
master = Tk()
master.title("RPS")
# labels
Label(master, text="Rock , Paper , Scissors", font=(
"Calibri", 15)).grid(row=0, sticky=N, pady=10, padx=200)
Label(master, text="Please Select an option",
font=("Calibri", 12)).grid(row=2, sticky=N)
Player_Score_Label = Label(master, text="Player : 0", font=(
"Calibri", 12)) # label for player Score
Player_Score_Label.grid(row=3, sticky=W)
Computer_Score_Label = Label(master, text="Computer : 0", font=(
"Calibri", 12)) # label for computer score
Computer_Score_Label.grid(row=3, sticky=E)
# player and computer choice labels
Player_Choice_Label = Label(master, font=("Calibri", 12))
Player_Choice_Label.grid(row=5, sticky=W)
Computer_Choice_Label = Label(master, font=("Calibri", 12))
Computer_Choice_Label.grid(row=5, sticky=E)
# outcome Labels
Outcome_Label = Label(master, font=("Calibri", 12))
Outcome_Label.grid(row=5, sticky=N, pady=10)
# buttons
Button(master, text="Rock", width=17, command=lambda: outcome_handler(
"rock")).grid(row=6, sticky=W, padx=10, pady=10)
Button(master, text="Paper", width=17, command=lambda: outcome_handler(
"paper")).grid(row=6, sticky=N, pady=10)
Button(master, text="Scissors", width=17, command=lambda: outcome_handler(
"scissors")).grid(row=6, sticky=E, padx=10, pady=10)
# dummy label to create space at the end of master screen
Label(master).grid(row=5)
master.mainloop()
|
cleo/io/io.py | Ivoz/cleo | 859 | 12662407 | from typing import Iterable
from typing import Optional
from typing import Union
from .inputs.input import Input
from .outputs.output import Output
from .outputs.output import Type as OutputType
from .outputs.output import Verbosity
from .outputs.section_output import SectionOutput
class IO:
def __init__(self, input: Input, output: Output, error_output: Output) -> None:
self._input = input
self._output = output
self._error_output = error_output
@property
def input(self) -> Input:
return self._input
@property
def output(self) -> Output:
return self._output
@property
def error_output(self) -> Output:
return self._error_output
def read(self, length: int, default: Optional[str] = None) -> str:
"""
Reads the given amount of characters from the input stream.
"""
return self._input.read(length, default=default)
def read_line(
self, length: Optional[int] = None, default: Optional[str] = None
) -> str:
"""
Reads a line from the input stream.
"""
return self._input.read_line(length=length, default=default)
def write_line(
self,
messages: Union[str, Iterable[str]],
verbosity: Verbosity = Verbosity.NORMAL,
type: OutputType = OutputType.NORMAL,
) -> None:
self._output.write_line(messages, verbosity=verbosity, type=type)
def write(
self,
messages: Union[str, Iterable[str]],
new_line: bool = False,
verbosity: Verbosity = Verbosity.NORMAL,
type: OutputType = OutputType.NORMAL,
) -> None:
self._output.write(messages, new_line=new_line, verbosity=verbosity, type=type)
def write_error_line(
self,
messages: Union[str, Iterable[str]],
verbosity: Verbosity = Verbosity.NORMAL,
type: OutputType = OutputType.NORMAL,
) -> None:
self._error_output.write_line(messages, verbosity=verbosity, type=type)
def write_error(
self,
messages: Union[str, Iterable[str]],
new_line: bool = False,
verbosity: Verbosity = Verbosity.NORMAL,
type: OutputType = OutputType.NORMAL,
) -> None:
self._error_output.write(
messages, new_line=new_line, verbosity=verbosity, type=type
)
def overwrite(self, messages: Union[str, Iterable[str]]) -> None:
from cleo.cursor import Cursor
cursor = Cursor(self._output)
cursor.move_to_column(1)
cursor.clear_line()
self.write(messages)
def overwrite_error(self, messages: Union[str, Iterable[str]]) -> None:
from cleo.cursor import Cursor
cursor = Cursor(self._error_output)
cursor.move_to_column(1)
cursor.clear_line()
self.write_error(messages)
def flush(self) -> None:
self._output.flush()
def is_interactive(self) -> bool:
return self._input.is_interactive()
def interactive(self, interactive: bool = True) -> None:
self._input.interactive(interactive)
def decorated(self, decorated: bool = True) -> None:
self._output.decorated(decorated)
self._error_output.decorated(decorated)
def is_decorated(self) -> bool:
return self._output.is_decorated()
def supports_utf8(self) -> bool:
return self._output.supports_utf8()
def set_verbosity(self, verbosity: Verbosity) -> None:
self._output.set_verbosity(verbosity)
self._error_output.set_verbosity(verbosity)
def is_verbose(self) -> bool:
return self.output.is_verbose()
def is_very_verbose(self) -> bool:
return self.output.is_very_verbose()
def is_debug(self) -> bool:
return self.output.is_debug()
def set_input(self, input: Input) -> None:
self._input = input
def with_input(self, input: Input) -> "IO":
return self.__class__(input, self._output, self._error_output)
def remove_format(self, text: str) -> str:
return self._output.remove_format(text)
def section(self) -> SectionOutput:
return self._output.section()
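# Illustrative usage (sketch only; `io` must be built from concrete Input/Output
# implementations, which are not shown in this module):
#   io.write_line("Starting download")
#   io.write("progress: 10%")
#   io.overwrite("progress: 20%")  # rewrites the current console line in place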
|
tests/components/zwave_js/test_humidifier.py | MrDelik/core | 30,023 | 12662409 | <gh_stars>1000+
"""Test the Z-Wave JS humidifier platform."""
from zwave_js_server.const import CommandClass
from zwave_js_server.const.command_class.humidity_control import HumidityControlMode
from zwave_js_server.event import Event
from homeassistant.components.humidifier import HumidifierDeviceClass
from homeassistant.components.humidifier.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
DOMAIN as HUMIDIFIER_DOMAIN,
SERVICE_SET_HUMIDITY,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from .common import DEHUMIDIFIER_ADC_T3000_ENTITY, HUMIDIFIER_ADC_T3000_ENTITY
async def test_humidifier(hass, client, climate_adc_t3000, integration):
"""Test a humidity control command class entity."""
node = climate_adc_t3000
state = hass.states.get(HUMIDIFIER_ADC_T3000_ENTITY)
assert state
assert state.state == STATE_ON
assert state.attributes[ATTR_DEVICE_CLASS] == HumidifierDeviceClass.HUMIDIFIER
assert state.attributes[ATTR_HUMIDITY] == 35
assert state.attributes[ATTR_MIN_HUMIDITY] == 10
assert state.attributes[ATTR_MAX_HUMIDITY] == 70
client.async_send_command.reset_mock()
# Test setting humidity
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_SET_HUMIDITY,
{
ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY,
ATTR_HUMIDITY: 41,
},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 1,
"commandClassName": "Humidity Control Setpoint",
"commandClass": CommandClass.HUMIDITY_CONTROL_SETPOINT,
"endpoint": 0,
"property": "setpoint",
"propertyKey": 1,
"propertyName": "setpoint",
"propertyKeyName": "Humidifier",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"unit": "%",
"min": 10,
"max": 70,
"ccSpecific": {"setpointType": 1},
},
"value": 35,
}
assert args["value"] == 41
client.async_send_command.reset_mock()
# Test de-humidify mode update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.DEHUMIDIFY),
"prevValue": int(HumidityControlMode.HUMIDIFY),
},
},
)
node.receive_event(event)
state = hass.states.get(HUMIDIFIER_ADC_T3000_ENTITY)
assert state.state == STATE_OFF
client.async_send_command.reset_mock()
# Test auto mode update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.AUTO),
"prevValue": int(HumidityControlMode.HUMIDIFY),
},
},
)
node.receive_event(event)
state = hass.states.get(HUMIDIFIER_ADC_T3000_ENTITY)
assert state.state == STATE_ON
client.async_send_command.reset_mock()
# Test off mode update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.OFF),
"prevValue": int(HumidityControlMode.HUMIDIFY),
},
},
)
node.receive_event(event)
state = hass.states.get(HUMIDIFIER_ADC_T3000_ENTITY)
assert state.state == STATE_OFF
client.async_send_command.reset_mock()
# Test turning off when device is previously humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.HUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.HUMIDIFY),
}
assert args["value"] == int(HumidityControlMode.OFF)
client.async_send_command.reset_mock()
# Test turning off when device is previously auto
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.AUTO),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.AUTO),
}
assert args["value"] == int(HumidityControlMode.DEHUMIDIFY)
client.async_send_command.reset_mock()
# Test turning off when device is previously de-humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.DEHUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning off when device is previously off
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.OFF),
"prevValue": int(HumidityControlMode.AUTO),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning on when device is previously humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.HUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning on when device is previously auto
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.AUTO),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning on when device is previously de-humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.DEHUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.DEHUMIDIFY),
}
assert args["value"] == int(HumidityControlMode.AUTO)
client.async_send_command.reset_mock()
# Test turning on when device is previously off
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.OFF),
"prevValue": int(HumidityControlMode.AUTO),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: HUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.OFF),
}
assert args["value"] == int(HumidityControlMode.HUMIDIFY)
async def test_dehumidifier_missing_setpoint(
hass, client, climate_adc_t3000_missing_setpoint, integration
):
"""Test a humidity control command class entity."""
entity_id = "humidifier.adc_t3000_missing_setpoint_dehumidifier"
state = hass.states.get(entity_id)
assert state
assert ATTR_HUMIDITY not in state.attributes
assert state.attributes[ATTR_MIN_HUMIDITY] == DEFAULT_MIN_HUMIDITY
assert state.attributes[ATTR_MAX_HUMIDITY] == DEFAULT_MAX_HUMIDITY
client.async_send_command.reset_mock()
# Test setting humidity
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_SET_HUMIDITY,
{
ATTR_ENTITY_ID: entity_id,
ATTR_HUMIDITY: 41,
},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
async def test_humidifier_missing_mode(
hass, client, climate_adc_t3000_missing_mode, integration
):
"""Test a humidity control command class entity."""
node = climate_adc_t3000_missing_mode
# Test that de-humidifer entity does not exist but humidifier entity does
entity_id = "humidifier.adc_t3000_missing_mode_dehumidifier"
state = hass.states.get(entity_id)
assert not state
entity_id = "humidifier.adc_t3000_missing_mode_humidifier"
state = hass.states.get(entity_id)
assert state
client.async_send_command.reset_mock()
# Test turning off when device is previously auto for a device which does not have de-humidify mode
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.AUTO),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.AUTO),
}
assert args["value"] == int(HumidityControlMode.OFF)
client.async_send_command.reset_mock()
async def test_dehumidifier(hass, client, climate_adc_t3000, integration):
"""Test a humidity control command class entity."""
node = climate_adc_t3000
state = hass.states.get(DEHUMIDIFIER_ADC_T3000_ENTITY)
assert state
assert state.state == STATE_ON
assert state.attributes[ATTR_DEVICE_CLASS] == HumidifierDeviceClass.DEHUMIDIFIER
assert state.attributes[ATTR_HUMIDITY] == 60
assert state.attributes[ATTR_MIN_HUMIDITY] == 30
assert state.attributes[ATTR_MAX_HUMIDITY] == 90
client.async_send_command.reset_mock()
# Test setting humidity
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_SET_HUMIDITY,
{
ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY,
ATTR_HUMIDITY: 41,
},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 1,
"commandClassName": "Humidity Control Setpoint",
"commandClass": CommandClass.HUMIDITY_CONTROL_SETPOINT,
"endpoint": 0,
"property": "setpoint",
"propertyKey": 2,
"propertyName": "setpoint",
"propertyKeyName": "De-humidifier",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"unit": "%",
"min": 30,
"max": 90,
"ccSpecific": {"setpointType": 2},
},
"value": 60,
}
assert args["value"] == 41
client.async_send_command.reset_mock()
# Test humidify mode update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.HUMIDIFY),
"prevValue": int(HumidityControlMode.DEHUMIDIFY),
},
},
)
node.receive_event(event)
state = hass.states.get(DEHUMIDIFIER_ADC_T3000_ENTITY)
assert state.state == STATE_OFF
client.async_send_command.reset_mock()
# Test auto mode update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.AUTO),
"prevValue": int(HumidityControlMode.DEHUMIDIFY),
},
},
)
node.receive_event(event)
state = hass.states.get(DEHUMIDIFIER_ADC_T3000_ENTITY)
assert state.state == STATE_ON
client.async_send_command.reset_mock()
# Test off mode update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.OFF),
"prevValue": int(HumidityControlMode.DEHUMIDIFY),
},
},
)
node.receive_event(event)
state = hass.states.get(DEHUMIDIFIER_ADC_T3000_ENTITY)
assert state.state == STATE_OFF
client.async_send_command.reset_mock()
# Test turning off when device is previously de-humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.DEHUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.DEHUMIDIFY),
}
assert args["value"] == int(HumidityControlMode.OFF)
client.async_send_command.reset_mock()
# Test turning off when device is previously auto
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.AUTO),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.AUTO),
}
assert args["value"] == int(HumidityControlMode.HUMIDIFY)
client.async_send_command.reset_mock()
# Test turning off when device is previously humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.HUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning off when device is previously off
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.OFF),
"prevValue": int(HumidityControlMode.AUTO),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning on when device is previously de-humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.DEHUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning on when device is previously auto
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.AUTO),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 0
client.async_send_command.reset_mock()
# Test turning on when device is previously humidifying
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.HUMIDIFY),
"prevValue": int(HumidityControlMode.OFF),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.HUMIDIFY),
}
assert args["value"] == int(HumidityControlMode.AUTO)
client.async_send_command.reset_mock()
# Test turning on when device is previously off
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 68,
"args": {
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"newValue": int(HumidityControlMode.OFF),
"prevValue": int(HumidityControlMode.AUTO),
},
},
)
node.receive_event(event)
await hass.services.async_call(
HUMIDIFIER_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DEHUMIDIFIER_ADC_T3000_ENTITY},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args_list[0][0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == 68
assert args["valueId"] == {
"ccVersion": 2,
"commandClassName": "Humidity Control Mode",
"commandClass": CommandClass.HUMIDITY_CONTROL_MODE,
"endpoint": 0,
"property": "mode",
"propertyName": "mode",
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 0,
"max": 255,
"label": "Humidity control mode",
"states": {"0": "Off", "1": "Humidify", "2": "De-humidify", "3": "Auto"},
},
"value": int(HumidityControlMode.OFF),
}
assert args["value"] == int(HumidityControlMode.DEHUMIDIFY)
|
scripts/artifacts/cashApp.py | Krypterry/ALEAPP | 187 | 12662459 | <gh_stars>100-1000
import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_cashApp(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if file_found.endswith('.db'):
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''Select
payment.role,
payment.sender_id,
CASE WHEN customer.cashtag IS NULL THEN '***NO CASH TAG PRESENT***' ELSE customer.cashtag END,
customer.customer_display_name,
payment.recipient_id,
CASE WHEN customer1.cashtag IS NULL THEN '***NO CASH TAG PRESENT***' ELSE customer1.cashtag END,
customer1.customer_display_name,
payment.state,
datetime(payment.display_date / 1000.0, 'unixepoch'),
CASE WHEN json_extract (payment.render_data, '$."note"') IS NULL THEN '***NO NOTE SUBMITTED***' ELSE json_extract (payment.render_data, '$."note"') END,
printf("$%.2f", json_extract(payment.render_data, '$."amount"."amount"') / 100.0)
From payment
Inner Join customer On customer.customer_id = payment.sender_id
Inner Join customer customer1 On payment.recipient_id = customer1.customer_id
ORDER BY payment.display_date DESC
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Transactions')
report.start_artifact_report(report_folder, 'Transactions')
report.add_script()
                data_headers = ('Transaction Date', 'User Account Role','Sender Display Name','Sender Unique ID', 'Sender Cashtag','Recipient Display Name', 'Recipient Unique ID', 'Recipient Cashtag','Transaction Amount','Transaction Status','Note')
data_list = []
for row in all_rows:
data_list.append((row[8],row[0],row[3],row[1],row[2],row[6],row[4],row[5],row[10],row[7],row[9]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'Cash App Transactions'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'Cash App Transactions'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No Cash App Transactions data available')
db.close()
return
|
src/rayoptics/parax/idealimager.py | ajeddeloh/ray-optics | 106 | 12662478 | <filename>src/rayoptics/parax/idealimager.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2019 <NAME>
""" module to setup an ideal imager
.. Created on Thu May 16 19:57:47 2019
.. codeauthor: <NAME>
"""
import math
from collections import namedtuple
ideal_imager_keys = ['m', 's', 'sp', 'tt', 'f']
ideal_imager_labels = ["m", "s", "s'", "tt", "f"]
IdealImager = namedtuple('IdealImager', ideal_imager_keys)
IdealImager.m.__doc__ = "(lateral) magnification"
IdealImager.s.__doc__ = "object distance from first principal plane, P1->Obj"
IdealImager.sp.__doc__ = "image distance from second principal plane, P2->Img"
IdealImager.tt.__doc__ = "total track length, tt = sp - s"
IdealImager.f.__doc__ = "focal length"
""" tuple grouping together first order specifications
Attributes:
m: (lateral) magnification
s: object distance from first principal plane, P1->Obj
sp: image distance from second principal plane, P2->Img
tt: total track length, tt = sp - s
f: focal length
"""
def ideal_imager_setup(**inputs):
""" Calculate the ideal imaging properties given two independent parameters
Given 2 system parameters from the following list, this function
calculates the remaining parameters.
Note that if specifying ``tt`` and ``f``, their ratio, tt/f, must be
greater than or equal to 4. A `ValueError` is raised otherwise.
For a typical system, the value of ``s`` is negative, i.e. the object is to
the left of the first principal plane.
Example::
In [3]: m1s1 = ideal_imager_setup(m=-0.5, s=-10.0); m1s1
Out[3]: IdealImager(m=-0.5, s=-10.0, sp=5.0, tt=15.0, f=3.333333333333)
In [4]: s_inf_efl = ideal_imager_setup(s=-math.inf, f=25.0); s_inf_efl
Out[4]: IdealImager(m=-0.0, s=-inf, sp=25.0, tt=inf, f=25.0)
Args:
m: (lateral) magnification
s: object distance from first principal plane, P1->Obj
sp: image distance from second principal plane, P2->Img
tt: total track length, tt = sp - s
f: focal length
Returns:
:class:`IdealImager` namedtuple
Raises:
ValueError: if tt/f < 4
"""
if 'm' in inputs:
m = inputs['m']
if 's' in inputs:
s = inputs['s']
sp = m*s
tt = sp - s
f = s*sp/(s - sp)
elif 'sp' in inputs:
sp = inputs['sp']
s = sp/m
tt = sp - s
f = s*sp/(s - sp)
elif 'tt' in inputs:
tt = inputs['tt']
s = tt/(m - 1)
sp = m*s
f = s*sp/(s - sp)
elif 'f' in inputs:
f = inputs['f']
tt = -f*(m - 1)**2/m
s = -f*(m - 1)/m # = tt/(m - 1)
sp = m*s
else:
return IdealImager(m, None, None, None, None)
elif 's' in inputs:
# arrange calculations so that s=-inf is handled gracefully
s = inputs['s']
if 'sp' in inputs:
sp = inputs['sp']
f = 1/(1/sp - 1/s)
m = sp/s
tt = sp - s
elif 'tt' in inputs:
tt = inputs['tt']
m = 1 + tt/s
sp = m*s
f = s*sp/(s - sp)
elif 'f' in inputs:
f = inputs['f']
m = f/(s + f)
sp = 1/(1/f + 1/s)
tt = sp - s
else:
return IdealImager(None, s, None, None, None)
elif 'sp' in inputs:
# arrange calculations so that sp=inf is handled gracefully
sp = inputs['sp']
if 'tt' in inputs:
tt = inputs['tt']
m = sp/(sp - tt)
s = sp/m
f = s*sp/(s - sp)
elif 'f' in inputs:
f = inputs['f']
m = (f - sp)/f
s = 1/(1/sp - 1/f)
tt = sp - s
else:
return IdealImager(None, None, sp, None, None)
elif 'tt' in inputs:
tt = inputs['tt']
if 'f' in inputs:
f = inputs['f']
ttf = tt/f
# tt/f >= 4, else no solution
# pick root (+) that gives |s|>=|sp|, i.e. -1 <= m < 0
m = ((2 - ttf) + math.sqrt(ttf*(ttf - 4)))/2
s = tt/(m - 1)
sp = m*s
else:
return IdealImager(None, None, None, tt, None)
elif 'f' in inputs:
f = inputs['f']
return IdealImager(None, None, None, None, f)
else:
return IdealImager(None, None, None, None, None)
return IdealImager(m, s, sp, tt, f)
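# Worked example following the tt/f branch above: tt=100 and f=25 give tt/f == 4,
# so the square root term vanishes and the unit-magnification solution is returned:
#   ideal_imager_setup(tt=100.0, f=25.0)
#   -> IdealImager(m=-1.0, s=-50.0, sp=50.0, tt=100.0, f=25.0)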
|
tests/propagate_uncertainty_test.py | sethvargo/vaex | 337 | 12662512 | <reponame>sethvargo/vaex
import vaex
def test_propagate_uncertainty():
ds = vaex.from_scalars(x=1, y=2, e_x=2, e_y=4)
ds['r'] = ds.x + ds.y
ds.propagate_uncertainties([ds.r])
print(ds.r_uncertainty.expression)
assert ds.r_uncertainty.expand().expression == 'sqrt(((e_x ** 2) + (e_y ** 2)))'
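    # For r = x + y, first-order error propagation gives
    # sigma_r = sqrt((dr/dx)^2 * e_x^2 + (dr/dy)^2 * e_y^2) = sqrt(e_x^2 + e_y^2),
    # which matches the expression asserted above.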
def test_matrix():
ds = vaex.from_scalars(x=1, y=0, z=0, x_e=0.1, y_e=0.2, z_e=0.3)
matrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
ds.add_virtual_columns_matrix3d(ds.x, ds.y, ds.z, 'xn', 'yn', 'zy', matrix)
ds.propagate_uncertainties([ds.xn])
assert ds.xn.values[0] == ds.x.values[0]
assert ds.xn_uncertainty.values[0] == ds.x_e.values[0]
ds = vaex.from_scalars(x=1, y=0, z=0, x_e=0.1, y_e=0.2, z_e=0.3)
matrix = [[0, 1, 0], [1, 0, 0], [0, 0, 1]]
ds.add_virtual_columns_matrix3d(ds.x, ds.y, ds.z, 'xn', 'yn', 'zy', matrix)
ds.propagate_uncertainties([ds.xn, ds.yn])
assert ds.xn.values[0] == ds.y.values[0]
assert ds.xn_uncertainty.values[0] == ds.y_e.values[0]
assert ds.yn.values[0] == ds.x.values[0]
assert ds.yn_uncertainty.values[0] == ds.x_e.values[0]
|
projects/Task017_CADA/scripts/prepare.py | joeranbosma/nnDetection | 242 | 12662518 | <reponame>joeranbosma/nnDetection
import os
import shutil
from pathlib import Path
import SimpleITK as sitk
from nndet.io import save_json
from nndet.utils.check import env_guard
from nndet.utils.info import maybe_verbose_iterable
def run_prep(source_data: Path, source_label: Path,
target_data_dir, target_label_dir: Path):
case_id = f"{(source_data.stem).rsplit('_', 1)[0]}"
shutil.copy(source_data, target_data_dir / f"{case_id}_0000.nii.gz")
shutil.copy(source_label, target_label_dir / f"{case_id}.nii.gz") # rename label file to match data
label_itk = sitk.ReadImage(str(source_label))
label_np = sitk.GetArrayFromImage(label_itk)
instances = {int(_id + 1): 0 for _id in range(label_np.max())}
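    # Every labelled instance id (1..N) is mapped to semantic class 0, the single
    # "aneurysm" class declared in dataset.json below.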
save_json({"instances": instances}, target_label_dir / f"{case_id}")
@env_guard
def main():
det_data_dir = Path(os.getenv('det_data'))
task_data_dir = det_data_dir / "Task017_CADA"
# setup raw paths
source_data_dir = task_data_dir / "raw" / "train_dataset"
if not source_data_dir.is_dir():
raise RuntimeError(f"{source_data_dir} should contain the raw data but does not exist.")
source_label_dir = task_data_dir / "raw" / "train_mask_images"
if not source_label_dir.is_dir():
raise RuntimeError(f"{source_label_dir} should contain the raw labels but does not exist.")
# setup raw splitted dirs
target_data_dir = task_data_dir / "raw_splitted" / "imagesTr"
target_data_dir.mkdir(exist_ok=True, parents=True)
target_label_dir = task_data_dir / "raw_splitted" / "labelsTr"
target_label_dir.mkdir(exist_ok=True, parents=True)
# prepare dataset info
meta = {
"name": "CADA",
"task": "Task017_CADA",
"target_class": None,
"test_labels": False,
"labels": {"0": "aneurysm"},
"modalities": {"0": "CT"},
"dim": 3,
}
save_json(meta, task_data_dir / "dataset.json")
# prepare data & label
case_ids = [(p.stem).rsplit('_', 1)[0] for p in source_data_dir.glob("*.nii.gz")]
print(f"Found {len(case_ids)} case ids")
for cid in maybe_verbose_iterable(case_ids):
run_prep(
source_data=source_data_dir / f"{cid}_orig.nii.gz",
source_label=source_label_dir / f"{cid}_labeledMasks.nii.gz",
target_data_dir=target_data_dir,
target_label_dir=target_label_dir,
)
if __name__ == "__main__":
main()
|
test_project/select2_nestedadmin/__init__.py | epoiate/django-autocomplete-light | 1,368 | 12662590 | default_app_config = 'select2_nestedadmin.apps.TestApp'
|
scripts/generateTestcase.py | DataFinnovation/Arelle | 292 | 12662593 | <reponame>DataFinnovation/Arelle<filename>scripts/generateTestcase.py
#!/usr/bin/env python
#
# this script generates a testcase variations file for entry point checking
#
import os, fnmatch, xml.dom.minidom, datetime
def main():
# the top directory where to generate the test case (and relative file names in the variations)
topDirectory = "C:\\temp\\editaxonomy20110314"
testcaseName = "EDInet test cases"
ownerName = "<NAME>"
ownerEmail = "<EMAIL>"
entryRelativeFilePaths = []
for root, dirs, files in os.walk(topDirectory):
for fileName in files:
if fnmatch.fnmatch(fileName, '*.xsd'):
fullFilePath = os.path.join(root, fileName)
entryRelativeFilePaths.append( os.path.relpath(fullFilePath, topDirectory) )
lines = [
'<?xml version="1.0" encoding="UTF-8"?>',
'<!-- Copyright 2011 XBRL International. All Rights Reserved. -->',
'<?xml-stylesheet type="text/xsl" href="http://www.xbrl.org/Specification/formula/REC-2009-06-22/conformance/infrastructure/test.xsl"?>',
'<testcase name="{0}" date="{1}" '.format(testcaseName,datetime.date.today()),
' xmlns="http://xbrl.org/2008/conformance"',
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"',
' xsi:schemaLocation="http://xbrl.org/2008/conformance http://www.xbrl.org/Specification/formula/REC-2009-06-22/conformance/infrastructure/test.xsd">',
' <creator>',
' <name>{0}</name>'.format(ownerName),
' <email>{0}</email>'.format(ownerEmail),
' </creator>',
    ' <name>{0}</name>'.format(testcaseName),
' <description>{0}</description>'.format(testcaseName),
]
num = 1
for entryFile in entryRelativeFilePaths:
fileName = os.path.basename(entryFile)
lines.append(" <variation name='{0}' id='V-{1}'>".format(fileName, num))
num += 1
lines.append(" <description>{0}</description>".format(fileName))
lines.append(" <data>")
lines.append(" <xsd readMeFirst='true'>{0}</xsd>".format(entryFile.replace("\\","/")))
lines.append(" </data>")
lines.append(" <result expected='valid'/>")
lines.append(" </variation>")
lines.append('</testcase>')
with open( os.path.join(topDirectory, "testcase.xml"), "w") as fh:
fh.write('\n'.join(lines))
if __name__ == "__main__":
main()
|
leetcode.com/python/528_Random_Pick_with_Weight.py | vansh-tiwari/coding-interview-gym | 713 | 12662596 | <filename>leetcode.com/python/528_Random_Pick_with_Weight.py<gh_stars>100-1000
import bisect
import random
class Solution(object):
def __init__(self, w):
"""
:type w: List[int]
"""
self.prefisSum = w
for i in range(1, len(self.prefisSum)):
self.prefisSum[i] = self.prefisSum[i] + self.prefisSum[i - 1]
def pickIndex(self):
"""
:rtype: int
"""
target = random.randint(1, self.prefisSum[-1])
return bisect.bisect_left(self.prefisSum, target)
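# Illustrative example: w = [1, 3] builds prefix sums [1, 4]; a random target in
# [1, 4] maps to index 0 with probability 1/4 and index 1 with probability 3/4,
# i.e. picks are proportional to the weights.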
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex() |
docs/conf.py | bvanhou/infrastructure-components | 101 | 12662643 |
master_doc = 'index'
project = u'Infrastructure-Components'
copyright = '2019, <NAME>'
htmlhelp_basename = 'Infrastructure-Components-Doc'
language = 'en'
gettext_compact = False
html_theme = 'sphinx_rtd_theme'
#html_logo = 'img/logo.svg'
html_theme_options = {
'logo_only': True,
'display_version': False,
}
# sphinx-notfound-page
# https://github.com/rtfd/sphinx-notfound-page
notfound_context = {
'title': 'Page Not Found',
'body': '''
<h1>Page Not Found</h1>
<p>Sorry, we couldn't find that page.</p>
<p>Try using the search box or go to the homepage.</p>
''',
} |
demo/fourier_poisson1D.py | spectralDNS/shenfun | 138 | 12662654 | <reponame>spectralDNS/shenfun
r"""
Solve Poisson equation on (-2\pi, 2\pi) with periodic bcs
.. math::
\nabla^2 u = f, u(2\pi) = u(-2\pi)
Use Fourier basis and find u in V such that::
(v, div(grad(u))) = (v, f) for all v in V
V is the Fourier basis span{exp(1jkx)}_{k=-N/2}^{N/2-1}
Use the method of manufactured solutions, and choose a
solution that is either real or complex.
"""
import os
from sympy import Symbol, cos, sin, lambdify
import numpy as np
from shenfun import inner, grad, TestFunction, TrialFunction, FunctionSpace, Function, \
Array
# Use sympy to compute a rhs, given an analytical solution
x = Symbol("x", real=True)
ue = cos(4*x) + 1j*sin(6*x)
#ue = cos(4*x)
fe = ue.diff(x, 2)
# Size of discretization
N = 40
dtype = {True: complex, False: float}[ue.has(1j)]
ST = FunctionSpace(N, dtype=dtype, domain=(-2*np.pi, 2*np.pi))
u = TrialFunction(ST)
v = TestFunction(ST)
# Get f on quad points and exact solution
fj = Array(ST, buffer=fe)
uj = Array(ST, buffer=ue)
# Compute right hand side
f_hat = Function(ST)
f_hat = inner(v, fj, output_array=f_hat)
# Solve Poisson equation
A = inner(grad(v), grad(u))
u_hat = Function(ST)
u_hat = A.solve(-f_hat, u_hat)
uq = ST.backward(u_hat)
u_hat = ST.forward(uq, u_hat, fast_transform=False)
uq = ST.backward(u_hat, uq, fast_transform=False)
assert np.allclose(uj, uq)
point = np.array([0.1, 0.2])
p = ST.eval(point, u_hat)
assert np.allclose(p, lambdify(x, ue)(point))
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
plt.figure()
X = ST.mesh()
plt.plot(X, uj.real)
plt.title("U")
plt.figure()
plt.plot(X, (uq - uj).real)
plt.title("Error")
plt.show()
|
tests/test_providers_package.py | xkortex/ulid | 303 | 12662678 | <gh_stars>100-1000
"""
test_providers_package
~~~~~~~~~~~~~~~~~~~~~~
Tests for the :mod:`~ulid.providers` package.
"""
from ulid import providers
from ulid.providers import default, monotonic
def test_package_has_dunder_all():
"""
Assert that :pkg:`~ulid.providers` exposes the :attr:`~ulid.providers.__all__` attribute as a list.
"""
assert hasattr(providers, '__all__')
assert isinstance(providers.__all__, list)
def test_package_exposes_expected_interface():
"""
Assert that :attr:`~ulid.providers.__all__` exposes expected interface.
"""
assert providers.__all__ == ['Provider', 'DEFAULT', 'MICROSECOND', 'MONOTONIC']
def test_package_has_default_provider():
"""
Assert :attr:`~ulid.providers.DEFAULT` is a :class:`~ulid.providers.default.Provider` instance.
"""
assert isinstance(providers.DEFAULT, default.Provider)
def test_package_has_monotonic_provider():
"""
Assert :attr:`~ulid.providers.MONOTONIC` is a :class:`~ulid.providers.monotonic.Provider` instance.
"""
assert isinstance(providers.MONOTONIC, monotonic.Provider)
|
venv/Lib/site-packages/langdetect/tests/test_detector.py | GuilhermeJC13/storIA | 1,269 | 12662702 | <filename>venv/Lib/site-packages/langdetect/tests/test_detector.py
import unittest
import six
from langdetect.detector_factory import DetectorFactory
from langdetect.utils.lang_profile import LangProfile
class DetectorTest(unittest.TestCase):
TRAINING_EN = 'a a a b b c c d e'
TRAINING_FR = 'a b b c c c d d d'
TRAINING_JA = six.u('\u3042 \u3042 \u3042 \u3044 \u3046 \u3048 \u3048')
JSON_LANG1 = '{"freq":{"A":3,"B":6,"C":3,"AB":2,"BC":1,"ABC":2,"BBC":1,"CBA":1},"n_words":[12,3,4],"name":"lang1"}'
JSON_LANG2 = '{"freq":{"A":6,"B":3,"C":3,"AA":3,"AB":2,"ABC":1,"ABA":1,"CAA":1},"n_words":[12,5,3],"name":"lang2"}'
def setUp(self):
self.factory = DetectorFactory()
profile_en = LangProfile('en')
for w in self.TRAINING_EN.split():
profile_en.add(w)
self.factory.add_profile(profile_en, 0, 3)
profile_fr = LangProfile('fr')
for w in self.TRAINING_FR.split():
profile_fr.add(w)
self.factory.add_profile(profile_fr, 1, 3)
profile_ja = LangProfile('ja')
for w in self.TRAINING_JA.split():
profile_ja.add(w)
self.factory.add_profile(profile_ja, 2, 3)
def test_detector1(self):
detect = self.factory.create()
detect.append('a')
self.assertEqual(detect.detect(), 'en')
def test_detector2(self):
detect = self.factory.create()
detect.append('b d')
self.assertEqual(detect.detect(), 'fr')
def test_detector3(self):
detect = self.factory.create()
detect.append('d e')
self.assertEqual(detect.detect(), 'en')
def test_detector4(self):
detect = self.factory.create()
detect.append(six.u('\u3042\u3042\u3042\u3042a'))
self.assertEqual(detect.detect(), 'ja')
def test_lang_list(self):
langlist = self.factory.get_lang_list()
self.assertEqual(len(langlist), 3)
self.assertEqual(langlist[0], 'en')
self.assertEqual(langlist[1], 'fr')
self.assertEqual(langlist[2], 'ja')
def test_factory_from_json_string(self):
self.factory.clear()
profiles = [self.JSON_LANG1, self.JSON_LANG2]
self.factory.load_json_profile(profiles)
langlist = self.factory.get_lang_list()
self.assertEqual(len(langlist), 2)
self.assertEqual(langlist[0], 'lang1')
self.assertEqual(langlist[1], 'lang2')
|
testing/MLDB-2161-utf8-in-script-apply.py | kstepanmpmg/mldb | 665 | 12662727 | <filename>testing/MLDB-2161-utf8-in-script-apply.py
# coding=utf-8 #
# MLDB-2161-utf8-in-script-apply.py
# <NAME>, 2017-03-08
# This file is part of MLDB. Copyright 2017 mldb.ai inc. All rights reserved.
#
from mldb import mldb, MldbUnitTest, ResponseException
class MLDB2161Utf8InScriptApply(MldbUnitTest): # noqa
def test_python_script_apply_with_utf8(self):
mldb.put("/v1/functions/filter_top_themes", {
"type": "script.apply",
"params": {
"language": 'python',
"scriptConfig": {
"source": """
from mldb import mldb
# retrieve all themes
mldb.log(mldb.script.args)
request.set_return([[str(mldb.script.args[0][1]), 0, '1970-01-01T00:00:00.0000000Z']])
"""
}
}
})
self.assertTableResultEquals(mldb.query("""
SELECT filter_top_themes(
{{"Politique Provinciale":2, "Élections":1, "Thèmes et sous-thàmes":0} AS args}
) AS *
"""),
[
[
"_rowName",
"return.['Thèmes et sous-thàmes', [0, '-Inf']]"
],
[
"result",
0
]
]
)
if __name__ == '__main__':
mldb.run_tests()
|
a00_Bert/train_bert_toy_task.py | sunshinenum/text_classification | 7,723 | 12662747 | # coding=utf-8
"""
train bert model
"""
import modeling
import tensorflow as tf
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Describe your program')
parser.add_argument('-batch_size', '--batch_size', type=int,default=128)
args = parser.parse_args()
batch_size=args.batch_size
print("batch_size:",batch_size)
def bert_train_fn():
is_training=True
hidden_size = 768
num_labels = 10
#batch_size=128
max_seq_length=512
use_one_hot_embeddings = False
bert_config = modeling.BertConfig(vocab_size=21128, hidden_size=hidden_size, num_hidden_layers=12,
num_attention_heads=12,intermediate_size=3072)
input_ids = tf.placeholder(tf.int32, [batch_size, max_seq_length], name="input_ids")
input_mask = tf.placeholder(tf.int32, [batch_size, max_seq_length], name="input_mask")
segment_ids = tf.placeholder(tf.int32, [batch_size,max_seq_length],name="segment_ids")
label_ids = tf.placeholder(tf.float32, [batch_size,num_labels], name="label_ids")
loss, per_example_loss, logits, probabilities, model = create_model(bert_config, is_training, input_ids, input_mask,
segment_ids, label_ids, num_labels,
use_one_hot_embeddings)
# 1. generate or load training/validation/test data. e.g. train:(X,y). X is input_ids,y is labels.
# 2. train the model by calling create model, get loss
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True
sess = tf.Session(config=gpu_config)
sess.run(tf.global_variables_initializer())
for i in range(1000):
input_ids_=np.ones((batch_size,max_seq_length),dtype=np.int32)
input_mask_=np.ones((batch_size,max_seq_length),dtype=np.int32)
segment_ids_=np.ones((batch_size,max_seq_length),dtype=np.int32)
label_ids_=np.ones((batch_size,num_labels),dtype=np.float32)
feed_dict = {input_ids: input_ids_, input_mask: input_mask_,segment_ids:segment_ids_,label_ids:label_ids_}
loss_ = sess.run([loss], feed_dict)
print("loss:",loss_)
# 3. eval the model from time to time
def bert_predict_fn():
# 1. predict based on
pass
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable("output_weights", [num_labels, hidden_size],initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable("output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training: # if training, add dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
print("output_layer:",output_layer.shape,";output_weights:",output_weights.shape,";logits:",logits.shape)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
per_example_loss=tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
loss = tf.reduce_mean(per_example_loss)
return loss, per_example_loss, logits, probabilities,model
bert_train_fn()
|
tools/build_defs/fb_python_library.bzl | tjzhou23/profilo | 1,466 | 12662816 | def fb_python_library(name, **kwargs):
native.python_library(
name = name,
**kwargs
)
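# Illustrative usage from a BUCK file; the target name, srcs and deps below are
# assumptions and do not come from the profilo repo:
#
# fb_python_library(
#     name = "example_lib",
#     srcs = ["example.py"],
#     deps = [":example_dep"],
# )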
|
angrgdb/explore.py | janbbeck/angrgdb | 170 | 12662828 | <filename>angrgdb/explore.py<gh_stars>100-1000
from cmd import Cmd
class GUICallbackBaseClass():
def update_ip(self, ip):
pass
class BinjaCallback(GUICallbackBaseClass):
def __init__(self, bv):
self.bv = bv
def update_ip(self, ip):
self.bv.file.navigate(self.bv.file.view, ip)
def red(text):
return "\x1b[0;31m" + text + "\x1b[0m"
class ExploreInteractive(Cmd, object):
intro = red("[!] Dropping into angr shell\n")
intro += red("Available Commands: print, pyshell, (p)ick, (r)un, (s)tep, stepi, (q)uit")
prompt = red(">>> ")
def __init__(self, proj, state, gui_callback_object=GUICallbackBaseClass()):
super(ExploreInteractive, self).__init__()
self.proj = proj
self.simgr = proj.factory.simulation_manager(state)
if "deferred" not in self.simgr.stashes:
self.simgr.stashes["deferred"] = []
self.gui_cb = gui_callback_object
@property
def state(self):
"""
Alias to `self.simgr.one_active`
:return:
"""
return self.simgr.one_active
def _clearScreen(self):
print("\033[H\033[J")
def do_quit(self, args):
"""Quits the cli."""
print(red("Exiting cmd-loop"))
return True
def do_q(self, args):
self.do_quit(args)
return True
def do_print(self, arg):
"""
print [state_number]
Prints a state
state_number optionally specifies the state to print if multiple are available
"""
if not arg:
arg = "0"
pick = int(arg)
active = len(self.simgr.active)
if pick >= active:
print(red("Only {} active state(s), indexed from 0".format(active)))
else:
self.simgr.active[pick].context_view.pprint()
self.gui_cb.update_ip(self.simgr.active[pick].addr)
def do_stepi(self, args):
"""
stepi
Steps one instruction
"""
if len(self.simgr.active) == 1:
self.simgr.step(num_inst=1)
self._clearScreen()
self.simgr.one_active.context_view.pprint()
self.gui_cb.update_ip(self.simgr.one_active.addr)
elif len(self.simgr.active) > 1:
for idx, state in enumerate(self.simgr.active):
print(state.context_view.pstr_branch_info(idx))
def do_step(self, args):
"""
step
Steps the current state one basic block
"""
if len(self.simgr.active) == 1:
self.simgr.step()
self._clearScreen()
self.simgr.one_active.context_view.pprint()
self.gui_cb.update_ip(self.simgr.one_active.addr)
elif len(self.simgr.active) > 1:
for idx, state in enumerate(self.simgr.active):
print(state.context_view.pstr_branch_info(idx))
def do_s(self, args):
self.do_step(args)
def do_run(self, args):
"""
run [state_number]
Runs until a branch is encountered
state_number optionally picks a state if multiple are available
"""
if len(self.simgr.active) > 1 and args:
self.do_pick(args)
if len(self.simgr.active) == 1:
self.simgr.run(until=lambda s: len(s.active) != 1)
if self.simgr.active:
self.gui_cb.update_ip(self.simgr.one_active.addr)
if len(self.simgr.active) > 0:
for i, state in enumerate(self.simgr.active):
print(state.context_view.pstr_branch_info(i))
else:
print(red("STATE FINISHED EXECUTION"))
if len(self.simgr.stashes["deferred"]) == 0:
print(red("No states left to explore"))
else: # DFS-style like
print(red("Other side of last branch has been added to {}".format(self.simgr)))
self.simgr.stashes["active"].append(self.simgr.stashes["deferred"].pop())
def do_r(self, args):
self.do_run(args)
def do_pick(self, arg):
"""
pick <state_number>
Selects a state to continue if multiple are available, the other state is saved
"""
try:
pick = int(arg)
ip = self.simgr.active[pick].regs.ip
except:
print("Invalid Choice: "+red("{}".format(arg))+", for {}".format(self.simgr))
return False
print(red("Picking state with ip: " + (str(ip))))
self.simgr.move(from_stash='active',
to_stash="deferred",
filter_func=lambda x: x.solver.eval(ip != x.regs.ip))
self.simgr.step()
self._clearScreen()
self.simgr.one_active.context_view.pprint()
def do_p(self, args):
self.do_pick(args)
def do_pyshell(self, args):
import gdb
gdb.execute('pi')
def do_EOF(self, args):
self.do_quit(args)
return True
|
third_party/bi_att_flow/my/tensorflow/__init__.py | jmrf/active-qa | 327 | 12662864 | from third_party.bi_att_flow.my.tensorflow import * |
tests/pykafka/utils/test_struct_helpers.py | Instamojo/pykafka | 1,174 | 12662878 | import unittest2
from pykafka.utils import struct_helpers
class StructHelpersTests(unittest2.TestCase):
def test_basic_unpack(self):
output = struct_helpers.unpack_from(
'iiqhi',
b'\x00\x00\x00\x01\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x00\n\x00<\x00\x00\x00\x04'
)
self.assertEqual(output, (1, 10, 10, 60, 4))
def test_string_encoding(self):
output = struct_helpers.unpack_from('S', b'\x00\x04test')
self.assertEqual(output, (b'test',))
def test_bytearray_unpacking(self):
output = struct_helpers.unpack_from('Y', b'\x00\x00\x00\x04test')
self.assertEqual(output, (b'test',))
def test_array_unpacking(self):
output = struct_helpers.unpack_from(
'[i]',
b'\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04'
)
        # The array is unpacked into a flat list of its elements
self.assertEqual(output, [1, 2, 3, 4])
if __name__ == '__main__':
unittest2.main()
|
src/richie/plugins/lti_consumer/urls.py | leduong/richie | 174 | 12662913 | <reponame>leduong/richie<filename>src/richie/plugins/lti_consumer/urls.py
"""LTI Consumer plugin URLs configuration."""
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from . import models
from .api import LTIConsumerViewsSet
router = DefaultRouter()
router.register(
models.LTIConsumer.RESOURCE_NAME,
LTIConsumerViewsSet,
basename="lti-consumer",
)
url_patterns = [path("", include(router.urls))]
|
credentials.py | ghzwireless/control | 293 | 12662924 | <filename>credentials.py
#!/usr/bin/env python
"""credentials - the login credentials for all of the modules are stored here and imported into each module.
Please be sure that you are using restricted accounts (preferably with read-only access) to your servers.
"""
__author__ = '<EMAIL> (<NAME>)'
# VMware
VMWARE_VCENTER_USERNAME = "domain\\username"
VMWARE_VCENTER_PASSWORD = "<PASSWORD>"
# SNMP Community String (Read-Only)
SNMP_COMMUNITY = "public"
# Tintri
TINTRI_USER = "youraccount"
TINTRI_PASSWORD = "<PASSWORD>"
# Workdesk MySQL
WORKDESK_USER = 'youraccount'
WORKDESK_PASSWORD = '<PASSWORD>'
# Rubrik
RUBRIK_USER = 'youraccount'
RUBRIK_PASSWORD = '<PASSWORD>'
# Nutanix
NUTANIX_USER = 'youraccount'
NUTANIX_PASSWORD = '<PASSWORD>'
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_qlpreviewpanel.py | albertz/music-player | 132 | 12662937 |
from PyObjCTools.TestSupport import *
import objc
from Foundation import NSObject
try:
from Quartz import *
except ImportError:
pass
class TestQLPreviewPanelHelper (NSObject):
def acceptsPreviewPanelControl_(self, panel): return 1
def previewPanel_handleEvent_(self, panel, event): return 1
def previewPanel_sourceFrameOnScreenForPreviewItem_(self, panel, item): return 1
def previewPanel_transitionImageForPreviewItem_contentRect_(self, panel, item, rect): return 1
class TestQLPreviewPanel (TestCase):
@min_os_level('10.6')
def testClasses(self):
self.assertIsInstance(QLPreviewPanel, objc.objc_class)
@min_os_level('10.6')
def testMethods(self):
self.assertResultIsBOOL(QLPreviewPanel.sharedPreviewPanelExists)
self.assertResultIsBOOL(QLPreviewPanel.enterFullScreenMode_withOptions_)
self.assertResultIsBOOL(QLPreviewPanel.isInFullScreenMode)
self.assertResultIsBOOL(TestQLPreviewPanelHelper.acceptsPreviewPanelControl_)
self.assertResultIsBOOL(TestQLPreviewPanelHelper.previewPanel_handleEvent_)
self.assertResultHasType(TestQLPreviewPanelHelper.previewPanel_sourceFrameOnScreenForPreviewItem_, NSRect.__typestr__)
self.assertArgHasType(TestQLPreviewPanelHelper.previewPanel_transitionImageForPreviewItem_contentRect_, 2, objc._C_PTR + NSRect.__typestr__)
if __name__ == "__main__":
main()
|
data/TACoS/merge_npys_to_hdf5.py | frostinassiky/2D-TAN | 249 | 12662989 | <filename>data/TACoS/merge_npys_to_hdf5.py
import glob
import h5py
import numpy as np
import os
import tqdm
import json
def convert_tall_c3d_features(sampling_rate):
stride = sampling_rate//5
data_root = "./data/TACoS/"
hdf5_file = h5py.File(os.path.join(data_root, 'tall_c3d_{}_features.hdf5'.format(sampling_rate)), 'w')
with open(os.path.join(data_root,'train.json')) as json_file:
annotation = json.load(json_file)
with open(os.path.join(data_root, 'val.json')) as json_file:
annotation.update(json.load(json_file))
with open(os.path.join(data_root, 'test.json')) as json_file:
annotation.update(json.load(json_file))
pbar = tqdm.tqdm(total=len(annotation))
for vid, anno in annotation.items():
video_feature = []
for i in range(0,(anno['num_frames']-sampling_rate)//stride+1):
s_idx = i*stride+1
e_idx = s_idx + sampling_rate
clip_path = os.path.join(data_root, 'Interval64_128_256_512_overlap0.8_c3d_fc6','{}_{}_{}.npy'.format(vid, s_idx, e_idx))
frame_feat = np.load(clip_path)
video_feature.append(frame_feat)
video_feature = np.stack(video_feature)
hdf5_file.create_dataset(vid, data=video_feature, compression="gzip")
pbar.update(1)
pbar.close()
hdf5_file.close()
if __name__ == '__main__':
convert_tall_c3d_features(64)
|
benchmarks/sparse/dlmc/utils.py | Hacky-DH/pytorch | 60,067 | 12663004 | import torch
from pathlib import Path
from scipy import sparse
import math
def to_coo_scipy(x):
indices_1 = x._indices().numpy()
values_1 = x._values().numpy()
return sparse.coo_matrix((values_1, (indices_1[0], indices_1[1])),
shape=x.shape)
def sparse_grad_output(a, b):
c = torch.sparse.mm(a, b)
if c.is_sparse:
c2 = torch.rand_like(c.to_dense())
return c2.sparse_mask(c.coalesce())
else:
return torch.rand_like(c)
def read_matrix_params(path):
with open(path, 'r') as file:
line = file.readline()
nrows, ncols, nnz = map(lambda el: int(el), line.split(', '))
return (nrows, ncols), nnz
def csr_to_coo(indices, indptr, shape):
n_rows, n_cols = shape
cols = indices
rows = [0] * len(cols)
for i in range(n_rows):
for j in range(indptr[i], indptr[i + 1]):
rows[j] = i
return torch.tensor([rows, cols], dtype=torch.long)
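# Illustrative example (not from the benchmark data): a 2x3 matrix with nonzeros at
# (0, 0), (0, 2) and (1, 1) is stored in CSR form as indices = [0, 2, 1],
# indptr = [0, 2, 3], shape = (2, 3); csr_to_coo then returns
# tensor([[0, 0, 1], [0, 2, 1]]), i.e. the stacked (row, col) coordinates.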
def load_sparse_matrix(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
index_pointers = list(index_pointers)
indices = list(indices)
data = torch.randn(nnz, dtype=torch.double)
shape = (nrows, ncols)
return torch.sparse_coo_tensor(csr_to_coo(indices, index_pointers, shape), data, shape, device=device)
def gen_vector(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
return torch.randn(nrows, dtype=torch.double, device=device)
def gen_matrix(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
return torch.randn(nrows, ncols, dtype=torch.double, device=device)
def load_spmv_dataset(dataset_path, hidden_size, sparsity, device, n_limit=math.inf):
"""load_spmv_dataset loads a DLMC dataset for a sparse matrix-vector multiplication (SPMV) performance test.
Args:
dataset_path:
path of the dataset from DLMC collection.
hidden_size
This value allows tensors of varying sizes.
sparsity:
This value allows tensors of varying sparsities.
device:
Whether to place the Tensor on a GPU or CPU.
n_limit:
This value allows a dataset with some limit size.
"""
current_folder_path = f"{dataset_path}/{sparsity}"
path = Path(current_folder_path)
files = path.glob('**/*.smtx')
print(dataset_path, hidden_size, sparsity)
index = 0
x_files, y_files = [], []
for f in files:
if index >= n_limit:
break
print('.', end='')
size, nnz = read_matrix_params(f.as_posix())
if size[1] == hidden_size:
x_files.append(f.as_posix())
if size[0] == hidden_size:
y_files.append(f.as_posix())
index += 1
print()
for fx, fy in zip(x_files, y_files):
x = load_sparse_matrix(fx, device)
y = gen_vector(fy, device)
yield (x, y)
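# Minimal usage sketch for the SPMV loader above. The dataset path, hidden size and
# sparsity are assumptions and must point at a local copy of the DLMC collection:
#
#   for x, y in load_spmv_dataset('dlmc/rn50/magnitude_pruning', hidden_size=2048,
#                                 sparsity='0.8', device='cpu', n_limit=5):
#       print(x.shape, y.shape)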
def load_spmm_dataset(dataset_path, hidden_size, sparsity, spmm_type, device, n_limit=math.inf):
"""load_spmm_dataset loads a DLMC dataset for a sparse matrix-matrix multiplication (SPMM) performance test.
Args:
dataset_path:
path of the dataset from DLMC collection.
hidden_size
This value allows tensors of varying sizes.
sparsity:
This value allows tensors of varying sparsities.
spmm_type:
This value allows tensors for `sparse@sparse` or `sparse@dense` operations.
device:
Whether to place the Tensor on a GPU or CPU.
n_limit:
This value allows a dataset with some limit size.
"""
current_folder_path = f"{dataset_path}/{sparsity}"
path = Path(current_folder_path)
files = path.glob('**/*.smtx')
print(dataset_path, hidden_size, sparsity)
index = 0
x_files, y_files = [], []
for f in files:
if index >= n_limit:
break
print('.', end='')
size, nnz = read_matrix_params(f.as_posix())
if size[1] == hidden_size:
x_files.append(f.as_posix())
if size[0] == hidden_size:
y_files.append(f.as_posix())
index += 1
print()
for fx, fy in zip(x_files, y_files):
x = load_sparse_matrix(fx, device)
y = gen_matrix(fy, device) if spmm_type == 'sparse@dense' else load_sparse_matrix(fy, device)
yield (x, y)
def load_dlmc_dataset(dataset_path, operation, hidden_size, sparsity, device, requires_grad, n_limit=math.inf):
"""load_dlmc_dataset loads a DLMC dataset for a matmul performance test.
Args:
dataset_path:
path of the dataset from DLMC collection.
operation:
This value allows tensors for `sparse@sparse`|`sparse@dense`|`sparse@vector` operations.
hidden_size
This value allows tensors of varying sizes.
sparsity:
This value allows tensors of varying sparsities.
device:
Whether to place the Tensor on a GPU or CPU.
requires_grad:
Loads the dataset for backward test.
n_limit:
This value allows a dataset with some limit size.
"""
if operation == 'sparse@sparse' or operation == "sparse@dense":
collection = load_spmm_dataset(dataset_path, hidden_size, sparsity, operation, device, n_limit)
elif operation == 'sparse@vector':
collection = load_spmv_dataset(dataset_path, hidden_size, sparsity, device, n_limit)
scipy_vars = {}
backward_vars = {}
for x, y in collection:
if device == 'cpu':
scipy_vars = {
"sx": to_coo_scipy(x) if x.is_sparse else x.numpy(),
"sy": to_coo_scipy(y) if y.is_sparse else y.numpy(),
}
if not requires_grad:
dx = x.to_dense() if x.is_sparse else x
dy = y.to_dense() if y.is_sparse else y
else:
c = sparse_grad_output(x, y)
backward_vars = {
"sparse_grad_output": c,
"grad_output": c.to_dense() if c.is_sparse else c,
}
x.requires_grad_(True)
y.requires_grad_(True)
dx = x.to_dense().detach() if x.is_sparse else x.clone().detach()
dy = y.to_dense().detach() if y.is_sparse else y.clone().detach()
dx.requires_grad_(True)
dy.requires_grad_(True)
yield {
"x": x,
"y": y,
"dx": dx,
"dy": dy,
**scipy_vars,
**backward_vars
}
|
archivebox/cli/archivebox_config.py | sarvex/ArchiveBox | 6,340 | 12663029 | <filename>archivebox/cli/archivebox_config.py
#!/usr/bin/env python3
__package__ = 'archivebox.cli'
__command__ = 'archivebox config'
import sys
import argparse
from typing import Optional, List, IO
from ..main import config
from ..util import docstring
from ..config import OUTPUT_DIR
from ..logging_util import SmartFormatter, accept_stdin
@docstring(config.__doc__)
def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional[str]=None) -> None:
parser = argparse.ArgumentParser(
prog=__command__,
description=config.__doc__,
add_help=True,
formatter_class=SmartFormatter,
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--get', #'-g',
action='store_true',
help="Get the value for the given config KEYs",
)
group.add_argument(
'--set', #'-s',
action='store_true',
help="Set the given KEY=VALUE config values",
)
group.add_argument(
'--reset', #'-s',
action='store_true',
help="Reset the given KEY config values to their defaults",
)
parser.add_argument(
'config_options',
nargs='*',
type=str,
help='KEY or KEY=VALUE formatted config values to get or set',
)
command = parser.parse_args(args or ())
config_options_str = ''
if not command.config_options:
config_options_str = accept_stdin(stdin)
config(
config_options_str=config_options_str,
config_options=command.config_options,
get=command.get,
set=command.set,
reset=command.reset,
out_dir=pwd or OUTPUT_DIR,
)
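# Illustrative command lines handled by this entry point (the config key is an
# example, not a complete list of ArchiveBox options):
#
#   archivebox config                     # print the whole config
#   archivebox config --get TIMEOUT       # print a single value
#   archivebox config --set TIMEOUT=120   # update a single value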
if __name__ == '__main__':
main(args=sys.argv[1:], stdin=sys.stdin)
|
test/shared/test_utils.py | AMHesch/aws-allowlister | 180 | 12663033 | import json
import unittest
from aws_allowlister.shared.utils import clean_service_name, get_service_name_matching_iam_service_prefix, \
clean_service_name_after_brackets_and_parentheses, chomp_keep_single_spaces, chomp
class UtilsTestCase(unittest.TestCase):
def test_get_service_name_matching_iam_service_prefix(self):
iam_service_prefix = "s3"
result = get_service_name_matching_iam_service_prefix(iam_service_prefix)
print(json.dumps(result, indent=4))
self.assertEqual(result, "Amazon S3")
service_name_pairs = {
"a4b": "<NAME>",
"access-analyzer": "IAM Access Analyzer",
"account": "AWS Accounts",
"acm": "AWS Certificate Manager"
# .. etc.
# Try opening the SQLite database in DB Browser for SQLite to examine it more.
# And view the table called compliancetable
}
for iam_service_prefix in list(service_name_pairs.keys()):
# service prefix is like a4b, access-analyzer, etc.
result = get_service_name_matching_iam_service_prefix(iam_service_prefix)
self.assertEqual(result, service_name_pairs.get(iam_service_prefix))
print(f"{iam_service_prefix}: {result}")
def test_chomp(self):
result = chomp_keep_single_spaces("DoD CC SRG")
print(result)
#
# def test_normalize_tags_or_strings(self):
# print()
def test_clean_service_name_non_breaking_spaces(self):
result = clean_service_name('AWS Amplify\u00a0')
self.assertEqual(result, "AWS Amplify")
# def test_clean_service_name_remove_text_after_bracket(self):
# # Example: Amazon Aurora on https://aws.amazon.com/compliance/hipaa-eligible-services-reference/
# result = clean_service_name('Amazon Aurora [MySQL, PostgreSQL]')
# self.assertEqual(result, "Amazon Aurora")
#
# def test_clean_service_name_remove_text_after_parentheses(self):
# # Example: Alexa for Business on https://aws.amazon.com/compliance/hipaa-eligible-services-reference/
# result = clean_service_name('Alexa for Business (for healthcare skills only – requires Alexa Skills BAA. See '
# 'HIPAA whitepaper for details)')
# self.assertEqual(result, "Alexa for Business")
def test_clean_service_name_tabs_and_newlines(self):
# Make sure tabs and newlines are removed properly
result = clean_service_name('\n\n\t\tAmazon API Gateway\t\n')
self.assertEqual(result, "Amazon API Gateway")
result = clean_service_name('Amazon API Gateway\n')
self.assertTrue(result == "Amazon API Gateway")
def test_clean_service_name_text_after_brackets_and_parentheses(self):
# Example: Amazon Aurora on https://aws.amazon.com/compliance/hipaa-eligible-services-reference/
result = clean_service_name_after_brackets_and_parentheses('Amazon Aurora [MySQL, PostgreSQL]')
self.assertEqual(result, "Amazon Aurora")
# Example: Alexa for Business on https://aws.amazon.com/compliance/hipaa-eligible-services-reference/
result = clean_service_name_after_brackets_and_parentheses('Alexa for Business (for healthcare skills '
'only – requires Alexa Skills BAA. See HIPAA '
'whitepaper for details)')
self.assertEqual(result, "Alexa for Business")
# Make sure tabs and newlines are removed properly
result = clean_service_name_after_brackets_and_parentheses('\n\n\t\tAmazon API Gateway\t\n')
self.assertEqual(result, "Amazon API Gateway")
result = clean_service_name_after_brackets_and_parentheses('Amazon API Gateway\n')
self.assertTrue(result == "Amazon API Gateway") |
src/gausskernel/dbmind/tools/sqldiag/main.py | Yanci0/openGauss-server | 360 | 12663038 | """
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
import argparse
import logging
import sys
from configparser import ConfigParser
from algorithm.diag import SQLDiag
from utils import ResultSaver, is_valid_conf
from preprocessing import LoadData, split_sql
__version__ = '2.0.0'
__description__ = 'SQLdiag integrated by openGauss.'
def parse_args():
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument('mode', choices=['train', 'predict', 'finetune'],
help='The training mode is to perform feature extraction and '
'model training based on historical SQL statements. '
'The prediction mode is to predict the execution time of '
'a new SQL statement through the trained model.')
parser.add_argument('-f', '--csv-file', type=argparse.FileType('r'),
help='The data set for training or prediction. '
'The file format is CSV. '
'If it is two columns, the format is (SQL statement, duration time). '
'If it is three columns, '
'the format is (timestamp of SQL statement execution time, SQL statement, duration time).')
parser.add_argument('--predicted-file', help='The file path to save the predicted result.')
parser.add_argument('--model', default='template', choices=['template', 'dnn'],
help='Choose the model model to use.')
parser.add_argument('--query', help='Input the querys to predict.')
parser.add_argument('--threshold', help='Slow SQL threshold.')
parser.add_argument('--model-path', required=True,
help='The storage path of the model file, used to read or save the model file.')
parser.add_argument('--config-file', default='sqldiag.conf')
parser.version = __version__
return parser.parse_args()
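# Example invocations of this CLI (file and model paths are placeholders):
#
#   python main.py train -f train.csv --model template --model-path ./template_model
#   python main.py predict -f queries.csv --model template --model-path ./template_model \
#       --predicted-file result.csv --threshold 10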
def get_config(filepath):
cp = ConfigParser()
cp.read(filepath, encoding='UTF-8')
return cp
def main(args):
logging.basicConfig(level=logging.WARNING)
if not is_valid_conf(args.config_file):
logging.fatal('The [--config-file] parameter is incorrect')
sys.exit(1)
model = SQLDiag(args.model, get_config(args.config_file))
if args.mode in ('train', 'finetune'):
if not args.csv_file:
logging.fatal('The [--csv-file] parameter is required for train mode')
sys.exit(1)
train_data = LoadData(args.csv_file).train_data
if args.mode == 'train':
model.fit(train_data)
else:
model.fine_tune(args.model_path, train_data)
model.save(args.model_path)
else:
model.load(args.model_path)
if args.csv_file and not args.query:
predict_data = LoadData(args.csv_file).predict_data
elif args.query and not args.csv_file:
predict_data = split_sql(args.query)
else:
logging.error('The predict model only supports [--csv-file] or [--query] at the same time.')
sys.exit(1)
args.threshold = -100 if not args.threshold else float(args.threshold)
pred_result = model.transform(predict_data)
if args.predicted_file:
if args.model == 'template':
info_sum = []
for stats, _info in pred_result.items():
if _info:
_info = list(filter(lambda item: item[1]>=args.threshold, _info))
for item in _info:
item.insert(1, stats)
info_sum.extend(_info)
ResultSaver().save(info_sum, args.predicted_file)
else:
pred_result = list(filter(lambda item: float(item[1])>=args.threshold, pred_result))
ResultSaver().save(pred_result, args.predicted_file)
else:
from prettytable import PrettyTable
display_table = PrettyTable()
if args.model == 'template':
display_table.field_names = ['sql', 'status', 'predicted time', 'most similar template']
display_table.align = 'l'
status = ('Suspect illegal SQL', 'No SQL information', 'No SQL template found', 'Fine match')
for stats in status:
if pred_result[stats]:
for sql, predicted_time, similariest_sql in pred_result[stats]:
                        if predicted_time >= args.threshold or stats == 'Suspect illegal SQL':
display_table.add_row([sql, stats, predicted_time, similariest_sql])
else:
display_table.field_names = ['sql', 'predicted time']
display_table.align = 'l'
for sql, predicted_time in pred_result:
if float(predicted_time) >= args.threshold:
display_table.add_row([sql, predicted_time])
print(display_table.get_string())
if __name__ == '__main__':
main(parse_args())
|
src/app/conf/healthchecks.py | denkasyanov/education-backend | 151 | 12663051 |
HEALTH_CHECKS_ERROR_CODE = 503
HEALTH_CHECKS = {
'db': 'django_healthchecks.contrib.check_database',
}
|
test.py | abhi-kumar/blitznet | 331 | 12663057 | #!/usr/bin/env python3
from glob import glob
import logging
import logging.config
import os
import tensorflow as tf
import numpy as np
from PIL import ImageFont
from config import get_logging_config, args, evaluation_logfile
from config import config as net_config
from paths import CKPT_ROOT
import matplotlib
matplotlib.use('Agg')
from vgg import VGG
from resnet import ResNet
from voc_loader import VOCLoader
from coco_loader import COCOLoader
from evaluation import Evaluation, COCOEval
from detector import Detector
slim = tf.contrib.slim
logging.config.dictConfig(get_logging_config(args.run_name))
log = logging.getLogger()
def main(argv=None): # pylint: disable=unused-argument
assert args.ckpt > 0 or args.batch_eval
assert args.detect or args.segment, "Either detect or segment should be True"
if args.trunk == 'resnet50':
net = ResNet
depth = 50
if args.trunk == 'resnet101':
net = ResNet
depth = 101
if args.trunk == 'vgg16':
net = VGG
depth = 16
net = net(config=net_config, depth=depth, training=False)
if args.dataset == 'voc07' or args.dataset == 'voc07+12':
loader = VOCLoader('07', 'test')
if args.dataset == 'voc12':
loader = VOCLoader('12', 'val', segmentation=args.segment)
if args.dataset == 'coco':
loader = COCOLoader(args.split)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)) as sess:
detector = Detector(sess, net, loader, net_config, no_gt=args.no_seg_gt)
if args.dataset == 'coco':
tester = COCOEval(detector, loader)
else:
tester = Evaluation(detector, loader, iou_thresh=args.voc_iou_thresh)
if not args.batch_eval:
detector.restore_from_ckpt(args.ckpt)
tester.evaluate_network(args.ckpt)
else:
log.info('Evaluating %s' % args.run_name)
ckpts_folder = CKPT_ROOT + args.run_name + '/'
out_file = ckpts_folder + evaluation_logfile
max_checked = get_last_eval(out_file)
log.debug("Maximum checked ckpt is %i" % max_checked)
with open(out_file, 'a') as f:
start = max(args.min_ckpt, max_checked+1)
ckpt_files = glob(ckpts_folder + '*.data*')
folder_has_nums = np.array(list((map(filename2num, ckpt_files))), dtype='int')
nums_available = sorted(folder_has_nums[folder_has_nums >= start])
nums_to_eval = [nums_available[-1]]
for n in reversed(nums_available):
if nums_to_eval[-1] - n >= args.step:
nums_to_eval.append(n)
nums_to_eval.reverse()
for ckpt in nums_to_eval:
log.info("Evaluation of ckpt %i" % ckpt)
tester.reset()
detector.restore_from_ckpt(ckpt)
res = tester.evaluate_network(ckpt)
f.write(res)
f.flush()
def filename2num(filename):
num = filename.split('/')[-1].split('-')[1].split('.')[0]
num = int(num) / 1000
return num
def num2filename(num):
filename = 'model.ckpt-' + str(num) + '000.data-00000-of-00001'
return filename
def get_last_eval(out_file):
'''finds the last evaluated checkpoint'''
max_num = 0
if os.path.isfile(out_file):
with open(out_file, 'r') as f:
for line in f:
max_num = int(line.split('\t')[0])
return max_num
if __name__ == '__main__':
tf.app.run()
|
bibliopixel/project/attributes.py | rec/leds | 253 | 12663074 | <reponame>rec/leds
def check(kwds, name):
if kwds:
msg = ', '.join('"%s"' % s for s in sorted(kwds))
s = '' if len(kwds) == 1 else 's'
raise ValueError('Unknown attribute%s for %s: %s' % (s, name, msg))
def set_reserved(value, section, name=None, data=None, **kwds):
check(kwds, '%s %s' % (section, value.__class__.__name__))
value.name = name
value.data = data
|
kitty/kitty-themes/.tools/preview.py | adicco/dotconfig | 1,464 | 12663099 | import sys
import os
theme_keys = [
"cursor", "foreground", "background", "background_opacity", "dynamic_background_opacity", "dim_opacity",
"selection_foreground", "selection_background", "color0", "color8", "color1", "color9", "color2", "color10",
"color3", "color11", "color4", "color12", "color5", "color13", "color6", "color14", "color7", "color15"
]
def is_valid(line):
"""
Returns true if a line inside a configuration file is a valid theme configuration pair: is not a comment, is not
empty and the key is correct.
:param line: a line inside the configuration file
:type line: str
:return: true if is valid, false otherwise
:rtype: bool
"""
return (not line.lstrip().startswith("#") # is not a comment
and len(line.strip()) != 0 # is not empty
and line.split(maxsplit=1)[0] in theme_keys) # key is a valid one
def extract_configuration_pair(line):
"""
Extract a configuration pair by splitting on spaces and taking the first couple of values.
:param line: a line inside the configuration file
:type line: str
:return: a key-value pair
:rtype: bool
"""
split = line.split(maxsplit=2)
return split[0], split[1]
def read_configuration(filename):
"""
Read a kitty configuration file and extract only theme related keys and values.
:param filename: path to the configuration file
:type filename: str
:return: a map with theme related configuration values
:rtype: dict[str, str]
"""
with open(filename, "r") as fp:
lines = fp.readlines()
theme_config = dict([extract_configuration_pair(line) for line in lines if is_valid(line)])
return theme_config
def fg(color, text):
rgb = tuple(int(color[i + 1:i + 3], 16) for i in (0, 2, 4))
return ('\x1b[38;2;%s;%s;%sm' % rgb + text + '\x1b[0m')
def bg(color, text):
rgb = tuple(int(color[i + 1:i + 3], 16) for i in (0, 2, 4))
return ('\x1b[48;2;%s;%s;%sm' % rgb + text + '\x1b[0m')
def print_preview(filename, configuration):
cursor = configuration["cursor"]
background = configuration["background"]
foreground = configuration["foreground"]
theme = os.path.basename(filename)
size = len(theme) + (2 + 2 + 16 + 2 + 16 + 1 + 2)
print(bg(background, " " * size))
print(bg(background, " "), end="")
print(bg(background, fg(foreground, theme)), end="")
print(bg(background, " "), end="")
c='a'
for i in range(0, 16):
color = configuration["color%d" % i]
print(bg(background, fg(color, c)), end="")
c = chr(ord(c) + 1)
print(bg(background, " "), end="")
selection_background = configuration["selection_background"]
selection_foreground = configuration["selection_foreground"]
c='A'
for i in range(0, 16):
print(bg(selection_background, fg(selection_foreground, c)), end="")
c = chr(ord(c) + 1)
print(bg(cursor, " "), end="")
print(bg(background, " "))
print(bg(background, " " * size))
print(bg(background, " "), end="")
print(bg(configuration["color0"], " "), end="")
print(bg(configuration["color1"], " "), end="")
print(bg(configuration["color2"], " "), end="")
print(bg(configuration["color3"], " "), end="")
print(bg(configuration["color4"], " "), end="")
print(bg(configuration["color5"], " "), end="")
print(bg(configuration["color6"], " "), end="")
print(bg(configuration["color7"], " "), end="")
print(bg(background, " "), end="")
print(bg(configuration["color8"], " "), end="")
print(bg(configuration["color9"], " "), end="")
print(bg(configuration["color10"], " "), end="")
print(bg(configuration["color11"], " "), end="")
print(bg(configuration["color12"], " "), end="")
print(bg(configuration["color13"], " "), end="")
print(bg(configuration["color14"], " "), end="")
print(bg(configuration["color15"], " "), end="")
print(bg(background, " " * (size - 16 - 4)), end="")
print()
print(bg(background, " " * size))
print()
def main(directory):
for filename in os.listdir(directory):
try:
path = os.path.join(directory, filename)
configuration = read_configuration(path)
print_preview(path, configuration)
except Exception as e:
print(e, file=sys.stderr)
print("Error while processing %s" % filename, file=sys.stderr)
if __name__ == "__main__":
main(sys.argv[1])
|
loudml/loudml/influx.py | jkbrandt/loudml | 245 | 12663100 | <gh_stars>100-1000
"""
InfluxDB module for Loud ML
"""
import logging
import influxdb.exceptions
import numpy as np
import requests.exceptions
from voluptuous import (
Required,
Optional,
All,
Length,
Boolean,
)
from influxdb import (
InfluxDBClient,
)
from . import (
errors,
schemas,
)
from loudml.misc import (
escape_quotes,
escape_doublequotes,
make_ts,
parse_addr,
str_to_ts,
ts_to_str,
)
from loudml.bucket import Bucket
g_aggregators = {}
def get_metric(name):
if name.lower() == 'avg':
return 'avg'
elif name.lower() == 'mean':
return 'avg'
elif name.lower() == 'average':
return 'avg'
elif name.lower() == 'stddev':
return 'std_deviation'
elif name.lower() == 'std_dev':
return 'std_deviation'
elif name.lower() == 'count':
return 'count'
elif name.lower() == 'min':
return 'min'
elif name.lower() == 'max':
return 'max'
elif name.lower() == 'sum':
return 'sum'
else:
return name
def ts_to_ns(ts):
"""
Convert second timestamp to integer nanosecond timestamp
"""
# XXX Due to limited mantis in float numbers,
# do not multiply directly by 1e9
return int(int(ts * 1e6) * int(1e3))
def make_ts_ns(mixed):
"""
Build a nanosecond timestamp from a mixed input
(second timestamp or string)
"""
return ts_to_ns(make_ts(mixed))
def format_bool(string):
if string.lower() == 'true':
return 'True'
elif string.lower() == 'false':
return 'False'
else:
return string
def aggregator(*aliases):
"""
Decorator to register aggregators and indexing them by their aliases
"""
global g_aggregator
def decorated(func):
for alias in aliases:
g_aggregators[alias] = func
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return decorated
@aggregator('avg', 'mean', 'average')
def _build_avg_agg(feature):
return "MEAN(\"{}\")".format(feature.field)
@aggregator('count')
def _build_count_agg(feature):
return "COUNT(\"{}\")".format(feature.field)
@aggregator('deriv', 'derivative')
def _build_derivative_agg(feature):
return "DERIVATIVE(\"{}\")".format(feature.field)
@aggregator('integral')
def _build_integral_agg(feature):
return "INTEGRAL(\"{}\")".format(feature.field)
@aggregator('max')
def _build_max_agg(feature):
return "MAX(\"{}\")".format(feature.field)
@aggregator('med', 'median')
def _build_median_agg(feature):
return "MEDIAN(\"{}\")".format(feature.field)
@aggregator('min')
def _build_min_agg(feature):
return "MIN(\"{}\")".format(feature.field)
@aggregator('mode')
def _build_mode_agg(feature):
return "MODE(\"{}\")".format(feature.field)
@aggregator('5percentile')
def _build_5percentile_agg(feature):
return "PERCENTILE(\"{}\", 5)".format(feature.field)
@aggregator('10percentile')
def _build_10percentile_agg(feature):
return "PERCENTILE(\"{}\", 10)".format(feature.field)
@aggregator('90percentile')
def _build_90percentile_agg(feature):
return "PERCENTILE(\"{}\", 90)".format(feature.field)
@aggregator('95percentile')
def _build_95percentile_agg(feature):
return "PERCENTILE(\"{}\", 95)".format(feature.field)
@aggregator('spread')
def _build_spread_agg(feature):
return "SPREAD(\"{}\")".format(feature.field)
@aggregator('stddev', 'std_dev')
def _build_stddev_agg(feature):
return "STDDEV(\"{}\")".format(feature.field)
@aggregator('sum')
def _build_sum_agg(feature):
return "SUM(\"{}\")".format(feature.field)
def _build_agg(feature):
"""
Build requested aggregation
"""
global g_aggregators
aggregator = g_aggregators.get(feature.metric.lower())
if aggregator is None:
raise errors.UnsupportedMetric(
"unsupported aggregation '{}' in feature '{}'".format(
feature.metric, feature.name,
),
)
agg = aggregator(feature)
return "{} as \"{}\"".format(agg, escape_doublequotes(feature.name))
def _build_count_agg2(feature):
"""
Build requested aggregation
"""
agg = _build_count_agg(feature)
return "{} as \"count_{}\"".format(agg, feature.field)
def _build_sum_agg2(feature):
"""
Build requested aggregation
"""
agg = _build_sum_agg(feature)
return "{} as \"sum_{}\"".format(agg, feature.field)
def _sum_of_squares(feature):
"""
Build requested aggregation
"""
return "SUM(\"squares_{}\") as \"sum_squares_{}\"".format(
feature.field,
feature.field,
)
def _build_time_predicates(
from_date=None,
to_date=None,
from_included=True,
to_included=False,
):
"""
Build time range predicates for 'where' clause
"""
must = []
if from_date:
must.append("time {} {}".format(
">=" if from_included else ">",
make_ts_ns(from_date),
))
if to_date:
must.append("time {} {}".format(
"<=" if to_included else "<",
make_ts_ns(to_date),
))
return must
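# For example (illustrative dates), _build_time_predicates("2020-01-01T00:00:00Z",
# "2020-01-02T00:00:00Z") would yield ["time >= 1577836800000000000",
# "time < 1577923200000000000"]: nanosecond bounds with the default inclusive start
# and exclusive end.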
def _build_tags_predicates(match_all=None):
"""
Build tags predicates for 'where' clause
"""
must = []
if match_all:
for condition in match_all:
quoted_tag = "\"{}\"".format(
escape_doublequotes(condition['tag'])
)
val = condition['value']
predicate = "{}='{}'".format(
quoted_tag,
escape_quotes(str(val)),
)
if isinstance(val, bool) or isinstance(val, int):
predicate = "({} OR {}={})".format(
predicate,
quoted_tag,
str(val)
)
must.append(predicate)
return must
def _build_key_predicate(tag, val=None):
"""
Build key predicate for 'where' clause
"""
must = []
if val:
must.append("\"{}\"='{}'".format(
escape_doublequotes(tag),
escape_quotes(format_bool(val)),
))
return must
def catch_query_error(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except (
influxdb.exceptions.InfluxDBClientError,
requests.exceptions.RequestException,
) as exn:
raise errors.BucketError(self.name, str(exn))
return wrapper
class InfluxBucket(Bucket):
"""
InfluxDB bucket
"""
SCHEMA = Bucket.SCHEMA.extend({
Required('addr'): str,
Required('database'): schemas.key,
Required('measurement'): All(schemas.key, Length(max=256)),
Optional('create_database', default=True): Boolean(),
Optional('dbuser'): All(schemas.key, Length(max=256)),
Optional('dbuser_password'): str,
Optional('retention_policy'): schemas.key,
Optional('use_ssl', default=False): Boolean(),
Optional('verify_ssl', default=False): Boolean(),
Optional('annotation_db', default='chronograf'): str,
})
def __init__(self, cfg):
cfg['type'] = 'influxdb'
super().__init__(cfg)
self._influxdb = None
self._annotationdb = None
self._from_prefix = ""
retention_policy = self.retention_policy
if retention_policy:
self._from_prefix = '"{}"."{}".'.format(
escape_doublequotes(self.db),
escape_doublequotes(retention_policy),
)
@property
def measurement(self):
return self.cfg['measurement']
@property
def addr(self):
return self.cfg['addr']
@property
def db(self):
return self.cfg['database']
@property
def dbuser(self):
return self.cfg.get('dbuser')
@property
def dbuser_password(self):
return self.cfg.get('dbuser_password')
@property
def retention_policy(self):
return self.cfg.get('retention_policy')
@property
def use_ssl(self):
return self.cfg.get('use_ssl') or False
@property
def verify_ssl(self):
return self.cfg.get('verify_ssl') or False
@property
def annotation_db_name(self):
return self.cfg.get('annotation_db') or 'chronograf'
@property
def influxdb(self):
if self._influxdb is None:
addr = parse_addr(self.addr, default_port=8086)
logging.info(
"connecting to influxdb on %s:%d, using database '%s'",
addr['host'],
addr['port'],
self.db,
)
self._influxdb = InfluxDBClient(
host=addr['host'],
port=addr['port'],
database=self.db,
username=self.dbuser,
password=<PASSWORD>,
ssl=self.use_ssl,
verify_ssl=self.verify_ssl,
)
return self._influxdb
@property
def annotationdb(self):
if self._annotationdb is None:
addr = parse_addr(self.addr, default_port=8086)
db = self.annotation_db_name
logging.info(
"connecting to influxdb on %s:%d, using database '%s'",
addr['host'],
addr['port'],
db,
)
self._annotationdb = InfluxDBClient(
host=addr['host'],
port=addr['port'],
database=db,
username=self.dbuser,
password=<PASSWORD>,
ssl=self.use_ssl,
verify_ssl=self.verify_ssl,
)
self._annotationdb.create_database(db)
return self._annotationdb
@catch_query_error
def init(self, db=None, *args, **kwargs):
"""
Create database
"""
if self.cfg.get('create_database'):
self.influxdb.create_database(db or self.db)
@catch_query_error
def drop(self, db=None):
"""
Delete database
"""
try:
self.influxdb.drop_database(db or self.db)
except influxdb.exceptions.InfluxDBClientError as exn:
if exn.code != 404:
raise exn
def insert_data(self, data):
raise errors.NotImplemented("InfluxDB is a pure time-series database")
def insert_times_data(
self,
ts,
data,
measurement=None,
tags=None,
*args,
**kwargs
):
"""
Insert data
"""
ts = make_ts(ts)
# suppress None (nothing to save)
data = {k: v for k, v in data.items() if v is not None}
entry = {
'measurement': measurement or self.measurement,
'time': ts_to_ns(ts),
'fields': data,
}
if tags:
entry['tags'] = tags
if len(data) > 0:
self.enqueue(entry)
@catch_query_error
def send_bulk(self, requests):
"""
Send data to InfluxDB
"""
self.influxdb.write_points(
requests,
retention_policy=self.retention_policy,
)
def _build_annotations_query(
self,
measurement,
from_date=None,
to_date=None,
tags=None,
):
"""
Build queries according to requested time range
"""
# TODO sanitize inputs to avoid injection!
time_pred = _build_time_predicates(from_date, to_date)
must = time_pred
for key, val in tags.items():
if isinstance(val, bool):
val = str(val)
elif isinstance(val, int):
val = str(val)
val = "'{}'".format(escape_quotes(val))
must.append("\"{}\"={}".format(
escape_doublequotes(key),
val),
)
must.append("\"{}\"={}".format(
"deleted",
"false"
))
where = " where {}".format(" and ".join(must)) if len(must) else ""
yield "select * from \"{}\".\"{}\".\"{}\"{} ;".format(
escape_doublequotes(self.annotation_db_name),
"autogen",
escape_doublequotes(measurement),
where,
)
def _build_times_queries(
self,
bucket_interval,
features,
from_date=None,
to_date=None,
):
"""
Build queries according to requested features
"""
# TODO sanitize inputs to avoid injection!
time_pred = _build_time_predicates(from_date, to_date)
for feature in features:
must = time_pred + _build_tags_predicates(feature.match_all)
where = " where {}".format(" and ".join(must)) if len(must) else ""
yield "select {} from {}\"{}\"{} group by time({}ms);".format(
_build_agg(feature),
self._from_prefix,
escape_doublequotes(feature.measurement or self.measurement),
where,
int(bucket_interval * 1000),
)
@catch_query_error
def get_times_data(
self,
bucket_interval,
features,
from_date=None,
to_date=None,
):
nb_features = len(features)
queries = self._build_times_queries(
bucket_interval, features, from_date, to_date)
queries = ''.join(queries)
results = self.influxdb.query(queries)
if not isinstance(results, list):
results = [results]
buckets = []
# Merge results
for i, result in enumerate(results):
feature = features[i]
for j, point in enumerate(result.get_points()):
agg_val = point.get(feature.name)
timeval = point['time']
if j < len(buckets):
bucket = buckets[j]
else:
bucket = {
'time': timeval,
'mod': int(str_to_ts(timeval)) % bucket_interval,
'values': {},
}
buckets.append(bucket)
bucket['values'][feature.name] = agg_val
# XXX Note that the buckets of InfluxDB results are aligned on
# modulo(bucket_interval)
# Build final result
t0 = None
result = []
for bucket in buckets:
X = np.full(nb_features, np.nan, dtype=float)
timeval = bucket['time']
ts = str_to_ts(timeval)
for i, feature in enumerate(features):
agg_val = bucket['values'].get(feature.name)
if agg_val is None:
logging.info(
"missing data: field '%s', metric '%s', bucket: %s",
feature.field, feature.metric, timeval,
)
else:
X[i] = agg_val
if t0 is None:
t0 = ts
result.append(((ts - t0) / 1000, X, timeval))
return result
def insert_annotation(
self,
dt,
desc,
_type,
_id,
measurement='annotations',
tags=None,
):
ts = make_ts(dt.timestamp())
data = {
'deleted': False,
'modified_time_ns': ts_to_ns(ts),
'start_time': ts_to_ns(ts),
'text': desc,
'id': _id,
}
# tag type easier to view annotations using TICK 1.7.x
_tags = {
'type': _type,
}
if tags is not None:
_tags.update(tags)
points = [{
'measurement': measurement,
'time': ts_to_ns(ts),
'fields': data,
'tags': _tags,
}]
self.annotationdb.write_points(points)
return points
def update_annotation(
self,
dt,
points,
):
ts = make_ts(dt.timestamp())
points[0]['fields']['deleted'] = True
self.annotationdb.write_points(points)
points[0]['time'] = ts_to_ns(ts)
points[0]['fields']['deleted'] = False
self.annotationdb.write_points(points)
return points
def list_anomalies(
self,
from_date,
to_date,
tags=None,
):
_tags = {
'type': 'loudml',
}
if tags is not None:
_tags.update(tags)
query = self._build_annotations_query(
measurement='annotations',
from_date=from_date,
to_date=to_date,
tags=_tags,
)
query = ''.join(query)
result = self.annotationdb.query(query)
windows = []
for j, point in enumerate(result.get_points()):
timeval = point.get('start_time')
timeval2 = point['time']
if timeval is not None and timeval2 is not None:
windows.append([
ts_to_str(make_ts(timeval / 1e9)),
ts_to_str(make_ts(timeval2)),
])
return windows
|
packages/python/plotly/plotly/matplotlylib/mplexporter/tests/__init__.py | mastermind88/plotly.py | 11,750 | 12663108 | <reponame>mastermind88/plotly.py
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
|
anomalib/utils/metrics/anomaly_score_distribution.py | ashwinvaidya17/anomalib | 689 | 12663137 | <gh_stars>100-1000
"""Module that computes the parameters of the normal data distribution of the training set."""
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchmetrics import Metric
class AnomalyScoreDistribution(Metric):
"""Mean and standard deviation of the anomaly scores of normal training data."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.anomaly_maps = []
self.anomaly_scores = []
self.add_state("image_mean", torch.empty(0), persistent=True)
self.add_state("image_std", torch.empty(0), persistent=True)
self.add_state("pixel_mean", torch.empty(0), persistent=True)
self.add_state("pixel_std", torch.empty(0), persistent=True)
self.image_mean = torch.empty(0)
self.image_std = torch.empty(0)
self.pixel_mean = torch.empty(0)
self.pixel_std = torch.empty(0)
# pylint: disable=arguments-differ
def update( # type: ignore
self, anomaly_scores: Optional[Tensor] = None, anomaly_maps: Optional[Tensor] = None
) -> None:
"""Update the precision-recall curve metric."""
if anomaly_maps is not None:
self.anomaly_maps.append(anomaly_maps)
if anomaly_scores is not None:
self.anomaly_scores.append(anomaly_scores)
def compute(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Compute stats."""
anomaly_scores = torch.hstack(self.anomaly_scores)
anomaly_scores = torch.log(anomaly_scores)
self.image_mean = anomaly_scores.mean()
self.image_std = anomaly_scores.std()
if self.anomaly_maps:
anomaly_maps = torch.vstack(self.anomaly_maps)
anomaly_maps = torch.log(anomaly_maps).cpu()
self.pixel_mean = anomaly_maps.mean(dim=0).squeeze()
self.pixel_std = anomaly_maps.std(dim=0).squeeze()
return self.image_mean, self.image_std, self.pixel_mean, self.pixel_std
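# Minimal usage sketch (tensor shapes and values are illustrative, not taken from
# anomalib's tests):
#
#   dist = AnomalyScoreDistribution()
#   dist.update(anomaly_scores=torch.rand(64) + 1e-3)           # per-image scores
#   dist.update(anomaly_maps=torch.rand(8, 1, 32, 32) + 1e-3)   # per-pixel maps
#   image_mean, image_std, pixel_mean, pixel_std = dist.compute()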
|
src/netius/extra/dhcp_s.py | timgates42/netius | 107 | 12663177 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import copy
import netius.common
import netius.servers
class DHCPServerS(netius.servers.DHCPServer):
def __init__(self, pool = None, options = {}, *args, **kwargs):
netius.servers.DHCPServer.__init__(self, *args, **kwargs)
self.pool = pool or netius.common.AddressPool("192.168.0.61", "192.168.0.69")
self.options = {}
self.lease = 3600
self._build(options)
def get_type(self, request):
type = request.get_type()
requested = request.get_requested()
mac = request.get_mac()
if type == 0x01: result = netius.common.OFFER_DHCP
elif type == 0x03:
current = self.pool.assigned(mac) or requested
is_owner = self.pool.is_owner(mac, current)
if is_owner: result = netius.common.ACK_DHCP
else: result = netius.common.NAK_DHCP
return result
def get_options(self, request):
options = copy.copy(self.options)
return options
def get_yiaddr(self, request):
type = request.get_type()
if type == 0x01: yiaddr = self._reserve(request)
elif type == 0x03: yiaddr = self._confirm(request)
return yiaddr
def _build(self, options):
lease = options.get("lease", {})
self.lease = lease.get("time", 3600)
for key, value in netius.legacy.iteritems(options):
key_i = netius.common.OPTIONS_DHCP.get(key, None)
if not key_i: continue
self.options[key_i] = value
def _reserve(self, request):
mac = request.get_mac()
return self.pool.reserve(
owner = mac,
lease = self.lease
)
def _confirm(self, request):
requested = request.get_requested()
mac = request.get_mac()
current = self.pool.assigned(mac) or requested
is_valid = self.pool.is_valid(current)
if is_valid: self.pool.touch(current, self.lease)
return current
if __name__ == "__main__":
import logging
host = netius.common.host()
pool = netius.common.AddressPool("172.16.0.80", "172.16.0.89")
options = dict(
router = dict(routers = ["1192.168.127.12"]),
subnet = dict(subnet = "255.255.0.0"),
dns = dict(
servers = ["172.16.0.11", "172.16.0.12"]
),
identifier = dict(identifier = host),
broadcast = dict(broadcast = "172.16.255.255"),
name = dict(name = "hive"),
lease = dict(time = 3600),
renewal = dict(time = 1800),
rebind = dict(time = 2700),
proxy = dict(url = "http://172.16.0.25:8080/proxy.pac")
)
server = DHCPServerS(
pool = pool,
options = options,
level = logging.INFO
)
server.serve(env = True)
else:
__path__ = []
|
skyline/functions/settings/manage_external_settings.py | datastreaming/skyline-1 | 396 | 12663219 |
"""
external_settings_configs
"""
import logging
import traceback
import requests
import simplejson as json
from ast import literal_eval
from skyline_functions import get_redis_conn_decoded
import settings
# @added 20210601 - Feature #4000: EXTERNAL_SETTINGS
def manage_external_settings(current_skyline_app):
"""
    Return the external settings, concatenating :mod:`settings.EXTERNAL_SETTINGS`
    with any fetched external settings.
:param current_skyline_app: the app calling the function so the function
knows which log to write too.
:type current_skyline_app: str
:return: (external_settings, external_from_cache)
:rtype: (dict, boolean)
"""
# Get the logger
current_skyline_app_logger = str(current_skyline_app) + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
function_str = 'metrics_manager :: functions.settings.manage_external_settings'
debug_get_external_settings = None
# Set the default dicts to return
external_settings = {}
external_from_cache = None
last_known_redis_key = 'skyline.last_known.external_settings'
# Define the items that are expected in the external settings json
EXTERNAL_SETTINGS_JSON_ITEMS = (
'id', 'namespace', 'full_duration',
'second_order_resolution_seconds', 'learn_full_duration_seconds',
)
OPTIONAL_EXTERNAL_ALERTS_JSON_ITEMS = (
'retention_1_resolution_seconds', 'retention_1_period_seconds',
'retention_2_resolution_seconds', 'retention_2_period_seconds',
'flux_token', 'thunder_alert_endpoint', 'thunder_alert_token',
'alert_on_no_data', 'alert_on_stale_metrics',
'do_not_alert_on_stale_metrics',
)
try:
EXTERNAL_SETTINGS = settings.EXTERNAL_SETTINGS.copy()
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: %s :: failed to determine EXTERNAL_SETTINGS - %s' % (
function_str, e))
return (external_settings, external_from_cache)
for external_settings_item in list(EXTERNAL_SETTINGS.keys()):
endpoint = None
try:
endpoint = EXTERNAL_SETTINGS[external_settings_item]['endpoint']
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: %s :: failed to determine endpoint for EXTERNAL_SETTINGS[\'%s\'] - %s' % (
function_str, str(external_settings_item), e))
if not endpoint:
continue
post_data = None
try:
post_data = EXTERNAL_SETTINGS[external_settings_item]['post_data']
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: %s :: failed to determine post_data for EXTERNAL_SETTINGS[\'%s\'] - %s' % (
function_str, str(external_settings_item), e))
if not post_data:
continue
external_settings_dict = {}
current_logger.info('%s :: fetching external settings from %s' % (
function_str, str(endpoint)))
try:
header = {"content-type": "application/json"}
r = requests.post(endpoint, data=json.dumps(post_data),
headers=header, timeout=10)
external_settings_dict = r.json()
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: %s :: could not retrieve json from the url - %s - %s' % (
function_str, str(endpoint), e))
continue
if not external_settings_dict:
current_logger.error('error :: %s :: failed to retrieve json from the url - %s' % (
function_str, str(endpoint)))
if external_settings_dict:
namespaces_list = []
try:
namespaces_list = external_settings_dict['data']['namespaces']
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: %s :: could not parse [\'data\'][\'namespaces\'] from json from url - %s - %s' % (
function_str, str(endpoint), e))
continue
for item in namespaces_list:
try:
required_elements = True
for element in EXTERNAL_SETTINGS_JSON_ITEMS:
valid_element = False
try:
valid_element = item[element]
except Exception as e:
current_logger.error('error :: %s :: could not validate %s from json from url - %s - %s' % (
function_str, element, str(endpoint), e))
required_elements = False
if not valid_element:
required_elements = False
continue
if required_elements:
config_id = 'external-%s' % str(item['id'])
external_settings[config_id] = item
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: get_external_settings :: could not parse namespace element from json from url - %s - %s - %s' % (
str(endpoint), str(item), e))
redis_conn_decoded = None
try:
redis_conn_decoded = get_redis_conn_decoded(current_skyline_app)
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: get_external_settings :: failed to get decoded Redis connection - %s' % e)
return (external_settings, external_from_cache)
if not external_settings:
try:
external_settings_raw = redis_conn_decoded.get(last_known_redis_key)
if external_settings_raw:
external_settings = literal_eval(external_settings_raw)
external_from_cache = True
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: get_external_settings :: failed to query Redis for %s - %s' % (
last_known_redis_key, e))
return (external_settings, external_from_cache)
redis_key = 'skyline.external_settings'
try:
redis_conn_decoded.set(redis_key, str(external_settings))
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: get_external_settings :: failed to set Redis key %s - %s' % (
redis_key, e))
try:
redis_conn_decoded.set(last_known_redis_key, str(external_settings))
except Exception as e:
current_logger.error(traceback.format_exc())
current_logger.error('error :: get_external_settings :: failed to set Redis key %s - %s' % (
last_known_redis_key, e))
return (external_settings, external_from_cache)
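# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Shows how a Skyline app might call the function above. 'analyzer' is just an
# example app name; the returned dict is keyed by 'external-<id>' entries and the
# boolean flags whether the settings came from the Redis fallback cache.
def _example_manage_external_settings():
    external_settings, from_cache = manage_external_settings('analyzer')
    for config_id, config in external_settings.items():
        print(config_id, config.get('namespace'), config.get('full_duration'))
    return from_cache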
|
test/parse/t01.py | timmartin/skulpt | 2,671 | 12663252 |
print x+2*3
|
frontend/tests/unit/models/test_data_issue.py | defendercrypt/amundsen | 2,072 | 12663279 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from amundsen_application.models.data_issue import DataIssue, Priority
class DataIssueTest(unittest.TestCase):
def setUp(self) -> None:
self.issue_key = 'key'
self.title = 'title'
self.url = 'https://place'
self.status = 'open'
self.priority = Priority.P2
self.maxDiff = None
def test_mapping_priority(self) -> None:
expected_priority_name = 'major'
expected_priority_display_name = 'P2'
data_issue = DataIssue(issue_key=self.issue_key,
title=self.title,
url=self.url,
status=self.status,
priority=self.priority).serialize()
self.assertEqual(data_issue['priority_display_name'], expected_priority_display_name)
self.assertEqual(data_issue['priority_name'], expected_priority_name)
self.assertEqual(data_issue['issue_key'], self.issue_key)
self.assertEqual(data_issue['title'], self.title)
self.assertEqual(data_issue['url'], self.url)
self.assertEqual(data_issue['status'], self.status)
    def test_mapping_priority_missing(self) -> None:
expected_priority_name = None # type: ignore
expected_priority_display_name = None # type: ignore
data_issue = DataIssue(issue_key=self.issue_key,
title=self.title,
url=self.url,
status=self.status,
priority=None).serialize()
self.assertEqual(data_issue['priority_display_name'], expected_priority_display_name)
self.assertEqual(data_issue['priority_name'], expected_priority_name)
self.assertEqual(data_issue['issue_key'], self.issue_key)
self.assertEqual(data_issue['title'], self.title)
self.assertEqual(data_issue['url'], self.url)
self.assertEqual(data_issue['status'], self.status)
|
tests/test_summarizers/test_random.py | isarth/sumy | 2,880 | 12663285 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from sumy._compat import to_unicode
from sumy.summarizers.random import RandomSummarizer
from ..utils import build_document, build_document_from_string
def test_empty_document():
document = build_document()
summarizer = RandomSummarizer()
sentences = summarizer(document, 10)
assert len(sentences) == 0
def test_less_sentences_than_requested():
document = build_document_from_string("""
This is only one sentence.
""")
summarizer = RandomSummarizer()
sentences = summarizer(document, 10)
assert len(sentences) == 1
assert to_unicode(sentences[0]) == "This is only one sentence."
def test_sentences_in_right_order():
document = build_document_from_string("""
# Heading one
First sentence.
Second sentence.
Third sentence.
""")
summarizer = RandomSummarizer()
sentences = summarizer(document, 4)
assert len(sentences) == 3
assert to_unicode(sentences[0]) == "First sentence."
assert to_unicode(sentences[1]) == "Second sentence."
assert to_unicode(sentences[2]) == "Third sentence."
def test_more_sentences_than_requested():
document = build_document_from_string("""
# Heading one
First sentence.
Second sentence.
Third sentence.
# Heading two
I like sentences
They are so wordy
And have many many letters
And are green in my editor
But someone doesn't like them :(
""")
summarizer = RandomSummarizer()
sentences = summarizer(document, 4)
assert len(sentences) == 4
|
office-plugin/windows-office/program/wizards/fax/FaxWizardDialog.py | jerrykcode/kkFileView | 6,660 | 12663308 |
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from .FaxWizardDialogResources import FaxWizardDialogResources
from .FaxWizardDialogConst import FaxWizardDialogConst, HIDMAIN, HID
from ..ui.WizardDialog import WizardDialog, uno, UIConsts, PropertyNames
from com.sun.star.awt.FontUnderline import SINGLE
class FaxWizardDialog(WizardDialog):
def __init__(self, xmsf):
super(FaxWizardDialog,self).__init__(xmsf, HIDMAIN )
#Load Resources
self.resources = FaxWizardDialogResources()
#set dialog properties...
self.setDialogProperties(True, 210, True, 104, 52, 1, 1,
self.resources.resFaxWizardDialog_title, 310)
self.fontDescriptor4 = \
uno.createUnoStruct('com.sun.star.awt.FontDescriptor')
self.fontDescriptor5 = \
uno.createUnoStruct('com.sun.star.awt.FontDescriptor')
self.fontDescriptor4.Weight = 100
self.fontDescriptor5.Weight = 150
def buildStep1(self):
self.optBusinessFax = self.insertRadioButton("optBusinessFax",
FaxWizardDialogConst.OPTBUSINESSFAX_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTBUSINESSFAX_HID,
self.resources.resoptBusinessFax_value, 97, 28, 1, 1, 184),
self)
self.lstBusinessStyle = self.insertListBox("lstBusinessStyle",
FaxWizardDialogConst.LSTBUSINESSSTYLE_ACTION_PERFORMED,
FaxWizardDialogConst.LSTBUSINESSSTYLE_ITEM_CHANGED,
("Dropdown", PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(True, 12, FaxWizardDialogConst.LSTBUSINESSSTYLE_HID,
180, 40, 1, 3, 74), self)
self.optPrivateFax = self.insertRadioButton("optPrivateFax",
FaxWizardDialogConst.OPTPRIVATEFAX_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTPRIVATEFAX_HID,
self.resources.resoptPrivateFax_value,97, 81, 1, 2, 184), self)
self.lstPrivateStyle = self.insertListBox("lstPrivateStyle",
FaxWizardDialogConst.LSTPRIVATESTYLE_ACTION_PERFORMED,
FaxWizardDialogConst.LSTPRIVATESTYLE_ITEM_CHANGED,
("Dropdown", PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(True, 12, FaxWizardDialogConst.LSTPRIVATESTYLE_HID,
180, 95, 1, 4, 74), self)
self.insertLabel("lblBusinessStyle",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblBusinessStyle_value,
110, 42, 1, 32, 60))
self.insertLabel("lblTitle1",
("FontDescriptor", PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(self.fontDescriptor5, 16, self.resources.reslblTitle1_value,
True, 91, 8, 1, 37, 212))
self.insertLabel("lblPrivateStyle",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblPrivateStyle_value, 110, 95, 1, 50, 60))
self.insertLabel("lblIntroduction",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(39, self.resources.reslblIntroduction_value,
True, 104, 145, 1, 55, 199))
self.ImageControl3 = self.insertInfoImage(92, 145, 1)
def buildStep2(self):
self.chkUseLogo = self.insertCheckBox("chkUseLogo",
FaxWizardDialogConst.CHKUSELOGO_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKUSELOGO_HID,
self.resources.reschkUseLogo_value, 97, 28, 0, 2, 5, 212),
self)
self.chkUseDate = self.insertCheckBox("chkUseDate",
FaxWizardDialogConst.CHKUSEDATE_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKUSEDATE_HID,
self.resources.reschkUseDate_value, 97, 43, 0, 2, 6, 212),
self)
self.chkUseCommunicationType = self.insertCheckBox(
"chkUseCommunicationType",
FaxWizardDialogConst.CHKUSECOMMUNICATIONTYPE_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKUSECOMMUNICATIONTYPE_HID,
self.resources.reschkUseCommunicationType_value,
97, 57, 0, 2, 7, 100), self)
self.lstCommunicationType = self.insertComboBox(
"lstCommunicationType",
FaxWizardDialogConst.LSTCOMMUNICATIONTYPE_ACTION_PERFORMED,
FaxWizardDialogConst.LSTCOMMUNICATIONTYPE_ITEM_CHANGED,
FaxWizardDialogConst.LSTCOMMUNICATIONTYPE_TEXT_CHANGED,
("Dropdown", PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(True, 12, FaxWizardDialogConst.LSTCOMMUNICATIONTYPE_HID,
105, 68, 2, 8, 174), self)
self.chkUseSubject = self.insertCheckBox("chkUseSubject",
FaxWizardDialogConst.CHKUSESUBJECT_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKUSESUBJECT_HID,
self.resources.reschkUseSubject_value, 97, 87, 0, 2, 9, 212),
self)
self.chkUseSalutation = self.insertCheckBox("chkUseSalutation",
FaxWizardDialogConst.CHKUSESALUTATION_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKUSESALUTATION_HID,
self.resources.reschkUseSalutation_value,
97, 102, 0, 2, 10, 100), self)
self.lstSalutation = self.insertComboBox("lstSalutation",
FaxWizardDialogConst.LSTSALUTATION_ACTION_PERFORMED,
FaxWizardDialogConst.LSTSALUTATION_ITEM_CHANGED,
FaxWizardDialogConst.LSTSALUTATION_TEXT_CHANGED,
("Dropdown", PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(True, 12, FaxWizardDialogConst.LSTSALUTATION_HID,
105, 113, 2, 11, 174), self)
self.chkUseGreeting = self.insertCheckBox("chkUseGreeting",
FaxWizardDialogConst.CHKUSEGREETING_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKUSEGREETING_HID,
self.resources.reschkUseGreeting_value,
97, 132, 0, 2, 12, 100), self)
self.lstGreeting = self.insertComboBox("lstGreeting",
FaxWizardDialogConst.LSTGREETING_ACTION_PERFORMED,
FaxWizardDialogConst.LSTGREETING_ITEM_CHANGED,
FaxWizardDialogConst.LSTGREETING_TEXT_CHANGED,
("Dropdown", PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(True, 12, FaxWizardDialogConst.LSTGREETING_HID,
105, 143, 2, 13, 174), self)
self.chkUseFooter = self.insertCheckBox("chkUseFooter",
FaxWizardDialogConst.CHKUSEFOOTER_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKUSEFOOTER_HID,
self.resources.reschkUseFooter_value, 97, 163,
0, 2, 14, 212), self)
self.insertLabel("lblTitle3",
("FontDescriptor", PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(self.fontDescriptor5, 16, self.resources.reslblTitle3_value,
True, 91, 8, 2, 59, 212))
def buildStep3(self):
self.optSenderPlaceholder = self.insertRadioButton(
"optSenderPlaceholder",
FaxWizardDialogConst.OPTSENDERPLACEHOLDER_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTSENDERPLACEHOLDER_HID,
self.resources.resoptSenderPlaceholder_value,
104, 42, 3, 15, 149), self)
self.optSenderDefine = self.insertRadioButton("optSenderDefine",
FaxWizardDialogConst.OPTSENDERDEFINE_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTSENDERDEFINE_HID,
self.resources.resoptSenderDefine_value,
104, 54, 3, 16, 149), self)
self.txtSenderName = self.insertTextField("txtSenderName",
FaxWizardDialogConst.TXTSENDERNAME_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(12, FaxWizardDialogConst.TXTSENDERNAME_HID,
182, 67, 3, 17, 119), self)
self.txtSenderStreet = self.insertTextField("txtSenderStreet",
FaxWizardDialogConst.TXTSENDERSTREET_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(12, FaxWizardDialogConst.TXTSENDERSTREET_HID,
182, 81, 3, 18, 119), self)
self.txtSenderPostCode = self.insertTextField("txtSenderPostCode",
FaxWizardDialogConst.TXTSENDERPOSTCODE_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(12, FaxWizardDialogConst.TXTSENDERPOSTCODE_HID,
182, 95, 3, 19, 25), self)
self.txtSenderState = self.insertTextField("txtSenderState",
FaxWizardDialogConst.TXTSENDERSTATE_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(12, FaxWizardDialogConst.TXTSENDERSTATE_HID,
211, 95, 3, 20, 21), self)
self.txtSenderCity = self.insertTextField("txtSenderCity",
FaxWizardDialogConst.TXTSENDERCITY_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(12, FaxWizardDialogConst.TXTSENDERCITY_HID,
236, 95, 3, 21, 65), self)
self.txtSenderFax = self.insertTextField("txtSenderFax",
FaxWizardDialogConst.TXTSENDERFAX_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(12, FaxWizardDialogConst.TXTSENDERFAX_HID,
182, 109, 3, 22, 119), self)
self.optReceiverPlaceholder = self.insertRadioButton(
"optReceiverPlaceholder",
FaxWizardDialogConst.OPTRECEIVERPLACEHOLDER_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTRECEIVERPLACEHOLDER_HID,
self.resources.resoptReceiverPlaceholder_value,
104, 148, 3, 23, 200), self)
self.optReceiverDatabase = self.insertRadioButton(
"optReceiverDatabase",
FaxWizardDialogConst.OPTRECEIVERDATABASE_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTRECEIVERDATABASE_HID,
self.resources.resoptReceiverDatabase_value,
104, 160, 3, 24, 200), self)
self.insertLabel("lblSenderAddress",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblSenderAddress_value,
97, 28, 3, 46, 136))
self.insertFixedLine("FixedLine2", (PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(5, 90, 126, 3, 51, 212))
self.insertLabel("lblSenderName",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblSenderName_value,
113, 69, 3, 52, 68))
self.insertLabel("lblSenderStreet",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblSenderStreet_value,
113, 82, 3, 53, 68))
self.insertLabel("lblPostCodeCity",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblPostCodeCity_value,
113, 97, 3, 54, 68))
self.insertLabel("lblTitle4",
("FontDescriptor",
PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(self.fontDescriptor5, 16, self.resources.reslblTitle4_value,
True, 91, 8, 3, 60, 212))
self.insertLabel("lblSenderFax",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.resLabel1_value, 113, 111, 3, 68, 68))
self.insertLabel("Label2",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.resLabel2_value, 97, 137, 3, 69, 136))
def buildStep4(self):
self.txtFooter = self.insertTextField("txtFooter",
FaxWizardDialogConst.TXTFOOTER_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(47, FaxWizardDialogConst.TXTFOOTER_HID,
True, 97, 40, 4, 25, 203), self)
self.chkFooterNextPages = self.insertCheckBox("chkFooterNextPages",
FaxWizardDialogConst.CHKFOOTERNEXTPAGES_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKFOOTERNEXTPAGES_HID,
self.resources.reschkFooterNextPages_value,
97, 92, 0, 4, 26, 202), self)
self.chkFooterPageNumbers = self.insertCheckBox("chkFooterPageNumbers",
FaxWizardDialogConst.CHKFOOTERPAGENUMBERS_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STATE,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.CHKFOOTERPAGENUMBERS_HID,
self.resources.reschkFooterPageNumbers_value,
97, 106, 0, 4, 27, 201), self)
self.insertLabel("lblFooter",
("FontDescriptor",
PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(self.fontDescriptor4, 8, self.resources.reslblFooter_value,
97, 28, 4, 33, 116))
self.insertLabel("lblTitle5",
("FontDescriptor",
PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(self.fontDescriptor5, 16, self.resources.reslblTitle5_value,
True, 91, 8, 4, 61, 212))
def buildStep5(self):
self.txtTemplateName = self.insertTextField("txtTemplateName",
FaxWizardDialogConst.TXTTEMPLATENAME_TEXT_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
"Text",
PropertyNames.PROPERTY_WIDTH),
(12, FaxWizardDialogConst.TXTTEMPLATENAME_HID, 202, 56, 5, 28,
self.resources.restxtTemplateName_value, 100), self)
self.optCreateFax = self.insertRadioButton("optCreateFax",
FaxWizardDialogConst.OPTCREATEFAX_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTCREATEFAX_HID,
self.resources.resoptCreateFax_value,
104, 111, 5, 30, 198), self)
self.optMakeChanges = self.insertRadioButton("optMakeChanges",
FaxWizardDialogConst.OPTMAKECHANGES_ITEM_CHANGED,
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_HELPURL,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, FaxWizardDialogConst.OPTMAKECHANGES_HID,
self.resources.resoptMakeChanges_value,
104, 123, 5, 31, 198), self)
self.insertLabel("lblFinalExplanation1",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(28, self.resources.reslblFinalExplanation1_value,
True, 97, 28, 5, 34, 205))
self.insertLabel("lblProceed",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblProceed_value, 97, 100, 5,
35, 204))
self.insertLabel("lblFinalExplanation2",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(33, self.resources.reslblFinalExplanation2_value,
True, 104, 145, 5, 36, 199))
self.insertImage("ImageControl2",
("Border",
PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_IMAGEURL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
"ScaleImage",
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(0, 10, UIConsts.INFOIMAGEURL, 92, 145,
False, 5, 47, 10))
self.insertLabel("lblTemplateName",
(PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(8, self.resources.reslblTemplateName_value, 97, 58, 5,
57, 101))
self.insertLabel("lblTitle6",
("FontDescriptor",
PropertyNames.PROPERTY_HEIGHT,
PropertyNames.PROPERTY_LABEL,
PropertyNames.PROPERTY_MULTILINE,
PropertyNames.PROPERTY_POSITION_X,
PropertyNames.PROPERTY_POSITION_Y,
PropertyNames.PROPERTY_STEP,
PropertyNames.PROPERTY_TABINDEX,
PropertyNames.PROPERTY_WIDTH),
(self.fontDescriptor5, 16, self.resources.reslblTitle6_value,
True, 91, 8, 5, 62, 212))
|
cartography/intel/aws/ec2/reserved_instances.py | ramonpetgrave64/cartography | 2,322 | 12663359 |
import logging
from typing import Dict
from typing import List
import boto3
import neo4j
from botocore.exceptions import ClientError
from .util import get_botocore_config
from cartography.util import aws_handle_regions
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
@aws_handle_regions
def get_reserved_instances(boto3_session: boto3.session.Session, region: str) -> List[Dict]:
client = boto3_session.client('ec2', region_name=region, config=get_botocore_config())
try:
reserved_instances = client.describe_reserved_instances()['ReservedInstances']
except ClientError as e:
logger.warning(f"Failed retrieve reserved instances for region - {region}. Error - {e}")
raise
return reserved_instances
@timeit
def load_reserved_instances(
neo4j_session: neo4j.Session, data: List[Dict], region: str,
current_aws_account_id: str, update_tag: int,
) -> None:
ingest_reserved_instances = """
UNWIND {reserved_instances_list} as res
MERGE (ri:EC2ReservedInstance{id: res.ReservedInstancesId})
ON CREATE SET ri.firstseen = timestamp()
SET ri.lastupdated = {update_tag}, ri.availabilityzone = res.AvailabilityZone, ri.duration = res.Duration,
ri.end = res.End, ri.start = res.Start, ri.count = res.InstanceCount, ri.type = res.InstanceType,
ri.productdescription = res.ProductDescription, ri.state = res.State, ri.currencycode = res.CurrencyCode,
ri.instancetenancy = res.InstanceTenancy, ri.offeringclass = res.OfferingClass,
ri.offeringtype = res.OfferingType, ri.scope = res.Scope, ri.fixedprice = res.FixedPrice, ri.region={Region}
WITH ri
MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})
MERGE (aa)-[r:RESOURCE]->(ri)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {update_tag}
"""
for r_instance in data:
r_instance['Start'] = str(r_instance['Start'])
r_instance['End'] = str(r_instance['End'])
neo4j_session.run(
ingest_reserved_instances,
reserved_instances_list=data,
AWS_ACCOUNT_ID=current_aws_account_id,
Region=region,
update_tag=update_tag,
)
@timeit
def cleanup_reserved_instances(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
run_cleanup_job(
'aws_import_reserved_instances_cleanup.json',
neo4j_session,
common_job_parameters,
)
@timeit
def sync_ec2_reserved_instances(
neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str],
current_aws_account_id: str,
update_tag: int, common_job_parameters: Dict,
) -> None:
for region in regions:
logger.debug("Syncing reserved instances for region '%s' in account '%s'.", region, current_aws_account_id)
data = get_reserved_instances(boto3_session, region)
load_reserved_instances(neo4j_session, data, region, current_aws_account_id, update_tag)
cleanup_reserved_instances(neo4j_session, common_job_parameters)
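# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Wires the sync entry point above to a boto3 session and a neo4j session. The
# bolt URI, credentials, account id and job parameters are placeholder assumptions.
def _example_sync() -> None:
    boto3_session = boto3.Session()  # default credential/region chain
    driver = neo4j.GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
    with driver.session() as neo4j_session:
        sync_ec2_reserved_instances(
            neo4j_session,
            boto3_session,
            regions=["us-east-1"],
            current_aws_account_id="123456789012",
            update_tag=1234567890,
            common_job_parameters={"UPDATE_TAG": 1234567890, "AWS_ID": "123456789012"},
        )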
|
tests/clpy_tests/random_tests/test_permutations.py | fixstars/clpy | 142 | 12663391 |
import unittest
import clpy
from clpy import testing
@testing.gpu
class TestPermutations(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.gpu
class TestShuffle(unittest.TestCase):
_multiprocess_can_split_ = True
# Test ranks
@testing.numpy_clpy_raises()
def test_shuffle_zero_dim(self, xp):
a = testing.shaped_random((), xp)
xp.random.shuffle(a)
# Test same values
@testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True)
def test_shuffle_sort_1dim(self, dtype):
a = clpy.arange(10, dtype=dtype)
b = clpy.copy(a)
clpy.random.shuffle(a)
testing.assert_allclose(clpy.sort(a), b)
@testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True)
def test_shuffle_sort_ndim(self, dtype):
a = clpy.arange(15, dtype=dtype).reshape(5, 3)
b = clpy.copy(a)
clpy.random.shuffle(a)
testing.assert_allclose(clpy.sort(a, axis=0), b)
# Test seed
@testing.for_all_dtypes()
def test_shuffle_seed1(self, dtype):
a = testing.shaped_random((10,), clpy, dtype)
b = clpy.copy(a)
clpy.random.seed(0)
clpy.random.shuffle(a)
clpy.random.seed(0)
clpy.random.shuffle(b)
testing.assert_allclose(a, b)
|
src/textual/drivers/win32.py | eduard93/textual | 2,177 | 12663408 |
import ctypes
import msvcrt
import sys
import threading
from asyncio import AbstractEventLoop, run_coroutine_threadsafe
from ctypes import Structure, Union, byref, wintypes
from ctypes.wintypes import BOOL, CHAR, DWORD, HANDLE, SHORT, UINT, WCHAR, WORD
from typing import IO, Callable, List, Optional
from .._types import EventTarget
from .._xterm_parser import XTermParser
from ..events import Event, Resize
from ..geometry import Size
KERNEL32 = ctypes.WinDLL("kernel32", use_last_error=True)
# Console input modes
ENABLE_ECHO_INPUT = 0x0004
ENABLE_EXTENDED_FLAGS = 0x0080
ENABLE_INSERT_MODE = 0x0020
ENABLE_LINE_INPUT = 0x0002
ENABLE_MOUSE_INPUT = 0x0010
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_QUICK_EDIT_MODE = 0x0040
ENABLE_WINDOW_INPUT = 0x0008
ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
# Console output modes
ENABLE_PROCESSED_OUTPUT = 0x0001
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
DISABLE_NEWLINE_AUTO_RETURN = 0x0008
ENABLE_LVB_GRID_WORLDWIDE = 0x0010
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
WAIT_TIMEOUT = 0x00000102
GetStdHandle = KERNEL32.GetStdHandle
GetStdHandle.argtypes = [wintypes.DWORD]
GetStdHandle.restype = wintypes.HANDLE
class COORD(Structure):
"""https://docs.microsoft.com/en-us/windows/console/coord-str"""
_fields_ = [
("X", SHORT),
("Y", SHORT),
]
class uChar(Union):
"""https://docs.microsoft.com/en-us/windows/console/key-event-record-str"""
_fields_ = [
("AsciiChar", CHAR),
("UnicodeChar", WCHAR),
]
class KEY_EVENT_RECORD(Structure):
"""https://docs.microsoft.com/en-us/windows/console/key-event-record-str"""
_fields_ = [
("bKeyDown", BOOL),
("wRepeatCount", WORD),
("wVirtualKeyCode", WORD),
("wVirtualScanCode", WORD),
("uChar", uChar),
("dwControlKeyState", DWORD),
]
class MOUSE_EVENT_RECORD(Structure):
"""https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str"""
_fields_ = [
("dwMousePosition", COORD),
("dwButtonState", DWORD),
("dwControlKeyState", DWORD),
("dwEventFlags", DWORD),
]
class WINDOW_BUFFER_SIZE_RECORD(Structure):
"""https://docs.microsoft.com/en-us/windows/console/window-buffer-size-record-str"""
_fields_ = [("dwSize", COORD)]
class MENU_EVENT_RECORD(Structure):
"""https://docs.microsoft.com/en-us/windows/console/menu-event-record-str"""
_fields_ = [("dwCommandId", UINT)]
class FOCUS_EVENT_RECORD(Structure):
"""https://docs.microsoft.com/en-us/windows/console/focus-event-record-str"""
_fields_ = [("bSetFocus", BOOL)]
class InputEvent(Union):
"""https://docs.microsoft.com/en-us/windows/console/input-record-str"""
_fields_ = [
("KeyEvent", KEY_EVENT_RECORD),
("MouseEvent", MOUSE_EVENT_RECORD),
("WindowBufferSizeEvent", WINDOW_BUFFER_SIZE_RECORD),
("MenuEvent", MENU_EVENT_RECORD),
("FocusEvent", FOCUS_EVENT_RECORD),
]
class INPUT_RECORD(Structure):
"""https://docs.microsoft.com/en-us/windows/console/input-record-str"""
_fields_ = [("EventType", wintypes.WORD), ("Event", InputEvent)]
def _set_console_mode(file: IO, mode: int) -> bool:
"""Set the console mode for a given file (stdout or stdin).
Args:
file (IO): A file like object.
mode (int): New mode.
Returns:
bool: True on success, otherwise False.
"""
windows_filehandle = msvcrt.get_osfhandle(file.fileno())
success = KERNEL32.SetConsoleMode(windows_filehandle, mode)
return success
def _get_console_mode(file: IO) -> int:
"""Get the console mode for a given file (stdout or stdin)
Args:
file (IO): A file-like object.
Returns:
int: The current console mode.
"""
windows_filehandle = msvcrt.get_osfhandle(file.fileno())
mode = wintypes.DWORD()
KERNEL32.GetConsoleMode(windows_filehandle, ctypes.byref(mode))
return mode.value
def enable_application_mode() -> Callable[[], None]:
"""Enable application mode.
Returns:
Callable[[], None]: A callable that will restore terminal to previous state.
"""
terminal_in = sys.stdin
terminal_out = sys.stdout
current_console_mode_in = _get_console_mode(terminal_in)
current_console_mode_out = _get_console_mode(terminal_out)
def restore() -> None:
"""Restore console mode to previous settings"""
_set_console_mode(terminal_in, current_console_mode_in)
_set_console_mode(terminal_out, current_console_mode_out)
_set_console_mode(
terminal_out, current_console_mode_out | ENABLE_VIRTUAL_TERMINAL_PROCESSING
)
_set_console_mode(terminal_in, ENABLE_VIRTUAL_TERMINAL_INPUT)
return restore
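# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# enable_application_mode() switches the Windows console to VT processing and
# returns a callable that restores the previous modes; pairing the two in a
# try/finally block is the intended pattern.
def _example_application_mode() -> None:
    restore = enable_application_mode()
    try:
        sys.stdout.write("\x1b[1mVT escape sequences are now interpreted\x1b[0m\n")
    finally:
        restore()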
def _wait_for_handles(handles: List[HANDLE], timeout: int = -1) -> Optional[HANDLE]:
"""
Waits for multiple handles. (Similar to 'select') Returns the handle which is ready.
Returns `None` on timeout.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms687025(v=vs.85).aspx
Note that handles should be a list of `HANDLE` objects, not integers. See
this comment in the patch by @quark-zju for the reason why:
''' Make sure HANDLE on Windows has a correct size
Previously, the type of various HANDLEs are native Python integer
types. The ctypes library will treat them as 4-byte integer when used
in function arguments. On 64-bit Windows, HANDLE is 8-byte and usually
a small integer. Depending on whether the extra 4 bytes are zero-ed out
or not, things can happen to work, or break. '''
This function returns either `None` or one of the given `HANDLE` objects.
(The return value can be tested with the `is` operator.)
"""
arrtype = HANDLE * len(handles)
handle_array = arrtype(*handles)
ret: int = KERNEL32.WaitForMultipleObjects(
len(handle_array), handle_array, BOOL(False), DWORD(timeout)
)
if ret == WAIT_TIMEOUT:
return None
else:
return handles[ret]
class EventMonitor(threading.Thread):
"""A thread to send key / window events to Textual loop."""
def __init__(
self,
loop: AbstractEventLoop,
app,
target: EventTarget,
exit_event: threading.Event,
process_event: Callable[[Event], None],
) -> None:
self.loop = loop
self.app = app
self.target = target
self.exit_event = exit_event
self.process_event = process_event
self.app.log("event monitor constructed")
super().__init__()
def run(self) -> None:
self.app.log("event monitor thread started")
exit_requested = self.exit_event.is_set
parser = XTermParser(self.target, lambda: False)
try:
read_count = wintypes.DWORD(0)
hIn = GetStdHandle(STD_INPUT_HANDLE)
MAX_EVENTS = 1024
KEY_EVENT = 0x0001
WINDOW_BUFFER_SIZE_EVENT = 0x0004
arrtype = INPUT_RECORD * MAX_EVENTS
input_records = arrtype()
ReadConsoleInputW = KERNEL32.ReadConsoleInputW
keys: List[str] = []
append_key = keys.append
while not exit_requested():
# Wait for new events
if _wait_for_handles([hIn], 200) is None:
# No new events
continue
# Get new events
ReadConsoleInputW(
hIn, byref(input_records), MAX_EVENTS, byref(read_count)
)
read_input_records = input_records[: read_count.value]
del keys[:]
new_size: Optional[tuple[int, int]] = None
for input_record in read_input_records:
event_type = input_record.EventType
if event_type == KEY_EVENT:
# Key event, store unicode char in keys list
key_event = input_record.Event.KeyEvent
key = key_event.uChar.UnicodeChar
if key_event.bKeyDown or key == "\x1b":
append_key(key)
elif event_type == WINDOW_BUFFER_SIZE_EVENT:
# Window size changed, store size
size = input_record.Event.WindowBufferSizeEvent.dwSize
new_size = (size.X, size.Y)
if keys:
# Process keys
for event in parser.feed("".join(keys)):
self.process_event(event)
if new_size is not None:
# Process changed size
self.on_size_change(*new_size)
except Exception as error:
self.app.log("EVENT MONITOR ERROR", error)
self.app.log("event monitor thread finished")
def on_size_change(self, width: int, height: int) -> None:
"""Called when terminal size changes."""
event = Resize(self.target, Size(width, height))
run_coroutine_threadsafe(self.target.post_message(event), loop=self.loop)
|
alipay/aop/api/domain/AlipayInsAutoUserMsgSendModel.py | snowxmas/alipay-sdk-python-all | 213 | 12663409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AutoMktTouchExtendInfoEntry import AutoMktTouchExtendInfoEntry
from alipay.aop.api.domain.AutoMktTouchExtendInfoEntry import AutoMktTouchExtendInfoEntry
class AlipayInsAutoUserMsgSendModel(object):
def __init__(self):
self._biz_time = None
self._extend_info = None
self._out_biz_no = None
self._scene_code = None
self._source = None
self._template_content_info = None
self._user_id = None
@property
def biz_time(self):
return self._biz_time
@biz_time.setter
def biz_time(self, value):
self._biz_time = value
@property
def extend_info(self):
return self._extend_info
@extend_info.setter
def extend_info(self, value):
if isinstance(value, list):
self._extend_info = list()
for i in value:
if isinstance(i, AutoMktTouchExtendInfoEntry):
self._extend_info.append(i)
else:
self._extend_info.append(AutoMktTouchExtendInfoEntry.from_alipay_dict(i))
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def scene_code(self):
return self._scene_code
@scene_code.setter
def scene_code(self, value):
self._scene_code = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def template_content_info(self):
return self._template_content_info
@template_content_info.setter
def template_content_info(self, value):
if isinstance(value, list):
self._template_content_info = list()
for i in value:
if isinstance(i, AutoMktTouchExtendInfoEntry):
self._template_content_info.append(i)
else:
self._template_content_info.append(AutoMktTouchExtendInfoEntry.from_alipay_dict(i))
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.biz_time:
if hasattr(self.biz_time, 'to_alipay_dict'):
params['biz_time'] = self.biz_time.to_alipay_dict()
else:
params['biz_time'] = self.biz_time
if self.extend_info:
if isinstance(self.extend_info, list):
for i in range(0, len(self.extend_info)):
element = self.extend_info[i]
if hasattr(element, 'to_alipay_dict'):
self.extend_info[i] = element.to_alipay_dict()
if hasattr(self.extend_info, 'to_alipay_dict'):
params['extend_info'] = self.extend_info.to_alipay_dict()
else:
params['extend_info'] = self.extend_info
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.scene_code:
if hasattr(self.scene_code, 'to_alipay_dict'):
params['scene_code'] = self.scene_code.to_alipay_dict()
else:
params['scene_code'] = self.scene_code
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.template_content_info:
if isinstance(self.template_content_info, list):
for i in range(0, len(self.template_content_info)):
element = self.template_content_info[i]
if hasattr(element, 'to_alipay_dict'):
self.template_content_info[i] = element.to_alipay_dict()
if hasattr(self.template_content_info, 'to_alipay_dict'):
params['template_content_info'] = self.template_content_info.to_alipay_dict()
else:
params['template_content_info'] = self.template_content_info
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsAutoUserMsgSendModel()
if 'biz_time' in d:
o.biz_time = d['biz_time']
if 'extend_info' in d:
o.extend_info = d['extend_info']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'scene_code' in d:
o.scene_code = d['scene_code']
if 'source' in d:
o.source = d['source']
if 'template_content_info' in d:
o.template_content_info = d['template_content_info']
if 'user_id' in d:
o.user_id = d['user_id']
return o
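# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Round-trips the model above through its dict form; the field values are
# illustrative placeholders, not real Alipay data.
def _example_round_trip():
    model = AlipayInsAutoUserMsgSendModel()
    model.scene_code = "EXAMPLE_SCENE"
    model.out_biz_no = "20200101000000001"
    model.user_id = "2088000000000000"
    params = model.to_alipay_dict()
    return AlipayInsAutoUserMsgSendModel.from_alipay_dict(params)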
|
moldesign/_tests/test_io.py | Autodesk/molecular-design-toolkit | 147 | 12663411 | """ Tests for molecule creation and file i/o
"""
import io
import os
import subprocess
from future.utils import PY2, native_str
from builtins import str
import collections
import pathlib
import gzip
import bz2
import pickle
import numpy
import pytest
import moldesign as mdt
mdt.compute.config.engine_type = 'docker'
from moldesign import units as u
from .helpers import get_data_path, native_str_buffer, requires_internet_connection
from .object_fixtures import h2_trajectory, h2_harmonic, h2
__PYTEST_MARK__ = 'io'
@pytest.fixture
def bipyridine_sdf():
return mdt.read(get_data_path('bipyridine.sdf'))
@pytest.fixture
def bipyridine_xyz():
return mdt.read(get_data_path('bipyridine.xyz'))
@pytest.fixture
def bipyridine_mol2():
return mdt.read(get_data_path('bipyridine.mol2'))
@pytest.fixture
def bipyridine_iupac():
return mdt.from_name('bipyridine')
@pytest.fixture
def bipyridine_inchi():
return mdt.from_inchi('InChI=1S/C10H8N2/c1-3-7-11-9(5-1)10-6-2-4-8-12-10/h1-8H')
@pytest.fixture
def bipyridine_smiles():
return mdt.from_smiles('c1ccnc(c1)c2ccccn2')
ATOMDATA = { # (symbol, valence, mass)
1: ('H', 1, 1.008 * u.amu),
6: ('C', 4, 12.000 * u.amu),
7: ('N', 3, 14.003 * u.amu),
8: ('O', 2, 15.995 * u.amu)}
@pytest.mark.parametrize('key', 'iupac smiles inchi xyz sdf'.split())
@pytest.mark.screening
def test_auto_unique_atom_names(key, request):
mol = request.getfixturevalue('bipyridine_'+key)
atomnames = set(atom.name for atom in mol.atoms)
assert len(atomnames) == mol.num_atoms
def test_atom_names_preserved_from_input_file_mol2(bipyridine_mol2):
mol = bipyridine_mol2
for atom in mol.atoms:
assert atom.name == atom.symbol + str(atom.index)
@pytest.fixture
def propane_pdb():
return mdt.read(get_data_path('propane.pdb'))
def test_pdb_with_missing_chains(propane_pdb):
""" In response to an observed bug where various conversions would fail with a PDB file
that's missing chain data
"""
mol = propane_pdb
if not mdt.compute.packages.openbabel.force_remote:
pbmol = mdt.interfaces.mol_to_pybel(mol)
assert len(pbmol.atoms) == mol.num_atoms
pmedmol = mdt.interfaces.mol_to_parmed(mol)
assert len(pmedmol.atoms) == mol.num_atoms
@pytest.mark.parametrize('key', 'mol2 xyz sdf iupac smiles inchi'.split())
@pytest.mark.screening
def test_read_bipyridine_from_format(key, request):
mol = request.getfixturevalue('bipyridine_'+key)
atomcounts = collections.Counter(atom.symbol for atom in mol.atoms)
assert len(atomcounts) == 3
assert atomcounts['C'] == 10
assert atomcounts['N'] == 2
assert atomcounts['H'] == 8
assert mol.charge == 0
assert abs(mol.mass - 156.069*u.amu) < 0.001 * u.amu
for atom in mol.atoms:
assert atom.formal_charge == 0.0
symb, val, mss = ATOMDATA[atom.atnum]
assert atom.symbol == symb
assert atom.valence == val
assert abs(atom.mass - mss) < 0.001 * u.amu
assert mol.num_bonds == 21
bondorders = collections.Counter(bond.order for bond in mol.bonds)
assert bondorders[2] == 6
assert bondorders[1] == 15
assert len(bondorders) == 2
@pytest.mark.parametrize('suffix', ['gz','bz2'])
def test_compressed_write(bipyridine_xyz, tmpdir, suffix):
# Note: compressed read is tested elsewhere when reading test data files
path = pathlib.Path(native_str(tmpdir))
dest = path / ('bipyr.xyz.' + suffix)
bipyridine_xyz.write(dest)
# don't use MDT's reader here! Need to make sure it's really gzip'd
if suffix == 'gz':
opener = gzip.open
elif suffix == 'bz2':
opener = bz2.BZ2File
else:
raise ValueError('Unrecognized suffix "%s"' % suffix)
if PY2:
mode = 'r'
else:
mode = 'rt'
if suffix == 'bz2':
opener = bz2.open
with opener(str(dest), mode) as infile:
content = infile.read()
mol = mdt.read(content, format='xyz')
assert mol.num_atoms == bipyridine_xyz.num_atoms
@pytest.fixture
def dna_pdb():
return mdt.read(pathlib.Path(get_data_path('ACTG.pdb')))
@pytest.fixture
def dna_mmcif():
return mdt.read(get_data_path('ACTG.cif'))
@pytest.fixture
def dna_sequence():
return mdt.build_bdna('ACTG')
@pytest.fixture
def pdb_1kbu():
return mdt.read(pathlib.Path(get_data_path('1KBU.pdb.bz2')))
@pytest.fixture
def mmcif_1kbu():
return mdt.read(get_data_path('1KBU.cif.bz2'))
@requires_internet_connection
def test_from_pdb_pdb_format():
mol = mdt.from_pdb('3aid')
assert mol.metadata.pdbid == '3aid'
assert mol.metadata.sourceformat == 'pdb'
assert mol.num_atoms == 1912
@requires_internet_connection
def test_from_pdb_mmcif_format():
mol = mdt.from_pdb('3aid', usecif=True)
assert mol.metadata.pdbid == '3aid'
assert mol.metadata.sourceformat == 'mmcif'
assert mol.metadata.sourceurl.split('.')[-1] == 'cif'
assert mol.num_atoms == 1912
@requires_internet_connection
@pytest.mark.skip("Takes over 10 minutes right now ...")
def test_mmcif_fallback_if_no_pdb_file():
mol = mdt.from_pdb('4V5X')
assert mol.metadata.pdbid.lower() == '4v5x'
assert mol.metadata.sourceformat == 'mmcif'
assert mol.metadata.sourceurl.split('.')[-1] == 'cif'
@pytest.mark.parametrize('key', 'pdb mmcif sequence'.split())
def test_read_dna_from_format(key, request):
if key == 'mmcif':
pytest.xfail(reason='Known mmcif parser bug, fix this by 0.7.4')
mol = request.getfixturevalue('dna_'+key)
def test_write_file_to_buffer(bipyridine_smiles):
mol = bipyridine_smiles
buffer = native_str_buffer()
mol.write(buffer, format='pdb')
buffer.seek(0)
newmol = mdt.read(buffer.getvalue(), format='pdb')
assert mol.num_atoms == newmol.num_atoms
def test_write_pickle_to_buffer(bipyridine_smiles):
mol = bipyridine_smiles
buffer = io.BytesIO()
mol.write(buffer, format='pkl')
newmol = pickle.loads(buffer.getvalue())
assert newmol.is_identical(mol, verbose=True)
def test_read_from_buffer():
s = native_str("2\nmy xyz file\n H 1.0 1.0 1.0\n H 1.0 2.0 1.0\n")
buffer = native_str_buffer(s)
h2 = mdt.read(buffer, format='xyz')
assert h2.num_atoms == 2
@pytest.mark.parametrize('key', 'pdb mmcif'.split())
@pytest.mark.screening
def test_1kbu_assembly_data(key, request):
mol = request.getfixturevalue('%s_1kbu' % key)
assert len(mol.properties.bioassemblies) == 1
assert '1' in mol.properties.bioassemblies
assembly = mol.properties.bioassemblies['1']
assert len(assembly.transforms) == 2
assert set(assembly.chains) == set(c.name for c in mol.chains)
# first transform is identity
numpy.testing.assert_allclose(assembly.transforms[0],
numpy.identity(4))
# second transform's rotation is unitary
rot = assembly.transforms[1][:3,:3]
numpy.testing.assert_allclose(rot.dot(rot.T),
numpy.identity(3))
@pytest.mark.parametrize('key', 'pdb mmcif'.split())
def test_1kbu_assembly_build(key, request):
asym = request.getfixturevalue('%s_1kbu' % key)
original = mdt.Molecule(asym)
assembly = asym.properties.bioassemblies['1']
rot = assembly.transforms[1][:3,:3]
move = assembly.transforms[1][:3,3] * u.angstrom
mol = mdt.build_assembly(asym, 1)
assert mol.num_chains == 2 * asym.num_chains
# test that original is unaffected
assert original.is_identical(asym)
testchain = assembly.chains[0]
new_chain_pos = mol.chains[testchain].positions.T.ldot(rot).T + move[None, :]
numpy.testing.assert_allclose(new_chain_pos.defunits_value(),
mol.chains[asym.num_chains].positions.defunits_value())
@pytest.mark.parametrize('fmt', 'smiles pdb mol2 sdf inchi mmcif pkl'.split())
def test_topology_preserved_in_serialization(bipyridine_smiles, fmt):
""" Test that bond topology is preserved even if it doesn't make sense from distances
"""
if fmt != 'pkl':
pytest.xfail("We are currently unable to get an unambiguous representation of a molecular "
"sructure with ANY current file formats or parsers.")
mol = bipyridine_smiles.copy() # don't screw up the fixture object
mol.bond_graph[mol.atoms[3]][mol.atoms[5]] = 3
mol.bond_graph[mol.atoms[5]][mol.atoms[3]] = 3
mol.atoms[3].x += 10.0 * u.angstrom
newmol = mdt.read(mol.write(format=fmt), format=fmt)
assert mol.same_bonds(newmol, verbose=True)
def test_write_traj(h2_trajectory, tmpdir):
path = os.path.join(str(tmpdir), 'traj.xyz')
h2_trajectory.write(path)
assert int(subprocess.check_output(['wc', '-l', path]).split()[0]) == (
(h2_trajectory.mol.num_atoms+2) * h2_trajectory.num_frames)
|
tests/test_subhd.py | appotry/subfinder | 718 | 12663423 | # -*- coding: utf-8 -*-
import pytest
from subfinder.subsearcher import SubHDSubSearcher
from subfinder.subfinder import SubFinder
from subfinder.subsearcher.exceptions import LanguageError, ExtError
@pytest.fixture(scope='module')
def subhd():
s = SubFinder()
z = SubHDSubSearcher(s)
return z
def test_languages(subhd):
subhd._check_languages(['zh_chs'])
with pytest.raises(LanguageError):
subhd._check_languages(['fake_lang'])
def test_exts(subhd):
subhd._check_exts(['ass'])
with pytest.raises(ExtError):
        subhd._check_exts(['fake_ext'])
|
pypasser/reCaptchaV2/__init__.py | renatowow14/PyPasser | 108 | 12663438 | from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import Chrome
import os
import speech_recognition as sr
from time import sleep
from typing import Type
from pypasser.exceptions import IpBlock
from pypasser.utils import download_audio, convert_to_wav
class reCaptchaV2(object):
"""
reCaptchaV2 bypass
-----------------
Solving reCaptcha V2 using speech to text
Attributes
----------
driver: webdriver
play: bool
default is True
attempts: int
default is 3 times
Returns
----------
bool: result of solver
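    Example
    ----------
    Illustrative sketch (not part of the original docstring); it assumes a
    selenium Chrome driver that is already on a page containing a reCaptcha V2
    widget, and the URL below is purely hypothetical::

        from selenium.webdriver import Chrome

        driver = Chrome()
        driver.get('https://example.com/page-with-recaptcha')  # hypothetical page
        solved = reCaptchaV2(driver, play=False, attempts=3)
        print('solved' if solved else 'not solved')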
"""
def __new__(cls, *args, **kwargs) -> bool:
instance = super(reCaptchaV2, cls).__new__(cls)
instance.__init__(*args,**kwargs)
remaining_attempts = instance.attempts
file_path = None
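        # Flow: click the checkbox first; if that alone passes, return True.
        # Otherwise switch to the audio challenge and retry up to `attempts`
        # times: download the audio, transcribe it, type the answer, re-check.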
try:
cls.__click_check_box__(instance.driver)
if cls.__is_checked__(instance.driver):
return True
cls.__click_audio_button__(instance.driver)
while remaining_attempts:
remaining_attempts -= 1
link = cls.__get_audio_link__(instance.driver, instance.play)
file_path = convert_to_wav(download_audio(link))
cls.__type_text__(instance.driver, cls.speech_to_text(file_path))
os.remove(file_path)
checked = cls.__is_checked__(instance.driver)
if checked or not remaining_attempts:
return checked
except Exception as e:
if file_path:
os.remove(file_path)
if 'rc-doscaptcha-header' in instance.driver.page_source:
raise IpBlock()
else:
raise e
def __init__(self, driver: Type[Chrome], play: bool = True, attempts: int = 3):
self.driver = driver
self.play = play
self.attempts = attempts
def __click_check_box__(driver):
driver.switch_to.frame(driver.find_element(By.TAG_NAME, "iframe"))
check_box = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR ,"#recaptcha-anchor")))
check_box.click()
driver.switch_to.default_content()
def __click_audio_button__(driver):
driver.switch_to.frame(driver.find_elements(By.TAG_NAME, "iframe")[2])
audio_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR ,"#recaptcha-audio-button")))
audio_btn.click()
driver.switch_to.default_content()
def __get_audio_link__(driver, play):
voice = driver.find_elements(By.TAG_NAME, "iframe")[2]
driver.switch_to.frame(voice)
download_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR ,".rc-audiochallenge-tdownload-link")))
link = download_btn.get_attribute('href')
if play:
play_button = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, ".rc-audiochallenge-play-button > button")))
play_button.click()
return link
def __type_text__(driver, text):
text_field = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR ,"#audio-response")))
text_field.send_keys(text , Keys.ENTER)
driver.switch_to.default_content()
def __is_checked__(driver):
sleep(3)
driver.switch_to.frame(WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'iframe[name^=a]'))))
try:
driver.find_element(By.CSS_SELECTOR, '.recaptcha-checkbox-checked')
driver.switch_to.default_content()
return True
except NoSuchElementException:
driver.switch_to.default_content()
return False
def speech_to_text(audio_path: str) -> str:
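        # Transcribes the downloaded challenge audio offline with CMU Sphinx
        # via the SpeechRecognition package (recognize_sphinx).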
r = sr.Recognizer()
with sr.AudioFile(audio_path) as source:
audio = r.record(source)
return r.recognize_sphinx(audio) |
python/pmercury/protocols/http_server.py | raj-apoorv/mercury | 299 | 12663442 | <filename>python/pmercury/protocols/http_server.py
"""
Copyright (c) 2019 Cisco Systems, Inc. All rights reserved.
License at https://github.com/cisco/mercury/blob/master/LICENSE
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from pmercury.protocols.protocol import Protocol
class HTTP_Server(Protocol):
def __init__(self, fp_database=None, config=None):
# populate fingerprint databases
self.fp_db = {}
# configuration
HTTP_Server.all_headers = False
HTTP_Server.all_headers_and_data = False
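        # The optional `config` dict is expected to look roughly like this
        # (illustrative, inferred from the parsing below, not from pmercury docs):
        #   {'http_server': {'static_names': ['server'],
        #                    'static_names_and_values': ['*'],
        #                    'preamble': ['code', 'reason'],
        #                    'context': ['via']}}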
if config == None or 'http_server' not in config:
HTTP_Server.static_names = set([b'appex-activity-id',b'cdnuuid',b'cf-ray',b'content-range',b'content-type',
b'date',b'etag',b'expires',b'flow_context',b'ms-cv',b'msregion',b'ms-requestid',
b'request-id',b'vary',b'x-amz-cf-pop',b'x-amz-request-id',b'x-azure-ref-originshield',
b'x-cache',b'x-cache-hits',b'x-ccc',b'x-diagnostic-s',b'x-feserver',b'x-hw',
b'x-msedge-ref',b'x-ocsp-responder-id',b'x-requestid',b'x-served-by',b'x-timer',
b'x-trace-context'])
HTTP_Server.static_names_and_values = set([b'access-control-allow-credentials',b'access-control-allow-headers',
b'access-control-allow-methods',b'access-control-expose-headers',
b'cache-control',b'connection',b'content-language',b'content-transfer-encoding',
b'p3p',b'pragma',b'server',b'strict-transport-security',b'x-aspnetmvc-version',
b'x-aspnet-version',b'x-cid',b'x-ms-version',b'x-xss-protection'])
HTTP_Server.headers_data = [0,1,2]
HTTP_Server.contextual_data = {b'via':'via'}
else:
HTTP_Server.static_names = set([])
HTTP_Server.static_names_and_values = set([])
HTTP_Server.headers_data = []
HTTP_Server.contextual_data = {}
if 'static_names' in config['http_server']:
if config['http_server']['static_names'] == ['*']:
HTTP_Server.all_headers = True
HTTP_Server.static_names = set(map(lambda x: x.encode(), config['http_server']['static_names']))
if 'static_names_and_values' in config['http_server']:
if config['http_server']['static_names_and_values'] == ['*']:
HTTP_Server.all_headers_and_data = True
HTTP_Server.static_names_and_values = set(map(lambda x: x.encode(), config['http_server']['static_names_and_values']))
if 'preamble' in config['http_server']:
if 'version' in config['http_server']['preamble']:
HTTP_Server.headers_data.append(0)
if 'code' in config['http_server']['preamble']:
HTTP_Server.headers_data.append(1)
if 'reason' in config['http_server']['preamble']:
HTTP_Server.headers_data.append(2)
if '*' in config['http_server']['preamble']:
HTTP_Server.headers_data = [0,1,2]
if 'context' in config['http_server']:
for c in config['http_server']['context']:
HTTP_Server.contextual_data[c.encode()] = c.lower().replace('-','_')
@staticmethod
def proto_identify(data, offset, data_len):
if data_len-offset < 16:
return False
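        # Bytes 72,84,84,80,47,49 are ASCII "HTTP/1": the payload must start
        # with an HTTP/1.x status line to be treated as an HTTP server response.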
if (data[offset] == 72 and
data[offset+1] == 84 and
data[offset+2] == 84 and
data[offset+3] == 80 and
data[offset+4] == 47 and
data[offset+5] == 49):
return True
return False
@staticmethod
def fingerprint(data, offset, data_len):
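        # Output sketch, based on the code below (not taken from upstream docs):
        # returns (fp_str, context), where fp_str concatenates hex-encoded,
        # parenthesized components -- the selected status-line fields followed
        # by the selected header names/values -- and context is a list of
        # {'name': ..., 'data': ...} dicts for headers named in contextual_data.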
t_ = data[offset:].split(b'\x0d\x0a', 1)
response = t_[0].split(b'\x20',2)
if len(response) < 2:
return None, None
c = []
for rh in HTTP_Server.headers_data:
try:
c.append('(%s)' % response[rh].hex())
except IndexError:
c.append('()')
if len(t_) == 1:
return ''.join(c), None
headers = t_[1].split(b'\x0d\x0a')
        if headers[0] == b'':
headers = headers[1:]
http_ah = HTTP_Server.all_headers
http_ahd = HTTP_Server.all_headers_and_data
http_sn = HTTP_Server.static_names
http_snv = HTTP_Server.static_names_and_values
http_ctx = HTTP_Server.contextual_data
context = []
for h_ in headers:
if h_ == b'':
break
t0_ = h_.split(b'\x3a\x20',1)[0]
t0_lower = t0_.lower()
h_c = ''
if http_ahd:
h_c = h_.hex()
elif t0_lower in http_snv:
h_c = h_.hex()
elif t0_lower in http_sn:
h_c = t0_.hex()
elif http_ah:
h_c = t0_.hex()
if h_c != '':
c.append('(%s)' % h_c)
if t0_lower in http_ctx:
if b'\x3a\x20' in h_:
try:
context.append({'name':http_ctx[t0_lower], 'data':h_.split(b'\x3a\x20',1)[1].decode()})
except UnicodeDecodeError:
context.append({'name':http_ctx[t0_lower], 'data':h_.split(b'\x3a\x20',1)[1].hex()})
else:
context.append({'name':http_ctx[t0_lower], 'data':''})
return ''.join(c), context
def get_human_readable(self, fp_str_):
t_ = [bytes.fromhex(x[1:]) for x in fp_str_.split(')')[:-1]]
try:
fp_h = [{'version':t_[0].decode()},{'code':t_[1].decode()},{'response':t_[2].decode()}]
except:
fp_h = [{'version':t_[0].hex()},{'code':t_[1].hex()},{'response':t_[2].hex()}]
for i in range(3, len(t_)-1):
field = t_[i].split(b': ')
if len(field) == 2:
try:
fp_h.append({field[0].decode(): field[1].decode()})
except:
fp_h.append({field[0].hex(): field[1].hex()})
else:
try:
fp_h.append({field[0].decode(): ''})
except:
fp_h.append({field[0].hex(): ''})
return fp_h
|