max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M)
---|---|---|---|---
test/old_tests/_test_prepend.py | syaiful6/aerospike-client-python | 105 | 11066189 |
# -*- coding: utf-8 -*-
import pytest
import sys
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except ImportError:
print("Please install aerospike python client.")
sys.exit(1)
class TestPrepend(object):
def setup_class(cls):
"""
Setup method.
"""
hostlist, user, password = TestBaseClass.get_hosts()
config = {'hosts': hostlist}
if user is None and password is None:
TestPrepend.client = aerospike.client(config).connect()
else:
TestPrepend.client = aerospike.client(config).connect(user,
password)
def teardown_class(cls):
TestPrepend.client.close()
def setup_method(self, method):
for i in range(5):
key = ('test', 'demo', i)
rec = {'name': 'name%s' % (str(i)), 'age': i, 'nolist': [1, 2, 3]}
TestPrepend.client.put(key, rec)
key = ('test', 'demo', 'bytearray_key')
TestPrepend.client.put(
key, {"bytearray_bin": bytearray("asd;as[d'as;d", "utf-8")})
def teardown_method(self, method):
"""
        Teardown method.
"""
for i in range(5):
key = ('test', 'demo', i)
TestPrepend.client.remove(key)
key = ('test', 'demo', 'bytearray_key')
TestPrepend.client.remove(key)
def test_prepend_with_no_parameters(self):
"""
Invoke prepend() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
TestPrepend.client.prepend()
assert "argument 'key' (pos 1)" in str(
typeError.value)
    def test_prepend_with_correct_parameters(self):
"""
Invoke prepend() with correct parameters
"""
key = ('test', 'demo', 1)
TestPrepend.client.prepend(key, "name", "str")
(key, _, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
def test_prepend_with_correct_policy(self):
"""
Invoke prepend() with correct policy
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_SEND,
'commit_level': aerospike.POLICY_COMMIT_LEVEL_ALL
}
TestPrepend.client.prepend(key, "name", "str", {}, policy)
(key, _, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
def test_prepend_with_policy_key_send(self):
"""
Invoke prepend() with policy key send
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_SEND,
'retry': aerospike.POLICY_RETRY_ONCE,
'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER
}
TestPrepend.client.prepend(key, "name", "str", {}, policy)
(key, _, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_prepend_with_policy_key_gen_EQ_ignore(self):
"""
        Invoke prepend() with generation policy POLICY_GEN_IGNORE
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_SEND,
'retry': aerospike.POLICY_RETRY_ONCE,
'gen': aerospike.POLICY_GEN_IGNORE
}
meta = {'gen': 10, 'ttl': 1200}
TestPrepend.client.prepend(key, "name", "str", meta, policy)
(key, meta, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_prepend_with_policy_key_gen_EQ_positive(self):
"""
Invoke prepend() with gen eq positive
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_SEND,
'retry': aerospike.POLICY_RETRY_ONCE,
'gen': aerospike.POLICY_GEN_EQ
}
(key, meta) = TestPrepend.client.exists(key)
gen = meta['gen']
meta = {'gen': gen, 'ttl': 1200}
TestPrepend.client.prepend(key, "name", "str", meta, policy)
(key, meta, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
assert key == ('test', 'demo', None, bytearray(
            b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_prepend_with_policy_key_gen_EQ_not_equal(self):
"""
Invoke prepend() with policy key GEN_EQ not equal
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_SEND,
'retry': aerospike.POLICY_RETRY_ONCE,
'gen': aerospike.POLICY_GEN_EQ
}
(key, meta) = TestPrepend.client.exists(key)
gen = meta['gen']
meta = {
'gen': gen + 5,
'ttl': 1200
}
try:
TestPrepend.client.prepend(key, "name", "str", meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == "AEROSPIKE_ERR_RECORD_GENERATION"
assert exception.bin == 'name'
(key, meta, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'name1', 'nolist': [1, 2, 3]}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_prepend_with_policy_key_gen_GT_lesser(self):
"""
        Invoke prepend() with POLICY_GEN_GT and a generation that is not greater
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_SEND,
'retry': aerospike.POLICY_RETRY_ONCE,
'gen': aerospike.POLICY_GEN_GT
}
(key, meta) = TestPrepend.client.exists(key)
gen = meta['gen']
meta = {
'gen': gen,
'ttl': 1200
}
try:
TestPrepend.client.prepend(key, "name", "str", meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == "AEROSPIKE_ERR_RECORD_GENERATION"
assert exception.bin == "name"
(key, meta, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'name1', 'nolist': [1, 2, 3]}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_prepend_with_policy_key_gen_GT_positive(self):
"""
Invoke prepend() with gen GT positive
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_SEND,
'retry': aerospike.POLICY_RETRY_ONCE,
'gen': aerospike.POLICY_GEN_GT
}
(key, meta) = TestPrepend.client.exists(key)
gen = meta['gen']
meta = {'gen': gen + 2, 'ttl': 1200}
TestPrepend.client.prepend(key, "name", "str", meta, policy)
(key, meta, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_prepend_with_policy_key_digest(self):
"""
Invoke prepend() with policy key digest
"""
        key = ('test', 'demo', None, bytearray("asd;as[d'as;djk;uyfl",
                                               "utf-8"))
rec = {'name': 'name%s' % (str(1)), 'age': 1, 'nolist': [1, 2, 3]}
TestPrepend.client.put(key, rec)
policy = {
'timeout': 1000,
'key': aerospike.POLICY_KEY_DIGEST,
'retry': aerospike.POLICY_RETRY_NONE
}
TestPrepend.client.prepend(key, "name", "str", {}, policy)
(key, _, bins) = TestPrepend.client.get(key)
assert bins == {'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
assert key == ('test', 'demo', None,
bytearray(b"asd;as[d\'as;djk;uyfl"))
TestPrepend.client.remove(key)
"""
def test_prepend_with_correct_policyandlist(self):
#Invoke prepend() with correct policy
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'key' : aerospike.POLICY_KEY_SEND
}
TestPrepend.client.prepend(key, "age", "str", policy)
(key , meta, bins) = TestPrepend.client.get(key)
assert bins == { 'age': 1, 'name': 'strname1', 'nolist': [1, 2, 3]}
"""
def test_prepend_with_incorrect_policy(self):
"""
Invoke prepend() with incorrect policy
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 0.5
}
try:
TestPrepend.client.prepend(key, "name", "str", {}, policy)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "timeout is invalid"
def test_prepend_with_nonexistent_key(self):
"""
Invoke prepend() with non-existent key
"""
key = ('test', 'demo', 1000)
status = TestPrepend.client.prepend(key, "name", "str")
assert status == 0
TestPrepend.client.remove(key)
def test_prepend_with_nonexistent_bin(self):
"""
Invoke prepend() with non-existent bin
"""
key = ('test', 'demo', 1)
status = TestPrepend.client.prepend(key, "name1", "str")
assert status == 0
def test_prepend_value_not_string(self):
"""
        Invoke prepend() with a value that is not a string
"""
key = ('test', 'demo', 1)
try:
TestPrepend.client.prepend(key, "name", 2)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Cannot concatenate 'str' and 'non-str' objects"
def test_prepend_with_extra_parameter(self):
"""
Invoke prepend() with extra parameter.
"""
key = ('test', 'demo', 1)
policy = {'timeout': 1000}
with pytest.raises(TypeError) as typeError:
TestPrepend.client.prepend(key, "name", "str", {}, policy, "")
assert "prepend() takes at most 5 arguments (6 given)" in str(
typeError.value)
def test_prepend_policy_is_string(self):
"""
        Invoke prepend() with a policy that is a string
"""
key = ('test', 'demo', 1)
try:
TestPrepend.client.prepend(key, "name", "abc", {}, "")
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "policy must be a dict"
def test_prepend_key_is_none(self):
"""
        Invoke prepend() with a key that is None
"""
try:
TestPrepend.client.prepend(None, "name", "str")
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "key is invalid"
def test_prepend_bin_is_none(self):
"""
        Invoke prepend() with a bin that is None
"""
key = ('test', 'demo', 1)
try:
TestPrepend.client.prepend(key, None, "str")
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Bin name should be of type string"
def test_prepend_unicode_string(self):
"""
Invoke prepend() with unicode string
"""
key = ('test', 'demo', 1)
TestPrepend.client.prepend(key, "name", u"age")
key, _, bins = TestPrepend.client.get(key)
assert bins['name'] == 'agename1'
def test_prepend_unicode_bin_name(self):
"""
        Invoke prepend() with a unicode bin name
"""
key = ('test', 'demo', 1)
TestPrepend.client.prepend(key, u"add", u"address")
key, _, bins = TestPrepend.client.get(key)
assert bins['add'] == 'address'
def test_prepend_with_correct_parameters_without_connection(self):
"""
Invoke prepend() with correct parameters without connection
"""
config = {'hosts': [('127.0.0.1', 3000)]}
client1 = aerospike.client(config)
key = ('test', 'demo', 1)
try:
client1.prepend(key, "name", "str")
except e.ClusterError as exception:
assert exception.code == 11
assert exception.msg == 'No connection to aerospike cluster'
def test_prepend_with_bytearray(self):
"""
Invoke prepend() with bytearray value
"""
key = ('test', 'demo', 'bytearray_key')
TestPrepend.client.prepend(
key, "bytearray_bin", bytearray("abc", "utf-8"))
(key, _, bins) = TestPrepend.client.get(key)
assert bins == {
'bytearray_bin': bytearray("abcasd;as[d'as;d", "utf-8")}
def test_prepend_with_bytearray_new_key(self):
"""
        Invoke prepend() with a bytearray value on a new (non-existent) record
"""
key = ('test', 'demo', 'bytearray_new')
TestPrepend.client.prepend(
key, "bytearray_bin", bytearray("asd;as[d'as;d", "utf-8"))
(key, _, bins) = TestPrepend.client.get(key)
assert bins == {'bytearray_bin': bytearray("asd;as[d'as;d", "utf-8")}
TestPrepend.client.remove(key)
|
barbican/tests/model/repositories/test_repositories_secret_metadata.py | mail2nsrajesh/barbican | 177 | 11066204 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from barbican.model import models
from barbican.model import repositories
from barbican.tests import database_utils
from barbican.tests import utils
@utils.parameterized_test_case
class WhenTestingSecretMetadataRepository(database_utils.RepositoryTestCase):
def setUp(self):
super(WhenTestingSecretMetadataRepository, self).setUp()
self.repo = repositories.SecretUserMetadatumRepo()
self.test_metadata = {
"dog": "poodle",
"cat": "siamese"
}
def _create_base_secret(self, project_id=None):
# Setup the secret and needed base relationship
secret_repo = repositories.get_secret_repository()
session = secret_repo.get_session()
        if project_id is None:  # don't re-create the project if it was created earlier
project = models.Project()
project.external_id = "keystone_project_id"
project.save(session=session)
project_id = project.id
secret_model = models.Secret()
secret_model.project_id = project_id
secret = secret_repo.create_from(secret_model, session=session)
secret.save(session=session)
session.commit()
return secret
def test_create_and_get_metadata_for_secret(self):
secret = self._create_base_secret()
self.repo.create_replace_user_metadata(secret.id,
self.test_metadata)
metadata = self.repo.get_metadata_for_secret(secret.id)
self.assertEqual(self.test_metadata, metadata)
def test_get_metadata_invalid_secret(self):
metadata = self.repo.get_metadata_for_secret("invalid_id")
self.assertEqual({}, metadata)
def test_create_user_metadatum(self):
secret = self._create_base_secret()
self.repo.create_replace_user_metadata(secret.id,
self.test_metadata)
# adds a new key
self.repo.create_replace_user_metadatum(secret.id,
'lizard',
'green anole')
self.test_metadata['lizard'] = 'green anole'
metadata = self.repo.get_metadata_for_secret(secret.id)
self.assertEqual(self.test_metadata, metadata)
def test_replace_user_metadatum(self):
secret = self._create_base_secret()
self.repo.create_replace_user_metadata(secret.id,
self.test_metadata)
# updates existing key
self.repo.create_replace_user_metadatum(secret.id,
'dog',
'rat terrier')
self.test_metadata['dog'] = 'rat terrier'
metadata = self.repo.get_metadata_for_secret(secret.id)
self.assertEqual(self.test_metadata, metadata)
def test_delete_user_metadatum(self):
secret = self._create_base_secret()
self.repo.create_replace_user_metadata(secret.id,
self.test_metadata)
# deletes existing key
self.repo.delete_metadatum(secret.id,
'cat')
del self.test_metadata['cat']
metadata = self.repo.get_metadata_for_secret(secret.id)
self.assertEqual(self.test_metadata, metadata)
def test_delete_secret_deletes_secret_metadata(self):
secret = self._create_base_secret()
self.repo.create_replace_user_metadata(secret.id,
self.test_metadata)
metadata = self.repo.get_metadata_for_secret(secret.id)
self.assertEqual(self.test_metadata, metadata)
# deletes existing secret
secret.delete()
metadata = self.repo.get_metadata_for_secret(secret.id)
self.assertEqual({}, metadata)
|
tests/validators/test_number_range.py | Ennkua/wtforms | 1,197 | 11066205 | import decimal
import pytest
from wtforms.validators import NumberRange
from wtforms.validators import ValidationError
@pytest.mark.parametrize(
"min_v, max_v, test_v", [(5, 10, 7), (5, None, 7), (None, 100, 70)]
)
def test_number_range_passes(min_v, max_v, test_v, dummy_form, dummy_field):
"""
It should pass if the test_v is between min_v and max_v
"""
dummy_field.data = test_v
validator = NumberRange(min_v, max_v)
validator(dummy_form, dummy_field)
@pytest.mark.parametrize(
"min_v, max_v, test_v",
[
(5, 10, None),
(5, 10, 0),
(5, 10, 12),
(5, 10, -5),
(5, None, 4),
(None, 100, 500),
],
)
def test_number_range_raises(min_v, max_v, test_v, dummy_form, dummy_field):
"""
It should raise ValidationError if the test_v is not between min_v and max_v
"""
dummy_field.data = test_v
validator = NumberRange(min_v, max_v)
with pytest.raises(ValidationError):
validator(dummy_form, dummy_field)
@pytest.mark.parametrize("nan", [float("NaN"), decimal.Decimal("NaN")])
def test_number_range_nan(nan, dummy_form, dummy_field):
validator = NumberRange(0, 10)
dummy_field.data = nan
with pytest.raises(ValidationError):
validator(dummy_form, dummy_field)
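# A minimal usage sketch (not part of this test module): how NumberRange is
# typically attached to a form field in application code. The form and field
# names below are hypothetical.
#
#   from wtforms import Form, IntegerField
#   from wtforms.validators import NumberRange
#
#   class AgeForm(Form):
#       age = IntegerField("age", validators=[NumberRange(min=0, max=120)])
#
#   form = AgeForm(age=42)
#   assert form.validate()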
|
fpga/lib/pcie/example/ExaNIC_X10/fpga_axi/tb/fpga_core/test_fpga_core.py | mfkiwl/corundum-fpga-100g-Ethernet | 447 | 11066213 | """
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.axi import AxiStreamBus
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePcieDevice
from cocotbext.axi.utils import hexdump_str
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.dev = UltraScalePcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=8,
user_clk_frequency=250e6,
alignment="dword",
straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk,
user_reset=dut.rst,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
# pcie_rq_seq_num=dut.s_axis_rq_seq_num,
# pcie_rq_seq_num_vld=dut.s_axis_rq_seq_num_valid,
# pcie_rq_tag
# pcie_rq_tag_av
# pcie_rq_tag_vld
# Requester Completion Interface
rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),
# Completer reQuest Interface
cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
# cfg_fc_ph=dut.cfg_fc_ph,
# cfg_fc_pd=dut.cfg_fc_pd,
# cfg_fc_nph=dut.cfg_fc_nph,
# cfg_fc_npd=dut.cfg_fc_npd,
# cfg_fc_cplh=dut.cfg_fc_cplh,
# cfg_fc_cpld=dut.cfg_fc_cpld,
# cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_vf_enable=dut.cfg_interrupt_msi_vf_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**22)
self.dev.functions[0].configure_bar(2, 2**22)
async def init(self):
await FallingEdge(self.dut.rst)
await Timer(100, 'ns')
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
@cocotb.test()
async def run_test(dut):
tb = TB(dut)
await tb.init()
mem_base, mem_data = tb.rc.alloc_region(16*1024*1024)
dev_pf0_bar0 = tb.rc.tree[0][0].bar_addr[0]
dev_pf0_bar2 = tb.rc.tree[0][0].bar_addr[2]
tb.log.info("Test memory write to BAR 2")
await tb.rc.mem_write(dev_pf0_bar2, b'\x11\x22\x33\x44')
await Timer(100, 'ns')
tb.log.info("Test memory read from BAR 2")
val = await tb.rc.mem_read(dev_pf0_bar2, 4, 1000)
tb.log.info("Read data: %s", val)
assert val == b'\x11\x22\x33\x44'
tb.log.info("Test DMA")
# write packet data
mem_data[0:1024] = bytearray([x % 256 for x in range(1024)])
# enable DMA
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100000, 1)
# write pcie read descriptor
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100100, (mem_base+0x0000) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100104, (mem_base+0x0000 >> 32) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100108, (0x100) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x10010C, (0x100 >> 32) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100110, 0x400)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100114, 0xAA)
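    # Note on the offsets used above (inferred from this example design, not a
    # documented register map): relative to BAR0 + 0x100100 for the read-DMA
    # descriptor block, +0x00/+0x04 hold the PCIe address (low/high),
    # +0x08/+0x0C the AXI RAM address (low/high), +0x10 the length in bytes
    # (0x400 = 1 KiB) and +0x14 the tag; completion sets bit 31 alongside the
    # tag in the status register at +0x18 (checked below). The write-DMA block
    # at BAR0 + 0x100200 mirrors this layout.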
await Timer(2000, 'ns')
# read status
val = await tb.rc.mem_read_dword(dev_pf0_bar0+0x100118)
tb.log.info("Status: 0x%x", val)
assert val == 0x800000AA
# write pcie write descriptor
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100200, (mem_base+0x1000) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100204, (mem_base+0x1000 >> 32) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100208, (0x100) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x10020C, (0x100 >> 32) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100210, 0x400)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x100214, 0x55)
await Timer(2000, 'ns')
# read status
val = await tb.rc.mem_read_dword(dev_pf0_bar0+0x100218)
tb.log.info("Status: 0x%x", val)
assert val == 0x80000055
tb.log.info("%s", hexdump_str(mem_data, 0x1000, 64))
assert mem_data[0:1024] == mem_data[0x1000:0x1000+1024]
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "axi_ram.v"),
os.path.join(rtl_dir, "axis_register.v"),
os.path.join(pcie_rtl_dir, "axis_arb_mux.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axi_dma.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axi_dma_rd.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axi_dma_wr.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axi_master.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axi_master_rd.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axi_master_wr.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axis_cq_demux.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "arbiter.v"),
os.path.join(pcie_rtl_dir, "priority_encoder.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 256
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 60
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 85
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33
parameters['RQ_SEQ_NUM_WIDTH'] = 4
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
|
pyamg/gallery/diffusion.py | nicknytko/pyamg | 371 | 11066224 |
"""Generate a diffusion stencil.
Supports isotropic diffusion (FE,FD), anisotropic diffusion (FE, FD), and
rotated anisotropic diffusion (FD).
The stencils include redundancy to maintain readability for simple cases (e.g.
isotropic diffusion).
"""
# pylint: disable=redefined-builtin
import numpy as np
def diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE'):
"""Rotated Anisotropic diffusion in 2d of the form.
-div Q A Q^T grad u
Q = [cos(theta) -sin(theta)]
[sin(theta) cos(theta)]
A = [1 0 ]
[0 eps ]
Parameters
----------
epsilon : float, optional
Anisotropic diffusion coefficient: -div A grad u,
where A = [1 0; 0 epsilon]. The default is isotropic, epsilon=1.0
theta : float, optional
Rotation angle `theta` in radians defines -div Q A Q^T grad,
where Q = [cos(`theta`) -sin(`theta`); sin(`theta`) cos(`theta`)].
type : {'FE','FD'}
Specifies the discretization as Q1 finite element (FE) or 2nd order
finite difference (FD)
The default is `theta` = 0.0
Returns
-------
stencil : numpy array
A 3x3 diffusion stencil
See Also
--------
stencil_grid, poisson
Notes
-----
Not all combinations are supported.
Examples
--------
>>> import scipy as sp
>>> from pyamg.gallery.diffusion import diffusion_stencil_2d
>>> sten = diffusion_stencil_2d(epsilon=0.0001,theta=sp.pi/6,type='FD')
>>> print(sten)
[[-0.2164847 -0.750025 0.2164847]
[-0.250075 2.0002 -0.250075 ]
[ 0.2164847 -0.750025 -0.2164847]]
"""
eps = float(epsilon) # for brevity
theta = float(theta)
C = np.cos(theta)
S = np.sin(theta)
CS = C*S
CC = C**2
SS = S**2
if type == 'FE':
# FE approximation to::
# - (eps c^2 + s^2) u_xx +
# -2(eps - 1) c s u_xy +
# - ( c^2 + eps s^2) u_yy
# [ -c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps,
# 2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
# -c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps]
# [-4*c^2*eps-4*s^2+2*c^2+2*s^2*eps,
# 8*c^2*eps+8*s^2+8*c^2+8*s^2*eps,
# -4*c^2*eps-4*s^2+2*c^2+2*s^2*eps]
# [-c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps,
# 2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
# -c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps]
# c = cos(theta)
# s = sin(theta)
#
a = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (3*eps - 3)*CS
b = (2*eps - 4)*CC + (-4*eps + 2)*SS
c = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (-3*eps + 3)*CS
d = (-4*eps + 2)*CC + (2*eps - 4)*SS
e = (8*eps + 8)*CC + (8*eps + 8)*SS
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]]) / 6.0
elif type == 'FD':
# FD approximation to:
# - (eps c^2 + s^2) u_xx +
# -2(eps - 1) c s u_xy +
# - ( c^2 + eps s^2) u_yy
# c = cos(theta)
# s = sin(theta)
# A = [ 1/2(eps - 1) c s -(c^2 + eps s^2) -1/2(eps - 1) c s ]
# [ ]
# [ -(eps c^2 + s^2) 2 (eps + 1) -(eps c^2 + s^2) ]
# [ ]
# [ -1/2(eps - 1) c s -(c^2 + eps s^2) 1/2(eps - 1) c s ]
#
a = 0.5*(eps - 1)*CS
b = -(eps*SS + CC)
c = -a
d = -(eps*CC + SS)
e = 2.0*(eps + 1)
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]])
return stencil
def _symbolic_rotation_helper():
"""Use SymPy to generate the 3D matrices for diffusion_stencil_3d."""
# pylint: disable=import-error,import-outside-toplevel
from sympy import symbols, Matrix
cpsi, spsi = symbols('cpsi, spsi')
cth, sth = symbols('cth, sth')
cphi, sphi = symbols('cphi, sphi')
Rpsi = Matrix([[cpsi, spsi, 0], [-spsi, cpsi, 0], [0, 0, 1]])
Rth = Matrix([[1, 0, 0], [0, cth, sth], [0, -sth, cth]])
Rphi = Matrix([[cphi, sphi, 0], [-sphi, cphi, 0], [0, 0, 1]])
Q = Rpsi * Rth * Rphi
epsy, epsz = symbols('epsy, epsz')
A = Matrix([[1, 0, 0], [0, epsy, 0], [0, 0, epsz]])
D = Q * A * Q.T
for i in range(3):
for j in range(3):
print(f'D[{i}, {j}] = {D[i, j]}')
def _symbolic_product_helper():
"""Use SymPy to generate the 3D products for diffusion_stencil_3d."""
# pylint: disable=import-error,import-outside-toplevel
from sympy import symbols, Matrix
D11, D12, D13, D21, D22, D23, D31, D32, D33 =\
symbols('D11, D12, D13, D21, D22, D23, D31, D32, D33')
D = Matrix([[D11, D12, D13], [D21, D22, D23], [D31, D32, D33]])
grad = Matrix([['dx', 'dy', 'dz']]).T
div = grad.T
a = div * D * grad
print(a[0])
def diffusion_stencil_3d(epsilony=1.0, epsilonz=1.0, theta=0.0, phi=0.0,
psi=0.0, type='FD'):
"""Rotated Anisotropic diffusion in 3d of the form.
-div Q A Q^T grad u
Q = Rpsi Rtheta Rphi
Rpsi = [ c s 0 ]
[-s c 0 ]
[ 0 0 1 ]
c = cos(psi)
s = sin(psi)
Rtheta = [ 1 0 0 ]
[ 0 c s ]
[ 0 -s c ]
c = cos(theta)
s = sin(theta)
Rphi = [ c s 0 ]
[-s c 0 ]
[ 0 0 1 ]
c = cos(phi)
s = sin(phi)
Here Euler Angles are used:
http://en.wikipedia.org/wiki/Euler_angles
This results in
Q = [ cphi*cpsi - cth*sphi*spsi, cpsi*sphi + cphi*cth*spsi, spsi*sth]
[ - cphi*spsi - cpsi*cth*sphi, cphi*cpsi*cth - sphi*spsi, cpsi*sth]
[ sphi*sth, -cphi*sth, cth]
A = [1 0 ]
[0 epsy ]
[0 0 epsz]
D = Q A Q^T
Parameters
----------
epsilony : float, optional
Anisotropic diffusion coefficient in the y-direction
where A = [1 0 0; 0 epsilon_y 0; 0 0 epsilon_z]. The default is
isotropic, epsilon=1.0
epsilonz : float, optional
Anisotropic diffusion coefficient in the z-direction
where A = [1 0 0; 0 epsilon_y 0; 0 0 epsilon_z]. The default is
isotropic, epsilon=1.0
theta : float, optional
Euler rotation angle `theta` in radians. The default is 0.0.
phi : float, optional
Euler rotation angle `phi` in radians. The default is 0.0.
psi : float, optional
Euler rotation angle `psi` in radians. The default is 0.0.
type : {'FE','FD'}
Specifies the discretization as Q1 finite element (FE) or 2nd order
finite difference (FD)
Returns
-------
stencil : numpy array
        A 3x3x3 diffusion stencil
See Also
--------
stencil_grid, poisson, _symbolic_rotation_helper, _symbolic_product_helper
Notes
-----
Not all combinations are supported.
Examples
--------
>>> import scipy as sp
>>> from pyamg.gallery.diffusion import diffusion_stencil_2d
>>> sten = diffusion_stencil_2d(epsilon=0.0001,theta=sp.pi/6,type='FD')
>>> print(sten)
[[-0.2164847 -0.750025 0.2164847]
[-0.250075 2.0002 -0.250075 ]
[ 0.2164847 -0.750025 -0.2164847]]
"""
epsy = float(epsilony) # for brevity
epsz = float(epsilonz) # for brevity
theta = float(theta)
phi = float(phi)
psi = float(psi)
D = np.zeros((3, 3))
cphi = np.cos(phi)
sphi = np.sin(phi)
cth = np.cos(theta)
sth = np.sin(theta)
cpsi = np.cos(psi)
spsi = np.sin(psi)
# from _symbolic_rotation_helper
D[0, 0] = epsy*(cphi*cth*spsi + cpsi*sphi)**2 + epsz*spsi**2*sth**2 +\
(cphi*cpsi - cth*sphi*spsi)**2
D[0, 1] = cpsi*epsz*spsi*sth**2 +\
epsy*(cphi*cpsi*cth - sphi*spsi)*(cphi*cth*spsi + cpsi*sphi) +\
(cphi*cpsi - cth*sphi*spsi)*(-cphi*spsi - cpsi*cth*sphi)
D[0, 2] = -cphi*epsy*sth*(cphi*cth*spsi + cpsi*sphi) +\
cth*epsz*spsi*sth + sphi*sth*(cphi*cpsi - cth*sphi*spsi)
D[1, 0] = cpsi*epsz*spsi*sth**2 +\
epsy*(cphi*cpsi*cth - sphi*spsi)*(cphi*cth*spsi + cpsi*sphi) +\
(cphi*cpsi - cth*sphi*spsi)*(-cphi*spsi - cpsi*cth*sphi)
D[1, 1] = cpsi**2*epsz*sth**2 + epsy*(cphi*cpsi*cth - sphi*spsi)**2 +\
(-cphi*spsi - cpsi*cth*sphi)**2
D[1, 2] = -cphi*epsy*sth*(cphi*cpsi*cth - sphi*spsi) +\
cpsi*cth*epsz*sth + sphi*sth*(-cphi*spsi - cpsi*cth*sphi)
D[2, 0] = -cphi*epsy*sth*(cphi*cth*spsi + cpsi*sphi) + cth*epsz*spsi*sth +\
sphi*sth*(cphi*cpsi - cth*sphi*spsi)
D[2, 1] = -cphi*epsy*sth*(cphi*cpsi*cth - sphi*spsi) + cpsi*cth*epsz*sth +\
sphi*sth*(-cphi*spsi - cpsi*cth*sphi)
D[2, 2] = cphi**2*epsy*sth**2 + cth**2*epsz + sphi**2*sth**2
stencil = np.zeros((3, 3, 3))
if type == 'FE':
raise NotImplementedError('FE not implemented yet')
if type == 'FD':
# from _symbolic_product_helper
# dx*(D11*dx + D21*dy + D31*dz) +
# dy*(D12*dx + D22*dy + D32*dz) +
# dz*(D13*dx + D23*dy + D33*dz)
#
# D00*dxx +
# (D10+D01)*dxy +
# (D20+D02)*dxz +
# D11*dyy +
# (D21+D12)*dyz +
# D22*dzz
i, j, k = (1, 1, 1)
# dxx
stencil[[i-1, i, i+1], j, k] += np.array([-1, 2, -1]) * D[0, 0]
# dyy
stencil[i, [j-1, j, j+1], k] += np.array([-1, 2, -1]) * D[1, 1]
# dzz
stencil[i, j, [k-1, k, k+1]] += np.array([-1, 2, -1]) * D[2, 2]
L = np.array([-1, -1, 1, 1])
M = np.array([-1, 1, -1, 1])
# dxy
stencil[i + L, j + M, k] \
+= 0.25 * np.array([1, -1, -1, 1]) * (D[1, 0] + D[0, 1])
# dxz
stencil[i + L, j, k + M] \
+= 0.25 * np.array([1, -1, -1, 1]) * (D[2, 0] + D[0, 2])
# dyz
stencil[i, j + L, k + M] \
+= 0.25 * np.array([1, -1, -1, 1]) * (D[2, 1] + D[1, 2])
return stencil
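# A brief usage sketch (illustrative grid size; not part of this module): a
# generated stencil is typically turned into a sparse operator with
# pyamg.gallery.stencil_grid, as referenced in the See Also sections above.
#
#   from pyamg.gallery import stencil_grid
#   sten = diffusion_stencil_3d(epsilony=0.1, epsilonz=0.01, type='FD')
#   A = stencil_grid(sten, (32, 32, 32), format='csr')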
|
hyperopt/spark.py | Loquats/hyperopt | 6,071 | 11066263 |
import copy
import threading
import time
import timeit
import traceback
from hyperopt import base, fmin, Trials
from hyperopt.base import validate_timeout, validate_loss_threshold
from hyperopt.utils import coarse_utcnow, _get_logger, _get_random_id
from py4j.clientserver import ClientServer
try:
from pyspark.sql import SparkSession
from pyspark.util import VersionUtils
import pyspark
_have_spark = True
_spark_major_minor_version = VersionUtils.majorMinorVersion(pyspark.__version__)
except ImportError as e:
_have_spark = False
_spark_major_minor_version = None
logger = _get_logger("hyperopt-spark")
class SparkTrials(Trials):
"""
Implementation of hyperopt.Trials supporting
distributed execution using Apache Spark clusters.
This requires fmin to be run on a Spark cluster.
Plugging SparkTrials into hyperopt.fmin() allows hyperopt
to send model training and evaluation tasks to Spark workers,
parallelizing hyperparameter search.
Each trial (set of hyperparameter values) is handled within
a single Spark task; i.e., each model will be fit and evaluated
on a single worker machine. Trials are run asynchronously.
See hyperopt.Trials docs for general information about Trials.
The fields we store in our trial docs match the base Trials class. The fields include:
- 'tid': trial ID
- 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc.
- 'result': evaluation result for completed trial run
- 'refresh_time': timestamp for last status update
- 'misc': includes:
- 'error': (error type, error message)
- 'book_time': timestamp for trial run start
"""
asynchronous = True
# Hard cap on the number of concurrent hyperopt tasks (Spark jobs) to run. Set at 128.
MAX_CONCURRENT_JOBS_ALLOWED = 128
def __init__(
self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None
):
"""
:param parallelism: Maximum number of parallel trials to run,
i.e., maximum number of concurrent Spark tasks.
The actual parallelism is subject to available Spark task slots at
runtime.
If set to None (default) or a non-positive value, this will be set to
Spark's default parallelism or `1`.
We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
:param timeout: Maximum time (in seconds) which fmin is allowed to take.
If this timeout is hit, then fmin will cancel running and proposed trials.
It will retain all completed trial runs and return the best result found
so far.
:param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
to use an existing SparkSession or create a new one. SparkSession is
the entry point for various facilities provided by Spark. For more
information, visit the documentation for PySpark.
"""
super().__init__(exp_key=None, refresh=False)
if not _have_spark:
raise Exception(
"SparkTrials cannot import pyspark classes. Make sure that PySpark "
"is available in your environment. E.g., try running 'import pyspark'"
)
validate_timeout(timeout)
validate_loss_threshold(loss_threshold)
self._spark = (
SparkSession.builder.getOrCreate()
if spark_session is None
else spark_session
)
self._spark_context = self._spark.sparkContext
self._spark_pinned_threads_enabled = isinstance(
self._spark_context._gateway, ClientServer
)
# The feature to support controlling jobGroupIds is in SPARK-22340
self._spark_supports_job_cancelling = (
self._spark_pinned_threads_enabled
or hasattr(self._spark_context.parallelize([1]), "collectWithJobGroup")
)
spark_default_parallelism = self._spark_context.defaultParallelism
self.parallelism = self._decide_parallelism(
requested_parallelism=parallelism,
spark_default_parallelism=spark_default_parallelism,
)
if not self._spark_supports_job_cancelling and timeout is not None:
logger.warning(
"SparkTrials was constructed with a timeout specified, but this Apache "
"Spark version does not support job group-based cancellation. The "
"timeout will be respected when starting new Spark jobs, but "
"SparkTrials will not be able to cancel running Spark jobs which exceed"
" the timeout."
)
self.timeout = timeout
self.loss_threshold = loss_threshold
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
self.refresh()
@staticmethod
def _decide_parallelism(requested_parallelism, spark_default_parallelism):
"""
Given the requested parallelism, return the max parallelism SparkTrials will actually use.
See the docstring for `parallelism` in the constructor for expected behavior.
"""
if requested_parallelism is None or requested_parallelism <= 0:
parallelism = max(spark_default_parallelism, 1)
logger.warning(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d}), which is Spark's default parallelism ({s}), "
"or 1, whichever is greater. "
"We recommend setting parallelism explicitly to a positive value because "
"the total of Spark task slots is subject to cluster sizing.".format(
d=parallelism, s=spark_default_parallelism
)
)
else:
parallelism = requested_parallelism
if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED:
logger.warning(
"Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format(
p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
)
)
parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
return parallelism
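        # Illustrative outcomes of the rule above (values are hypothetical):
        #   _decide_parallelism(None, 8)  -> 8    (falls back to Spark's default)
        #   _decide_parallelism(0, 8)     -> 8
        #   _decide_parallelism(16, 8)    -> 16   (explicit request wins)
        #   _decide_parallelism(1000, 8)  -> 128  (capped at MAX_CONCURRENT_JOBS_ALLOWED)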
def count_successful_trials(self):
"""
Returns the current number of trials which ran successfully
"""
return self.count_by_state_unsynced(base.JOB_STATE_DONE)
def count_failed_trials(self):
"""
Returns the current number of trial runs which failed
"""
return self.count_by_state_unsynced(base.JOB_STATE_ERROR)
def count_cancelled_trials(self):
"""
Returns the current number of cancelled trial runs.
This covers trials which are cancelled from exceeding the timeout.
"""
return self.count_by_state_unsynced(base.JOB_STATE_CANCEL)
def count_total_trials(self):
"""
Returns the current number of all successful, failed, and cancelled trial runs
"""
total_states = [
base.JOB_STATE_DONE,
base.JOB_STATE_ERROR,
base.JOB_STATE_CANCEL,
]
return self.count_by_state_unsynced(total_states)
def delete_all(self):
"""
Reset the Trials to init state
"""
super().delete_all()
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
def trial_attachments(self, trial):
raise NotImplementedError("SparkTrials does not support trial attachments.")
def fmin(
self,
fn,
space,
algo,
max_evals,
timeout,
loss_threshold,
max_queue_len,
rstate,
verbose,
pass_expr_memo_ctrl,
catch_eval_exceptions,
return_argmin,
show_progressbar,
early_stop_fn,
trials_save_file="",
):
"""
This should not be called directly but is called via :func:`hyperopt.fmin`
Refer to :func:`hyperopt.fmin` for docs on each argument
"""
if timeout is not None:
if self.timeout is not None:
logger.warning(
"Timeout param was defined in Trials object, ignoring fmin definition"
)
else:
validate_timeout(timeout)
self.timeout = timeout
if loss_threshold is not None:
validate_loss_threshold(loss_threshold)
self.loss_threshold = loss_threshold
assert (
not pass_expr_memo_ctrl
), "SparkTrials does not support `pass_expr_memo_ctrl`"
assert (
not catch_eval_exceptions
), "SparkTrials does not support `catch_eval_exceptions`"
state = _SparkFMinState(self._spark, fn, space, self)
# Will launch a dispatcher thread which runs each trial task as one spark job.
state.launch_dispatcher()
try:
res = fmin(
fn,
space,
algo,
max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
max_queue_len=max_queue_len,
trials=self,
allow_trials_fmin=False, # -- prevent recursion
rstate=rstate,
pass_expr_memo_ctrl=None, # not supported
catch_eval_exceptions=catch_eval_exceptions,
verbose=verbose,
return_argmin=return_argmin,
points_to_evaluate=None, # not supported
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file="", # not supported
)
except BaseException as e:
logger.debug("fmin thread exits with an exception raised.")
raise e
else:
logger.debug("fmin thread exits normally.")
return res
finally:
state.wait_for_all_threads()
logger.info(
"Total Trials: {t}: {s} succeeded, {f} failed, {c} cancelled.".format(
t=self.count_total_trials(),
s=self.count_successful_trials(),
f=self.count_failed_trials(),
c=self.count_cancelled_trials(),
)
)
class _SparkFMinState:
"""
Class for managing threads which run concurrent Spark jobs.
This maintains a primary dispatcher thread, plus 1 thread per Hyperopt trial.
Each trial's thread runs 1 Spark job with 1 task.
"""
def __init__(self, spark, eval_function, space, trials):
self.spark = spark
self.eval_function = eval_function
self.space = space
self.trials = trials
self._fmin_done = False
self._dispatcher_thread = None
self._task_threads = set()
if self.trials._spark_supports_job_cancelling:
spark_context = spark.sparkContext
self._job_group_id = spark_context.getLocalProperty("spark.jobGroup.id")
self._job_desc = spark_context.getLocalProperty("spark.job.description")
interrupt_on_cancel = spark_context.getLocalProperty(
"spark.job.interruptOnCancel"
)
if interrupt_on_cancel is None:
self._job_interrupt_on_cancel = False
else:
self._job_interrupt_on_cancel = "true" == interrupt_on_cancel.lower()
# In certain Spark deployments, the local property "spark.jobGroup.id"
# value is None, so we create one to use for SparkTrials.
if self._job_group_id is None:
self._job_group_id = "Hyperopt_SparkTrials_" + _get_random_id()
if self._job_desc is None:
self._job_desc = "Trial evaluation jobs launched by hyperopt fmin"
logger.debug(
"Job group id: {g}, job desc: {d}, job interrupt on cancel: {i}".format(
g=self._job_group_id,
d=self._job_desc,
i=self._job_interrupt_on_cancel,
)
)
def running_trial_count(self):
return self.trials.count_by_state_unsynced(base.JOB_STATE_RUNNING)
@staticmethod
def _begin_trial_run(trial):
trial["state"] = base.JOB_STATE_RUNNING
now = coarse_utcnow()
trial["book_time"] = now
trial["refresh_time"] = now
logger.debug("trial task {tid} started".format(tid=trial["tid"]))
@staticmethod
def _get_traceback(err):
return err.__dict__.get("_tb_str")
def _finish_trial_run(self, is_success, is_cancelled, trial, data):
"""
Call this method when a trial evaluation finishes. It will save results to the
trial object and update task counters.
:param is_success: whether the trial succeeded
:param is_cancelled: whether the trial was cancelled
:param data: If the trial succeeded, this is the return value from the trial
task function. Otherwise, this is the exception raised when running the trial
task.
"""
if is_cancelled:
logger.debug(
"trial task {tid} cancelled, exception is {e}".format(
tid=trial["tid"], e=str(data)
)
)
self._write_cancellation_back(trial, e=data)
elif is_success:
logger.debug(
"trial task {tid} succeeded, result is {r}".format(
tid=trial["tid"], r=data
)
)
self._write_result_back(trial, result=data)
else:
logger.error(
"trial task {tid} failed, exception is {e}.\n {tb}".format(
tid=trial["tid"], e=str(data), tb=self._get_traceback(data)
)
)
self._write_exception_back(trial, e=data)
def launch_dispatcher(self):
def run_dispatcher():
start_time = timeit.default_timer()
while not self._fmin_done:
new_tasks = self._poll_new_tasks()
for trial in new_tasks:
self._run_trial_async(trial)
cur_time = timeit.default_timer()
elapsed_time = cur_time - start_time
# In the future, timeout checking logic could be moved to `fmin`.
# For now, timeouts are specific to SparkTrials.
# When a timeout happens:
# - Set `trials._fmin_cancelled` flag to be True.
# - FMinIter checks this flag and exits if it is set to True.
if (
self.trials.timeout is not None
and elapsed_time > self.trials.timeout
and not self.trials._fmin_cancelled
):
self.trials._fmin_cancelled = True
self.trials._fmin_cancelled_reason = "fmin run timeout"
self._cancel_running_trials()
logger.warning(
"fmin cancelled because of "
+ self.trials._fmin_cancelled_reason
)
time.sleep(1)
if self.trials._fmin_cancelled:
# Because cancelling fmin triggered, warn that the dispatcher won't launch
# more trial tasks.
logger.warning("fmin is cancelled, so new trials will not be launched.")
logger.debug("dispatcher thread exits normally.")
self._dispatcher_thread = threading.Thread(target=run_dispatcher)
self._dispatcher_thread.setDaemon(True)
self._dispatcher_thread.start()
@staticmethod
def _get_spec_from_trial(trial):
return base.spec_from_misc(trial["misc"])
@staticmethod
def _write_result_back(trial, result):
trial["state"] = base.JOB_STATE_DONE
trial["result"] = result
trial["refresh_time"] = coarse_utcnow()
def _write_exception_back(self, trial, e):
trial["state"] = base.JOB_STATE_ERROR
trial["misc"]["error"] = (str(type(e)), self._get_traceback(e))
trial["refresh_time"] = coarse_utcnow()
@staticmethod
def _write_cancellation_back(trial, e):
trial["state"] = base.JOB_STATE_CANCEL
trial["misc"]["error"] = (str(type(e)), str(e))
trial["refresh_time"] = coarse_utcnow()
def _run_trial_async(self, trial):
def finish_trial_run(result_or_e):
if not isinstance(result_or_e, BaseException):
self._finish_trial_run(
is_success=True,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread exits normally and writes results "
"back correctly.".format(tid=trial["tid"])
)
else:
self._finish_trial_run(
is_success=False,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread catches an exception and writes the "
"info back correctly.".format(tid=trial["tid"])
)
def run_task_thread():
local_eval_function, local_space = self.eval_function, self.space
params = self._get_spec_from_trial(trial)
def run_task_on_executor(_):
domain = base.Domain(
local_eval_function, local_space, pass_expr_memo_ctrl=None
)
try:
result = domain.evaluate(
params, ctrl=None, attach_attachments=False
)
yield result
except BaseException as e:
# Because the traceback is not pickable, we need format it and pass it back
# to driver
_traceback_string = traceback.format_exc()
logger.error(_traceback_string)
e._tb_str = _traceback_string
yield e
try:
worker_rdd = self.spark.sparkContext.parallelize([0], 1)
if self.trials._spark_supports_job_cancelling:
if self.trials._spark_pinned_threads_enabled:
spark_context = self.spark.sparkContext
spark_context.setLocalProperty(
"spark.jobGroup.id", self._job_group_id
)
spark_context.setLocalProperty(
"spark.job.description", self._job_desc
)
spark_context.setLocalProperty(
"spark.job.interruptOnCancel",
str(self._job_interrupt_on_cancel).lower(),
)
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collectWithJobGroup(
self._job_group_id,
self._job_desc,
self._job_interrupt_on_cancel,
)[
0
]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
except BaseException as e:
                # Catch all exceptions here to make the program more robust; there
                # are several possible reasons an exception can be raised at this
                # point, so `except BaseException` is used.
                #
                # If the cancelled flag is set, it means all running tasks need to
                # be cancelled; otherwise it means this task failed.
finish_trial_run(e)
else:
                # Exceptions captured in run_task_on_executor are returned in result_or_e
finish_trial_run(result_or_e)
if self.trials._spark_pinned_threads_enabled:
try:
# pylint: disable=no-name-in-module,import-outside-toplevel
from pyspark import inheritable_thread_target
run_task_thread = inheritable_thread_target(run_task_thread)
except ImportError:
pass
task_thread = threading.Thread(target=run_task_thread)
task_thread.setDaemon(True)
task_thread.start()
self._task_threads.add(task_thread)
def _poll_new_tasks(self):
new_task_list = []
for trial in copy.copy(self.trials.trials):
if trial["state"] == base.JOB_STATE_NEW:
# check parallelism limit
if self.running_trial_count() >= self.trials.parallelism:
break
new_task_list.append(trial)
self._begin_trial_run(trial)
return new_task_list
def _cancel_running_trials(self):
if self.trials._spark_supports_job_cancelling:
logger.debug(
"Cancelling all running jobs in job group {g}".format(
g=self._job_group_id
)
)
self.spark.sparkContext.cancelJobGroup(self._job_group_id)
# Make a copy of trials by slicing
for trial in self.trials.trials[:]:
if trial["state"] in [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]:
trial["state"] = base.JOB_STATE_CANCEL
else:
logger.info(
"Because the current Apache PySpark version does not support "
"cancelling jobs by job group ID, SparkTrials will block until all of "
"its running Spark jobs finish."
)
def wait_for_all_threads(self):
"""
Wait for the dispatcher and worker threads to finish.
"""
self._fmin_done = True
self._dispatcher_thread.join()
self._dispatcher_thread = None
for task_thread in self._task_threads:
task_thread.join()
self._task_threads.clear()
|
tests/parsers/presets.py | pyllyukko/plaso | 1,253 | 11066269 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for parser and parser plugin presets."""
import unittest
from plaso.containers import artifacts
from plaso.parsers import presets
from tests import test_lib as shared_test_lib
class ParserPresetTest(shared_test_lib.BaseTestCase):
"""Tests for the parser and parser plugin preset."""
def testInitialize(self):
"""Tests the __init__ function."""
test_definition = presets.ParserPreset('test', ['parser1', 'parser2'])
self.assertIsNotNone(test_definition)
class ParserPresetsManagerTest(shared_test_lib.BaseTestCase):
"""Tests for the parser and parser plugin presets manager."""
_LINUX_PARSERS = [
'bash_history',
'bencode',
'czip/oxml',
'dockerjson',
'dpkg',
'filestat',
'gdrive_synclog',
'olecf',
'pls_recall',
'popularity_contest',
'selinux',
'sqlite/google_drive',
'sqlite/skype',
'sqlite/zeitgeist',
'syslog',
'systemd_journal',
'utmp',
'vsftpd',
'webhist',
'xchatlog',
'xchatscrollback',
'zsh_extended_history']
_MACOS_PARSERS = [
'asl_log',
'bash_history',
'bencode',
'bsm_log',
'cups_ipp',
'czip/oxml',
'filestat',
'fseventsd',
'gdrive_synclog',
'mac_appfirewall_log',
'mac_keychain',
'mac_securityd',
'macwifi',
'olecf',
'plist',
'sqlite/appusage',
'sqlite/google_drive',
'sqlite/imessage',
'sqlite/ls_quarantine',
'sqlite/mac_document_versions',
'sqlite/mackeeper_cache',
'sqlite/skype',
'syslog',
'utmpx',
'webhist',
'zsh_extended_history']
# TODO add tests for _ReadPresetDefinitionValues
# TODO add tests for _ReadPresetsFromFileObject
def testGetNames(self):
"""Tests the GetNames function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
test_names = list(test_manager.GetNames())
self.assertEqual(len(test_names), 7)
expected_names = sorted([
'android', 'linux', 'macos', 'webhist', 'win7', 'win_gen', 'winxp'])
self.assertEqual(test_names, expected_names)
def testGetParsersByPreset(self):
"""Tests the GetParsersByPreset function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
parser_names = test_manager.GetParsersByPreset('linux')
self.assertEqual(parser_names, self._LINUX_PARSERS)
with self.assertRaises(KeyError):
test_manager.GetParsersByPreset('bogus')
def testGetPresetByName(self):
"""Tests the GetPresetByName function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
test_preset = test_manager.GetPresetByName('linux')
self.assertIsNotNone(test_preset)
self.assertEqual(test_preset.name, 'linux')
self.assertEqual(test_preset.parsers, self._LINUX_PARSERS)
test_preset = test_manager.GetPresetByName('bogus')
self.assertIsNone(test_preset)
def testGetPresetsByOperatingSystem(self):
"""Tests the GetPresetsByOperatingSystem function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
operating_system = artifacts.OperatingSystemArtifact(family='MacOS')
test_presets = test_manager.GetPresetsByOperatingSystem(operating_system)
self.assertEqual(len(test_presets), 1)
self.assertEqual(test_presets[0].name, 'macos')
self.assertEqual(test_presets[0].parsers, self._MACOS_PARSERS)
operating_system = artifacts.OperatingSystemArtifact(family='bogus')
test_presets = test_manager.GetPresetsByOperatingSystem(operating_system)
self.assertEqual(len(test_presets), 0)
def testGetPresetsInformation(self):
"""Tests the GetPresetsInformation function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
parser_presets_information = test_manager.GetPresetsInformation()
self.assertGreaterEqual(len(parser_presets_information), 1)
available_parser_names = [name for name, _ in parser_presets_information]
self.assertIn('linux', available_parser_names)
# TODO add tests for ReadFromFile
if __name__ == '__main__':
unittest.main()
|
picamera/streams.py | takehirokj/picamera | 1,311 | 11066279 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2017 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
from threading import RLock
from collections import deque
from operator import attrgetter
from weakref import ref
from picamera.exc import PiCameraValueError
from picamera.frames import PiVideoFrame, PiVideoFrameType
class BufferIO(io.IOBase):
"""
A stream which uses a :class:`memoryview` for storage.
This is used internally by picamera for capturing directly to an existing
object which supports the buffer protocol (like a numpy array). Because the
underlying storage is fixed in size, the stream also has a fixed size and
will raise an :exc:`IOError` exception if an attempt is made to write
beyond the end of the buffer (though seek beyond the end is supported).
Users should never need this class directly.
"""
__slots__ = ('_buf', '_pos', '_size')
def __init__(self, obj):
self._buf = memoryview(obj)
if self._buf.ndim > 1 or self._buf.format != 'B':
try:
# Py2.7 doesn't have memoryview.cast
self._buf = self._buf.cast('B')
except AttributeError:
raise ValueError(
'buffer object must be one-dimensional and have unsigned '
'byte format ("B")')
self._pos = 0
self._size = self._buf.shape[0]
def close(self):
super(BufferIO, self).close()
try:
self._buf.release()
except AttributeError:
# Py2.7 doesn't have memoryview.release
pass
def _check_open(self):
if self.closed:
raise ValueError('I/O operation on a closed stream')
@property
def size(self):
"""
Return the maximum size of the buffer in bytes.
"""
return self._size
def readable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`read`.
"""
self._check_open()
return True
def writable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`write`.
"""
self._check_open()
return not self._buf.readonly
def seekable(self):
"""
Returns ``True``, indicating the stream supports :meth:`seek` and
:meth:`tell`.
"""
self._check_open()
return True
def getvalue(self):
"""
Return ``bytes`` containing the entire contents of the buffer.
"""
        # BufferIO defines no lock attribute (unlike CircularIO), so return directly
        return self._buf.tobytes()
def tell(self):
"""
Return the current buffer position.
"""
self._check_open()
return self._pos
def seek(self, offset, whence=io.SEEK_SET):
"""
Change the buffer position to the given byte *offset*. *offset* is
interpreted relative to the position indicated by *whence*. Values for
*whence* are:
* ``SEEK_SET`` or ``0`` – start of the buffer (the default); *offset*
should be zero or positive
* ``SEEK_CUR`` or ``1`` – current buffer position; *offset* may be
negative
* ``SEEK_END`` or ``2`` – end of the buffer; *offset* is usually
negative
Return the new absolute position.
"""
self._check_open()
if whence == io.SEEK_CUR:
offset = self._pos + offset
elif whence == io.SEEK_END:
offset = self.size + offset
if offset < 0:
raise ValueError(
'New position is before the start of the stream')
self._pos = offset
return self._pos
def read(self, n=-1):
"""
Read up to *n* bytes from the buffer and return them. As a convenience,
if *n* is unspecified or -1, :meth:`readall` is called. Fewer than *n*
bytes may be returned if there are fewer than *n* bytes from the
current buffer position to the end of the buffer.
If 0 bytes are returned, and *n* was not 0, this indicates end of the
buffer.
"""
self._check_open()
if n < 0:
return self.readall()
elif n == 0:
return b''
else:
result = self._buf[self._pos:self._pos + n].tobytes()
self._pos += len(result)
return result
def readinto(self, b):
"""
Read bytes into a pre-allocated, writable bytes-like object b, and
return the number of bytes read.
"""
self._check_open()
result = max(0, min(len(b), self._size - self._pos))
if result == 0:
return 0
else:
b[:result] = self._buf[self._pos:self._pos + result]
return result
def readall(self):
"""
Read and return all bytes from the buffer until EOF.
"""
return self.read(max(0, self.size - self._pos))
def truncate(self, size=None):
"""
Raises :exc:`NotImplementedError` as the underlying buffer cannot be
resized.
"""
raise NotImplementedError('cannot resize a BufferIO stream')
def write(self, b):
"""
Write the given bytes or bytearray object, *b*, to the underlying
buffer and return the number of bytes written. If the underlying
buffer isn't large enough to contain all the bytes of *b*, as many
bytes as possible will be written before raising :exc:`IOError`.
"""
self._check_open()
if self._buf.readonly:
raise IOError('buffer object is not writeable')
excess = max(0, len(b) - (self.size - self._pos))
result = len(b) - excess
if excess:
self._buf[self._pos:self._pos + result] = b[:-excess]
else:
self._buf[self._pos:self._pos + result] = b
self._pos += result
return result
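# Illustrative sketch (not part of the original module): wrapping a
# pre-allocated buffer-protocol object with BufferIO. The bytearray and sizes
# below are arbitrary choices for the example.
#
#     buf = bytearray(16)
#     stream = BufferIO(buf)
#     stream.write(b'hello')    # returns 5; the bytes land in buf[0:5]
#     stream.seek(0)
#     stream.read(5)            # b'hello'
#     stream.write(b'x' * 32)   # only the remaining 11 bytes fit; returns 11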
class CircularIO(io.IOBase):
"""
A thread-safe stream which uses a ring buffer for storage.
CircularIO provides an in-memory stream similar to the :class:`io.BytesIO`
class. However, unlike :class:`io.BytesIO` its underlying storage is a
`ring buffer`_ with a fixed maximum size. Once the maximum size is reached,
    writing effectively loops round to the beginning of the ring and starts
overwriting the oldest content.
Actually, this ring buffer is slightly different to "traditional" ring
buffers. This ring buffer is optimized for camera usage which is expected
to be read-light, write-heavy, and with writes *mostly* aligned to frame
boundaries. Internally, the stream simply references each chunk written and
drops references each time the overall size of the stream would exceed the
specified limit.
As a result the ring buffer doesn't stay strictly at its allocated limit as
traditional ring buffers do. It also drops entire writes when the limit is
reached (this is a desirable behaviour because it means that often whole
frames are dropped from the start of the stream, rather than leaving
partial frames at the start as in a traditional ring buffer). For example:
.. code-block:: pycon
>>> stream = CircularIO(size=10)
>>> stream.write(b'abc')
>>> stream.write(b'def')
>>> stream.getvalue()
b'abcdef'
>>> stream.write(b'ghijk')
>>> stream.getvalue()
b'defghijk'
In a traditional ring buffer, one would expect the last ``getvalue()`` call
to return ``'bcdefghijk'`` as only the first character would be lost at the
limit of 10 bytes. However, this ring buffer has dropped the entire write
of ``'abc'``.
The *size* parameter specifies the maximum size of the stream in bytes. The
:meth:`read`, :meth:`tell`, and :meth:`seek` methods all operate
equivalently to those in :class:`io.BytesIO` whilst :meth:`write` only
differs in the wrapping behaviour described above. A :meth:`read1` method
is also provided for efficient reading of the underlying ring buffer in
write-sized chunks (or less).
A re-entrant threading lock guards all operations, and is accessible for
external use via the :attr:`lock` attribute.
The performance of the class is geared toward faster writing than reading
on the assumption that writing will be the common operation and reading the
rare operation (a reasonable assumption for the camera use-case, but not
necessarily for more general usage).
.. _ring buffer: https://en.wikipedia.org/wiki/Circular_buffer
"""
def __init__(self, size):
if size < 1:
raise ValueError('size must be a positive integer')
self._lock = RLock()
self._data = deque()
self._size = size
self._length = 0
self._pos = 0
self._pos_index = 0
self._pos_offset = 0
def _check_open(self):
if self.closed:
raise ValueError('I/O operation on a closed stream')
@property
def lock(self):
"""
A re-entrant threading lock which is used to guard all operations.
"""
return self._lock
@property
def size(self):
"""
Return the maximum size of the buffer in bytes.
"""
return self._size
def readable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`read`.
"""
self._check_open()
return True
def writable(self):
"""
Returns ``True``, indicating that the stream supports :meth:`write`.
"""
self._check_open()
return True
def seekable(self):
"""
Returns ``True``, indicating the stream supports :meth:`seek` and
:meth:`tell`.
"""
self._check_open()
return True
def getvalue(self):
"""
Return ``bytes`` containing the entire contents of the buffer.
"""
with self.lock:
return b''.join(self._data)
def _set_pos(self, value):
self._pos = value
self._pos_index = -1
self._pos_offset = chunk_pos = 0
for self._pos_index, chunk in enumerate(self._data):
if chunk_pos + len(chunk) > value:
self._pos_offset = value - chunk_pos
return
else:
chunk_pos += len(chunk)
self._pos_index += 1
self._pos_offset = value - chunk_pos
def tell(self):
"""
Return the current stream position.
"""
self._check_open()
with self.lock:
return self._pos
def seek(self, offset, whence=io.SEEK_SET):
"""
Change the stream position to the given byte *offset*. *offset* is
interpreted relative to the position indicated by *whence*. Values for
*whence* are:
* ``SEEK_SET`` or ``0`` – start of the stream (the default); *offset*
should be zero or positive
* ``SEEK_CUR`` or ``1`` – current stream position; *offset* may be
negative
* ``SEEK_END`` or ``2`` – end of the stream; *offset* is usually
negative
Return the new absolute position.
"""
self._check_open()
with self.lock:
if whence == io.SEEK_CUR:
offset = self._pos + offset
elif whence == io.SEEK_END:
offset = self._length + offset
if offset < 0:
raise ValueError(
'New position is before the start of the stream')
self._set_pos(offset)
return self._pos
def read(self, n=-1):
"""
Read up to *n* bytes from the stream and return them. As a convenience,
if *n* is unspecified or -1, :meth:`readall` is called. Fewer than *n*
bytes may be returned if there are fewer than *n* bytes from the
current stream position to the end of the stream.
If 0 bytes are returned, and *n* was not 0, this indicates end of the
stream.
"""
self._check_open()
if n < 0:
return self.readall()
elif n == 0:
return b''
else:
with self.lock:
if self._pos >= self._length:
return b''
from_index, from_offset = self._pos_index, self._pos_offset
self._set_pos(self._pos + n)
result = self._data[from_index][from_offset:from_offset + n]
# Bah ... can't slice a deque
for i in range(from_index + 1, self._pos_index):
result += self._data[i]
if from_index < self._pos_index < len(self._data):
result += self._data[self._pos_index][:self._pos_offset]
return result
def readall(self):
"""
Read and return all bytes from the stream until EOF, using multiple
calls to the stream if necessary.
"""
return self.read(max(0, self._length - self._pos))
def read1(self, n=-1):
"""
Read up to *n* bytes from the stream using only a single call to the
underlying object.
In the case of :class:`CircularIO` this roughly corresponds to
returning the content from the current position up to the end of the
write that added that content to the stream (assuming no subsequent
writes overwrote the content). :meth:`read1` is particularly useful
for efficient copying of the stream's content.
"""
self._check_open()
with self.lock:
if self._pos == self._length:
return b''
chunk = self._data[self._pos_index]
if n == -1:
n = len(chunk) - self._pos_offset
result = chunk[self._pos_offset:self._pos_offset + n]
self._pos += len(result)
self._pos_offset += n
if self._pos_offset >= len(chunk):
self._pos_index += 1
self._pos_offset = 0
return result
def truncate(self, size=None):
"""
Resize the stream to the given *size* in bytes (or the current position
if *size* is not specified). This resizing can extend or reduce the
current stream size. In case of extension, the contents of the new file
area will be NUL (``\\x00``) bytes. The new stream size is returned.
The current stream position isn’t changed unless the resizing is
expanding the stream, in which case it may be set to the maximum stream
size if the expansion causes the ring buffer to loop around.
"""
self._check_open()
with self.lock:
if size is None:
size = self._pos
if size < 0:
raise ValueError('size must be zero, or a positive integer')
if size > self._length:
# Backfill the space between stream end and current position
# with NUL bytes
fill = b'\x00' * (size - self._length)
self._set_pos(self._length)
self.write(fill)
elif size < self._length:
# Lop off chunks until we get to the last one at the truncation
# point, and slice that one
save_pos = self._pos
self._set_pos(size)
while self._pos_index < len(self._data) - 1:
self._data.pop()
if self._pos_offset > 0:
self._data[self._pos_index] = self._data[self._pos_index][:self._pos_offset]
self._pos_index += 1
self._pos_offset = 0
else:
self._data.pop()
self._length = size
if self._pos != save_pos:
self._set_pos(save_pos)
def write(self, b):
"""
Write the given bytes or bytearray object, *b*, to the underlying
stream and return the number of bytes written.
"""
self._check_open()
b = bytes(b)
with self.lock:
# Special case: stream position is beyond the end of the stream.
# Call truncate to backfill space first
if self._pos > self._length:
self.truncate()
result = len(b)
if self._pos == self._length:
# Fast path: stream position is at the end of the stream so
# just append a new chunk
self._data.append(b)
self._length += len(b)
self._pos = self._length
self._pos_index = len(self._data)
self._pos_offset = 0
else:
# Slow path: stream position is somewhere in the middle;
# overwrite bytes in the current (and if necessary, subsequent)
# chunk(s), without extending them. If we reach the end of the
# stream, call ourselves recursively to continue down the fast
# path
while b and (self._pos < self._length):
chunk = self._data[self._pos_index]
head = b[:len(chunk) - self._pos_offset]
assert head
b = b[len(head):]
self._data[self._pos_index] = b''.join((
chunk[:self._pos_offset],
head,
chunk[self._pos_offset + len(head):]
))
self._pos += len(head)
if self._pos_offset + len(head) >= len(chunk):
self._pos_index += 1
self._pos_offset = 0
else:
self._pos_offset += len(head)
if b:
self.write(b)
# If the stream is now beyond the specified size limit, remove
# whole chunks until the size is within the limit again
while self._length > self._size:
chunk = self._data.popleft()
self._length -= len(chunk)
self._pos -= len(chunk)
self._pos_index -= 1
# no need to adjust self._pos_offset
return result
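# Illustrative sketch (not part of the original module): draining a CircularIO
# in write-sized chunks via read1(). The sizes and output path are arbitrary
# choices for the example.
#
#     stream = CircularIO(size=1024)
#     stream.write(b'a' * 100)
#     stream.write(b'b' * 50)
#     stream.seek(0)
#     with io.open('/tmp/ring_dump.bin', 'wb') as output:
#         while True:
#             chunk = stream.read1()
#             if not chunk:
#                 break
#             output.write(chunk)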
class PiCameraDequeHack(deque):
def __init__(self, stream):
super(PiCameraDequeHack, self).__init__()
self.stream = ref(stream) # avoid a circular ref
def append(self, item):
# Include the frame's metadata.
frame = self.stream()._get_frame()
return super(PiCameraDequeHack, self).append((item, frame))
def pop(self):
return super(PiCameraDequeHack, self).pop()[0]
def popleft(self):
return super(PiCameraDequeHack, self).popleft()[0]
def __getitem__(self, index):
return super(PiCameraDequeHack, self).__getitem__(index)[0]
def __setitem__(self, index, value):
frame = super(PiCameraDequeHack, self).__getitem__(index)[1]
return super(PiCameraDequeHack, self).__setitem__(index, (value, frame))
def __iter__(self):
for item, frame in self.iter_both(False):
yield item
def __reversed__(self):
for item, frame in self.iter_both(True):
yield item
def iter_both(self, reverse):
if reverse:
return super(PiCameraDequeHack, self).__reversed__()
else:
return super(PiCameraDequeHack, self).__iter__()
class PiCameraDequeFrames(object):
def __init__(self, stream):
super(PiCameraDequeFrames, self).__init__()
self.stream = ref(stream) # avoid a circular ref
def __iter__(self):
with self.stream().lock:
pos = 0
for item, frame in self.stream()._data.iter_both(False):
pos += len(item)
if frame:
# Rewrite the video_size and split_size attributes
# according to the current position of the chunk
frame = PiVideoFrame(
index=frame.index,
frame_type=frame.frame_type,
frame_size=frame.frame_size,
video_size=pos,
split_size=pos,
timestamp=frame.timestamp,
complete=frame.complete,
)
# Only yield the frame meta-data if the start of the frame
# still exists in the stream
if pos - frame.frame_size >= 0:
yield frame
def __reversed__(self):
with self.stream().lock:
pos = self.stream()._length
for item, frame in self.stream()._data.iter_both(True):
if frame:
frame = PiVideoFrame(
index=frame.index,
frame_type=frame.frame_type,
frame_size=frame.frame_size,
video_size=pos,
split_size=pos,
timestamp=frame.timestamp,
complete=frame.complete,
)
if pos - frame.frame_size >= 0:
yield frame
pos -= len(item)
class PiCameraCircularIO(CircularIO):
"""
A derivative of :class:`CircularIO` which tracks camera frames.
PiCameraCircularIO provides an in-memory stream based on a ring buffer. It
is a specialization of :class:`CircularIO` which associates video frame
meta-data with the recorded stream, accessible from the :attr:`frames`
property.
.. warning::
The class makes a couple of assumptions which will cause the frame
meta-data tracking to break if they are not adhered to:
* the stream is only ever appended to - no writes ever start from
the middle of the stream
* the stream is never truncated (from the right; being ring buffer
based, left truncation will occur automatically); the exception
to this is the :meth:`clear` method.
The *camera* parameter specifies the :class:`PiCamera` instance that will
be recording video to the stream. If specified, the *size* parameter
determines the maximum size of the stream in bytes. If *size* is not
specified (or ``None``), then *seconds* must be specified instead. This
provides the maximum length of the stream in seconds, assuming a data rate
in bits-per-second given by the *bitrate* parameter (which defaults to
``17000000``, or 17Mbps, which is also the default bitrate used for video
recording by :class:`PiCamera`). You cannot specify both *size* and
*seconds*.
The *splitter_port* parameter specifies the port of the built-in splitter
that the video encoder will be attached to. This defaults to ``1`` and most
users will have no need to specify anything different. If you do specify
something else, ensure it is equal to the *splitter_port* parameter of the
corresponding call to :meth:`~PiCamera.start_recording`. For example::
import picamera
with picamera.PiCamera() as camera:
with picamera.PiCameraCircularIO(camera, splitter_port=2) as stream:
camera.start_recording(stream, format='h264', splitter_port=2)
camera.wait_recording(10, splitter_port=2)
camera.stop_recording(splitter_port=2)
.. attribute:: frames
Returns an iterator over the frame meta-data.
As the camera records video to the stream, the class captures the
meta-data associated with each frame (in the form of a
:class:`PiVideoFrame` tuple), discarding meta-data for frames which are
no longer fully stored within the underlying ring buffer. You can use
the frame meta-data to locate, for example, the first keyframe present
in the stream in order to determine an appropriate range to extract.
"""
def __init__(
self, camera, size=None, seconds=None, bitrate=17000000,
splitter_port=1):
if size is None and seconds is None:
raise PiCameraValueError('You must specify either size, or seconds')
if size is not None and seconds is not None:
raise PiCameraValueError('You cannot specify both size and seconds')
if seconds is not None:
size = bitrate * seconds // 8
super(PiCameraCircularIO, self).__init__(size)
try:
camera._encoders
except AttributeError:
raise PiCameraValueError('camera must be a valid PiCamera object')
self.camera = camera
self.splitter_port = splitter_port
self._data = PiCameraDequeHack(self)
self._frames = PiCameraDequeFrames(self)
def _get_frame(self):
"""
Return frame metadata from latest frame, when it is complete.
"""
encoder = self.camera._encoders[self.splitter_port]
return encoder.frame if encoder.frame.complete else None
@property
def frames(self):
"""
An iterable which contains the meta-data (:class:`PiVideoFrame`
objects) for all complete frames currently stored in the circular
buffer.
"""
return self._frames
def clear(self):
"""
Resets the stream to empty safely.
This method truncates the stream to empty, and clears the associated
frame meta-data too, ensuring that subsequent writes operate correctly
(see the warning in the :class:`PiCameraCircularIO` class
documentation).
"""
with self.lock:
self.seek(0)
self.truncate()
def _find(self, field, criteria, first_frame):
first = last = None
attr = attrgetter(field)
for frame in reversed(self._frames):
if last is None:
last = frame
if first_frame in (None, frame.frame_type):
first = frame
if last is not None and attr(last) - attr(frame) >= criteria:
break
if last is not None and attr(last) - attr(frame) >= criteria:
break
return first, last
def _find_all(self, first_frame):
chunks = []
first = last = None
for frame in reversed(self._frames):
last = frame
break
for frame in self._frames:
if first_frame in (None, frame.frame_type):
first = frame
break
return first, last
def copy_to(
self, output, size=None, seconds=None, frames=None,
first_frame=PiVideoFrameType.sps_header):
"""
copy_to(output, size=None, seconds=None, frames=None, first_frame=PiVideoFrameType.sps_header)
Copies content from the stream to *output*.
By default, this method copies all complete frames from the circular
stream to the filename or file-like object given by *output*.
If *size* is specified then the copy will be limited to the whole
number of frames that fit within the specified number of bytes. If
        *seconds* is specified, then the copy will be limited to that number of
seconds worth of frames. If *frames* is specified then the copy will
be limited to that number of frames. Only one of *size*, *seconds*, or
*frames* can be specified. If none is specified, all frames are copied.
If *first_frame* is specified, it defines the frame type of the first
frame to be copied. By default this is
:attr:`~PiVideoFrameType.sps_header` as this must usually be the first
        frame in an H264 stream. If *first_frame* is ``None``, no such limit
will be applied.
.. warning::
Note that if a frame of the specified type (e.g. SPS header) cannot
be found within the specified number of seconds, bytes, or frames,
then this method will simply copy nothing (but no error will be
raised).
The stream's position is not affected by this method.
"""
if (size, seconds, frames).count(None) < 2:
raise PiCameraValueError(
'You can only specify one of size, seconds, or frames')
if isinstance(output, bytes):
output = output.decode('utf-8')
opened = isinstance(output, str)
if opened:
output = io.open(output, 'wb')
try:
with self.lock:
if size is not None:
first, last = self._find('video_size', size, first_frame)
elif seconds is not None:
seconds = int(seconds * 1000000)
first, last = self._find('timestamp', seconds, first_frame)
elif frames is not None:
first, last = self._find('index', frames, first_frame)
else:
first, last = self._find_all(first_frame)
# Copy chunk references into a holding buffer; this allows us
# to release the lock on the stream quickly (in case recording
# is on-going)
chunks = []
if first is not None and last is not None:
pos = 0
for buf, frame in self._data.iter_both(False):
if pos > last.position + last.frame_size:
break
elif pos >= first.position:
chunks.append(buf)
pos += len(buf)
# Perform the actual I/O, copying chunks to the output
for buf in chunks:
output.write(buf)
return first, last
finally:
if opened:
output.close()
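# Illustrative sketch (not part of the original module): a typical use of
# copy_to() to persist roughly the last 10 seconds of an in-memory recording.
# The durations and output filename are arbitrary choices for the example.
#
#     import picamera
#     with picamera.PiCamera() as camera:
#         stream = picamera.PiCameraCircularIO(camera, seconds=20)
#         camera.start_recording(stream, format='h264')
#         camera.wait_recording(30)
#         # copies whole frames starting from an SPS header; the stream
#         # position is left untouched
#         stream.copy_to('recent.h264', seconds=10)
#         camera.stop_recording()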
|
lib/galaxy/util/sqlite.py | rikeshi/galaxy | 1,085 | 11066280 | import re
import sqlite3
try:
import sqlparse
def is_read_only_query(query):
statements = sqlparse.parse(query)
for statement in statements:
if statement.get_type() != "SELECT":
return False
return True
except ImportError:
# Without sqlparse we use a very weak regex check
def is_read_only_query(query):
if re.match("select ", query, re.IGNORECASE):
if re.search("^([^\"]|\"[^\"]*\")*?;", query) or re.search("^([^\']|\'[^\']*\')*?;", query):
return False
else:
return True
return False
def connect(path):
connection = sqlite3.connect(path)
connection.row_factory = sqlite3.Row
return connection
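# Illustrative sketch (not part of the original module): guarding an ad-hoc
# query with is_read_only_query() before executing it. The database path and
# SQL are made up for the example.
#
#     conn = connect('/tmp/example.sqlite')
#     query = "SELECT name FROM dataset LIMIT 10"
#     if is_read_only_query(query):
#         rows = [dict(row) for row in conn.execute(query)]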
|
setup.py | Venkat1729/kubernetes-py | 129 | 11066286 | <gh_stars>100-1000
import version_query
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
my_version = version_query.query_version_str()
setup(
name="kubernetes-py",
version=my_version,
description="A python module for Kubernetes.",
author="mnubo inc.",
author_email="<EMAIL>",
url="https://github.com/mnubo/kubernetes-py",
download_url="https://github.com/mnubo/kubernetes-py/tarball/{}".format(my_version),
keywords=["kubernetes_py", "k8s", "kubernetes"],
packages=[
"kubernetes_py",
"kubernetes_py.models",
"kubernetes_py.models.unversioned",
"kubernetes_py.models.v1",
"kubernetes_py.models.v1alpha1",
"kubernetes_py.models.v1beta1",
"kubernetes_py.models.v2alpha1",
"kubernetes_py.utils",
],
install_requires=["six>=1.10.0", "PyYAML>=3.13", "requests>=2.10.0", "uuid>=1.30", "python-dateutil>=2.6.0"],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
scripts=[],
test_suite="nose.collector",
tests_require=["nose", "nose-cover3"],
include_package_data=True,
zip_safe=False,
)
|
pytorch_widedeep/models/tab_mlp.py | rajshah4/pytorch-widedeep | 692 | 11066307 | <reponame>rajshah4/pytorch-widedeep
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
allowed_activations = ["relu", "leaky_relu", "tanh", "gelu", "geglu", "reglu"]
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
class REGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
        return x * F.relu(gates)
def get_activation_fn(activation):
if activation == "relu":
return nn.ReLU(inplace=True)
if activation == "leaky_relu":
return nn.LeakyReLU(inplace=True)
if activation == "tanh":
return nn.Tanh()
if activation == "gelu":
return nn.GELU()
if activation == "geglu":
return GEGLU()
if activation == "reglu":
return REGLU()
def dense_layer(
inp: int,
out: int,
activation: str,
p: float,
bn: bool,
linear_first: bool,
):
    # This is basically the LinBnDrop class from the fastai library
if activation == "geglu":
raise ValueError(
"'geglu' activation is only used as 'transformer_activation' "
"in transformer-based models"
)
act_fn = get_activation_fn(activation)
layers = [nn.BatchNorm1d(out if linear_first else inp)] if bn else []
if p != 0:
layers.append(nn.Dropout(p)) # type: ignore[arg-type]
lin = [nn.Linear(inp, out, bias=not bn), act_fn]
layers = lin + layers if linear_first else layers + lin
return nn.Sequential(*layers)
class CatEmbeddingsAndCont(nn.Module):
def __init__(
self,
column_idx: Dict[str, int],
embed_input: List[Tuple[str, int, int]],
embed_dropout: float,
continuous_cols: Optional[List[str]],
cont_norm_layer: str,
):
super(CatEmbeddingsAndCont, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.continuous_cols = continuous_cols
        # Embeddings: val + 1 because 0 is reserved for padding/unseen categories.
if self.embed_input is not None:
self.embed_layers = nn.ModuleDict(
{
"emb_layer_" + col: nn.Embedding(val + 1, dim, padding_idx=0)
for col, val, dim in self.embed_input
}
)
self.embedding_dropout = nn.Dropout(embed_dropout)
self.emb_out_dim: int = int(
np.sum([embed[2] for embed in self.embed_input])
)
else:
self.emb_out_dim = 0
# Continuous
if self.continuous_cols is not None:
self.cont_idx = [self.column_idx[col] for col in self.continuous_cols]
self.cont_out_dim: int = len(self.continuous_cols)
if cont_norm_layer == "batchnorm":
self.cont_norm: NormLayers = nn.BatchNorm1d(self.cont_out_dim)
elif cont_norm_layer == "layernorm":
self.cont_norm = nn.LayerNorm(self.cont_out_dim)
else:
self.cont_norm = nn.Identity()
else:
self.cont_out_dim = 0
self.output_dim = self.emb_out_dim + self.cont_out_dim
def forward(self, X: Tensor) -> Tuple[Tensor, Any]:
if self.embed_input is not None:
embed = [
self.embed_layers["emb_layer_" + col](X[:, self.column_idx[col]].long())
for col, _, _ in self.embed_input
]
x_emb = torch.cat(embed, 1)
x_emb = self.embedding_dropout(x_emb)
else:
x_emb = None
if self.continuous_cols is not None:
x_cont = self.cont_norm((X[:, self.cont_idx].float()))
else:
x_cont = None
return x_emb, x_cont
class MLP(nn.Module):
def __init__(
self,
d_hidden: List[int],
activation: str,
dropout: Optional[Union[float, List[float]]],
batchnorm: bool,
batchnorm_last: bool,
linear_first: bool,
):
super(MLP, self).__init__()
if not dropout:
dropout = [0.0] * len(d_hidden)
elif isinstance(dropout, float):
dropout = [dropout] * len(d_hidden)
self.mlp = nn.Sequential()
for i in range(1, len(d_hidden)):
self.mlp.add_module(
"dense_layer_{}".format(i - 1),
dense_layer(
d_hidden[i - 1],
d_hidden[i],
activation,
dropout[i - 1],
batchnorm and (i != len(d_hidden) - 1 or batchnorm_last),
linear_first,
),
)
def forward(self, X: Tensor) -> Tensor:
return self.mlp(X)
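# Illustrative sketch (not part of the original module): using MLP on its own.
# The first entry of d_hidden is the input dimension; all sizes here are
# arbitrary choices for the example.
#
#     import torch
#     mlp = MLP(d_hidden=[16, 8, 4], activation="relu", dropout=0.1,
#               batchnorm=False, batchnorm_last=False, linear_first=True)
#     out = mlp(torch.rand(32, 16))   # -> torch.Size([32, 4])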
class TabMlp(nn.Module):
r"""Defines a ``TabMlp`` model that can be used as the ``deeptabular``
component of a Wide & Deep model.
This class combines embedding representations of the categorical features
with numerical (aka continuous) features. These are then passed through a
series of dense layers (i.e. a MLP).
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the ``TabMlp`` model. Required to slice the tensors. e.g. {'education':
0, 'relationship': 1, 'workclass': 2, ...}
embed_input: List, Optional, default = None
List of Tuples with the column name, number of unique values and
embedding dimension. e.g. [(education, 11, 32), ...]
embed_dropout: float, default = 0.1
embeddings dropout
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
cont_norm_layer: str, default = "batchnorm"
Type of normalization layer applied to the continuous features. Options
are: 'layernorm', 'batchnorm' or None.
mlp_hidden_dims: List, default = [200, 100]
List with the number of neurons per dense layer in the mlp.
mlp_activation: str, default = "relu"
Activation function for the dense layers of the MLP. Currently
``tanh``, ``relu``, ``leaky_relu`` and ``gelu`` are supported
mlp_dropout: float or List, default = 0.1
float or List of floats with the dropout between the dense layers.
e.g: [0.5,0.5]
mlp_batchnorm: bool, default = False
Boolean indicating whether or not batch normalization will be applied
to the dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not batch normalization will be applied
to the last of the dense layers
mlp_linear_first: bool, default = False
Boolean indicating the order of the operations in the dense
layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->
LIN -> ACT]``
Attributes
----------
cat_embed_and_cont: ``nn.Module``
This is the module that processes the categorical and continuous columns
tab_mlp: ``nn.Sequential``
mlp model that will receive the concatenation of the embeddings and
the continuous columns
output_dim: int
The output dimension of the model. This is a required attribute
        necessary to build the WideDeep class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import TabMlp
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = TabMlp(mlp_hidden_dims=[8,4], column_idx=column_idx, embed_input=embed_input,
... continuous_cols = ['e'])
>>> out = model(X_tab)
"""
def __init__(
self,
column_idx: Dict[str, int],
embed_input: Optional[List[Tuple[str, int, int]]] = None,
embed_dropout: float = 0.1,
continuous_cols: Optional[List[str]] = None,
cont_norm_layer: str = "batchnorm",
mlp_hidden_dims: List[int] = [200, 100],
mlp_activation: str = "relu",
mlp_dropout: Union[float, List[float]] = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = False,
):
super(TabMlp, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.mlp_hidden_dims = mlp_hidden_dims
self.embed_dropout = embed_dropout
self.continuous_cols = continuous_cols
self.cont_norm_layer = cont_norm_layer
self.mlp_activation = mlp_activation
self.mlp_dropout = mlp_dropout
self.mlp_batchnorm = mlp_batchnorm
self.mlp_linear_first = mlp_linear_first
if self.mlp_activation not in allowed_activations:
raise ValueError(
"Currently, only the following activation functions are supported "
"for for the MLP's dense layers: {}. Got {} instead".format(
", ".join(allowed_activations), self.mlp_activation
)
)
self.cat_embed_and_cont = CatEmbeddingsAndCont(
column_idx,
embed_input,
embed_dropout,
continuous_cols,
cont_norm_layer,
)
# MLP
mlp_input_dim = self.cat_embed_and_cont.output_dim
mlp_hidden_dims = [mlp_input_dim] + mlp_hidden_dims
self.tab_mlp = MLP(
mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
# the output_dim attribute will be used as input_dim when "merging" the models
self.output_dim = mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor:
r"""Forward pass that concatenates the continuous features with the
embeddings. The result is then passed through a series of dense layers
"""
x_emb, x_cont = self.cat_embed_and_cont(X)
if x_emb is not None:
x = x_emb
if x_cont is not None:
x = torch.cat([x, x_cont], 1) if x_emb is not None else x_cont
return self.tab_mlp(x)
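# Illustrative sketch (not part of the original module): TabMlp is normally
# used as the ``deeptabular`` component of the library's WideDeep wrapper. The
# column setup below mirrors the docstring example; treat it as an assumption.
#
#     import torch
#     from pytorch_widedeep.models import WideDeep
#     colnames = ['a', 'b', 'c', 'd', 'e']
#     embed_input = [(u, i, j) for u, i, j in zip(colnames[:4], [4] * 4, [8] * 4)]
#     column_idx = {k: v for v, k in enumerate(colnames)}
#     deeptabular = TabMlp(column_idx=column_idx, embed_input=embed_input,
#                          continuous_cols=['e'], mlp_hidden_dims=[8, 4])
#     model = WideDeep(deeptabular=deeptabular)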
|
ml4a/models/wav2lip.py | KushGabani/ml4a-guides | 1,110 | 11066327 | import os
import logging
import random
import subprocess
import numpy as np
import PIL
import cv2
import torch
from tqdm import tqdm
from .. import image
from .. import audio as ml4a_audio
from ..utils import downloads
from . import submodules
cuda_available = submodules.cuda_available()
#with submodules.localimport('submodules/Wav2Lip') as _importer:
with submodules.import_from('Wav2Lip'): # localimport fails here
import audio
import face_detection
from models import Wav2Lip
model = None
def setup():
global model
device = 'cuda' if torch.cuda.is_available() else 'cpu'
checkpoint_path = downloads.download_from_gdrive(
gdrive_fileid='1_OvqStxNxLc7bXzlaVG5sz695p-FVfYY',
output_path='Wav2Lip/wav2lip_gan.pth')
model = load_model(checkpoint_path, device)
def __load__(checkpoint_path, device):
if device == 'cuda':
checkpoint = torch.load(checkpoint_path)
else:
checkpoint = torch.load(checkpoint_path,
map_location=lambda storage, loc: storage)
return checkpoint
def load_model(path, device):
model = Wav2Lip()
print("Load checkpoint from: {}".format(path))
checkpoint = __load__(path, device)
s = checkpoint["state_dict"]
new_s = {}
for k, v in s.items():
new_s[k.replace('module.', '')] = v
model.load_state_dict(new_s)
model = model.to(device)
return model.eval()
def get_smoothened_boxes(boxes, T):
for i in range(len(boxes)):
if i + T > len(boxes):
window = boxes[len(boxes) - T:]
else:
window = boxes[i : i + T]
boxes[i] = np.mean(window, axis=0)
return boxes
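# Illustrative sketch (not part of the original module): the smoothing is a
# sliding-window mean applied in place, so later rows see already-smoothed
# values. The coordinates below are made up.
#
#     boxes = np.array([[0., 0., 10., 10.],
#                       [4., 4., 14., 14.]])
#     get_smoothened_boxes(boxes, T=2)
#     # row 0 becomes [2., 2., 12., 12.]; row 1 becomes [3., 3., 13., 13.]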
def face_detect(images, pads, nosmooth, batch_size, device):
detector = face_detection.FaceAlignment(
face_detection.LandmarksType._2D,
flip_input=False,
device=device)
while True:
predictions = []
try:
for i in tqdm(range(0, len(images), batch_size)):
predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))
except RuntimeError:
if batch_size == 1:
raise RuntimeError('Image too big to run face detection on GPU. Please use the resize_factor argument')
batch_size //= 2
print('Recovering from OOM error; New batch size: {}'.format(batch_size))
continue
break
results = []
pady1, pady2, padx1, padx2 = pads
for rect, image in zip(predictions, images):
if rect is None:
#cv2.imwrite('temp/faulty_frame.jpg', image) # check this frame where the face was not detected.
#raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')
results.append([None, None, None, None])
else:
y1 = max(0, rect[1] - pady1)
y2 = min(image.shape[0], rect[3] + pady2)
x1 = max(0, rect[0] - padx1)
x2 = min(image.shape[1], rect[2] + padx2)
results.append([x1, y1, x2, y2])
results_found = [r for r in results if None not in r]
if len(results_found) == 0:
raise ValueError('Face not detected in any frames! Ensure the video contains a face in at least one frame')
if len(results_found) < len(results):
x1a, y1a, x2a, y2a = [int(_) for _ in np.mean(results_found, axis=0)]
for r in range(len(results)):
results[r] = [x1a, y1a, x2a, y2a] if None in results[r] else results[r]
boxes = np.array(results)
if not nosmooth: boxes = get_smoothened_boxes(boxes, T=5)
results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]
del detector
return results
def datagen(frames, mels, box, static, img_size, wav2lip_batch_size, face_det_batch_size, pads, nosmooth, device):
img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
if box[0] == -1:
if not static:
face_det_results = face_detect(frames, pads, nosmooth, face_det_batch_size, device) # BGR2RGB for CNN face detection
else:
face_det_results = face_detect([frames[0]], pads, nosmooth, face_det_batch_size, device)
else:
print('Using the specified bounding box instead of face detection...')
y1, y2, x1, x2 = box
face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]
for i, m in enumerate(mels):
idx = 0 if static else i%len(frames)
frame_to_save = frames[idx].copy()
face, coords = face_det_results[idx].copy()
face = cv2.resize(face, (img_size, img_size))
img_batch.append(face)
mel_batch.append(m)
frame_batch.append(frame_to_save)
coords_batch.append(coords)
if len(img_batch) >= wav2lip_batch_size:
img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
img_masked = img_batch.copy()
img_masked[:, img_size//2:] = 0
img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
yield img_batch, mel_batch, frame_batch, coords_batch
img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
if len(img_batch) > 0:
img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
img_masked = img_batch.copy()
img_masked[:, img_size//2:] = 0
img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
yield img_batch, mel_batch, frame_batch, coords_batch
def modify_frame(frame, resize_factor, rotate, crop):
frame = np.array(frame)
if resize_factor > 1:
new_size = (frame.shape[1]//resize_factor, frame.shape[0]//resize_factor)
frame = cv2.resize(frame, new_size)
if rotate:
frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)
y1, y2, x1, x2 = crop
if x2 == -1: x2 = frame.shape[1]
if y2 == -1: y2 = frame.shape[0]
frame = frame[y1:y2, x1:x2]
return frame
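# Illustrative sketch (not part of the original module): modify_frame downscales,
# optionally rotates, then crops with a [y1, y2, x1, x2] box where -1 means
# "to the edge". The dummy frame below is an arbitrary example.
#
#     frame = np.zeros((480, 640, 3), dtype=np.uint8)
#     out = modify_frame(frame, resize_factor=2, rotate=False, crop=[0, -1, 0, -1])
#     out.shape   # -> (240, 320, 3)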
def run(input_video, input_audio, output_video, sampling_rate=None, pads=None, resize_factor=1, crop=None, box=None, fps=25, rotate=False):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
img_size = 96
mel_step_size = 16
static = False
nosmooth = False
wav2lip_batch_size = 128
face_det_batch_size = 16
image_is_image = isinstance(input_video, (PIL.Image.Image, np.ndarray))
image_is_image_list = isinstance(input_video, list) and isinstance(input_video[0], (PIL.Image.Image, np.ndarray))
image_is_str = isinstance(input_video, str)
image_is_movieplayer = isinstance(input_video, image.MoviePlayer)
sound_is_sound = isinstance(input_audio, (torch.Tensor, np.ndarray))
sound_is_str = isinstance(input_audio, str)
assert not (sound_is_sound and sampling_rate is None), \
'If setting input_audio directly to a waveform, must set sampling_rate!'
if pads is None:
pads = [0, 10, 0, 0]
if crop is None:
crop = [0, -1, 0, -1]
if box is None:
box = [-1, -1, -1, -1]
if image_is_str:
if not os.path.isfile(input_video):
raise ValueError('No image or video found at {}'.format(input_video))
elif input_video.split('.')[1] in ['jpg', 'png', 'jpeg']:
full_frames = [cv2.imread(input_video)]
else:
video_stream = cv2.VideoCapture(input_video)
fps = video_stream.get(cv2.CAP_PROP_FPS)
full_frames = []
while 1:
still_reading, frame = video_stream.read()
if not still_reading:
video_stream.release()
break
frame = modify_frame(frame, resize_factor, rotate, crop)
full_frames.append(frame)
elif image_is_image_list:
full_frames = [np.array(img)[...,[2,1,0]] for img in input_video]
elif image_is_image:
full_frames = [np.array(input_video)[...,[2,1,0]]]
elif image_is_movieplayer:
full_frames = []
for f in range(input_video.num_frames):
frame = input_video.get_frame(f+1)
frame = modify_frame(frame, resize_factor, rotate, crop)
full_frames.append(frame)
#print ("Number of frames available for inference: "+str(len(full_frames)))
    random_key = random.randint(1, 10**8)
scratch_folder = downloads.get_ml4a_folder('scratch/wav2lip')
temp_video_file = os.path.join(scratch_folder, 'temp_video_%08d.avi' % random_key)
temp_audio_file = os.path.join(scratch_folder, 'temp_audio_%08d.wav' % random_key)
if sound_is_str:
if input_audio.endswith('.wav'):
temp_audio_file = input_audio
else:
print('Extracting raw audio...')
command = 'ffmpeg -y -i {} -strict -2 {}'.format(input_audio, temp_audio_file)
subprocess.call(command, shell=True)
elif sound_is_sound:
if isinstance(input_audio, torch.Tensor):
input_audio = input_audio.cpu().numpy()
if input_audio.ndim > 1:
input_audio = input_audio[0]
ml4a_audio.save(temp_audio_file, input_audio, sampling_rate=sampling_rate)
input_audio = temp_audio_file
wav = audio.load_wav(temp_audio_file, 16000)
mel = audio.melspectrogram(wav)
if np.isnan(mel.reshape(-1)).sum() > 0:
raise ValueError('Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')
mel_chunks = []
mel_idx_multiplier = 80./fps
i = 0
while 1:
start_idx = int(i * mel_idx_multiplier)
if start_idx + mel_step_size > len(mel[0]):
mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
break
mel_chunks.append(mel[:, start_idx : start_idx + mel_step_size])
i += 1
#print("Length of mel chunks: {}".format(len(mel_chunks)))
full_frames = full_frames[:len(mel_chunks)]
gen = datagen(full_frames.copy(), mel_chunks,
box, static, img_size,
wav2lip_batch_size, face_det_batch_size,
pads, nosmooth, device)
for i, (img_batch, mel_batch, frames, coords) in enumerate(tqdm(gen,
total=int(np.ceil(float(len(mel_chunks))/wav2lip_batch_size)))):
if model is None:
setup()
if i == 0:
frame_h, frame_w = full_frames[0].shape[:-1]
out = cv2.VideoWriter(temp_video_file,
cv2.VideoWriter_fourcc(*'DIVX'),
fps, (frame_w, frame_h))
img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)
mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(device)
with torch.no_grad():
pred = model(mel_batch, img_batch)
pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.
for p, f, c in zip(pred, frames, coords):
y1, y2, x1, x2 = c
p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))
f[y1:y2, x1:x2] = p
out.write(f)
out.release()
command = '/usr/bin/ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(input_audio, temp_video_file, output_video)
result = subprocess.call(command, shell=True)
if result:
logging.info("error on ffmpeg: {}".format(result))
if os.path.isfile(temp_video_file):
os.remove(temp_video_file)
if os.path.isfile(temp_audio_file):
os.remove(temp_audio_file)
    #return result
|
uc_irv_mess_dl.py | Muyu-gk/DynamicGraph | 323 | 11066342 | <filename>uc_irv_mess_dl.py
import utils as u
import os
import tarfile
import torch
class Uc_Irvine_Message_Dataset():
def __init__(self,args):
args.uc_irc_args = u.Namespace(args.uc_irc_args)
tar_file = os.path.join(args.uc_irc_args.folder, args.uc_irc_args.tar_file)
tar_archive = tarfile.open(tar_file, 'r:bz2')
self.edges = self.load_edges(args,tar_archive)
def load_edges(self,args,tar_archive):
data = u.load_data_from_tar(args.uc_irc_args.edges_file,
tar_archive,
starting_line=2,
sep=' ')
cols = u.Namespace({'source': 0,
'target': 1,
'weight': 2,
'time': 3})
data = data.long()
self.num_nodes = int(data[:,[cols.source,cols.target]].max())
#first id should be 0 (they are already contiguous)
data[:,[cols.source,cols.target]] -= 1
        #add edges in the other direction (symmetric)
data = torch.cat([data,
data[:,[cols.target,
cols.source,
cols.weight,
cols.time]]],
dim=0)
data[:,cols.time] = u.aggregate_by_time(data[:,cols.time],
args.uc_irc_args.aggr_time)
ids = data[:,cols.source] * self.num_nodes + data[:,cols.target]
self.num_non_existing = float(self.num_nodes**2 - ids.unique().size(0))
idx = data[:,[cols.source,
cols.target,
cols.time]]
self.max_time = data[:,cols.time].max()
self.min_time = data[:,cols.time].min()
        return {'idx': idx, 'vals': torch.ones(idx.size(0))}
|
Algorithms/Python/Dijkstra.py | Shaswat-2203/HacktoberfestForBeginners | 115 | 11066372 | <filename>Algorithms/Python/Dijkstra.py
import numpy as np
from Graph import Node, Graph
class Dijkstra(Graph):
def __init__(self, graph):
self.nodes = graph.nodes
self.num_nodes = graph.num_nodes
self.min_costs = None
self.next_nodes = {}
self.visited_nodes = []
def add_next_node(self, next_node, current_node):
if next_node not in self.visited_nodes:
try:
self.next_nodes[next_node] = min(self.nodes[current_node].neighbors[next_node] +
self.min_costs[current_node], self.next_nodes[next_node])
            except KeyError:
self.next_nodes[next_node] = self.nodes[current_node].neighbors[next_node] + \
self.min_costs[current_node]
def remove_next_node(self, remove_idx):
self.next_nodes.pop(remove_idx)
def visit_node(self, node_idx, cost, step_by_step=False):
self.min_costs[node_idx] = cost
if step_by_step:
print(" Chosen node: ", node_idx, " | New minimum costs: ", list(self.min_costs))
print("-------------------------------------------------------------------------")
self.visited_nodes.append(node_idx)
self.remove_next_node(remove_idx=node_idx)
def get_min_in_next_nodes(self, step_by_step=False):
minimum_value = min(self.next_nodes.values())
if step_by_step:
print("Value of nodes to be inserted: ", self.next_nodes)
print("-------------------------------------------------------------------------")
print("Lowest value: ", minimum_value, "<<<")
for node in self.next_nodes.keys():
if self.next_nodes[node] == minimum_value:
if step_by_step:
print("Lowest value node: ", node, "<<<")
print("-------------------------------------------------------------------------")
return node
def build_instance(self, node_idx, step_by_step=False, next_nodes=True):
self.min_costs = np.full(self.num_nodes,float('inf'))
if step_by_step:
print("Initial node: ", node_idx)
print("Initial costs: ", list(self.min_costs))
print("-------------------------------------------------------------------------")
if next_nodes:
self.next_nodes[node_idx] = 0
else:
self.min_costs[node_idx] = 0
def run(self, node_idx, step_by_step=False):
self.build_instance(node_idx, step_by_step=step_by_step)
while(len(self.next_nodes.keys()) > 0):
chosen_node_idx = self.get_min_in_next_nodes(step_by_step=step_by_step)
self.visit_node(node_idx=chosen_node_idx, cost=self.next_nodes[chosen_node_idx],
step_by_step=step_by_step)
for node in self.nodes[chosen_node_idx].neighbors.keys():
self.add_next_node(next_node=node, current_node=chosen_node_idx)
if step_by_step:
print("Novos nós a serem inseridos: ", self.next_nodes)
def show_results(self):
print("\n\n\n\n")
print("Minimum paths:")
for idx, cost in enumerate(self.min_costs):
print("-------------------------------------------------------------------------")
print("Node number:", idx+1,"\nCost:", cost)
|
reudom/__init__.py | BarryYBL/reudom | 393 | 11066384 | #!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .case import TestCase
from .running.test_runner import main
from .TestRunner.HTMLTestRunner import SMTP
# Skip test cases
from .skip import skip
from .skip import skip_if
from .skip import skip_unless
# Import requests methods
from requests import post
from requests import get
from requests import put
from requests import head
from requests import patch
from requests import options
# Timestamp
from .testdata.timestamp import TimeStamp
# ddt data-driven testing
from .testdata.parameterizeds import ddt, ddt_class
# User-Agent browser user agent strings
from .testdata.User_Agent import chromePC, safariPC, iePC, firefoxPc
from .testdata.User_Agent import chromePhone, safariPhone, iePhone, firefoxPhone
__author__ = "Barry"
__version__ = "1.2.3.0"
__description__ = "Automated testing framework based on requests and unittest interface."
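# Illustrative sketch (not part of the package): a minimal test case written
# against this API; the URL is a placeholder.
#
#     import reudom
#
#     class TestApi(reudom.TestCase):
#         def test_get(self):
#             response = reudom.get("https://httpbin.org/get")
#             self.assertEqual(response.status_code, 200)
#
#     if __name__ == "__main__":
#         reudom.main()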
|
accelerator/examples/a_dsexample_appendcolumn.py | eBay/accelerator | 143 | 11066399 | <filename>accelerator/examples/a_dsexample_appendcolumn.py
description = "Dataset: Append a column to an existing dataset."
datasets = ('source',)
def prepare(job):
dw = job.datasetwriter(parent=datasets.source)
dw.add('Floatcolumn', 'float64')
return dw
def analysis(prepare_res, sliceno):
dw = prepare_res
for n in datasets.source.iterate(sliceno, 'Numbercolumn'):
dw.write(n * 3.14)
|
v2.0.1/python/examples/template/strategies/rates_historic.py | 1Blackdiamondsc/dwx-zeromq-connector | 320 | 11066416 | <reponame>1Blackdiamondsc/dwx-zeromq-connector
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
rates_historic.py
An example using the Darwinex ZeroMQ Connector for Python 3 and MetaTrader 4 PULL REQUEST
    for v2.0.1 in which a Client requests rate history from EURGBP Daily from 2019.01.04
    to 2019.01.14.
-------------------
Rates history:
-------------------
Through commmand HIST, this client can select multiple rates from an INSTRUMENT (symbol, timeframe).
For example, to receive rates from instruments EURUSD(M1), between two dates, it will send this
command to the Server, through its PUSH channel:
"HIST;EURUSD;1;2019.01.04 00:00:00;2019.01.14 00:00:00"
--
@author: [raulMrello](https://www.linkedin.com/in/raul-martin-19254530/)
"""
#############################################################################
# DWX-ZMQ required imports
#############################################################################
# Append path for main project folder
import sys
sys.path.append('../../..')
# Import ZMQ-Strategy from relative path
from examples.template.strategies.base.DWX_ZMQ_Strategy import DWX_ZMQ_Strategy
#############################################################################
# Other required imports
#############################################################################
import os
from pandas import Timedelta, to_datetime, Timestamp
from threading import Thread, Lock
from time import sleep
import random
#############################################################################
# Class derived from DWZ_ZMQ_Strategy includes data processor for PULL,SUB data
#############################################################################
class rates_historic(DWX_ZMQ_Strategy):
def __init__(self,
_name="PRICES_SUBSCRIPTIONS",
_delay=0.1,
_broker_gmt=3,
_verbose=False):
# call DWX_ZMQ_Strategy constructor and passes itself as data processor for handling
# received data on PULL and SUB ports
super().__init__(_name,
[], # Empty symbol list (not needed for this example)
_broker_gmt,
[self], # Registers itself as handler of pull data via self.onPullData()
[self], # Registers itself as handler of sub data via self.onSubData()
_verbose)
# This strategy's variables
self._delay = _delay
self._verbose = _verbose
self._finished = False
# lock for acquire/release of ZeroMQ connector
self._lock = Lock()
##########################################################################
def isFinished(self):
""" Check if execution finished"""
return self._finished
##########################################################################
def onPullData(self, data):
"""
Callback to process new data received through the PULL port
"""
# print responses to request commands
print('Historic from ExpertAdvisor={}'.format(data))
##########################################################################
def onSubData(self, data):
"""
Callback to process new data received through the SUB port
"""
# split msg to get topic and message
_topic, _msg = data.split(" ")
print('Data on Topic={} with Message={}'.format(_topic, _msg))
##########################################################################
def run(self):
"""
Request historic data
"""
self._finished = False
# request rates
print('Requesting Daily Rates from EURGBP')
self._zmq._DWX_MTX_SEND_HIST_REQUEST_(_symbol='EURGBP',
_timeframe=1440,
_start='2020.05.04 00:00:00',
_end ='2020.05.14 00:00:00')
sleep(1)
print('\nHistory Data Dictionary:')
print(self._zmq._History_DB)
# finishes (removes all subscriptions)
self.stop()
##########################################################################
def stop(self):
"""
unsubscribe from all market symbols and exits
"""
# remove subscriptions and stop symbols price feeding
try:
# Acquire lock
self._lock.acquire()
self._zmq._DWX_MTX_UNSUBSCRIBE_ALL_MARKETDATA_REQUESTS_()
print('Unsubscribing from all topics')
finally:
# Release lock
self._lock.release()
sleep(self._delay)
self._finished = True
""" -----------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------
SCRIPT SETUP
-----------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------
"""
if __name__ == "__main__":
    # creates object with a predefined configuration: historic EURGBP_D1 between 4th and 14th January 2019
print('Loading example...')
example = rates_historic()
# Starts example execution
    print('Running example...')
example.run()
# Waits example termination
print('Waiting example termination...')
while not example.isFinished():
sleep(1)
print('Bye!!!')
|
examples/cvode/serial/plot_cvPendulum.py | tbrk/sundials | 256 | 11066424 | <gh_stars>100-1000
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# Programmer(s): <NAME> @ LLNL
# ------------------------------------------------------------------------------
# SUNDIALS Copyright Start
# Copyright (c) 2002-2021, Lawrence Livermore National Security
# and Southern Methodist University.
# All rights reserved.
#
# See the top-level LICENSE and NOTICE files for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# SUNDIALS Copyright End
# ------------------------------------------------------------------------------
# matplotlib-based plotting script for cvPendulum_dns.c example
# ------------------------------------------------------------------------------
# imports
import argparse
import numpy as np
import matplotlib.pyplot as plt
# command line options
parser = argparse.ArgumentParser(description='Plots cvPendulum_dns output')
parser.add_argument('sfile', type=str,
help='solution output file to read')
# parse inputs
args = parser.parse_args()
# read solution output file
data = np.loadtxt(args.sfile, dtype=np.double)
# extract times, positions, and velocities
t = data[:, 0]
x = data[:, 1]
y = data[:, 2]
vx = data[:, 3]
vy = data[:, 4]
# read reference solution output file
ref = np.loadtxt("cvPendulum_dns_ref.txt", dtype=np.double)
# extract positions and velocities
xr = ref[:, 1]
yr = ref[:, 2]
vxr = ref[:, 3]
vyr = ref[:, 4]
# lower half of unit circle
tt = np.linspace(np.pi, 2*np.pi, 10000)
xt = np.cos(tt)
yt = np.sin(tt)
# plot solution in xy plane
fig, ax = plt.subplots()
ax.axhline(y=0, color='black', linestyle='--', label=None)
ax.axvline(x=0, color='black', linestyle='--', label=None)
plt.plot(xt, yt, color='black', linestyle='--', label=None)
plt.scatter(x, y, color='red', label='comp')
plt.scatter(xr, yr, color='blue', label='ref')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Pendulum')
ax.set_aspect('equal')
plt.legend(loc='lower right')
# plot position over time
fig, ax = plt.subplots()
ax.axhline(y=0, color='black', linestyle='--')
plt.plot(t, x, label='x')
plt.plot(t, y, label='y')
plt.xlabel('t')
plt.ylabel('position')
plt.title('Pendulum Position')
plt.legend()
# plot velocity over time
fig, ax = plt.subplots()
ax.axhline(y=0, color='black', linestyle='--')
plt.plot(t, vx, label='$v_x$')
plt.plot(t, vy, label='$v_y$')
plt.xlabel('t')
plt.ylabel('velocity')
plt.title('Pendulum Velocity')
plt.legend()
# display plots
plt.show()
##### end of script #####
|
recipes/Python/128711_Fibonacci_Object/recipe-128711.py | tdiprima/code | 2,023 | 11066428 | from __future__ import generators
from math import sqrt, pow, floor
class Fibonacci:
"""A simple class which brings functions to calculate Fibonacci numbers
and make operations between them"""
def __init__(self):
self.Phi = (1 + sqrt(5))/2
self.PhiP = (1 - sqrt(5))/2
self.rs5 = 1/sqrt(5)
self.succ = self.unboundedGenerator()
def __call__(self, n=-1):
if n == -1:
return self.next()
return self.nth(n)
def next(self):
"""Next Fibonacci number in the sucesion"""
return self.succ.next()
def nth(self, n):
"""Calculate the nth Fibonacci Number by formula. Doesn't work for n > 1474"""
return floor(self.rs5 * (pow(self.Phi, n) - pow(self.PhiP, n)))
def list(self, k, n):
"""Returns a list from Fibonacci(k) to Fibonacci(n) numbers"""
return [ self.nth(i) for i in range(k, n + 1) ]
def first(self, n):
"""Returns a list with the first n Fibonacci numbers"""
g = self.generator(n)
return [ g.next() for i in range(n) ]
def unboundedGenerator(self):
"""Unbounded Fibonacci generator"""
thisnum, nextnum = 0, 1L
while 1:
yield thisnum
thisnum, nextnum = nextnum, thisnum + nextnum
return
def generator(self, n):
"""n-Bounded Fibonacci generator"""
thisnum, nextnum = 0, 1L
for i in range(n + 1):
yield thisnum
thisnum, nextnum = nextnum, thisnum + nextnum
return
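# Illustrative usage sketch (not part of the original recipe; Python 2 syntax
# to match the class above):
#
#   fib = Fibonacci()
#   print fib(10)        # -> 55.0, nth number via the closed-form formula
#   print fib.first(6)   # -> [0, 1L, 1L, 2L, 3L, 5L]
#   print fib()          # -> 0, next value from the internal unbounded generator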
|
theory/read_mesh_dump.py | johnterickson/Mesh | 1,494 | 11066437 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
import argparse
from collections import defaultdict
import meshers
MB = 1/1024.0/1024.0
class Span(object):
def __init__(self, obj):
self.name = obj['name']
self.size = obj['object-size']
self.length = obj['length']
self.bitmap = obj['bitmap']
def read_data(json_path):
'''
Reads a dict of size -> span list from a mesh dump file at `json_path`
'''
size_classes = defaultdict(list)
with open(json_path) as f:
for l in f.readlines():
obj = json.loads(l)
span = Span(obj)
size_classes[span.size].append(span)
return size_classes
def count_meshes(mesher, spans):
bitmaps = [s.bitmap for s in spans]
if len(bitmaps) % 2 == 1:
        # pad with a fully-occupied bitmap so the mesher gets an even count
        bitmaps.append('1' * len(bitmaps[0]))
return mesher(bitmaps)
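# Conceptual sketch (not part of the meshers module): two spans can mesh when
# their occupancy bitmaps have no bit set at the same position, e.g.:
#
#   def can_mesh(a, b):
#       return not any(x == '1' and y == '1' for x, y in zip(a, b))
#
#   can_mesh('1010', '0101')  # -> True, the two spans' live objects never collide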
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--size', type=int, help='size to dump graph')
parser.add_argument('json_file', nargs=1, help='A JSON dump from libmesh')
args = parser.parse_args()
if not args.json_file:
parser.print_help()
return 1
size_classes = read_data(args.json_file[0])
sizes = sorted(size_classes.keys(), reverse=True)
total_size = 0
for size in sizes:
spans = size_classes[size]
total_size += sum([s.size * s.length for s in spans])
print('Total heap size: %.1f MiB' % (total_size * MB,))
saved = 0
for size in sizes:
if size >= 4096:
continue
spans = size_classes[size]
# n = count_meshes(meshers.optimalMesher, spans)
n = count_meshes(meshers.greedyMesher, spans)
print('\t%5d: %d spans (%d meshes)' % (size, len(spans), len(n)))
saved += (size * spans[0].length) * len(n)
print('Saved size: %.1f MiB' % (saved * MB,))
return 0
if __name__ == '__main__':
sys.exit(main())
|
scripts/misc/ray_test.py | microsoft/archai | 344 | 11066441 | <reponame>microsoft/archai
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
import time
import random
import ray
import torch
from archai import cifar10_models
from archai.common.trainer import Trainer
from archai.common.config import Config
from archai.common import common
from archai.datasets import data
from archai.common.metrics import Metrics
def train_test()->Metrics:
conf = common.get_conf()
conf_eval = conf['nas']['eval']
# region conf vars
conf_loader = conf_eval['loader']
conf_trainer = conf_eval['trainer']
# endregion
conf_trainer['validation']['freq']=1
conf_trainer['epochs'] = 1
conf_loader['train_batch'] = 128
conf_loader['test_batch'] = 4096
conf_loader['cutout'] = 0
conf_trainer['drop_path_prob'] = 0.0
conf_trainer['grad_clip'] = 0.0
conf_trainer['aux_weight'] = 0.0
Net = cifar10_models.resnet34
model = Net().to(torch.device('cuda'))
# get data
data_loaders = data.get_data(conf_loader)
assert data_loaders.train_dl is not None and data_loaders.test_dl is not None
trainer = Trainer(conf_trainer, model, None)
trainer.fit(data_loaders)
met = trainer.get_metrics()
return met
@ray.remote(num_gpus=1)
def train_test_ray(common_state):
common.init_from(common_state)
return train_test()
def train_test_dist():
start = time.time()
result_ids = [train_test_ray.remote(common.get_state()) for x in range(2)]
while len(result_ids):
done_id, result_ids = ray.wait(result_ids)
metrics:Metrics = ray.get(done_id[0])
print(f'result={metrics.run_metrics.epochs_metrics[-1].top1.avg}, '
f'time={time.time()-start}')
if __name__ == '__main__':
ray.init(num_gpus=1)
print('ray init done')
common.common_init(config_filepath='confs/algos/darts.yaml',
param_args=['--common.experiment_name', 'resnet_test'])
train_test_dist()
exit(0) |
pypy2.7/multiprocess/__init__.py | agcopenhaver/multiprocess | 356 | 11066450 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__version__ = '0.70.13.dev0'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = '<NAME> (<EMAIL>)'
#
# Imports
#
import os
import sys
from multiprocess.process import Process, current_process, active_children
from multiprocess.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
# This is down here because _multiprocessing uses BufferTooShort
try:
import _multiprocess as _multiprocessing
except ImportError:
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
'''
Returns a manager associated with a running server process
    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from multiprocess.managers import SyncManager
m = SyncManager()
m.start()
return m
def Pipe(duplex=True):
'''
    Returns two connection objects connected by a pipe
'''
from multiprocess.connection import Pipe
return Pipe(duplex)
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif 'bsd' in sys.platform or sys.platform == 'darwin':
comm = '/sbin/sysctl -n hw.ncpu'
if sys.platform == 'darwin':
comm = '/usr' + comm
try:
with os.popen(comm) as p:
num = int(p.read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from multiprocess.forking import freeze_support
freeze_support()
def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from multiprocess.util import get_logger
return get_logger()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from multiprocess.util import log_to_stderr
return log_to_stderr(level)
def allow_connection_pickling():
'''
Install support for sending connections and sockets between processes
'''
from multiprocess import reduction
#
# Definitions depending on native semaphores
#
def Lock():
'''
Returns a non-recursive lock object
'''
from multiprocess.synchronize import Lock
return Lock()
def RLock():
'''
Returns a recursive lock object
'''
from multiprocess.synchronize import RLock
return RLock()
def Condition(lock=None):
'''
Returns a condition object
'''
from multiprocess.synchronize import Condition
return Condition(lock)
def Semaphore(value=1):
'''
Returns a semaphore object
'''
from multiprocess.synchronize import Semaphore
return Semaphore(value)
def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from multiprocess.synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Event():
'''
Returns an event object
'''
from multiprocess.synchronize import Event
return Event()
def Queue(maxsize=0):
'''
Returns a queue object
'''
from multiprocess.queues import Queue
return Queue(maxsize)
def JoinableQueue(maxsize=0):
'''
Returns a queue object
'''
from multiprocess.queues import JoinableQueue
return JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
'''
Returns a process pool object
'''
from multiprocess.pool import Pool
return Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from multiprocess.sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from multiprocess.sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
'''
Returns a synchronized shared object
'''
from multiprocess.sharedctypes import Value
return Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Returns a synchronized shared array
'''
from multiprocess.sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
if sys.platform == 'win32':
def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from multiprocess.forking import set_executable
set_executable(executable)
__all__ += ['set_executable']
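#
# Example usage (illustrative sketch, not part of the original module):
#
#   from multiprocess import Pool
#
#   def square(x):
#       return x * x
#
#   if __name__ == '__main__':      # the guard is required on Windows
#       pool = Pool(processes=4)
#       print(pool.map(square, range(5)))   # -> [0, 1, 4, 9, 16]
#       pool.close()
#       pool.join()
#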
|
config.py | josephernest/talktalktalk | 278 | 11066452 | <reponame>josephernest/talktalktalk<filename>config.py
PORT = 9000 # if you change this line, change the port as well in .htaccess
HOST = "127.0.0.1"
ADMINNAME = 'admin' # this username will be available if *and only if* the following username is entered in the input field:
ADMINHIDDENNAME = 'adminxyz'
ALLOWEDTAGS = [] # tags allowed in messages, could be ['a', 'b', 'em', 'code'], etc. |
pygears/lib/din_cat.py | bogdanvuk/pygears | 120 | 11066453 | from pygears import gear
from pygears.lib import ccat
def din_cat(f, other):
@gear
def din_cat_impl(cfg, din):
return ccat(din, cfg) | f
return din_cat_impl(other)
|
third_party/gsutil/third_party/apitools/apitools/gen/util_test.py | tingshao/catapult | 2,151 | 11066469 | #
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util."""
import unittest2
from apitools.gen import util
class NormalizeVersionTest(unittest2.TestCase):
def testVersions(self):
already_valid = 'v1'
self.assertEqual(already_valid, util.NormalizeVersion(already_valid))
to_clean = 'v0.1'
self.assertEqual('v0_1', util.NormalizeVersion(to_clean))
class NamesTest(unittest2.TestCase):
def testKeywords(self):
names = util.Names([''])
self.assertEqual('in_', names.CleanName('in'))
def testNormalizeEnumName(self):
names = util.Names([''])
self.assertEqual('_0', names.NormalizeEnumName('0'))
|
py/google/fhir/r4/extensions_test.py | swrobel/fhir | 648 | 11066475 | <filename>py/google/fhir/r4/extensions_test.py
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test extensions functionality."""
import os
import sys
from typing import Type
from google.protobuf import message
from absl.testing import absltest
from proto.google.fhir.proto.r4 import fhirproto_extensions_pb2
from proto.google.fhir.proto.r4 import ml_extensions_pb2
from proto.google.fhir.proto.r4.core import datatypes_pb2
from proto.google.fhir.proto.r4.core import extensions_pb2
from proto.google.fhir.proto.r4.core.resources import patient_pb2
from google.fhir import extensions
from google.fhir import extensions_test
try:
from testdata.r4.profiles import test_extensions_pb2
except ImportError:
# TODO: Add test protos to PYTHONPATH during dist testing.
pass # Fall through
_EXTENSIONS_DIR = os.path.join('testdata', 'r4', 'extensions')
class ExtensionsTest(extensions_test.ExtensionsTest):
"""Tests functionality provided by the extensions module."""
@property
def extension_cls(self) -> Type[message.Message]:
return datatypes_pb2.Extension
@property
def testdata_dir(self) -> str:
return _EXTENSIONS_DIR
def testGetFhirExtensions_withNoExtensions_returnsEmptyList(self):
"""Tests get_fhir_extensions returns an empty list with no extensions."""
patient = patient_pb2.Patient()
self.assertEmpty(extensions.get_fhir_extensions(patient))
def testGetFhirExtensions_withExtensions_returnsList(self):
"""Tests get_fhir_extensions returns a non-empty list with extensions."""
patient = patient_pb2.Patient()
patient.extension.add(
url=datatypes_pb2.Uri(value='abcd'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)))
self.assertLen(extensions.get_fhir_extensions(patient), 1)
def testClearFhirExtensions_withMultipleExtensions_succeeds(self):
"""Tests ClearFhirExtensions when a message has multiple extensions."""
arbitrary_string = datatypes_pb2.String()
arbitrary_string.extension.add(
url=datatypes_pb2.Uri(value='first'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)))
arbitrary_string.extension.add(
url=datatypes_pb2.Uri(value='second'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)))
arbitrary_string.extension.add(
url=datatypes_pb2.Uri(value='third'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)))
self.assertLen(extensions.get_fhir_extensions(arbitrary_string), 3)
# Remove middle extension
extensions.clear_fhir_extensions_with_url(arbitrary_string, 'second')
remaining_extensions = extensions.get_fhir_extensions(arbitrary_string)
self.assertLen(remaining_extensions, 2)
remaining_urls = [extension.url.value for extension in remaining_extensions]
self.assertEqual(remaining_urls, ['first', 'third'])
def testExtensionToMessage_withEventTrigger_succeeds(self):
self.assert_extension_to_message_equals_golden(
'trigger', ml_extensions_pb2.EventTrigger)
def testMessageToExtension_withEventTrigger_succeeds(self):
self.assert_message_to_extension_equals_golden(
'trigger', ml_extensions_pb2.EventTrigger)
def testExtensionToMessage_withEventLabel_succeeds(self):
self.assert_extension_to_message_equals_golden('label',
ml_extensions_pb2.EventLabel)
def testMessageToExtension_withEventLabel_succeeds(self):
self.assert_message_to_extension_equals_golden('label',
ml_extensions_pb2.EventLabel)
def testExtensionToMessage_withPrimitiveHasNoValue_succeeds(self):
self.assert_extension_to_message_equals_golden(
'primitive_has_no_value', fhirproto_extensions_pb2.PrimitiveHasNoValue)
def testMessageToExtension_withPrimitiveHasNoValue_succeeds(self):
self.assert_message_to_extension_equals_golden(
'primitive_has_no_value', fhirproto_extensions_pb2.PrimitiveHasNoValue)
def testExtensionToMessage_withEmptyPrimitiveHasNoValue_succeeds(self):
self.assert_extension_to_message_equals_golden(
'empty', fhirproto_extensions_pb2.PrimitiveHasNoValue)
def testMessageToExtension_withEmptyPrimitiveHasNoValue_succeeds(self):
self.assert_message_to_extension_equals_golden(
'empty', fhirproto_extensions_pb2.PrimitiveHasNoValue)
def testExtensionToMessage_withCapabilityStatementSearchParameterCombination_succeeds(
self):
self.assert_extension_to_message_equals_golden(
'capability',
extensions_pb2.CapabilityStatementSearchParameterCombination)
def testMessageToExtension_withCapabilityStatementSearchParameterCombination_succeeds(
self):
self.assert_message_to_extension_equals_golden(
'capability',
extensions_pb2.CapabilityStatementSearchParameterCombination)
@absltest.skipIf(
'testdata' not in sys.modules,
'google-fhir package does not build+install tertiary testdata protos.')
def testExtensionToMessage_withDigitalMediaType_succeeds(self):
self.assert_extension_to_message_equals_golden(
'digital_media_type', test_extensions_pb2.DigitalMediaType)
@absltest.skipIf(
'testdata' not in sys.modules,
'google-fhir package does not build+install tertiary testdata protos.')
def testMessageToExtension_withDigitalMediaType_succeeds(self):
self.assert_message_to_extension_equals_golden(
'digital_media_type', test_extensions_pb2.DigitalMediaType)
if __name__ == '__main__':
absltest.main()
|
onetimepass/__init__.py | shantanoo/onetimepass | 461 | 11066489 | """
onetimepass module is designed to work for one-time passwords - HMAC-based and
time-based. It is compatible with Google Authenticator application and
applications based on it.
@version: 1.0.1
@author: <NAME>
@contact: http://github.com/tadeck
@license: MIT
>>> secret = b'MFRGGZDFMZTWQ2LK'
>>> get_hotp(secret, 1) == 765705
True
>>> get_hotp(secret, 1, as_string=True) == b'765705'
True
>>> valid_hotp(get_hotp(secret, 123), secret)
123
>>> valid_hotp(get_hotp(secret, 123), secret, last=123)
False
>>> valid_totp(get_totp(secret), secret)
True
>>> valid_totp(get_totp(secret)+1, secret)
False
>>> valid_hotp(get_totp(secret), secret)
False
>>> valid_totp(get_hotp(secret, 1), secret)
False
"""
import base64
import hashlib
import hmac
import six
import struct
import time
__author__ = '<NAME> <<EMAIL>>'
__date__ = '31 July 2015'
__version_info__ = (1, 0, 1)
__version__ = '%s.%s.%s' % __version_info__
__license__ = 'MIT'
def _is_possible_token(token, token_length=6):
"""Determines if given value is acceptable as a token. Used when validating
tokens.
Currently allows only numeric tokens no longer than 6 chars.
:param token: token value to be checked
:type token: int or str
:param token_length: allowed length of token
:type token_length: int
:return: True if can be a candidate for token, False otherwise
:rtype: bool
>>> _is_possible_token(123456)
True
>>> _is_possible_token(b'<PASSWORD>')
True
>>> _is_possible_token(b'abcdef')
False
>>> _is_possible_token(b'<PASSWORD>')
False
"""
if not isinstance(token, bytes):
token = six.b(str(token))
return token.isdigit() and len(token) <= token_length
def get_hotp(
secret,
intervals_no,
as_string=False,
casefold=True,
digest_method=hashlib.sha1,
token_length=6,
):
"""
Get HMAC-based one-time password on the basis of given secret and
interval number.
:param secret: the base32-encoded string acting as secret key
:type secret: str or unicode
:param intervals_no: interval number used for getting different tokens, it
is incremented with each use
:type intervals_no: int
:param as_string: True if result should be padded string, False otherwise
:type as_string: bool
:param casefold: True (default), if should accept also lowercase alphabet
:type casefold: bool
:param digest_method: method of generating digest (hashlib.sha1 by default)
:type digest_method: callable
:param token_length: length of the token (6 by default)
:type token_length: int
:return: generated HOTP token
:rtype: int or str
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=1)
765705
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2)
816065
>>> result = get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2, as_string=True)
>>> result == b'816065'
True
"""
if isinstance(secret, six.string_types):
# It is unicode, convert it to bytes
secret = secret.encode('utf-8')
# Get rid of all the spacing:
secret = secret.replace(b' ', b'')
try:
key = base64.b32decode(secret, casefold=casefold)
except (TypeError):
raise TypeError('Incorrect secret')
msg = struct.pack('>Q', intervals_no)
hmac_digest = hmac.new(key, msg, digest_method).digest()
ob = hmac_digest[19] if six.PY3 else ord(hmac_digest[19])
o = ob & 15
token_base = struct.unpack('>I', hmac_digest[o:o + 4])[0] & 0x7fffffff
token = token_base % (10 ** token_length)
if as_string:
# TODO: should as_string=True return unicode, not bytes?
return six.b('{{:0{}d}}'.format(token_length).format(token))
else:
return token
def get_totp(
secret,
as_string=False,
digest_method=hashlib.sha1,
token_length=6,
interval_length=30,
clock=None,
):
"""Get time-based one-time password on the basis of given secret and time.
:param secret: the base32-encoded string acting as secret key
:type secret: str
:param as_string: True if result should be padded string, False otherwise
:type as_string: bool
:param digest_method: method of generating digest (hashlib.sha1 by default)
:type digest_method: callable
:param token_length: length of the token (6 by default)
:type token_length: int
:param interval_length: length of TOTP interval (30 seconds by default)
:type interval_length: int
:param clock: time in epoch seconds to generate totp for, default is now
:type clock: int
:return: generated TOTP token
:rtype: int or str
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', int(time.time())//30) == \
get_totp(b'MFRGGZDFMZTWQ2LK')
True
>>> get_hotp(b'MFRGGZDFMZTWQ2LK', int(time.time())//30) == \
get_totp(b'MFRGGZDFMZTWQ2LK', as_string=True)
False
"""
if clock is None:
clock = time.time()
interv_no = int(clock) // interval_length
return get_hotp(
secret,
intervals_no=interv_no,
as_string=as_string,
digest_method=digest_method,
token_length=token_length,
)
def valid_hotp(
token,
secret,
last=1,
trials=1000,
digest_method=hashlib.sha1,
token_length=6,
):
"""Check if given token is valid for given secret. Return interval number
that was successful, or False if not found.
:param token: token being checked
:type token: int or str
:param secret: secret for which token is checked
:type secret: str
:param last: last used interval (start checking with next one)
:type last: int
:param trials: number of intervals to check after 'last'
:type trials: int
:param digest_method: method of generating digest (hashlib.sha1 by default)
:type digest_method: callable
:param token_length: length of the token (6 by default)
:type token_length: int
:return: interval number, or False if check unsuccessful
:rtype: int or bool
>>> secret = b'<KEY>'
>>> valid_hotp(713385, secret, last=1, trials=5)
4
>>> valid_hotp(865438, secret, last=1, trials=5)
False
>>> valid_hotp(713385, secret, last=4, trials=5)
False
"""
if not _is_possible_token(token, token_length=token_length):
return False
for i in six.moves.xrange(last + 1, last + trials + 1):
token_candidate = get_hotp(
secret=secret,
intervals_no=i,
digest_method=digest_method,
token_length=token_length,
)
if token_candidate == int(token):
return i
return False
def valid_totp(
token,
secret,
digest_method=hashlib.sha1,
token_length=6,
interval_length=30,
clock=None,
window=0,
):
"""Check if given token is valid time-based one-time password for given
secret.
:param token: token which is being checked
:type token: int or str
:param secret: secret for which the token is being checked
:type secret: str
:param digest_method: method of generating digest (hashlib.sha1 by default)
:type digest_method: callable
:param token_length: length of the token (6 by default)
:type token_length: int
:param interval_length: length of TOTP interval (30 seconds by default)
:type interval_length: int
:param clock: time in epoch seconds to generate totp for, default is now
:type clock: int
:param window: compensate for clock skew, number of intervals to check on
each side of the current time. (default is 0 - only check the current
clock time)
:type window: int (positive)
:return: True, if is valid token, False otherwise
:rtype: bool
>>> secret = b'<KEY>'
>>> token = get_totp(secret)
>>> valid_totp(token, secret)
True
>>> valid_totp(token+1, secret)
False
>>> token = get_totp(secret, as_string=True)
>>> valid_totp(token, secret)
True
>>> valid_totp(token + b'1', secret)
False
"""
if _is_possible_token(token, token_length=token_length):
if clock is None:
clock = time.time()
for w in range(-window, window+1):
if int(token) == get_totp(
secret,
digest_method=digest_method,
token_length=token_length,
interval_length=interval_length,
clock=int(clock)+(w*interval_length)
):
return True
return False
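# Illustrative sketch (the secret is the same demo value used in the doctests):
#
#   secret = b'MFRGGZDFMZTWQ2LK'
#   token = get_totp(secret)
#   # window=1 also accepts tokens from the adjacent 30-second intervals,
#   # which tolerates small clock skew between client and server:
#   valid_totp(token, secret, window=1)   # -> True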
__all__ = [
'get_hotp',
'get_totp',
'valid_hotp',
'valid_totp'
]
|
operations/binsec/dffml_operations_binsec/log.py | SGeetansh/dffml | 171 | 11066513 | <reponame>SGeetansh/dffml
import logging
LOGGER = logging.getLogger(__package__)
|
test/unit/jobs/test_rules_override/10_rule.py | rikeshi/galaxy | 1,085 | 11066519 | <gh_stars>1000+
def rule_module_override():
# Dummy rule for testing rule module overrides
return 'new_rules_package'
|
ryu/services/protocols/bgp/utils/stats.py | w180112/ryu | 975 | 11066531 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for stats related classes and utilities.
"""
import datetime
import json
import logging
import time
from ryu.services.protocols.bgp.rtconf.base import ConfWithId
_STATS_LOGGER = logging.getLogger('stats')
# Various stats related constants.
DEFAULT_LOG_LEVEL = logging.INFO
RESOURCE_ID = 'resource_id'
RESOURCE_NAME = 'resource_name'
TIMESTAMP = 'timestamp'
LOG_LEVEL = 'log_level'
STATS_RESOURCE = 'stats_resource'
STATS_SOURCE = 'stats_source'
# VRF related stat constants
REMOTE_ROUTES = 'remote_routes'
LOCAL_ROUTES = 'local_routes'
# Peer related stat constant.
UPDATE_MSG_IN = 'update_message_in'
UPDATE_MSG_OUT = 'update_message_out'
TOTAL_MSG_IN = 'total_message_in'
TOTAL_MSG_OUT = 'total_message_out'
FMS_EST_TRANS = 'fsm_established_transitions'
UPTIME = 'uptime'
def log(stats_resource=None, stats_source=None, log_level=DEFAULT_LOG_LEVEL,
**kwargs):
"""Utility to log given stats to *stats* logger.
Stats to log are given by `stats_source` and in its absence we log
`kwargs`. *stats* logger is configured independently from any logger.
Only stats should be logged to this logger. Will add current timestamp
to the logged stats if not given.
Parameters:
- `stats_resource`: any object that complies with `id` and `name`
attrs.
        - `stats_source`: any callable that gives a `dict` that will be
          logged to *stats* logger.
        - `log_level`: logging level (e.g. `logging.INFO`) at which to log
          this stats message.
- `**kwargs`: if `stats_source` is not given, we log this `dict`.
"""
# Get stats from source if given.
if stats_source is not None:
kwargs = stats_source()
if stats_resource is None:
if RESOURCE_ID not in kwargs or RESOURCE_NAME not in kwargs:
raise ValueError('Missing required stats labels.')
else:
if not (hasattr(stats_resource, ConfWithId.ID) and
hasattr(stats_resource, ConfWithId.NAME)):
raise ValueError('Given stats source is missing id or name'
' attributes.')
kwargs[RESOURCE_ID] = stats_resource.id
kwargs[RESOURCE_NAME] = stats_resource.name
if TIMESTAMP not in kwargs:
kwargs[TIMESTAMP] = datetime.datetime.utcfromtimestamp(
time.time()).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
_STATS_LOGGER.log(log_level,
json.dumps(kwargs))
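# Example (illustrative; the resource id/name and counter values are made up):
#
#   log(resource_id='peer-192.168.0.1', resource_name='peer-192.168.0.1',
#       update_message_in=10, update_message_out=7)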
def logd(**kwargs):
log(log_level=logging.DEBUG, **kwargs)
def logi(**kwargs):
log(log_level=logging.INFO, **kwargs)
|
src/anyconfig/api/_dump.py | ssato/python-anyconfig | 213 | 11066535 | #
# Copyright (C) 2012 - 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
#
r"""Public APIs to dump configurations data.
"""
from .. import common, ioinfo, parsers
from .datatypes import ParserT
def dump(data: common.InDataExT, out: ioinfo.PathOrIOInfoT,
ac_parser: parsers.MaybeParserT = None, **options
) -> None:
"""
Save 'data' to 'out'.
:param data: A mapping object may have configurations data to dump
:param out:
An output file path, a file, a file-like object, :class:`pathlib.Path`
object represents the file or a namedtuple 'anyconfig.ioinfo.IOInfo'
object represents output to dump some data to.
:param ac_parser: Forced parser type or parser object
:param options:
Backend specific optional arguments, e.g. {"indent": 2} for JSON
loader/dumper backend
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
"""
ioi = ioinfo.make(out)
psr: ParserT = parsers.find(ioi, forced_type=ac_parser)
psr.dump(data, ioi, **options)
def dumps(data: common.InDataExT,
ac_parser: parsers.MaybeParserT = None,
**options) -> str:
"""
Return string representation of 'data' in forced type format.
:param data: Config data object to dump
:param ac_parser: Forced parser type or ID or parser object
:param options: see :func:`dump`
:return: Backend-specific string representation for the given data
:raises: ValueError, UnknownProcessorTypeError
"""
psr: ParserT = parsers.find(None, forced_type=ac_parser)
return psr.dumps(data, **options)
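# Illustrative sketch (assumes `dumps` is re-exported by the public anyconfig
# package and that the bundled JSON backend is available):
#
#   import anyconfig
#   anyconfig.dumps({"a": 1}, ac_parser="json")   # -> '{"a": 1}'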
# vim:sw=4:ts=4:et:
|
Tools/freeze/makeconfig.py | arvindm95/unladen-swallow | 2,293 | 11066542 | import re
import sys  # needed by makeconfig()'s error reporting below
# Write the config.c file
never = ['marshal', '__main__', '__builtin__', 'sys', 'exceptions']
def makeconfig(infp, outfp, modules, with_ifdef=0):
m1 = re.compile('-- ADDMODULE MARKER 1 --')
m2 = re.compile('-- ADDMODULE MARKER 2 --')
while 1:
line = infp.readline()
if not line: break
outfp.write(line)
if m1 and m1.search(line):
m1 = None
for mod in modules:
if mod in never:
continue
if with_ifdef:
outfp.write("#ifndef init%s\n"%mod)
outfp.write('extern void init%s(void);\n' % mod)
if with_ifdef:
outfp.write("#endif\n")
elif m2 and m2.search(line):
m2 = None
for mod in modules:
if mod in never:
continue
outfp.write('\t{"%s", init%s},\n' %
(mod, mod))
if m1:
sys.stderr.write('MARKER 1 never found\n')
elif m2:
sys.stderr.write('MARKER 2 never found\n')
# Test program.
def test():
import sys
if not sys.argv[3:]:
print 'usage: python makeconfig.py config.c.in outputfile',
print 'modulename ...'
sys.exit(2)
if sys.argv[1] == '-':
infp = sys.stdin
else:
infp = open(sys.argv[1])
if sys.argv[2] == '-':
outfp = sys.stdout
else:
outfp = open(sys.argv[2], 'w')
makeconfig(infp, outfp, sys.argv[3:])
if outfp != sys.stdout:
outfp.close()
if infp != sys.stdin:
infp.close()
if __name__ == '__main__':
test()
|
nmt/utils/iterator_utils_test.py | godblessforhimself/nmt | 6,575 | 11066554 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for iterator_utils.py"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from ..utils import iterator_utils
class IteratorUtilsTest(tf.test.TestCase):
def testGetIterator(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["f e a g", "c c a", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c", "a b", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 0, 3], # c a eos -- eos is padding
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([2, 3], src_len_v)
self.assertAllEqual(
[[4, 1, 2], # sos b c
[4, 2, 2]], # sos c c
target_input_v)
self.assertAllEqual(
[[1, 2, 3], # b c eos
[2, 2, 3]], # c c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 2, 0]], # c c a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 0, 1]], # sos a b
target_input_v)
self.assertAllEqual(
[[0, 1, 3]], # a b eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetIteratorWithShard(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "f e a g", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["a b", "c c", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
num_shards=2,
shard_index=1,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 0, 3], # c a eos -- eos is padding
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([2, 3], src_len_v)
self.assertAllEqual(
[[4, 1, 2], # sos b c
[4, 2, 2]], # sos c c
target_input_v)
self.assertAllEqual(
[[1, 2, 3], # b c eos
[2, 2, 3]], # c c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetIteratorWithSkipCount(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c a", "c c a", "d", "f e a g"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["b c", "a b", "", "c c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
skip_count = tf.placeholder(shape=(), dtype=tf.int64)
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
skip_count=skip_count,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer, feed_dict={skip_count: 3})
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 2, 2]], # sos c c
target_input_v)
self.assertAllEqual(
[[2, 2, 3]], # c c eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
# Re-init iterator with skip_count=0.
sess.run(iterator.initializer, feed_dict={skip_count: 0})
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0], # "f" == unknown, "e" == unknown, a
[2, 0, 3]], # c a eos -- eos is padding
source_v)
self.assertAllEqual([3, 2], src_len_v)
self.assertAllEqual(
[[4, 2, 2], # sos c c
[4, 1, 2]], # sos b c
target_input_v)
self.assertAllEqual(
[[2, 2, 3], # c c eos
[1, 2, 3]], # b c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 2, 0]], # c c a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 0, 1]], # sos a b
target_input_v)
self.assertAllEqual(
[[0, 1, 3]], # a b eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetInferIterator(self):
src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "c a", "d", "f e a g"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_infer_iterator(
src_dataset=src_dataset,
src_vocab_table=src_vocab_table,
batch_size=batch_size,
eos=hparams.eos,
src_max_len=src_max_len)
table_initializer = tf.tables_initializer()
source = iterator.source
seq_len = iterator.source_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None], seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, seq_len_v) = sess.run((source, seq_len))
self.assertAllEqual(
[[2, 2, 0], # c c a
[2, 0, 3]], # c a eos
source_v)
self.assertAllEqual([3, 2], seq_len_v)
(source_v, seq_len_v) = sess.run((source, seq_len))
self.assertAllEqual(
[[-1, 3, 3], # "d" == unknown, eos eos
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([1, 3], seq_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run((source, seq_len))
if __name__ == "__main__":
tf.test.main()
|
tools/conv_cmap.py | dntjr970/pdfminer.six | 3,538 | 11066558 | #!/usr/bin/env python3
import sys
import pickle as pickle
import codecs
class CMapConverter:
def __init__(self, enc2codec={}):
self.enc2codec = enc2codec
self.code2cid = {} # {'cmapname': ...}
self.is_vertical = {}
self.cid2unichr_h = {} # {cid: unichr}
self.cid2unichr_v = {} # {cid: unichr}
return
def get_encs(self):
return self.code2cid.keys()
def get_maps(self, enc):
if enc.endswith('-H'):
(hmapenc, vmapenc) = (enc, None)
elif enc == 'H':
(hmapenc, vmapenc) = ('H', 'V')
else:
(hmapenc, vmapenc) = (enc+'-H', enc+'-V')
if hmapenc in self.code2cid:
hmap = self.code2cid[hmapenc]
else:
hmap = {}
self.code2cid[hmapenc] = hmap
vmap = None
if vmapenc:
self.is_vertical[vmapenc] = True
if vmapenc in self.code2cid:
vmap = self.code2cid[vmapenc]
else:
vmap = {}
self.code2cid[vmapenc] = vmap
return (hmap, vmap)
def load(self, fp):
encs = None
for line in fp:
(line, _, _) = line.strip().partition('#')
if not line:
continue
values = line.split('\t')
if encs is None:
assert values[0] == 'CID', str(values)
encs = values
continue
def put(dmap, code, cid, force=False):
for b in code[:-1]:
if b in dmap:
dmap = dmap[b]
else:
d = {}
dmap[b] = d
dmap = d
b = code[-1]
if force or ((b not in dmap) or dmap[b] == cid):
dmap[b] = cid
return
def add(unimap, enc, code):
try:
codec = self.enc2codec[enc]
c = code.decode(codec, 'strict')
if len(c) == 1:
if c not in unimap:
unimap[c] = 0
unimap[c] += 1
except KeyError:
pass
except UnicodeError:
pass
return
def pick(unimap):
chars = list(unimap.items())
chars.sort(key=(lambda x: (x[1], -ord(x[0]))), reverse=True)
(c, _) = chars[0]
return c
cid = int(values[0])
unimap_h = {}
unimap_v = {}
for (enc, value) in zip(encs, values):
if enc == 'CID':
continue
if value == '*':
continue
# hcodes, vcodes: encoded bytes for each writing mode.
hcodes = []
vcodes = []
for code in value.split(','):
vertical = code.endswith('v')
if vertical:
code = code[:-1]
try:
code = codecs.decode(code, 'hex_codec')
except Exception:
code = chr(int(code, 16))
if vertical:
vcodes.append(code)
add(unimap_v, enc, code)
else:
hcodes.append(code)
add(unimap_h, enc, code)
# add cid to each map.
(hmap, vmap) = self.get_maps(enc)
if vcodes:
assert vmap is not None
for code in vcodes:
put(vmap, code, cid, True)
for code in hcodes:
put(hmap, code, cid, True)
else:
for code in hcodes:
put(hmap, code, cid)
put(vmap, code, cid)
# Determine the "most popular" candidate.
if unimap_h:
self.cid2unichr_h[cid] = pick(unimap_h)
if unimap_v or unimap_h:
self.cid2unichr_v[cid] = pick(unimap_v or unimap_h)
return
def dump_cmap(self, fp, enc):
data = dict(
IS_VERTICAL=self.is_vertical.get(enc, False),
CODE2CID=self.code2cid.get(enc),
)
fp.write(pickle.dumps(data, 2))
return
def dump_unicodemap(self, fp):
data = dict(
CID2UNICHR_H=self.cid2unichr_h,
CID2UNICHR_V=self.cid2unichr_v,
)
fp.write(pickle.dumps(data, 2))
return
def main(argv):
import getopt
import gzip
import os.path
def usage():
print('usage: %s [-c enc=codec] output_dir regname [cid2code.txt ...]'
% argv[0])
return 100
try:
(opts, args) = getopt.getopt(argv[1:], 'c:')
except getopt.GetoptError:
return usage()
enc2codec = {}
for (k, v) in opts:
if k == '-c':
(enc, _, codec) = v.partition('=')
enc2codec[enc] = codec
if not args:
return usage()
outdir = args.pop(0)
if not args:
return usage()
regname = args.pop(0)
converter = CMapConverter(enc2codec)
for path in args:
print('reading: %r...' % path)
fp = open(path)
converter.load(fp)
fp.close()
for enc in converter.get_encs():
fname = '%s.pickle.gz' % enc
path = os.path.join(outdir, fname)
print('writing: %r...' % path)
fp = gzip.open(path, 'wb')
converter.dump_cmap(fp, enc)
fp.close()
fname = 'to-unicode-%s.pickle.gz' % regname
path = os.path.join(outdir, fname)
print('writing: %r...' % path)
fp = gzip.open(path, 'wb')
converter.dump_unicodemap(fp)
fp.close()
return
if __name__ == '__main__':
sys.exit(main(sys.argv)) # type: ignore[no-untyped-call]
|
Pyro5/compatibility/Pyro4.py | lonly7star/Pyro5 | 188 | 11066559 | """
An effort to provide a backward-compatible Pyro4 API layer,
to make porting existing code from Pyro4 to Pyro5 easier.
This only works for code that imported Pyro4 symbols from the Pyro4 module
directly, instead of from one of Pyro4's sub modules. So, for instance:
from Pyro4 import Proxy instead of: from Pyro4.core import Proxy
*some* submodules are more or less emulated such as Pyro4.errors, Pyro4.socketutil.
So, you may first have to convert your old code to use the importing scheme to
only import the Pyro4 module and not from its submodules, and then you should
insert this at the top to enable the compatibility layer:
from Pyro5.compatibility import Pyro4
Pyro - Python Remote Objects. Copyright by <NAME> (<EMAIL>).
"""
import sys
import ipaddress
from .. import api
from .. import errors
from .. import serializers
from .. import socketutil as socketutil_pyro5
from ..configure import Configuration
__all__ = ["config", "URI", "Proxy", "Daemon", "callback", "batch",
"asyncproxy", "oneway", "expose", "behavior", "current_context",
"locateNS", "resolve", "Future", "errors"]
# symbols that are no longer available in Pyro5 and that we don't emulate:
def asyncproxy(*args, **kwargs):
raise NotImplementedError("async proxy is no longer available in Pyro5")
class Future(object):
def __init__(self, *args):
raise NotImplementedError("Pyro5 no longer provides its own Future class, "
"you should use Python's concurrent.futures module instead for that")
class NamespaceInterceptor:
def __init__(self, namespace):
self.namespace = namespace
def __getattr__(self, item):
raise NotImplementedError("The Pyro4 compatibility layer doesn't provide the Pyro4.{0} module, "
"first make sure the code only uses symbols from the Pyro4 package directly"
.format(self.namespace))
naming = NamespaceInterceptor("naming")
core = NamespaceInterceptor("core")
message = NamespaceInterceptor("message")
# compatibility wrappers for the other symbols:
__version__ = api.__version__
callback = api.callback
oneway = api.oneway
expose = api.expose
behavior = api.behavior
current_context = api.current_context
class CompatConfiguration(Configuration):
def asDict(self):
return self.as_dict()
config = CompatConfiguration()
class URI(api.URI):
pass
class Proxy(api.Proxy):
def _pyroAsync(self, asynchronous=True):
raise NotImplementedError("async proxy is no longer available in Pyro5")
@property
def _pyroHmacKey(self):
raise NotImplementedError("pyro5 doesn't have hmacs anymore")
def __setattr__(self, name, value):
if name == "_pyroHmacKey":
raise NotImplementedError("pyro5 doesn't have hmacs anymore")
return super().__setattr__(name, value)
class Daemon(api.Daemon):
pass
def locateNS(host=None, port=None, broadcast=True, hmac_key=None):
if hmac_key:
raise NotImplementedError("hmac_key is no longer available in Pyro5, consider using 2-way SSL instead")
return api.locate_ns(host, port, broadcast)
def resolve(uri, hmac_key=None):
if hmac_key:
raise NotImplementedError("hmac_key is no longer available in Pyro5, consider using 2-way SSL instead")
return api.resolve(uri)
class BatchProxy(api.BatchProxy):
def __call__(self, oneway=False, asynchronous=False):
if asynchronous:
raise NotImplementedError("async proxy is no longer available in Pyro5")
return super().__call__(oneway)
def batch(proxy):
return BatchProxy(proxy)
class UtilModule:
@staticmethod
def getPyroTraceback(ex_type=None, ex_value=None, ex_tb=None):
return errors.get_pyro_traceback(ex_type, ex_value, ex_tb)
@staticmethod
def formatTraceback(ex_type=None, ex_value=None, ex_tb=None, detailed=False):
return errors.format_traceback(ex_type, ex_value, ex_tb, detailed)
SerializerBase = serializers.SerializerBase
def excepthook(self, *args, **kwargs):
return errors.excepthook(*args, **kwargs)
util = UtilModule()
class SocketUtilModule:
@staticmethod
def getIpVersion(hostnameOrAddress):
return ipaddress.ip_address(hostnameOrAddress).version
@staticmethod
def getIpAddress(hostname, workaround127=False, ipVersion=None):
return str(socketutil_pyro5.get_ip_address(hostname, workaround127, ipVersion))
@staticmethod
def getInterfaceAddress(ip_address):
return str(socketutil_pyro5.get_interface(ip_address).ip)
@staticmethod
def createSocket(bind=None, connect=None, reuseaddr=False, keepalive=True,
timeout=-1, noinherit=False,
ipv6=False, nodelay=True, sslContext=None):
return socketutil_pyro5.create_socket(bind, connect, reuseaddr, keepalive,
timeout, noinherit, ipv6, nodelay, sslContext)
@staticmethod
def createBroadcastSocket(bind=None, reuseaddr=False, timeout=-1, ipv6=False):
return socketutil_pyro5.create_bc_socket(bind, reuseaddr, timeout, ipv6)
@staticmethod
def receiveData(sock, size):
return socketutil_pyro5.receive_data(sock, size)
@staticmethod
def sendData(sock, data):
return socketutil_pyro5.send_data(sock, data)
socketutil = SocketUtilModule()
class ConstantsModule:
from .. import __version__ as VERSION
from ..core import DAEMON_NAME, NAMESERVER_NAME
from ..protocol import PROTOCOL_VERSION
constants = ConstantsModule()
# make sure that subsequent from Pyro4 import ... will work:
sys.modules["Pyro4"] = sys.modules[__name__]
sys.modules["Pyro4.errors"] = errors
sys.modules["Pyro4.core"] = core
sys.modules["Pyro4.naming"] = naming
sys.modules["Pyro4.util"] = util
sys.modules["Pyro4.socketutil"] = socketutil
sys.modules["Pyro4.constants"] = constants
sys.modules["Pyro4.message"] = message
|
pfe/player-feature-extractor/projects/attribute_recognition/datasets/dataset.py | dimahwang88/py-mcftracker | 3,465 | 11066567 | <filename>pfe/player-feature-extractor/projects/attribute_recognition/datasets/dataset.py
from __future__ import division, print_function, absolute_import
import os.path as osp
from torchreid.utils import read_image
class Dataset(object):
def __init__(
self,
train,
val,
test,
attr_dict,
transform=None,
mode='train',
verbose=True,
**kwargs
):
self.train = train
self.val = val
self.test = test
self._attr_dict = attr_dict
self._num_attrs = len(self.attr_dict)
self.transform = transform
if mode == 'train':
self.data = self.train
elif mode == 'val':
self.data = self.val
else:
self.data = self.test
if verbose:
self.show_summary()
@property
def num_attrs(self):
return self._num_attrs
@property
def attr_dict(self):
return self._attr_dict
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path, attrs = self.data[index]
img = read_image(img_path)
if self.transform is not None:
img = self.transform(img)
return img, attrs, img_path
def check_before_run(self, required_files):
"""Checks if required files exist before going deeper.
Args:
required_files (str or list): string file name(s).
"""
if isinstance(required_files, str):
required_files = [required_files]
for fpath in required_files:
if not osp.exists(fpath):
raise RuntimeError('"{}" is not found'.format(fpath))
def show_summary(self):
num_train = len(self.train)
num_val = len(self.val)
num_test = len(self.test)
num_total = num_train + num_val + num_test
print('=> Loaded {}'.format(self.__class__.__name__))
print(" ------------------------------")
print(" subset | # images")
print(" ------------------------------")
print(" train | {:8d}".format(num_train))
print(" val | {:8d}".format(num_val))
print(" test | {:8d}".format(num_test))
print(" ------------------------------")
print(" total | {:8d}".format(num_total))
print(" ------------------------------")
print(" # attributes: {}".format(len(self.attr_dict)))
print(" attributes:")
for label, attr in self.attr_dict.items():
print(' {:3d}: {}'.format(label, attr))
print(" ------------------------------")
|
tools/metrics/histograms/validate_prefix.py | zealoussnow/chromium | 14,668 | 11066696 | #!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks that the histograms and variants at given xml have correct prefix."""
import logging
import os
import sys
import xml.dom.minidom
import extract_histograms
import split_xml
def ValidatePrefixInFile(xml_path):
"""Validates that all <histogram> and <variants> are put in the correct file.
Args:
xml_path: The path to the histograms.xml file.
Returns:
    A boolean that is True if at least one histogram or variants node has an
    incorrect prefix, False otherwise.
"""
prefix = os.path.basename(os.path.dirname(xml_path))
has_prefix_error = False
tree = xml.dom.minidom.parse(xml_path)
for node in extract_histograms.IterElementsWithTag(tree, 'variants', 3):
correct_dir = split_xml.GetDirForNode(node)
if correct_dir != prefix:
variants_name = node.getAttribute('name')
logging.error(
'Variants of name %s is not placed in the correct directory, '
'please remove it from the metadata/%s directory '
'and place it in the metadata/%s directory.', variants_name, prefix,
correct_dir)
has_prefix_error = True
for node in extract_histograms.IterElementsWithTag(tree, 'histogram', 3):
correct_dir = split_xml.GetDirForNode(node)
if correct_dir != prefix:
histogram_name = node.getAttribute('name')
logging.error(
'Histogram of name %s is not placed in the correct directory, '
'please remove it from the metadata/%s directory '
'and place it in the metadata/%s directory.', histogram_name, prefix,
correct_dir)
has_prefix_error = True
return has_prefix_error
def main():
"""Checks that the histograms at given path have prefix that is the dir name.
Args:
sys.argv[1]: The relative path to xml file.
Example usage:
validate_prefix.py metadata/Fingerprint/histograms.xml
"""
if len(sys.argv) != 2:
sys.stderr.write('Usage: %s <rel-path-to-xml>' % sys.argv[0])
sys.exit(1)
xml_path = os.path.join(os.getcwd(), sys.argv[1])
prefix_error = ValidatePrefixInFile(xml_path)
sys.exit(prefix_error)
if __name__ == '__main__':
main()
|
jwt_decoder.py | bbhunter/security-tools | 627 | 11066715 | #!/usr/bin/env python3
# JWT Decoder
import base64
import sys
import hmac
import hashlib
import binascii
# <KEY>
# <KEY>
def get_parts(jwt):
return dict(zip(['header', 'payload', 'signature'], jwt.split('.')))
def decode_part(part):
# use Base64URL decode plus optional padding.
# === makes sure that padding will be always correct
# extraneous padding is ignored
return base64.urlsafe_b64decode(part + '===')
def encode_part(part):
return base64.urlsafe_b64encode(part).replace(b'=', b'')
# builds a signed token; for HS256 the signature is the Base64URL-encoded
# HMAC-SHA256 of "<header>.<payload>" (raw digest, not the hex digest)
def build_jwt(header, payload, key, alg='hmac'):
    message = b'.'.join([
        encode_part(header),
        encode_part(payload)
    ])
    if alg == 'hmac':
        signature = encode_part(hmac.new(key.encode(), message,
                                         hashlib.sha256).digest()).decode()
    elif alg == 'none':
        # the 'none' algorithm carries an empty signature
        signature = ''
    else:
        raise ValueError('unsupported algorithm: {}'.format(alg))
    return '{}.{}'.format(message.decode(), signature)
parts = get_parts(sys.argv[1])
header = decode_part(parts['header'])
print(header)
payload = decode_part(parts['payload'])
print(payload)
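# Illustrative round-trip sketch (not part of the original script); the
# header, payload and key below are made-up demo values:
#
#   token = build_jwt(b'{"alg":"HS256","typ":"JWT"}', b'{"sub":"demo"}', 'secret')
#   print(decode_part(get_parts(token)['payload']))   # -> b'{"sub":"demo"}'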
|
atcodertools/client/models/problem_content.py | come2ry/atcoder-tools | 313 | 11066726 | from typing import List, Tuple, Optional
from bs4 import BeautifulSoup
from atcodertools.client.models.sample import Sample
import unicodedata
def remove_non_jp_characters(content):
return "".join([x for x in content if is_japanese(x)])
def normalize(content: str) -> str:
return content.strip().replace('\r', '') + "\n"
def is_japanese(ch):
# Thank you!
# http://minus9d.hatenablog.com/entry/2015/07/16/231608
try:
name = unicodedata.name(ch)
if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
return True
except ValueError:
pass
return False
class SampleDetectionError(Exception):
pass
class InputFormatDetectionError(Exception):
pass
class ProblemContent:
def __init__(self, input_format_text: Optional[str] = None,
samples: Optional[List[Sample]] = None,
original_html: Optional[str] = None,
):
self.samples = samples
self.input_format_text = input_format_text
self.original_html = original_html
@classmethod
def from_html(cls, html: str):
res = ProblemContent(original_html=html)
soup = BeautifulSoup(html, "html.parser")
res.input_format_text, res.samples = res._extract_input_format_and_samples(
soup)
return res
def get_input_format(self) -> str:
return self.input_format_text
def get_samples(self) -> List[Sample]:
return self.samples
@staticmethod
def _extract_input_format_and_samples(soup) -> Tuple[str, List[Sample]]:
# Remove English statements
for e in soup.findAll("span", {"class": "lang-en"}):
e.extract()
# Focus on AtCoder's usual contest's html structure
tmp = soup.select('.part')
if tmp:
tmp[0].extract()
try:
try:
input_format_tag, input_tags, output_tags = ProblemContent._primary_strategy(
soup)
if input_format_tag is None:
raise InputFormatDetectionError
except InputFormatDetectionError:
input_format_tag, input_tags, output_tags = ProblemContent._secondary_strategy(
soup)
except Exception as e:
raise InputFormatDetectionError(e)
if len(input_tags) != len(output_tags):
raise SampleDetectionError
try:
res = [Sample(normalize(in_tag.text), normalize(out_tag.text))
for in_tag, out_tag in zip(input_tags, output_tags)]
if input_format_tag is None:
raise InputFormatDetectionError
input_format_text = normalize(input_format_tag.text)
except AttributeError:
raise InputFormatDetectionError
return input_format_text, res
@staticmethod
def _primary_strategy(soup):
input_tags = []
output_tags = []
input_format_tag = None
for tag in soup.select('section'):
h3tag = tag.find('h3')
if h3tag is None:
continue
# Some problems have strange characters in h3 tags which should be
# removed
section_title = remove_non_jp_characters(tag.find('h3').get_text())
if section_title.startswith("入力例"):
input_tags.append(tag.find('pre'))
elif section_title.startswith("入力"):
input_format_tag = tag.find('pre')
if section_title.startswith("出力例"):
output_tags.append(tag.find('pre'))
return input_format_tag, input_tags, output_tags
@staticmethod
def _secondary_strategy(soup): # TODO: more descriptive name
pre_tags = soup.select('pre')
sample_tags = pre_tags[1:]
input_tags = sample_tags[0::2]
output_tags = sample_tags[1::2]
input_format_tag = pre_tags[0]
return input_format_tag, input_tags, output_tags
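if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): parse a tiny,
    # hand-written fragment shaped like an AtCoder task page. The HTML below
    # is a made-up example, not a real contest page.
    demo_html = (
        '<section><h3>入力</h3><pre>N\n</pre></section>'
        '<section><h3>入力例 1</h3><pre>3\n</pre></section>'
        '<section><h3>出力例 1</h3><pre>6\n</pre></section>'
    )
    content = ProblemContent.from_html(demo_html)
    print(content.get_input_format())
    print(content.get_samples())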
|
delft/textClassification/data_generator.py | tantikristanti/delft | 333 | 11066738 | <reponame>tantikristanti/delft
import numpy as np
# seed is fixed for reproducibility
from delft.utilities.numpy import shuffle_triple_with_view
np.random.seed(7)
from tensorflow import set_random_seed
set_random_seed(7)
import keras
from delft.textClassification.preprocess import to_vector_single, to_vector_simple_with_elmo, to_vector_simple_with_bert
from delft.utilities.Tokenizer import tokenizeAndFilterSimple
# generate batch of data to feed text classification model, both for training and prediction
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, x, y, batch_size=256, maxlen=300, list_classes=[], embeddings=(), shuffle=True):
'Initialization'
self.x = x
self.y = y
self.batch_size = batch_size
self.maxlen = maxlen
self.embeddings = embeddings
self.list_classes = list_classes
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
# The number of batches is set so that each training sample is seen at most once per epoch
return int(np.floor(len(self.x) / self.batch_size) + 1)
def __getitem__(self, index):
'Generate one batch of data'
# generate data for the current batch index
batch_x, batch_y = self.__data_generation(index)
return batch_x, batch_y
def on_epoch_end(self):
# If we are predicting, we don't need to shuffle
if self.y is None:
return
# shuffle dataset at each epoch
if self.shuffle:
self.x, self.y, _ = shuffle_triple_with_view(self.x, self.y)
def __data_generation(self, index):
'Generates data containing batch_size samples'
max_iter = min(self.batch_size, len(self.x)-self.batch_size*index)
# restrict data to index window
sub_x = self.x[(index*self.batch_size):(index*self.batch_size)+max_iter]
batch_x = np.zeros((max_iter, self.maxlen, self.embeddings.embed_size), dtype='float32')
batch_y = None
if self.y is not None:
batch_y = np.zeros((max_iter, len(self.list_classes)), dtype='float32')
x_tokenized = []
for i in range(0, max_iter):
tokens = tokenizeAndFilterSimple(sub_x[i])
x_tokenized.append(tokens)
if self.embeddings.use_ELMo:
#batch_x = to_vector_elmo(x_tokenized, self.embeddings, max_length_x)
batch_x = to_vector_simple_with_elmo(x_tokenized, self.embeddings, self.maxlen)
if self.embeddings.use_BERT:
batch_x = to_vector_simple_with_bert(x_tokenized, self.embeddings, self.maxlen)
# Generate data
for i in range(0, max_iter):
# Store sample
if not self.embeddings.use_ELMo and not self.embeddings.use_BERT:
batch_x[i] = to_vector_single(self.x[(index*self.batch_size)+i], self.embeddings, self.maxlen)
# Store class
# classes are numerical, so nothing to vectorize for y
if self.y is not None:
batch_y[i] = self.y[(index*self.batch_size)+i]
return batch_x, batch_y
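# Illustrative usage sketch (not part of the original module): the generator
# is typically handed to Keras together with a delft Embeddings object; the
# names below (x_train, y_train, classes, embeddings, model) are assumed to
# exist in the caller.
#
#   gen = DataGenerator(x_train, y_train, batch_size=32, maxlen=300,
#                       list_classes=classes, embeddings=embeddings)
#   model.fit_generator(gen, epochs=10)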
|
third_party/gae_ts_mon/gae_ts_mon/protobuf/google/auth/_helpers.py | tingshao/catapult | 2,151 | 11066745 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for commonly used utilities."""
import base64
import calendar
import datetime
import six
from six.moves import urllib
CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
CLOCK_SKEW = datetime.timedelta(seconds=CLOCK_SKEW_SECS)
def copy_docstring(source_class):
"""Decorator that copies a method's docstring from another class.
Args:
source_class (type): The class that has the documented method.
Returns:
Callable: A decorator that will copy the docstring of the same
named method in the source class to the decorated method.
"""
def decorator(method):
"""Decorator implementation.
Args:
method (Callable): The method to copy the docstring to.
Returns:
Callable: the same method passed in with an updated docstring.
Raises:
ValueError: if the method already has a docstring.
"""
if method.__doc__:
raise ValueError('Method already has a docstring.')
source_method = getattr(source_class, method.__name__)
method.__doc__ = source_method.__doc__
return method
return decorator
def utcnow():
"""Returns the current UTC datetime.
Returns:
datetime: The current time in UTC.
"""
return datetime.datetime.utcnow()
def datetime_to_secs(value):
"""Convert a datetime object to the number of seconds since the UNIX epoch.
Args:
value (datetime): The datetime to convert.
Returns:
int: The number of seconds since the UNIX epoch.
"""
return calendar.timegm(value.utctimetuple())
def to_bytes(value, encoding='utf-8'):
"""Converts a string value to bytes, if necessary.
    Unfortunately, ``six.b`` is insufficient for this task since in
    Python 2 it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes.
"""
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
raise ValueError('{0!r} could not be converted to bytes'.format(value))
def from_bytes(value):
"""Converts bytes to a string value, if necessary.
Args:
value (Union[str, bytes]): The value to be converted.
Returns:
str: The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError: If the value could not be converted to unicode.
"""
result = (value.decode('utf-8')
if isinstance(value, six.binary_type) else value)
if isinstance(result, six.text_type):
return result
else:
raise ValueError(
'{0!r} could not be converted to unicode'.format(value))
def update_query(url, params, remove=None):
"""Updates a URL's query parameters.
Replaces any current values if they are already present in the URL.
Args:
url (str): The URL to update.
params (Mapping[str, str]): A mapping of query parameter
keys to values.
remove (Sequence[str]): Parameters to remove from the query string.
Returns:
str: The URL with updated query parameters.
Examples:
>>> url = 'http://example.com?a=1'
>>> update_query(url, {'a': '2'})
http://example.com?a=2
>>> update_query(url, {'b': '3'})
http://example.com?a=1&b=3
        >>> update_query(url, {'b': '3'}, remove=['a'])
http://example.com?b=3
"""
if remove is None:
remove = []
# Split the URL into parts.
parts = urllib.parse.urlparse(url)
# Parse the query string.
query_params = urllib.parse.parse_qs(parts.query)
# Update the query parameters with the new parameters.
query_params.update(params)
# Remove any values specified in remove.
query_params = {
key: value for key, value
in six.iteritems(query_params)
if key not in remove}
# Re-encoded the query string.
new_query = urllib.parse.urlencode(query_params, doseq=True)
# Unsplit the url.
new_parts = parts._replace(query=new_query)
return urllib.parse.urlunparse(new_parts)
def scopes_to_string(scopes):
"""Converts scope value to a string suitable for sending to OAuth 2.0
authorization servers.
Args:
scopes (Sequence[str]): The sequence of scopes to convert.
Returns:
str: The scopes formatted as a single string.
"""
return ' '.join(scopes)
def string_to_scopes(scopes):
    """Converts a stringified scopes value to a list.
Args:
scopes (Union[Sequence, str]): The string of space-separated scopes
to convert.
Returns:
Sequence(str): The separated scopes.
"""
if not scopes:
return []
return scopes.split(' ')
def padded_urlsafe_b64decode(value):
"""Decodes base64 strings lacking padding characters.
Google infrastructure tends to omit the base64 padding characters.
Args:
value (Union[str, bytes]): The encoded value.
Returns:
bytes: The decoded value
"""
b64string = to_bytes(value)
padded = b64string + b'=' * (-len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
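if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): exercise a few
    # of the helpers above with made-up values.
    print(update_query('http://example.com?a=1', {'b': '3'}, remove=['a']))
    print(scopes_to_string(['email', 'profile']))
    print(padded_urlsafe_b64decode('aGVsbG8'))  # tolerates missing padding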
|
insights/parsers/interrupts.py | mglantz/insights-core | 121 | 11066805 | <gh_stars>100-1000
"""
Interrupts - file ``/proc/interrupts``
======================================
Provides parsing for contents of ``/proc/interrupts``.
The contents of a typical ``interrupts`` file look like::
CPU0 CPU1 CPU2 CPU3
0: 37 0 0 0 IR-IO-APIC 2-edge timer
1: 3 2 1 0 IR-IO-APIC 1-edge i8042
8: 0 1 0 0 IR-IO-APIC 8-edge rtc0
9: 11107 2316 4040 1356 IR-IO-APIC 9-fasteoi acpi
NMI: 210 92 179 96 Non-maskable interrupts
LOC: 7561411 2488524 6527767 2448192 Local timer interrupts
ERR: 0
MIS: 0
The information is parsed by the ``Interrupts`` class and stored as a list of
dictionaries, in the same order as the lines of the output.
The counts in the CPU# columns are represented in a ``list``.
The following is a sample of the parsed information stored in an ``Interrupts``
class object::
[
{ 'irq': '0',
'num_cpus': 4,
          'counts': [37, 0, 0, 0]},
        ...
        { 'irq': 'MIS',
'num_cpus': 4,
'counts': [0, ]}
]
Examples:
>>> int_info = shared[Interrupts]
>>> int_info.data[0]
{'irq': '0', 'num_cpus': 4, 'counts': [37, 0, 0, 0],
'type_device': 'IR-IO-APIC 2-edge timer'}
>>> int_info.num_cpus
4
>>> int_info.get('i8042')
[{'irq': '1', 'num_cpus': 4, 'counts': [3, 2, 1, 0],
'type_device': 'IR-IO-APIC 1-edge i8042'}]
>>> [i['irq'] for i in int_info if i['counts'][0] > 1000]
['9', 'LOC']
"""
from .. import Parser, parser
from ..parsers import ParseException
from insights.specs import Specs
@parser(Specs.interrupts)
class Interrupts(Parser):
"""Parse contents of ``/proc/interrupts``.
Attributes:
data (list of dict): List of dictionaries with each entry representing
a row of the command output after the first line of headings.
Raises:
        ParseException: Raised if the first line is invalid or no data is found to parse.
"""
def get(self, filter):
"""list: Returns list of records containing ``filter`` in the type/device field."""
return [i for i in self.data if 'type_device' in i and filter in i['type_device']]
def __iter__(self):
return iter(self.data)
@property
def num_cpus(self):
"""int: Returns total number of CPUs."""
return int(self.data[0]['num_cpus'])
def parse_content(self, content):
self.data = []
try:
cpu_names = content[0].split()
except:
raise ParseException("Invalid first line of content for /proc/interrupts")
if len(cpu_names) < 1 or not cpu_names[0].startswith("CPU"):
raise ParseException("Unable to determine number of CPUs in /proc/interrupts")
for line in content[1:]:
parts = line.split(None, len(cpu_names) + 1)
one_int = {'irq': parts[0].replace(":", "")}
one_int['num_cpus'] = len(cpu_names)
one_int['counts'] = []
if len(parts) == len(cpu_names) + 2:
one_int['type_device'] = parts[-1]
for part, cpu in zip(parts[1:-1], cpu_names):
one_int['counts'].append(int(part))
else:
for part, cpu in zip(parts[1:], cpu_names):
one_int['counts'].append(int(part))
self.data.append(one_int)
if len(self.data) < 1:
raise ParseException("No information in /proc/interrupts")
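if __name__ == '__main__':
    # Illustrative sketch (not part of the original parser). It assumes the
    # insights test helper ``context_wrap`` is importable; the snippet below
    # is a made-up two-CPU excerpt of /proc/interrupts.
    from insights.tests import context_wrap

    DEMO_INTERRUPTS = """
               CPU0       CPU1
      0:         37          0   IR-IO-APIC   2-edge      timer
    NMI:        210         92   Non-maskable interrupts
    """.strip()
    demo = Interrupts(context_wrap(DEMO_INTERRUPTS))
    print(demo.num_cpus, [row['irq'] for row in demo])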
|
locations/spiders/speedway.py | davidchiles/alltheplaces | 297 | 11066813 | # -*- coding: utf-8 -*-
import scrapy
import json
from zipfile import ZipFile
from io import BytesIO
from time import sleep
from locations.items import GeojsonPointItem
class SpeedwaySpider(scrapy.Spider):
name = "speedway"
item_attributes = {'brand': "Speedway"}
allowed_domains = ["mobileapps.speedway.com"]
def start_requests(self):
# We make this request to start a session and store cookies that the actual request requires
yield scrapy.Request('https://mobileapps.speedway.com/', callback=self.get_results)
def get_results(self, response):
self.logger.debug(
            'Waiting 5 seconds to make the session cookie stick...')
sleep(5)
yield scrapy.Request(
'https://mobileapps.speedway.com/S3cur1ty/VServices/StoreService.svc/getallstores/321036B0-4359-4F4D-A01E-A8DDEE0EC2F7'
)
def parse(self, response):
z = ZipFile(BytesIO(response.body))
stores = json.loads(z.read('SpeedwayStores.json').decode(
'utf-8', 'ignore').encode('utf-8'))
for store in stores:
amenities = store['amenities']
fuels = store['fuelItems']
yield GeojsonPointItem(
lat=store['latitude'],
lon=store['longitude'],
name=store['brandName'],
addr_full=store['address'],
city=store['city'],
state=store['state'],
postcode=store['zip'],
country='US',
opening_hours='24/7' if any('Open 24 Hours' ==
a['name'] for a in amenities) else None,
phone=store['phoneNumber'],
website=f"https://www.speedway.com/locations/store/{store['costCenterId']}",
ref=store['costCenterId'],
extras={
'amenity:fuel': True,
'atm': any('ATM' == a['name'] for a in amenities),
'car_wash': any('Car Wash' == a['name'] for a in amenities),
'fuel:diesel': any('DSL' in f['description'] for f in fuels) or None,
'fuel:e15': any('E15' == f['description'] for f in fuels) or None,
'fuel:e20': any('E20' == f['description'] for f in fuels) or None,
'fuel:e30': any('E30' == f['description'] for f in fuels) or None,
'fuel:e85': any('E85' == f['description'] for f in fuels) or None,
'fuel:HGV_diesel': any('Truck' in f['description'] for f in fuels) or any('Truck' in a['name'] for a in amenities) or None,
'fuel:octane_100': any('Racing' == f['description'] for f in fuels) or None,
'fuel:octane_87': any('Unleaded' == f['description'] for f in fuels) or None,
'fuel:octane_89': any('Plus' == f['description'] for f in fuels) or None,
'fuel:octane_90': any('90' in f['description'] for f in fuels) or None,
                    'fuel:octane_91': any('91' in f['description'] for f in fuels) or None,
'fuel:octane_93': any('Premium' == f['description'] for f in fuels) or None,
'fuel:propane': any('Propane' in a['name'] for a in amenities) or None,
'hgv': any('Truck' in f['description'] for f in fuels) or None
}
)
|
coldtype/time/timeable.py | rohernandezz/coldtype | 142 | 11066844 | from coldtype.time.easing import ease
from copy import copy
import math
class Timing():
def __init__(self, t, loop_t, loop, easefn):
self.t = t
self.loop_t = loop_t
self.loop = loop
self.loop_phase = int(loop%2 != 0)
self.e, self.s = self.ease(easefn)
def ease(self, easefn):
easer = easefn
if not isinstance(easer, str) and not hasattr(easer, "value") and not type(easefn).__name__ == "Glyph":
try:
iter(easefn) # is-iterable
if len(easefn) > self.loop:
easer = easefn[self.loop]
elif len(easefn) == 2:
easer = easefn[self.loop % 2]
elif len(easefn) == 1:
easer = easefn[0]
else:
easer = easefn[0]
except TypeError:
print("failed")
pass
v, tangent = ease(easer, self.loop_t)
return min(1, max(0, v)), tangent
class Timeable():
"""
Abstract base class for anything with a concept of `start` and `end`/`duration`
Implements additional methods to make it easier to work with time-based concepts
"""
def __init__(self, start, end, index=0, name=None, data={}, timeline=None):
self.start = start
self.end = end
self.index = index
self.idx = index
self.i = index
self.name = name
self.feedback = 0
self.data = data
self.timeline = timeline
@property
def duration(self):
return self.end - self.start
def __repr__(self):
return f"Timeable({self.start}, {self.end} ({self.duration}))"
def delay(self, frames_delayed, feedback) -> 'Timeable':
t = copy(self)
t.start = t.start + frames_delayed
t.end = t.end + frames_delayed
t.feedback = feedback
t.data = {}
return t
def retime(self, start=0, end=0, duration=-1):
self.start = self.start + start
self.end = self.end + end
if duration > -1:
self.end = self.start + duration
return self
def now(self, i):
return self.start <= i < self.end
def _normalize_fi(self, fi):
if hasattr(self, "timeline") and self.timeline:
if self.end > self.timeline.duration and fi < self.start:
return fi + self.timeline.duration
return fi
def e(self, fi, easefn="eeio", loops=1, rng=(0, 1), cyclic=True, to1=False, out1=False):
if not isinstance(easefn, str):
loops = easefn
easefn = "eeio"
fi = self._normalize_fi(fi)
t = self.progress(fi, loops=loops, easefn=easefn, cyclic=cyclic, to1=to1, out1=out1)
e = t.e
ra, rb = rng
if ra > rb:
e = 1 - e
rb, ra = ra, rb
return ra + e*(rb - ra)
def io(self, fi, length, ei="eei", eo="eei", negative=False):
"""
Somewhat like ``progress()``, but can be used to fade in/out (hence the name (i)n/(o)ut)
        * ``length`` refers to the length of the ease, in frames
* ``ei=`` takes the ease-in mnemonic
* ``eo=`` takes the ease-out mnemonic
"""
try:
length_i, length_o = length
except:
length_i = length
length_o = length
fi = self._normalize_fi(fi)
if fi < self.start:
return 1
if fi > self.end:
return 0
to_end = self.end - fi
to_start = fi - self.start
easefn = None
in_end = False
if to_end < length_o and eo:
in_end = True
v = 1-to_end/length_o
easefn = eo
elif to_start <= length_i and ei:
v = 1-to_start/length_i
easefn = ei
else:
v = 0
if v == 0 or not easefn:
return 0
else:
a, _ = ease(easefn, v)
if negative and in_end:
return -a
else:
return a
def io2(self, fi, length, easefn="eeio", negative=False):
try:
length_i, length_o = length
except:
length_i = length
length_o = length
if isinstance(length_i, float):
length_i = int(self.duration*(length_i/2))
if isinstance(length_o, float):
length_o = int(self.duration*(length_o/2))
if fi < self.start or fi > self.end:
return 0
try:
ei, eo = easefn
except ValueError:
ei, eo = easefn, easefn
to_end = self.end - fi
to_start = fi - self.start
easefn = None
in_end = False
if to_end < length_o and eo:
in_end = True
v = to_end/length_o
easefn = eo
elif to_start <= length_i and ei:
v = to_start/length_i
easefn = ei
else:
v = 1
if v == 1 or not easefn:
return 1
else:
            a, _ = ease(easefn, v)
            if negative and in_end:
                return -a
            else:
                return a
def _loop(self, t, times=1, cyclic=True, negative=False):
lt = t*times*2
ltf = math.floor(lt)
ltc = math.ceil(lt)
if False:
if ltc % 2 != 0: # looping back
lt = 1 - (ltc - lt)
else: # looping forward
lt = ltc - lt
lt = lt - ltf
if cyclic and ltf%2 == 1:
if negative:
lt = -lt
else:
lt = 1 - lt
return lt, ltf
def progress(self, i, loops=0, cyclic=True, negative=False, easefn="linear", to1=False, out1=True) -> Timing:
"""
Given an easing function (``easefn=``), calculate the amount of progress as a Timing object
``easefn=`` takes a mnemonic as enumerated in :func:`coldtype.time.easing.ease`
"""
if i < self.start:
return Timing(0, 0, 0, easefn)
if i > self.end:
if out1:
return Timing(1, 1, 0, easefn)
else:
return Timing(0, 0, 0, easefn)
d = self.duration
if to1:
d = d - 1
t = (i-self.start) / d
if loops == 0:
return Timing(t, t, 0, easefn)
else:
loop_t, loop_index = self._loop(t, times=loops, cyclic=cyclic, negative=negative)
return Timing(t, loop_t, loop_index, easefn)
def halfover(self, i):
e = self.progress(i, to1=1).e
return e >= 0.5
#prg = progress
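# Illustrative usage sketch (not part of the original module): a clip that
# runs from frame 10 to frame 34, eased with the "eeio" mnemonic that is
# already used as the default elsewhere in this file.
#
#   clip = Timeable(10, 34)
#   clip.now(12)          # True while 10 <= frame < 34
#   clip.e(22, "eeio")    # eased progress clamped to [0, 1]
#   clip.io(12, 4)        # in-ease envelope value near the start of the clip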
class TimeableView(Timeable):
def __init__(self, timeable, value, svalue, count, index, position, start, end):
self.timeable = timeable
self.value = value
self.svalue = svalue
self.count = count
self.index = index
self.position = position
self.start = start
self.end = end
super().__init__(start, end)
def ease(self, eo="eei", ei="eei"):
return ease(eo, self.value)[0]
def __repr__(self):
return f"<TimeableView:{self.timeable}/>"
class TimeableSet():
def __init__(self, timeables, name=None, start=-1, end=-1, data={}):
self.timeables = timeables
self.name = name
self._start = start
self._end = end
self.data = data
def flat_timeables(self):
ts = []
for t in self.timeables:
if isinstance(t, TimeableSet):
ts.extend(t.flat_timeables())
else:
ts.append(t)
return ts
def constrain(self, start, end):
self._start = start
self._end = end
@property
def start(self):
if self._start > -1:
return self._start
_start = -1
for t in self.timeables:
ts = t.start
if _start == -1:
_start = ts
elif ts < _start:
_start = ts
return _start
@property
def end(self):
if self._end > -1:
return self._end
_end = -1
for t in self.timeables:
te = t.end
if _end == -1:
_end = te
elif te > _end:
_end = te
return _end
def __getitem__(self, index):
return self.timeables[index]
def current(self, frame):
for idx, t in enumerate(self.flat_timeables()):
t:Timeable
if t.start <= frame and frame < t.end:
return t
def fv(self, frame, filter_fn=None, reverb=[0,5], duration=-1, accumulate=0):
pre, post = reverb
count = 0
timeables_on = []
ts_duration = self.end - self.start
for idx, t in enumerate(self.flat_timeables()):
if filter_fn and not filter_fn(t):
continue
t_start = t.start
t_end = t.end
if duration > -1:
t_end = t_start + duration
pre_start = t_start - pre
post_end = t_end + post
t_index = count
if frame >= pre_start: # correct?
count += 1
value = 0
pos = 0
fi = frame
if frame >= t_start and frame <= t_end: # truly on
pos = 0
value = 1
else:
if post_end > ts_duration and frame < pre_start:
fi = frame + ts_duration
elif pre_start < 0 and frame > post_end:
fi = frame - ts_duration
if fi < t_start and fi >= pre_start:
pos = 1
value = (fi - pre_start) / pre
elif fi > t_end and fi < post_end:
pos = -1
value = (post_end - fi) / post
if value > 0:
timeables_on.append(TimeableView(t, value, -1, count, idx, pos, pre_start, post_end))
else:
pass
if accumulate:
return timeables_on
else:
if len(timeables_on) == 0:
return TimeableView(None, 0, 0, count, -1, 0, 0, 0)
else:
return max(timeables_on, key=lambda tv: tv.value)
def __repr__(self):
return "<TimeableSet ({:s}){:04d}>".format(self.name if self.name else "?", len(self.timeables)) |
tensorflow/contrib/training/python/training/batch_sequences_with_states_test.py | connectthefuture/tensorflow | 101 | 11066885 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.batch_sequences_with_states."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
class BatchSequencesWithStatesTest(tf.test.TestCase):
def setUp(self):
super(BatchSequencesWithStatesTest, self).setUp()
self.value_length = 4
self.batch_size = 2
self.key = tf.string_join(["key_", tf.as_string(tf.cast(
10000 * tf.random_uniform(()), tf.int32))])
self.sequences = {"seq1": np.random.rand(self.value_length, 5),
"seq2": np.random.rand(self.value_length, 4, 2)}
self.context = {"context1": [3, 4]}
self.initial_states = {"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)}
def _prefix(self, key_value):
return set(
[s.decode("ascii").split(":")[0].encode("ascii") for s in key_value])
def _testBasics(self, num_unroll, length, pad,
expected_seq1_batch1, expected_seq2_batch1,
expected_seq1_batch2, expected_seq2_batch2):
with self.test_session() as sess:
next_batch = tf.contrib.training.batch_sequences_with_states(
input_key=self.key,
input_sequences=self.sequences,
input_context=self.context,
input_length=length,
initial_states=self.initial_states,
num_unroll=num_unroll,
batch_size=self.batch_size,
num_threads=3,
# to enforce that we only move on to the next examples after finishing
# all segments of the first ones.
capacity=2,
pad=pad)
state1 = next_batch.state("state1")
state2 = next_batch.state("state2")
state1_update = next_batch.save_state("state1", state1 + 1)
state2_update = next_batch.save_state("state2", state2 - 1)
# Make sure queue runner with SQSS is added properly to meta graph def.
# Saver requires at least one variable.
v0 = tf.Variable(10.0, name="v0")
tf.add_to_collection("variable_collection", v0)
tf.global_variables_initializer()
save = tf.train.Saver([v0])
test_dir = os.path.join(tf.test.get_temp_dir(), "sqss_test")
filename = os.path.join(test_dir, "metafile")
meta_graph_def = save.export_meta_graph(filename)
qr_saved = meta_graph_def.collection_def[tf.GraphKeys.QUEUE_RUNNERS]
self.assertTrue(qr_saved.bytes_list.value is not None)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# Step 1
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key,
next_batch.next_key,
next_batch.sequences["seq1"],
next_batch.sequences["seq2"],
next_batch.context["context1"],
state1,
state2,
next_batch.length,
state1_update,
state2_update))
expected_first_keys = set([b"00000_of_00002"])
expected_second_keys = set([b"00001_of_00002"])
expected_final_keys = set([b"STOP"])
self.assertEqual(expected_first_keys, self._prefix(key_value))
self.assertEqual(expected_second_keys, self._prefix(next_key_value))
self.assertAllEqual(
np.tile(self.context["context1"], (self.batch_size, 1)),
context1_value)
self.assertAllEqual(expected_seq1_batch1, seq1_value)
self.assertAllEqual(expected_seq2_batch1, seq2_value)
self.assertAllEqual(
np.tile(self.initial_states["state1"], (self.batch_size, 1, 1)),
state1_value)
self.assertAllEqual(
np.tile(self.initial_states["state2"], (self.batch_size, 1)),
state2_value)
self.assertAllEqual(length_value, [num_unroll, num_unroll])
# Step 2
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key,
next_batch.next_key,
next_batch.sequences["seq1"],
next_batch.sequences["seq2"],
next_batch.context["context1"],
next_batch.state("state1"),
next_batch.state("state2"),
next_batch.length,
state1_update,
state2_update))
self.assertEqual(expected_second_keys, self._prefix(key_value))
self.assertEqual(expected_final_keys, self._prefix(next_key_value))
self.assertAllEqual(
np.tile(self.context["context1"], (self.batch_size, 1)),
context1_value)
self.assertAllEqual(expected_seq1_batch2, seq1_value)
self.assertAllEqual(expected_seq2_batch2, seq2_value)
self.assertAllEqual(
1 + np.tile(self.initial_states["state1"], (self.batch_size, 1, 1)),
state1_value)
self.assertAllEqual(
-1 + np.tile(self.initial_states["state2"], (self.batch_size, 1)),
state2_value)
self.assertAllEqual([1, 1], length_value)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=2)
def testBasicPadding(self):
num_unroll = 2 # Divisor of value_length - so no padding necessary.
expected_seq1_batch1 = np.tile(
self.sequences["seq1"][np.newaxis, 0:num_unroll, :],
(self.batch_size, 1, 1))
expected_seq2_batch1 = np.tile(
self.sequences["seq2"][np.newaxis, 0:num_unroll, :, :],
(self.batch_size, 1, 1, 1))
expected_seq1_batch2 = np.tile(
self.sequences["seq1"][np.newaxis, num_unroll:self.value_length, :],
(self.batch_size, 1, 1))
expected_seq2_batch2 = np.tile(
self.sequences["seq2"][np.newaxis, num_unroll:self.value_length, :, :],
(self.batch_size, 1, 1, 1))
self._testBasics(num_unroll=num_unroll, length=3, pad=True,
expected_seq1_batch1=expected_seq1_batch1,
expected_seq2_batch1=expected_seq2_batch1,
expected_seq1_batch2=expected_seq1_batch2,
expected_seq2_batch2=expected_seq2_batch2)
def testBasics(self):
num_unroll = 2 # Divisor of value_length - so no padding necessary.
expected_seq1_batch1 = np.tile(
self.sequences["seq1"][np.newaxis, 0:num_unroll, :],
(self.batch_size, 1, 1))
expected_seq2_batch1 = np.tile(
self.sequences["seq2"][np.newaxis, 0:num_unroll, :, :],
(self.batch_size, 1, 1, 1))
expected_seq1_batch2 = np.tile(
self.sequences["seq1"][np.newaxis, num_unroll:self.value_length, :],
(self.batch_size, 1, 1))
expected_seq2_batch2 = np.tile(
self.sequences["seq2"][np.newaxis, num_unroll:self.value_length, :, :],
(self.batch_size, 1, 1, 1))
self._testBasics(num_unroll=num_unroll, length=3, pad=False,
expected_seq1_batch1=expected_seq1_batch1,
expected_seq2_batch1=expected_seq2_batch1,
expected_seq1_batch2=expected_seq1_batch2,
expected_seq2_batch2=expected_seq2_batch2)
def testNotAMultiple(self):
num_unroll = 3 # Not a divisor of value_length -
# so padding would have been necessary.
with self.test_session() as sess:
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
".*should be a multiple of: 3, but saw "
"value: 4. Consider setting pad=True."):
coord = tf.train.Coordinator()
threads = None
try:
with coord.stop_on_exception():
next_batch = tf.contrib.training.batch_sequences_with_states(
input_key=self.key,
input_sequences=self.sequences,
input_context=self.context,
input_length=3,
initial_states=self.initial_states,
num_unroll=num_unroll,
batch_size=self.batch_size,
num_threads=3,
# to enforce that we only move on to the next examples after
# finishing all segments of the first ones.
capacity=2,
pad=False)
threads = tf.train.start_queue_runners(coord=coord)
sess.run([next_batch.key])
except tf.errors.OutOfRangeError:
pass
finally:
coord.request_stop()
if threads is not None:
coord.join(threads, stop_grace_period_secs=2)
def testAdvancedPadding(self):
num_unroll = 3 # Not a divisor of value_length - so padding to 6 necessary.
expected_seq1_batch1 = np.tile(
self.sequences["seq1"][np.newaxis, 0:num_unroll, :],
(self.batch_size, 1, 1))
expected_seq2_batch1 = np.tile(
self.sequences["seq2"][np.newaxis, 0:num_unroll, :, :],
(self.batch_size, 1, 1, 1))
padded_seq1 = np.concatenate([
self.sequences["seq1"][np.newaxis, num_unroll:self.value_length, :],
np.zeros((1, 1, 5)), np.zeros((1, 1, 5))], axis=1)
expected_seq1_batch2 = np.concatenate([padded_seq1] * self.batch_size,
axis=0)
padded_seq2 = np.concatenate([
self.sequences["seq2"][np.newaxis, num_unroll:self.value_length, :],
np.zeros((1, 1, 4, 2)), np.zeros((1, 1, 4, 2))], axis=1)
expected_seq2_batch2 = np.concatenate([padded_seq2] * self.batch_size,
axis=0)
self._testBasics(num_unroll=num_unroll, length=None, pad=True,
expected_seq1_batch1=expected_seq1_batch1,
expected_seq2_batch1=expected_seq2_batch1,
expected_seq1_batch2=expected_seq1_batch2,
expected_seq2_batch2=expected_seq2_batch2)
class PaddingTest(tf.test.TestCase):
def testPaddingInvalidLengths(self):
with tf.Graph().as_default() as g, self.test_session(graph=g):
sequences = {"key_1": tf.constant([1, 2, 3]), # length 3
"key_2": tf.constant([1.5, 2.5])} # length 2
_, padded_seq = sqss._padding(sequences, 2)
with self.assertRaisesOpError(
".*All sequence lengths must match, but received lengths.*"):
padded_seq["key_1"].eval()
def testPadding(self):
with tf.Graph().as_default() as g, self.test_session(graph=g):
sequences = {"key_1": tf.constant([1, 2]),
"key_2": tf.constant([0.5, -1.0]),
"key_3": tf.constant(["a", "b"]), # padding strings
"key_4": tf.constant([[1, 2, 3], [4, 5, 6]])}
_, padded_seq = sqss._padding(sequences, 5)
expected_padded_seq = {
"key_1": [1, 2, 0, 0, 0],
"key_2": [0.5, -1.0, 0.0, 0.0, 0.0],
"key_3": ["a", "b", "", "", ""],
"key_4": [[1, 2, 3], [4, 5, 6], [0, 0, 0], [0, 0, 0], [0, 0, 0]]}
for key, val in expected_padded_seq.items():
self.assertTrue(tf.reduce_all(tf.equal(val, padded_seq[key])).eval())
if __name__ == "__main__":
tf.test.main()
|
python_modules/dagster/dagster_tests/general_tests/grpc_tests/test_persistent_container.py | dbatten5/dagster | 4,606 | 11066906 | import pytest
from dagster import seven
@pytest.mark.skipif(seven.IS_WINDOWS, reason="docker doesn't work on windows tests")
def test_ping(docker_grpc_client):
assert docker_grpc_client.ping("foobar") == "foobar"
@pytest.mark.skipif(seven.IS_WINDOWS, reason="docker doesn't work on windows tests")
def test_streaming(docker_grpc_client):
results = [
result for result in docker_grpc_client.streaming_ping(sequence_length=10, echo="foo")
]
assert len(results) == 10
for sequence_number, result in enumerate(results):
assert result["sequence_number"] == sequence_number
assert result["echo"] == "foo"
|
cupid/io/table/pd.py | wjsi/aliyun-odps-python-sdk | 412 | 11066911 | # Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import threading
import multiprocessing
import logging
from odps import options
from odps.tunnel.pdio import TunnelPandasReader, BasePandasWriter
from cupid.errors import SubprocessStreamEOFError
import numpy as np
del np
logger = logging.getLogger(__name__)
class CupidPandasReader(TunnelPandasReader):
def __init__(self, schema, input_stream, columns=None):
if isinstance(input_stream, tuple):
self._refill_data = input_stream
input_stream = None
else:
self._refill_data = None
super(CupidPandasReader, self).__init__(schema, input_stream, columns=columns)
self._input_stream = input_stream
self._table_schema = schema
self._input_columns = columns
self._stream_eof = False
self._closed = False
def to_forkable(self):
return type(self)(self._table_schema, self._build_refill_data(), self._input_columns)
def __repr__(self):
cls = type(self)
if self._refill_data is not None:
return '<%s.%s (slave) at 0x%x>' % (cls.__module__, cls.__name__, id(self))
else:
return '<%s.%s at 0x%x>' % (cls.__module__, cls.__name__, id(self))
def _build_refill_data(self):
if self._input_stream is None:
return self._refill_data
if self._refill_data is not None:
return self._refill_data
from multiprocessing.sharedctypes import RawArray
req_queue = multiprocessing.Queue()
rep_queue = multiprocessing.Queue()
buf = RawArray(ctypes.c_char, options.cupid.mp_buffer_size)
def _mp_thread():
try:
while True:
req_body = req_queue.get(timeout=60)
if req_body is None:
return
left_size, bound = req_body
try:
buf[:left_size] = buf[bound - left_size:bound]
read_size = self._input_stream.readinto(buf, left_size)
except SubprocessStreamEOFError:
return
rep_queue.put(read_size)
finally:
rep_queue.put(-1)
self.close()
stream_thread = threading.Thread(target=_mp_thread)
stream_thread.daemon = True
stream_thread.start()
self._refill_data = (buf, req_queue, rep_queue)
return self._refill_data
def refill_cache(self):
if self._refill_data is None:
return super(CupidPandasReader, self).refill_cache()
if self._stream_eof or self._closed:
return 0
buf, req_queue, rep_queue = self._refill_data
left_size = self.mem_cache_bound - self.row_mem_ptr
req_queue.put((left_size, self.mem_cache_bound))
read_size = rep_queue.get(timeout=60)
if read_size <= 0:
self._stream_eof = True
self.close()
return 0
self.reset_positions(buf, read_size + left_size)
return read_size
def close(self):
super(CupidPandasReader, self).close()
if self._input_stream is None and self._refill_data:
buf, req_queue, rep_queue = self._refill_data
req_queue.put(None)
self._closed = True
class CupidPandasWriter(BasePandasWriter):
def __init__(self, schema, output_stream):
super(CupidPandasWriter, self).__init__(schema, output_stream)
self._stream = output_stream
self._block_id = None
self._partition_spec = None
self._table_schema = schema
@property
def block_id(self):
return self._block_id
@property
def partition_spec(self):
return self._partition_spec
def write_stream(self, data, length):
self._stream.write(data, length)
def close(self):
super(CupidPandasWriter, self).close()
# sync by get result
result = self._stream.result()
logger.debug('Result fetched on writer close: %s', result)
self._stream.close()
|
dataloader/dataset_vg.py | Deanplayerljx/bottom-up-attention.pytorch | 210 | 11066919 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from .load_vg_json import load_vg_json
SPLITS = {
"visual_genome_train": ("vg/images", "vg/annotations/train.json"),
"visual_genome_val": ("vg/images", "vg/annotations/val.json"),
}
for key, (image_root, json_file) in SPLITS.items():
# Assume pre-defined datasets live in `./datasets`.
json_file = os.path.join("datasets", json_file)
image_root = os.path.join("datasets", image_root)
DatasetCatalog.register(
key,
lambda key=key, json_file=json_file, image_root=image_root: load_vg_json(
json_file, image_root, key
),
)
MetadataCatalog.get(key).set(
json_file=json_file, image_root=image_root
)
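if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): after the loop
    # above registers the splits, they can be fetched by name. This assumes
    # the Visual Genome images/annotations actually exist under ./datasets.
    records = DatasetCatalog.get("visual_genome_val")
    print("visual_genome_val:", len(records), "records")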
|
Section 5/ch05_r05_py.py | Asif-packt/Modern-Python-Solutions-Part-1 | 107 | 11066931 | """Python Cookbook
Chapter 5, recipe 5, part "py"
"""
import cmd
import sys
class REPL(cmd.Cmd):
prompt=">>> "
def preloop(self):
print( sys.version )
def do_def(self, arg):
pass
def do_class(self, arg):
pass
def do_EOF(self, arg):
return True
def default(self, arg):
if "=" in arg:
print( "Assignment" )
else:
print( "Expression" )
if __name__ == "__main__":
py = REPL()
py.cmdloop()
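# Illustrative transcript (not from the book text): with the REPL above,
#
#   >>> x = 1       # default() reports "Assignment"
#   >>> x + 1       # default() reports "Expression"
#   >>> def f():    # routed to do_def(), which ignores it
#   >>> ^D          # do_EOF() ends the loop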
|
lib/symbioticpy/symbiotic/targets/predator.py | paldebjit/symbiotic | 235 | 11066939 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 <NAME>
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . tool import SymbioticBaseTool
from symbiotic.utils.process import runcmd
from symbiotic.utils.watch import DbgWatch
try:
import benchexec.util as util
import benchexec.result as result
except ImportError:
# fall-back solution (at least for now)
import symbiotic.benchexec.util as util
import symbiotic.benchexec.result as result
try:
from symbiotic.versions import llvm_version
except ImportError:
# the default version
llvm_version='8.0.1'
class SymbioticTool(SymbioticBaseTool):
"""
Predator integraded into Symbiotic
"""
def __init__(self, opts):
SymbioticBaseTool.__init__(self, opts)
self._memsafety = self._options.property.memsafety()
def name(self):
return 'predator'
def executable(self):
return util.find_executable('check-property.sh',
'sl_build/check-property.sh')
def llvm_version(self):
"""
Return required version of LLVM
"""
return llvm_version
def set_environment(self, symbiotic_dir, opts):
"""
Set environment for the tool
"""
# do not link any functions
opts.linkundef = []
def passes_before_verification(self):
"""
Passes that should run before CPAchecker
"""
# llvm2c has a bug with PHI nodes
return ["-lowerswitch", "-simplifycfg"]
def actions_before_verification(self, symbiotic):
output = symbiotic.curfile + '.c'
runcmd(['llvm2c', symbiotic.curfile, '--o', output], DbgWatch('all'))
symbiotic.curfile = output
def passes_before_verification(self):
return ['-delete-undefined']
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
#cmd = PredatorTool.cmdline(self, executable, options,
# tasks, propertyfile, rlimits)
cmd = [self.executable(), '--trace=/dev/null',
'--propertyfile', propertyfile, '--'] + tasks
if self._options.is32bit:
cmd.append("-m32")
else:
cmd.append("-m64")
return cmd
def determine_result(self, returncode, returnsignal, output, isTimeout):
status = "UNKNOWN"
for line in (l.decode('ascii') for l in output):
if "UNKNOWN" in line:
status = result.RESULT_UNKNOWN
elif "TRUE" in line:
status = result.RESULT_TRUE_PROP
elif "FALSE(valid-memtrack)" in line:
status = result.RESULT_FALSE_MEMTRACK
elif "FALSE(valid-deref)" in line:
status = result.RESULT_FALSE_DEREF
elif "FALSE(valid-free)" in line:
status = result.RESULT_FALSE_FREE
elif "FALSE(valid-memcleanup)" in line:
status = result.RESULT_FALSE_MEMCLEANUP
elif "FALSE" in line:
status = result.RESULT_FALSE_REACH
if status == "UNKNOWN" and isTimeout:
status = "TIMEOUT"
return status
|
third_party/polymer/v1_0/css_strip_prefixes.py | zipated/src | 2,151 | 11066943 | <gh_stars>1000+
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import os
import re
# List of CSS properties to be removed.
CSS_PROPERTIES_TO_REMOVE = [
'-moz-appearance',
'-moz-box-sizing',
'-moz-flex-basis',
'-moz-user-select',
'-ms-align-content',
'-ms-align-self',
'-ms-flex',
'-ms-flex-align',
'-ms-flex-basis',
'-ms-flex-line-pack',
'-ms-flexbox',
'-ms-flex-direction',
'-ms-flex-pack',
'-ms-flex-wrap',
'-ms-inline-flexbox',
'-ms-user-select',
'-webkit-align-content',
'-webkit-align-items',
'-webkit-align-self',
'-webkit-animation',
'-webkit-animation-duration',
'-webkit-animation-iteration-count',
'-webkit-animation-name',
'-webkit-animation-timing-function',
'-webkit-flex',
'-webkit-flex-basis',
'-webkit-flex-direction',
'-webkit-flex-wrap',
'-webkit-inline-flex',
'-webkit-justify-content',
'-webkit-transform',
'-webkit-transform-origin',
'-webkit-transition',
'-webkit-transition-delay',
'-webkit-transition-property',
'-webkit-user-select',
]
# Regex to detect a CSS line of interest (helps avoiding edge cases, like
# removing the 1st line of a multi-line CSS rule).
CSS_LINE_REGEX = '^\s*[^;\s]+:\s*[^;]+;\s*(/\*.+/*/)*\s*$';
def ProcessFile(filename):
# Gather indices of lines to be removed.
indices_to_remove = [];
with open(filename) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if ShouldRemoveLine(line):
indices_to_remove.append(i)
if len(indices_to_remove):
print 'stripping CSS from: ' + filename
# Process line numbers in descinding order, such that the array can be
# modified in-place.
indices_to_remove.reverse()
for i in indices_to_remove:
del lines[i]
# Reconstruct file.
with open(filename, 'w') as f:
for l in lines:
f.write(l)
return
def ShouldRemoveLine(line):
pred = lambda p: re.search(CSS_LINE_REGEX, line) and re.search(p, line)
return any(pred(p) for p in CSS_PROPERTIES_TO_REMOVE)
def main():
html_files = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk('components-chromium')
for f in fnmatch.filter(files, '*.html')]
for f in html_files:
ProcessFile(f)
if __name__ == '__main__':
main()
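# Illustrative check (not part of the original script): ShouldRemoveLine()
# only flags property lines that carry one of the listed prefixes, e.g.
#
#   ShouldRemoveLine('  -webkit-transform: scale(2);')  # truthy -> stripped
#   ShouldRemoveLine('  transform: scale(2);')          # falsy  -> kept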
|
setup.py | klj123wan/python-torngas | 492 | 11066947 | <reponame>klj123wan/python-torngas
from setuptools import setup, find_packages
import torngas
setup(
name="torngas",
version=torngas.__version__,
description="torngas based on tornado",
long_description="torngas is based on tornado,django like web framework.",
keywords='python torngas django tornado',
author="mqingyn",
url="https://github.com/mqingyn/torngas",
license="BSD",
packages=find_packages(),
package_data={'torngas': ['resource/exception.html']},
author_email="<EMAIL>",
requires=['tornado', 'futures'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
scripts=[],
install_requires=[
'futures',
],
)
|
cassiopeia/datastores/kernel/thirdpartycode.py | artemigkh/cassiopeia | 437 | 11066948 | <reponame>artemigkh/cassiopeia
from typing import Type, TypeVar, MutableMapping, Any, Iterable, Generator
from datapipelines import DataSource, PipelineContext, Query, NotFoundError, validate_query
from .common import KernelSource, APINotFoundError
from ...data import Platform
from ...dto.thirdpartycode import VerificationStringDto
from ..uniquekeys import convert_region_to_platform
T = TypeVar("T")
def _get_default_locale(query: MutableMapping[str, Any], context: PipelineContext) -> str:
return query["platform"].default_locale
class ThirdPartyCodeAPI(KernelSource):
@DataSource.dispatch
def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
pass
@DataSource.dispatch
def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
#######################
# Verification String #
#######################
_validate_get_verification_string_query = Query. \
has("platform").as_(Platform).also. \
has("summoner.id").as_(str)
@get.register(VerificationStringDto)
@validate_query(_validate_get_verification_string_query, convert_region_to_platform)
def get_verification_string(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> VerificationStringDto:
parameters = {"platform": query["platform"].value}
endpoint = "lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(summonerId=query["summoner.id"])
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except (ValueError, APINotFoundError) as error:
raise NotFoundError(str(error)) from error
data = {"string": data}
data["region"] = query["platform"].region.value
data["summonerId"] = query["summoner.id"]
return VerificationStringDto(data)
_validate_get_many_verification_string_query = Query. \
has("platforms").as_(Iterable).also. \
has("summoner.ids").as_(Iterable)
@get_many.register(VerificationStringDto)
@validate_query(_validate_get_many_verification_string_query, convert_region_to_platform)
def get_many_verification_string(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> Generator[VerificationStringDto, None, None]:
def generator():
            for platform, summoner_id in zip(query["platforms"], query["summoner.ids"]):
                platform = Platform(platform.upper())
                parameters = {"platform": platform.value}
endpoint = "lol/platform/v4/third-party-code/by-summoner/{summonerId}".format(summonerId=summoner_id)
try:
data = self._get(endpoint=endpoint, parameters=parameters)
except APINotFoundError as error:
raise NotFoundError(str(error)) from error
data = {"string": data}
data["region"] = platform.region.value
data["summonerId"] = summoner_id
yield VerificationStringDto(data)
return generator()
|
proximal/algorithms/admm.py | kyleaj/ProxImaL | 101 | 11066952 | from __future__ import division, print_function
from proximal.lin_ops import CompGraph, scale, vstack
from proximal.utils.timings_log import TimingsLog, TimingsEntry
from .invert import get_least_squares_inverse, get_diag_quads
import numpy as np
import numexpr as ne
def partition(prox_fns, try_diagonalize=True):
"""Divide the proxable functions into sets Psi and Omega.
"""
# Merge these quadratic functions with the v update.
quad_funcs = []
# All lin ops must be gram diagonal for the least squares problem
# to be diagonal.
func_opts = {True: [], False: []}
for freq in [True, False]:
if all([fn.lin_op.is_gram_diag(freq) for fn in prox_fns]):
func_opts[freq] = get_diag_quads(prox_fns, freq)
# Quad funcs is the max cardinality set.
if len(func_opts[True]) >= len(func_opts[False]):
quad_funcs = func_opts[True]
else:
quad_funcs = func_opts[False]
psi_fns = [fn for fn in prox_fns if fn not in quad_funcs]
return psi_fns, quad_funcs
def solve(psi_fns, omega_fns, rho=1.0,
max_iters=1000, eps_abs=1e-3, eps_rel=1e-3, x0=None,
lin_solver="cg", lin_solver_options=None,
try_diagonalize=True, try_fast_norm=False,
scaled=True, conv_check=100,
implem=None,
metric=None, convlog=None, verbose=0):
prox_fns = psi_fns + omega_fns
stacked_ops = vstack([fn.lin_op for fn in psi_fns])
K = CompGraph(stacked_ops)
# Rescale so (rho/2)||x - b||^2_2
rescaling = np.sqrt(2. / rho)
quad_ops = []
const_terms = []
for fn in omega_fns:
fn = fn.absorb_params()
quad_ops.append(scale(rescaling * fn.beta, fn.lin_op))
const_terms.append(fn.b.flatten() * rescaling)
# Check for fast inverse.
op_list = [func.lin_op for func in psi_fns] + quad_ops
stacked_ops = vstack(op_list)
# Get optimize inverse (tries spatial and frequency diagonalization)
v_update = get_least_squares_inverse(op_list, None, try_diagonalize, verbose)
# Initialize everything to zero.
v = np.zeros(K.input_size)
z = np.zeros(K.output_size)
u = np.zeros(K.output_size)
# Initialize
if x0 is not None:
v[:] = np.reshape(x0, K.input_size)
K.forward(v, z)
# Buffers.
Kv = np.zeros(K.output_size)
KTu = np.zeros(K.input_size)
s = np.zeros(K.input_size)
# Log for prox ops.
prox_log = TimingsLog(prox_fns)
# Time iterations.
iter_timing = TimingsEntry("ADMM iteration")
# Convergence log for initial iterate
if convlog is not None:
K.update_vars(v)
objval = sum([func.value for func in prox_fns])
convlog.record_objective(objval)
convlog.record_timing(0.0)
for i in range(max_iters):
iter_timing.tic()
if convlog is not None:
convlog.tic()
z_prev = z.copy()
# Update v.
tmp = np.hstack([z - u] + const_terms)
v = v_update.solve(tmp, x_init=v, lin_solver=lin_solver, options=lin_solver_options)
# Update z.
K.forward(v, Kv)
Kv_u = Kv + u
offset = 0
for fn in psi_fns:
slc = slice(offset, offset + fn.lin_op.size, None)
Kv_u_slc = np.reshape(Kv_u[slc], fn.lin_op.shape)
# Apply and time prox.
prox_log[fn].tic()
z[slc] = fn.prox(rho, Kv_u_slc, i).flatten()
prox_log[fn].toc()
offset += fn.lin_op.size
# Update u.
ne.evaluate('u + Kv - z', out=u)
# Check convergence.
if i % conv_check == 0:
r = Kv - z
K.adjoint(u, KTu)
K.adjoint(rho * (z - z_prev), s)
eps_pri = np.sqrt(K.output_size) * eps_abs + eps_rel * \
max([np.linalg.norm(Kv), np.linalg.norm(z)])
eps_dual = np.sqrt(K.input_size) * eps_abs + eps_rel * np.linalg.norm(KTu) * rho
# Convergence log
if convlog is not None:
convlog.toc()
K.update_vars(v)
objval = sum([fn.value for fn in prox_fns])
convlog.record_objective(objval)
# Show progess
if verbose > 0 and i % conv_check == 0:
# Evaluate objective only if required (expensive !)
objstr = ''
if verbose == 2:
K.update_vars(v)
objstr = ", obj_val = %02.03e" % sum([fn.value for fn in prox_fns])
# Evaluate metric potentially
metstr = '' if metric is None else ", {}".format(metric.message(v))
print("iter %d: ||r||_2 = %.3f, eps_pri = %.3f, ||s||_2 = %.3f, eps_dual = %.3f%s%s" % (
i, np.linalg.norm(r), eps_pri, np.linalg.norm(s), eps_dual, objstr, metstr))
iter_timing.toc()
# Exit if converged.
if np.linalg.norm(r) <= eps_pri and np.linalg.norm(s) <= eps_dual:
break
# Print out timings info.
if verbose > 0:
print(iter_timing)
print("prox funcs:")
print(prox_log)
print("K forward ops:")
print(K.forward_log)
print("K adjoint ops:")
print(K.adjoint_log)
# Assign values to variables.
K.update_vars(v)
# Return optimal value.
return sum([fn.value for fn in prox_fns])
|
pylxd/tests/models/test_image.py | jonans/pylxd | 247 | 11066964 | <gh_stars>100-1000
import hashlib
import json
from io import StringIO
from pylxd import exceptions, models
from pylxd.tests import testing
class TestImage(testing.PyLXDTestCase):
"""Tests for pylxd.models.Image."""
def test_get(self):
"""An image is fetched."""
fingerprint = hashlib.sha256(b"").hexdigest()
a_image = models.Image.get(self.client, fingerprint)
self.assertEqual(fingerprint, a_image.fingerprint)
def test_get_not_found(self):
"""LXDAPIException is raised when the image isn't found."""
def not_found(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 404}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
}
)
fingerprint = hashlib.sha256(b"").hexdigest()
self.assertRaises(
exceptions.LXDAPIException, models.Image.get, self.client, fingerprint
)
def test_get_error(self):
"""LXDAPIException is raised on error."""
def error(request, context):
context.status_code = 500
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 500}
)
self.add_rule(
{
"text": error,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
}
)
fingerprint = hashlib.sha256(b"").hexdigest()
self.assertRaises(
exceptions.LXDAPIException, models.Image.get, self.client, fingerprint
)
def test_get_by_alias(self):
fingerprint = hashlib.sha256(b"").hexdigest()
a_image = models.Image.get_by_alias(self.client, "an-alias")
self.assertEqual(fingerprint, a_image.fingerprint)
def test_exists(self):
"""An image is fetched."""
fingerprint = hashlib.sha256(b"").hexdigest()
self.assertTrue(models.Image.exists(self.client, fingerprint))
def test_exists_by_alias(self):
"""An image is fetched."""
self.assertTrue(models.Image.exists(self.client, "an-alias", alias=True))
def test_not_exists(self):
"""LXDAPIException is raised when the image isn't found."""
def not_found(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 404}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
}
)
fingerprint = hashlib.sha256(b"").hexdigest()
self.assertFalse(models.Image.exists(self.client, fingerprint))
def test_all(self):
"""A list of all images is returned."""
images = models.Image.all(self.client)
self.assertEqual(1, len(images))
def test_create(self):
"""An image is created."""
fingerprint = hashlib.sha256(b"").hexdigest()
a_image = models.Image.create(self.client, b"", public=True, wait=True)
self.assertIsInstance(a_image, models.Image)
self.assertEqual(fingerprint, a_image.fingerprint)
def test_create_with_metadata(self):
"""An image with metadata is created."""
fingerprint = hashlib.sha256(b"").hexdigest()
a_image = models.Image.create(
self.client, b"", metadata=b"", public=True, wait=True
)
self.assertIsInstance(a_image, models.Image)
self.assertEqual(fingerprint, a_image.fingerprint)
def test_create_with_metadata_streamed(self):
"""An image with metadata is created."""
fingerprint = hashlib.sha256(b"").hexdigest()
a_image = models.Image.create(
self.client, StringIO(""), metadata=StringIO(""), public=True, wait=True
)
self.assertIsInstance(a_image, models.Image)
self.assertEqual(fingerprint, a_image.fingerprint)
def test_update(self):
"""An image is updated."""
a_image = self.client.images.all()[0]
a_image.sync()
a_image.save()
def test_fetch(self):
"""A partial object is fetched and populated."""
a_image = self.client.images.all()[0]
a_image.sync()
self.assertEqual(1, a_image.size)
def test_fetch_notfound(self):
"""A bogus image fetch raises LXDAPIException."""
def not_found(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 404}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
}
)
fingerprint = hashlib.sha256(b"").hexdigest()
a_image = models.Image(self.client, fingerprint=fingerprint)
self.assertRaises(exceptions.LXDAPIException, a_image.sync)
def test_fetch_error(self):
"""A 500 error raises LXDAPIException."""
def not_found(request, context):
context.status_code = 500
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 500}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
}
)
fingerprint = hashlib.sha256(b"").hexdigest()
a_image = models.Image(self.client, fingerprint=fingerprint)
self.assertRaises(exceptions.LXDAPIException, a_image.sync)
def test_delete(self):
"""An image is deleted."""
# XXX: rockstar (03 Jun 2016) - This just executes
# a code path. There should be an assertion here, but
# it's not clear how to assert that, just yet.
a_image = self.client.images.all()[0]
a_image.delete(wait=True)
def test_export(self):
"""An image is exported."""
expected = "e2943f8d0b0e7d5835f9533722a6e25f669acb8980daee378b4edb44da212f51"
a_image = self.client.images.all()[0]
data = a_image.export()
data_sha = hashlib.sha256(data.read()).hexdigest()
self.assertEqual(expected, data_sha)
def test_export_not_found(self):
"""LXDAPIException is raised on export of bogus image."""
def not_found(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 404}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/export$",
}
)
a_image = self.client.images.all()[0]
self.assertRaises(exceptions.LXDAPIException, a_image.export)
def test_export_error(self):
"""LXDAPIException is raised on API error."""
def error(request, context):
context.status_code = 500
return json.dumps(
{"type": "error", "error": "LOLOLOLOL", "error_code": 500}
)
self.add_rule(
{
"text": error,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/export$",
}
)
a_image = self.client.images.all()[0]
self.assertRaises(exceptions.LXDAPIException, a_image.export)
def test_add_alias(self):
"""Try to add an alias."""
a_image = self.client.images.all()[0]
a_image.add_alias("lol", "Just LOL")
aliases = [a["name"] for a in a_image.aliases]
self.assertTrue("lol" in aliases, "Image didn't get updated.")
    def test_add_alias_duplicate(self):
        """Adding an alias twice should raise an LXDAPIException."""
def error(request, context):
context.status_code = 409
return json.dumps(
{"type": "error", "error": "already exists", "error_code": 409}
)
self.add_rule(
{
"text": error,
"method": "POST",
"url": r"^http://pylxd.test/1.0/images/aliases$",
}
)
a_image = self.client.images.all()[0]
self.assertRaises(
exceptions.LXDAPIException, a_image.add_alias, "lol", "Just LOL"
)
    def test_remove_alias(self):
        """Try to remove the 'an-alias' alias."""
a_image = self.client.images.all()[0]
a_image.delete_alias("an-alias")
self.assertEqual(0, len(a_image.aliases), "Alias didn't get deleted.")
    def test_remove_alias_error(self):
        """Try to remove a non-existent alias."""
def error(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "not found", "error_code": 404}
)
self.add_rule(
{
"text": error,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/images/aliases/lol$",
}
)
a_image = self.client.images.all()[0]
self.assertRaises(exceptions.LXDAPIException, a_image.delete_alias, "lol")
def test_remove_alias_not_in_image(self):
"""Try to remove an alias which is not in the current image."""
a_image = self.client.images.all()[0]
a_image.delete_alias("b-alias")
def test_copy(self):
"""Try to copy an image to another LXD instance."""
from pylxd.client import Client
a_image = self.client.images.all()[0]
client2 = Client(endpoint="http://pylxd2.test")
copied_image = a_image.copy(client2, wait=True)
self.assertEqual(a_image.fingerprint, copied_image.fingerprint)
def test_copy_public(self):
"""Try to copy a public image."""
from pylxd.client import Client
def image_get(request, context):
context.status_code = 200
return json.dumps(
{
"type": "sync",
"metadata": {
"aliases": [
{
"name": "an-alias",
"fingerprint": "<KEY>",
}
],
"architecture": "x86_64",
"cached": False,
"filename": "a_image.tar.bz2",
"fingerprint": "<KEY>",
"public": True,
"properties": {},
"size": 1,
"auto_update": False,
"created_at": "1983-06-16T02:42:00Z",
"expires_at": "1983-06-16T02:42:00Z",
"last_used_at": "1983-06-16T02:42:00Z",
"uploaded_at": "1983-06-16T02:42:00Z",
},
}
)
self.add_rule(
{
"text": image_get,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
}
)
a_image = self.client.images.all()[0]
self.assertTrue(a_image.public)
client2 = Client(endpoint="http://pylxd2.test")
copied_image = a_image.copy(client2, wait=True)
self.assertEqual(a_image.fingerprint, copied_image.fingerprint)
def test_copy_no_wait(self):
"""Try to copy and don't wait."""
from pylxd.client import Client
a_image = self.client.images.all()[0]
client2 = Client(endpoint="http://pylxd2.test")
a_image.copy(client2, public=False, auto_update=False)
def test_create_from_simplestreams(self):
"""Try to create an image from simplestreams."""
image = self.client.images.create_from_simplestreams(
"https://cloud-images.ubuntu.com/releases", "trusty/amd64"
)
self.assertEqual(
"<KEY>",
image.fingerprint,
)
    def test_create_from_url(self):
        """Try to create an image from a URL."""
image = self.client.images.create_from_url("https://dl.stgraber.org/lxd")
self.assertEqual(
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
image.fingerprint,
)
|
homeassistant/components/mycroft/__init__.py | tbarbette/core | 22,481 | 11066966 | <gh_stars>1000+
"""Support for Mycroft AI."""
import voluptuous as vol
from homeassistant.const import CONF_HOST
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
DOMAIN = "mycroft"
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_HOST): cv.string})}, extra=vol.ALLOW_EXTRA
)
def setup(hass, config):
"""Set up the Mycroft component."""
hass.data[DOMAIN] = config[DOMAIN][CONF_HOST]
discovery.load_platform(hass, "notify", DOMAIN, {}, config)
return True
|
django_irods/models.py | hydroshare/hydroshare | 178 | 11066990 | from django.db import models as m
from django.contrib.auth.models import User
class RodsEnvironment(m.Model):
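    """Connection settings for an iRODS grid (host, port, zone and credentials)."""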
owner = m.ForeignKey(User)
host = m.CharField(verbose_name='Hostname', max_length=255)
port = m.IntegerField()
def_res = m.CharField(verbose_name="Default resource", max_length=255)
home_coll = m.CharField(verbose_name="Home collection", max_length=255)
cwd = m.TextField(verbose_name="Working directory")
username = m.CharField(max_length=255)
zone = m.TextField()
auth = m.TextField(verbose_name='Password')
def __unicode__(self):
return '{username}@{host}:{port}//{def_res}/{home_coll}'.format(
            username=self.username,
            host=self.host,
            port=self.port,
            def_res=self.def_res,
            home_coll=self.home_coll
)
class Meta:
verbose_name = 'iRODS Environment'
|
tests/plugins/test_stdout_filters.py | dexy/dexy | 136 | 11067010 | <filename>tests/plugins/test_stdout_filters.py<gh_stars>100-1000
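# Each test feeds a small input through one of dexy's stdout-based filters and
# checks the captured output.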
from tests.utils import assert_in_output
from tests.utils import assert_output
from tests.utils import assert_output_matches
import inspect
import os
def test_node():
assert_output("nodejs", "console.log('hello');", "hello\n")
def test_rd():
rd = """
\\name{load}
\\alias{load}
\\title{Reload Saved Datasets}
    \\description{
    Reload the datasets written to a file with the function
    \\code{save}.
}
"""
expected = "Reload the datasets written to a file with the function \u2018save\u2019."
assert_in_output('rdconv', rd, expected, ext=".Rd")
def test_redcloth():
expected = "<p>hello <strong>bold</strong></p>" + os.linesep
assert_output("redcloth", "hello *bold*", expected)
def test_redclothl():
expected = "hello \\textbf{bold}" + os.linesep + os.linesep
assert_output("redclothl", "hello *bold*", expected)
def test_lynxdump():
assert_output_matches('lynxdump', "<p>hello</p>", "\s*hello\s*", ext=".html")
def test_strings():
assert_output('strings', "hello\bmore", "hello\nmore\n")
def test_php():
php = inspect.cleandoc("""<?php
echo(1+1);
?>""")
assert_output('php', php, "2")
def test_ragel_ruby_dot():
ragel = inspect.cleandoc("""
%%{
machine hello_and_welcome;
main := ( 'h' @ { puts "hello world!" }
| 'w' @ { puts "welcome" }
)*;
}%%
data = 'whwwwwhw'
%% write data;
%% write init;
%% write exec;
""")
assert_in_output('ragelrubydot', ragel, "digraph hello_and_welcome", ext=".rl")
def test_python():
assert_output('py', 'print(1+1)', "2" + os.linesep)
def test_bash():
assert_output('bash', 'echo "hello"', "hello\n")
def test_rhino():
assert_output('rhino', "print(6*7)", "42\n")
def test_cowsay():
assert_in_output('cowsay', 'hello', 'hello')
def test_cowthink():
assert_in_output('cowthink', 'hello', 'hello')
def test_figlet():
assert_in_output('figlet', 'hello', "| |__ ___| | | ___ ")
def test_man_page():
assert_in_output('man', 'ls', 'list directory contents')
def test_ruby():
assert_output('rb', 'puts "hello"', "hello\n")
def test_sloccount():
assert_in_output('sloccount', 'puts "hello"', "ruby=1", ext=".rb")
def test_irb_subprocess_stdout_filter():
assert_in_output('irbout', 'puts "hello"', '> puts "hello"')
def test_lua():
assert_output('lua', 'print ("Hello")', "Hello\n")
def test_wiki2beamer():
wiki = inspect.cleandoc("""==== A simple frame ====
* with a funky
* bullet list
*# and two
*# numbered sub-items
""")
assert_in_output('wiki2beamer', wiki, "\\begin{frame}")
assert_in_output('wiki2beamer', wiki, "\\begin{enumerate}")
|
cami/scripts/intro_defines.py | hugmyndakassi/hvmi | 677 | 11067023 | #
# Copyright (c) 2020 Bitdefender
# SPDX-License-Identifier: Apache-2.0
#
defines = {
"MAX_VERSION_STRING_SIZE" : 64,
"CAMI_MAGIC_WORD" : 0x494d4143 # CAMI
}
version_any = {
"WIN_PATTERN_MIN_VERSION_ANY" : 0,
"WIN_PATTERN_MAX_VERSION_ANY" : 0xFFFFFFFF
}
section_hints = {
"supported_os": 0x0001,
"syscalls" : 0x0002,
"dist_sigs" : 0x0004,
"linux" : 0x0200,
"windows" : 0x0100,
}
process_options_flags = {
"name_utf16": 0x0001,
}
# These defines, except the stack ones, match the REG_* values from disasm/registers.h
detour_args = {
# describes arguments passed through GPRs.
"DET_ARG_RAX": 0,
"DET_ARG_RCX": 1,
"DET_ARG_RDX": 2,
"DET_ARG_RBX": 3,
"DET_ARG_RSP": 4,
"DET_ARG_RBP": 5,
"DET_ARG_RSI": 6,
"DET_ARG_RDI": 7,
"DET_ARG_R8": 8,
"DET_ARG_R9": 9,
"DET_ARG_R10": 10,
"DET_ARG_R11": 11,
"DET_ARG_R12": 12,
"DET_ARG_R13": 13,
"DET_ARG_R14": 14,
"DET_ARG_R15": 15,
# describes arguments passed through the stack.
"DET_ARG_STACK0": 0x0FFFF,
"DET_ARG_STACK1": 0x1FFFF,
"DET_ARG_STACK2": 0x2FFFF,
"DET_ARG_STACK3": 0x3FFFF,
"DET_ARG_STACK4": 0x4FFFF,
"DET_ARG_STACK5": 0x5FFFF,
"DET_ARG_STACK6": 0x6FFFF,
"DET_ARG_STACK7": 0x7FFFF,
"DET_ARG_STACK8": 0x8FFFF,
"DET_ARG_STACK9": 0x9FFFF,
"DET_ARG_STACK10": 0xAFFFF,
"DET_ARG_STACK11": 0xBFFFF,
"DET_ARG_STACK12": 0xCFFFF,
"DET_ARG_STACK13": 0xDFFFF,
"DET_ARG_STACK14": 0xEFFFF,
"DET_ARG_STACK15": 0xFFFFF,
"DET_ARGS_MAX": 8,
}
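# Bit masks for global introspection options, per-process options and shemu options.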
intro_options = {
"NONE" : 0x000000000000000,
"PROT_KM_NT" : 0x0000000000000001,
"PROT_KM_HAL" : 0x0000000000000002,
"PROT_KM_SSDT" : 0x0000000000000004,
"PROT_KM_IDT" : 0x0000000000000008,
"PROT_KM_HDT" : 0x0000000000000010,
"PROT_KM_SYSTEM_CR3" : 0x0000000000000020,
"PROT_KM_TOKENS" : 0x0000000000000040,
"PROT_KM_NT_DRIVERS" : 0x0000000000000080,
"PROT_KM_AV_DRIVERS" : 0x0000000000000100,
"PROT_KM_XEN_DRIVERS" : 0x0000000000000200,
"PROT_KM_DRVOBJ" : 0x0000000000000400,
"PROT_KM_CR4" : 0x0000000000000800,
"PROT_KM_MSR_SYSCALL" : 0x0000000000001000,
"PROT_KM_IDTR" : 0x0000000000002000,
"PROT_KM_HAL_HEAP_EXEC" : 0x0000000000004000,
"PROT_KM_HAL_INT_CTRL" : 0x0000000000008000,
"PROT_UM_MISC_PROCS" : 0x0000000000010000,
"PROT_UM_SYS_PROCS" : 0x0000000000020000,
"PROT_KM_SELF_MAP_ENTRY" : 0x0000000000040000,
"PROT_KM_GDTR" : 0x0000000000080000,
"EVENT_PROCESSES" : 0x0000000000100000,
"EVENT_MODULES" : 0x0000000000200000,
"EVENT_OS_CRASH" : 0x0000000000400000,
"EVENT_PROCESS_CRASH" : 0x0000000000800000,
"AGENT_INJECTION" : 0x0000000001000000,
"FULL_PATH" : 0x0000000002000000,
"KM_BETA_DETECTIONS" : 0x0000000004000000,
"NOTIFY_ENGINES" : 0x0000000008000000,
"IN_GUEST_PT_FILTER" : 0x0000000010000000,
"BUGCHECK_CLEANUP" : 0x0000000020000000,
"SYSPROC_BETA_DETECTIONS" : 0x0000000040000000,
"VE" : 0x0000000080000000,
"ENABLE_CONNECTION_EVENTS" : 0x0000000100000000,
"PROT_KM_LOGGER_CONTEXT" : 0x0000000200000000,
"DPI_DEBUG" : 0x0000000400000000,
"DPI_STACK_PIVOT" : 0x0000000800000000,
"DPI_TOKEN_STEAL" : 0x0000001000000000,
"DPI_HEAP_SPRAY" : 0x0000002000000000,
"NT_EAT_READS" : 0x0000004000000000,
# Process options
"RESERVED_1" : 0x00000001,
"RESERVED_2" : 0x00000002,
"HOOKS" : 0x00000004,
"CORE_HOOKS" : 0x00000004,
"UNPACK" : 0x00000008,
"WRITE_MEM" : 0x00000010,
"WSOCK_HOOKS" : 0x00000020,
"EXPLOIT" : 0x00000040,
"SET_THREAD_CTX" : 0x00000080,
"QUEUE_APC" : 0x00000100,
"PREVENT_CHILD_CREATION" : 0x00000200,
"DOUBLE_AGENT" : 0x00000400,
"ENG_CMD_LINE" : 0x00000800,
"REMEDIATE" : 0x20000000,
"KILL_ON_EXPLOIT" : 0x40000000,
"BETA" : 0x80000000,
# Shemu options
"NOP_SLED" : 0x00000001,
"LOAD_RIP" : 0x00000002,
"WRITE_SELF" : 0x00000004,
"TIB_ACCESS" : 0x00000008,
"SYSCALL" : 0x00000010,
"STACK_STR" : 0x00000020,
}
|
tools/create_colormap.py | KoryakovDmitry/synthtiger | 153 | 11067036 | """
SynthTIGER
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
import argparse
import os
import pprint
import time
import traceback
from concurrent.futures import ProcessPoolExecutor, as_completed
import numpy as np
import scipy.cluster
from PIL import Image
def search_files(root, names=None, exts=None):
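    # Walk `root` recursively and collect file paths, optionally filtered by
    # file name and/or (lower-cased) extension.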
paths = []
for dir_path, _, file_names in os.walk(root):
for file_name in file_names:
file_path = os.path.join(dir_path, file_name)
file_ext = os.path.splitext(file_name)[1]
if names is not None and file_name not in names:
continue
if exts is not None and file_ext.lower() not in exts:
continue
paths.append(file_path)
return paths
def write_cluster(fp, clusters):
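    # Write one colormap per line: cluster centers and their standard
    # deviations, tab-separated; each center is a comma-joined channel list.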
texts = []
for center, std in clusters:
center = ",".join(list(map(str, center)))
std = str(std)
texts.append(f"{center}\t{std}")
text = "\t".join(texts)
fp.write(f"{text}\n")
def get_cluster(path, k, rgb=False):
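    # K-means cluster the image's pixels into k colors and compute the
    # per-cluster standard deviation; returns None if fewer than k centers
    # are found.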
clusters = []
mode = "RGB" if rgb else "L"
channel = 3 if rgb else 1
image = Image.open(path).convert(mode)
image = np.array(image, dtype=np.float32).reshape(-1, channel)
centers, _ = scipy.cluster.vq.kmeans(image, k)
if len(centers) != k:
return None
vecs, _ = scipy.cluster.vq.vq(image, centers)
stds = [np.std(image[vecs == idx]) for idx in range(len(centers))]
for center, std in zip(centers, stds):
clusters.append((list(center), std))
clusters = sorted(clusters)
return clusters
def run(args):
paths = search_files(args.input, exts=[".jpg", ".jpeg", ".png", ".bmp"])
os.makedirs(os.path.dirname(args.output), exist_ok=True)
output_file = open(args.output, "w", encoding="utf-8")
executor = ProcessPoolExecutor(max_workers=args.worker)
count = 0
for k in range(2, args.max_k + 1):
futures = {}
for path in paths:
future = executor.submit(get_cluster, path, k, args.rgb)
futures[future] = path
for future in as_completed(futures):
path = futures[future]
try:
clusters = future.result()
except:
print(f"{traceback.format_exc()} ({path})")
continue
if clusters is not None:
write_cluster(output_file, clusters)
count += 1
print(f"Created colormap ({k} colors) ({path})")
executor.shutdown()
output_file.close()
print(f"Created {count} colormaps")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, default=None, required=True)
parser.add_argument("--output", type=str, default=None, required=True)
parser.add_argument("--max_k", type=int, default=3)
parser.add_argument("--rgb", action="store_true", default=False)
parser.add_argument("--worker", type=int, default=1)
args = parser.parse_args()
pprint.pprint(vars(args))
return args
def main():
start_time = time.time()
args = parse_args()
run(args)
end_time = time.time()
print(f"{end_time - start_time:.2f} seconds elapsed")
if __name__ == "__main__":
main()
|
src/clusterfuzz/_internal/tests/core/bot/fuzzers/libFuzzer/engine_test.py | mspectorgoogle/clusterfuzz | 5,023 | 11067037 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for libFuzzer engine."""
# pylint: disable=unused-argument
import os
import shutil
import tempfile
import unittest
import mock
import parameterized
import pyfakefs.fake_filesystem_unittest as fake_fs_unittest
import six
from clusterfuzz._internal.bot.fuzzers import engine_common
from clusterfuzz._internal.bot.fuzzers import libfuzzer
from clusterfuzz._internal.bot.fuzzers import strategy_selection
from clusterfuzz._internal.bot.fuzzers import utils as fuzzer_utils
from clusterfuzz._internal.bot.fuzzers.libFuzzer import constants
from clusterfuzz._internal.bot.fuzzers.libFuzzer import engine
from clusterfuzz._internal.build_management import build_manager
from clusterfuzz._internal.fuzzing import strategy
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import new_process
from clusterfuzz._internal.system import process_handler
from clusterfuzz._internal.system import shell
from clusterfuzz._internal.tests.test_libs import android_helpers
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
try:
from shlex import quote
except ImportError:
from pipes import quote
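# Locations of the test fixtures (fuzzer binaries, corpora) and scratch space.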
TEST_PATH = os.path.abspath(os.path.dirname(__file__))
TEST_DIR = os.path.join(TEST_PATH, 'libfuzzer_test_data')
TEMP_DIR = os.path.join(TEST_PATH, 'temp')
DATA_DIR = os.path.join(TEST_PATH, 'data')
ANDROID_DATA_DIR = os.path.join(DATA_DIR, 'android')
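# Keep a reference to the real implementation so mocks can delegate to it.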
_get_directory_file_count_orig = shell.get_directory_file_count
class PrepareTest(fake_fs_unittest.TestCase):
"""Prepare() tests."""
def setUp(self):
"""Setup for fake filesystem prepare test."""
test_helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
test_helpers.patch(self, [
'clusterfuzz._internal.bot.fuzzers.engine_common.unpack_seed_corpus_if_needed',
])
self.fs.create_dir('/inputs')
self.fs.create_file('/path/target')
self.fs.create_file('/path/blah.dict')
self.fs.create_file('/path/target_seed_corpus.zip')
self.fs.create_file(
'/path/target.options',
contents=('[libfuzzer]\n'
'max_len=31337\n'
'timeout=11\n'
'dict=blah.dict\n'))
os.environ['FAIL_RETRIES'] = '1'
os.environ['FUZZ_INPUTS_DISK'] = '/inputs'
test_helpers.patch(
self, ['clusterfuzz._internal.bot.fuzzers.libfuzzer.pick_strategies'])
self.mock.pick_strategies.return_value = libfuzzer.StrategyInfo(
fuzzing_strategies=[
'unknown_1', 'value_profile', 'corpus_subset_20', 'fork_2'
],
arguments=['-arg1'],
additional_corpus_dirs=['/new_corpus_dir'],
extra_env={'extra_env': '1'},
use_dataflow_tracing=False,
is_mutations_run=True)
def test_prepare(self):
"""Test prepare."""
engine_impl = engine.Engine()
options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
self.assertEqual('/corpus_dir', options.corpus_dir)
six.assertCountEqual(self, [
'-max_len=31337', '-timeout=11', '-rss_limit_mb=2560', '-arg1',
'-dict=/path/blah.dict'
], options.arguments)
self.assertDictEqual({
'value_profile': 1,
'corpus_subset': 20,
'fork': 2
}, options.strategies)
six.assertCountEqual(self, ['/new_corpus_dir', '/corpus_dir'],
options.fuzz_corpus_dirs)
self.assertDictEqual({'extra_env': '1'}, options.extra_env)
self.assertFalse(options.use_dataflow_tracing)
self.assertTrue(options.is_mutations_run)
self.mock.unpack_seed_corpus_if_needed.assert_called_with(
'/path/target', '/corpus_dir')
def test_prepare_invalid_dict(self):
"""Test prepare with an invalid dict path."""
with open('/path/target.options', 'w') as f:
f.write('[libfuzzer]\n'
'max_len=31337\n'
'timeout=11\n'
'dict=not_exist.dict\n')
engine_impl = engine.Engine()
options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
six.assertCountEqual(
self, ['-max_len=31337', '-timeout=11', '-rss_limit_mb=2560', '-arg1'],
options.arguments)
def test_prepare_auto_add_dict(self):
"""Test prepare automatically adding dict argument."""
with open('/path/target.options', 'w') as f:
f.write('[libfuzzer]\n' 'max_len=31337\n' 'timeout=11\n')
self.fs.create_file('/path/target.dict')
engine_impl = engine.Engine()
options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
six.assertCountEqual(self, [
'-max_len=31337', '-timeout=11', '-rss_limit_mb=2560', '-arg1',
'-dict=/path/target.dict'
], options.arguments)
class FuzzAdditionalProcessingTimeoutTest(unittest.TestCase):
"""fuzz_additional_processing_timeout tests."""
def test_no_mutations(self):
"""Test no mutations."""
engine_impl = engine.Engine()
options = engine.LibFuzzerOptions(
'/corpus',
arguments=[],
strategies=[],
fuzz_corpus_dirs=[],
extra_env={},
use_dataflow_tracing=False,
is_mutations_run=False)
self.assertEqual(2100.0,
engine_impl.fuzz_additional_processing_timeout(options))
def test_mutations(self):
"""Test with mutations."""
engine_impl = engine.Engine()
options = engine.LibFuzzerOptions(
'/corpus',
arguments=[],
strategies=[],
fuzz_corpus_dirs=[],
extra_env={},
use_dataflow_tracing=False,
is_mutations_run=True)
self.assertEqual(2700.0,
engine_impl.fuzz_additional_processing_timeout(options))
class PickStrategiesTest(fake_fs_unittest.TestCase):
"""pick_strategies tests."""
def setUp(self):
test_helpers.patch(self, [
'clusterfuzz._internal.bot.fuzzers.engine_common.is_lpm_fuzz_target',
'random.SystemRandom.randint',
])
self.mock.is_lpm_fuzz_target.return_value = False
test_utils.set_up_pyfakefs(self)
self.fs.create_dir('/path/corpus')
self.fs.create_file('/path/target')
def test_max_length_strategy_with_override(self):
"""Tests max length strategy with override."""
strategy_pool = set_strategy_pool([strategy.RANDOM_MAX_LENGTH_STRATEGY])
strategy_info = libfuzzer.pick_strategies(strategy_pool, '/path/target',
'/path/corpus', ['-max_len=100'])
six.assertCountEqual(self, [], strategy_info.arguments)
def test_max_length_strategy_without_override(self):
"""Tests max length strategy without override."""
self.mock.randint.return_value = 1337
strategy_pool = set_strategy_pool([strategy.RANDOM_MAX_LENGTH_STRATEGY])
strategy_info = libfuzzer.pick_strategies(strategy_pool, '/path/target',
'/path/corpus', [])
six.assertCountEqual(self, ['-max_len=1337'], strategy_info.arguments)
class FuzzTest(fake_fs_unittest.TestCase):
"""Fuzz() tests."""
def setUp(self):
"""Setup for fake filesystem fuzz test."""
test_helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
self.fs.create_dir('/corpus')
self.fs.create_dir('/fuzz-inputs')
self.fs.create_dir('/fake')
self.fs.create_file('/target')
self.fs.add_real_directory(TEST_DIR)
test_helpers.patch(self, [
'clusterfuzz._internal.bot.fuzzers.libFuzzer.engine._is_multistep_merge_supported',
'clusterfuzz._internal.bot.fuzzers.libfuzzer.LibFuzzerRunner.fuzz',
'clusterfuzz._internal.bot.fuzzers.libfuzzer.LibFuzzerRunner.merge',
'os.getpid',
])
os.environ['TEST_TIMEOUT'] = '65'
os.environ['JOB_NAME'] = 'libfuzzer_asan_job'
os.environ['FUZZ_INPUTS_DISK'] = '/fuzz-inputs'
self.mock._is_multistep_merge_supported = True # pylint: disable=protected-access
self.mock.getpid.return_value = 9001
self.maxDiff = None # pylint: disable=invalid-name
def test_fuzz(self):
"""Test fuzz."""
engine_impl = engine.Engine()
options = engine.LibFuzzerOptions('/corpus', [
'-arg=1',
'-timeout=123',
'-dict=blah.dict',
'-max_len=9001',
'-use_value_profile=1',
], [], ['/corpus'], {}, False, False)
with open(os.path.join(TEST_DIR, 'crash.txt')) as f:
fuzz_output = f.read()
def mock_fuzz(*args, **kwargs): # pylint: disable=unused-argument
"""Mock fuzz."""
self.fs.create_file('/fuzz-inputs/temp-9001/new/A')
self.fs.create_file('/fuzz-inputs/temp-9001/new/B')
return new_process.ProcessResult(
command='command',
return_code=0,
output=fuzz_output,
time_executed=2.0,
timed_out=False)
# Record the merge calls manually as the mock module duplicates the second
# call and overwrites the first call arguments.
mock_merge_calls = []
def mock_merge(*args, **kwargs): # pylint: disable=unused-argument
"""Mock merge."""
mock_merge_calls.append(self.mock.merge.mock_calls[-1])
self.assertTrue(len(mock_merge_calls) <= 2)
merge_output_file = 'merge_step_%d.txt' % len(mock_merge_calls)
with open(os.path.join(TEST_DIR, merge_output_file)) as f:
merge_output = f.read()
self.fs.create_file('/fuzz-inputs/temp-9001/merge-corpus/A')
return new_process.ProcessResult(
command='merge-command',
return_code=0,
output=merge_output,
time_executed=2.0,
timed_out=False)
self.mock.fuzz.side_effect = mock_fuzz
self.mock.merge.side_effect = mock_merge
result = engine_impl.fuzz('/target', options, '/fake', 3600)
self.assertEqual(1, len(result.crashes))
self.assertEqual(fuzz_output, result.logs)
crash = result.crashes[0]
self.assertEqual('/fake/crash-1e15825e6f0b2240a5af75d84214adda1b6b5340',
crash.input_path)
self.assertEqual(fuzz_output, crash.stacktrace)
six.assertCountEqual(self, ['-arg=1', '-timeout=60'], crash.reproduce_args)
self.assertEqual(2, crash.crash_time)
self.mock.fuzz.assert_called_with(
mock.ANY, ['/fuzz-inputs/temp-9001/new', '/corpus'],
additional_args=[
'-arg=1',
'-timeout=123',
'-dict=blah.dict',
'-max_len=9001',
'-use_value_profile=1',
],
artifact_prefix='/fake',
extra_env={},
fuzz_timeout=3600)
self.assertEqual(2, len(mock_merge_calls))
# Main things to test are:
# 1) The new corpus directory is used in the second call only.
# 2) the merge control file is explicitly specified for both calls.
mock_merge_calls[0].assert_called_with(
mock.ANY, [
'/fuzz-inputs/temp-9001/merge-corpus',
'/corpus',
],
additional_args=[
'-arg=1',
'-timeout=123',
'-merge_control_file=/fuzz-inputs/temp-9001/merge-workdir/MCF',
],
artifact_prefix=None,
merge_timeout=1800.0,
tmp_dir='/fuzz-inputs/temp-9001/merge-workdir')
mock_merge_calls[1].assert_called_with(
mock.ANY, [
'/fuzz-inputs/temp-9001/merge-corpus',
'/corpus',
'/fuzz-inputs/temp-9001/new',
],
additional_args=[
'-arg=1',
'-timeout=123',
'-merge_control_file=/fuzz-inputs/temp-9001/merge-workdir/MCF',
],
artifact_prefix=None,
merge_timeout=1800.0,
tmp_dir='/fuzz-inputs/temp-9001/merge-workdir')
self.assertDictEqual({
'actual_duration': 2,
'average_exec_per_sec': 21,
'bad_instrumentation': 0,
'corpus_crash_count': 0,
'corpus_size': 0,
'crash_count': 1,
'dict_used': 1,
'edge_coverage': 411,
'edges_total': 398467,
'expected_duration': 3600,
'feature_coverage': 1873,
'fuzzing_time_percent': 0.05555555555555555,
'initial_edge_coverage': 410,
'initial_feature_coverage': 1869,
'leak_count': 0,
'log_lines_from_engine': 2,
'log_lines_ignored': 67,
'log_lines_unwanted': 0,
'manual_dict_size': 0,
'max_len': 9001,
'merge_edge_coverage': 0,
'new_edges': 1,
'new_features': 4,
'new_units_added': 1,
'new_units_generated': 0,
'number_of_executed_units': 1249,
'oom_count': 0,
'peak_rss_mb': 1197,
'recommended_dict_size': 0,
'slow_unit_count': 0,
'slow_units_count': 0,
'slowest_unit_time_sec': 0,
'startup_crash_count': 0,
'strategy_corpus_mutations_ml_rnn': 0,
'strategy_corpus_mutations_radamsa': 0,
'strategy_corpus_subset': 0,
'strategy_dataflow_tracing': 0,
'strategy_fork': 0,
'strategy_mutator_plugin': 0,
'strategy_mutator_plugin_radamsa': 0,
'strategy_peach_grammar_mutation': '',
'strategy_random_max_len': 0,
'strategy_recommended_dict': 0,
'strategy_selection_method': 'default',
'strategy_value_profile': 0,
'timeout_count': 0,
'timeout_limit': 123,
}, result.stats)
def set_strategy_pool(strategies=None):
"""Helper method to create instances of strategy pools
for patching use."""
strategy_pool = strategy_selection.StrategyPool()
if strategies is not None:
for strategy_tuple in strategies:
strategy_pool.add_strategy(strategy_tuple)
return strategy_pool
def mock_random_choice(seq):
"""Always returns first element from the sequence."""
# We could try to mock a particular |seq| to be a list with a single element,
# but it does not work well, as random_choice returns a 'mock.mock.MagicMock'
# object that behaves differently from the actual type of |seq[0]|.
return seq[0]
def clear_temp_dir():
"""Clear temp directory."""
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
os.mkdir(TEMP_DIR)
def setup_testcase_and_corpus(testcase, corpus):
"""Setup testcase and corpus."""
clear_temp_dir()
copied_testcase_path = os.path.join(TEMP_DIR, testcase)
shutil.copy(os.path.join(DATA_DIR, testcase), copied_testcase_path)
copied_corpus_path = os.path.join(TEMP_DIR, corpus)
src_corpus_path = os.path.join(DATA_DIR, corpus)
if os.path.exists(src_corpus_path):
shutil.copytree(src_corpus_path, copied_corpus_path)
else:
os.mkdir(copied_corpus_path)
return copied_testcase_path, copied_corpus_path
def mock_get_directory_file_count(dir_path):
"""Mocked version, always return 1 for new testcases directory."""
if dir_path == os.path.join(fuzzer_utils.get_temp_dir(), 'new'):
return 1
return _get_directory_file_count_orig(dir_path)
class BaseIntegrationTest(unittest.TestCase):
"""Base integration tests."""
def setUp(self):
self.maxDiff = None # pylint: disable=invalid-name
test_helpers.patch_environ(self)
os.environ['BUILD_DIR'] = DATA_DIR
os.environ['FAIL_RETRIES'] = '1'
os.environ['FUZZ_INPUTS_DISK'] = TEMP_DIR
os.environ['FUZZ_TEST_TIMEOUT'] = '4800'
os.environ['TEST_TIMEOUT'] = '65'
os.environ['JOB_NAME'] = 'libfuzzer_asan'
os.environ['INPUT_DIR'] = TEMP_DIR
os.environ['CACHE_DIR'] = TEMP_DIR
test_helpers.patch(self, [
'clusterfuzz._internal.bot.fuzzers.dictionary_manager.DictionaryManager.'
'update_recommended_dictionary',
'clusterfuzz._internal.bot.fuzzers.engine_common.get_merge_timeout',
'clusterfuzz._internal.bot.fuzzers.engine_common.random_choice',
'clusterfuzz._internal.bot.fuzzers.mutator_plugin._download_mutator_plugin_archive',
'clusterfuzz._internal.bot.fuzzers.mutator_plugin._get_mutator_plugins_from_bucket',
'clusterfuzz._internal.bot.fuzzers.strategy_selection.'
'generate_weighted_strategy_pool',
'clusterfuzz._internal.bot.fuzzers.libfuzzer.get_dictionary_analysis_timeout',
'clusterfuzz._internal.bot.fuzzers.libfuzzer.get_fuzz_timeout',
'os.getpid',
'clusterfuzz._internal.system.minijail.MinijailChroot._mknod',
])
self.mock.getpid.return_value = 1337
self.mock._get_mutator_plugins_from_bucket.return_value = [] # pylint: disable=protected-access
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool()
self.mock.get_dictionary_analysis_timeout.return_value = 5
self.mock.get_merge_timeout.return_value = 10
self.mock.random_choice.side_effect = mock_random_choice
@test_utils.integration
class IntegrationTests(BaseIntegrationTest):
  """Base libFuzzer integration tests."""
def setUp(self):
BaseIntegrationTest.setUp(self)
self.crash_dir = TEMP_DIR
def compare_arguments(self, target_path, arguments, corpora_or_testcase,
actual):
"""Compare expected arguments."""
self.assertListEqual(actual,
[target_path] + arguments + corpora_or_testcase)
def assert_has_stats(self, stats):
"""Asserts that libFuzzer stats are in output."""
self.assertIn('number_of_executed_units', stats)
self.assertIn('average_exec_per_sec', stats)
self.assertIn('new_units_added', stats)
self.assertIn('slowest_unit_time_sec', stats)
self.assertIn('peak_rss_mb', stats)
def test_single_testcase_crash(self):
"""Tests libfuzzer with a crashing testcase."""
testcase_path, _ = setup_testcase_and_corpus('crash', 'empty_corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
result = engine_impl.reproduce(target_path, testcase_path,
['-timeout=60', '-rss_limit_mb=2560'], 65)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer'),
['-timeout=60', '-rss_limit_mb=2560', '-runs=100'], [testcase_path],
result.command)
self.assertIn(
'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000',
result.output)
@test_utils.slow
def test_fuzz_no_crash(self):
"""Tests fuzzing (no crash)."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.VALUE_PROFILE_STRATEGY])
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5.0)
self.assert_has_stats(results.stats)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer'), [
'-max_len=256', '-timeout=25', '-rss_limit_mb=2560',
'-use_value_profile=1', '-dict=' + dict_path,
'-artifact_prefix=' + TEMP_DIR + '/', '-max_total_time=5',
'-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'corpus')
], results.command)
self.assertEqual(0, len(results.crashes))
# New items should've been added to the corpus.
self.assertNotEqual(0, len(os.listdir(corpus_path)))
# The incremental stats are not zero as the two step merge was used.
self.assertNotEqual(0, results.stats['new_edges'])
self.assertNotEqual(0, results.stats['new_features'])
@test_utils.slow
def test_fuzz_no_crash_with_old_libfuzzer(self):
"""Tests fuzzing (no crash) with an old version of libFuzzer."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.VALUE_PROFILE_STRATEGY])
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer_old')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assert_has_stats(results.stats)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer_old'), [
'-max_len=256', '-timeout=25', '-rss_limit_mb=2560',
'-use_value_profile=1', '-dict=' + dict_path,
'-artifact_prefix=' + TEMP_DIR + '/', '-max_total_time=5',
'-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'corpus')
], results.command)
self.assertEqual(0, len(results.crashes))
# New items should've been added to the corpus.
self.assertNotEqual(0, len(os.listdir(corpus_path)))
# The incremental stats are zero as the single step merge was used.
self.assertEqual(0, results.stats['new_edges'])
self.assertEqual(0, results.stats['new_features'])
def test_fuzz_crash(self):
"""Tests fuzzing (crash)."""
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'always_crash_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assert_has_stats(results.stats)
self.compare_arguments(
os.path.join(DATA_DIR, 'always_crash_fuzzer'), [
'-max_len=100', '-timeout=25', '-rss_limit_mb=2560',
'-artifact_prefix=' + TEMP_DIR + '/', '-max_total_time=5',
'-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'corpus')
], results.command)
self.assertEqual(1, len(results.crashes))
self.assertTrue(os.path.exists(results.crashes[0].input_path))
self.assertEqual(TEMP_DIR, os.path.dirname(results.crashes[0].input_path))
self.assertEqual(results.logs, results.crashes[0].stacktrace)
self.assertListEqual([
'-rss_limit_mb=2560',
'-timeout=60',
], results.crashes[0].reproduce_args)
self.assertIn('Test unit written to {0}/crash-'.format(self.crash_dir),
results.logs)
self.assertIn(
'ERROR: AddressSanitizer: SEGV on unknown address '
'0x000000000000', results.logs)
def test_fuzz_from_subset(self):
"""Tests fuzzing from corpus subset."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.CORPUS_SUBSET_STRATEGY])
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer'), [
'-max_len=256', '-timeout=25', '-rss_limit_mb=2560',
'-dict=' + dict_path, '-artifact_prefix=' + TEMP_DIR + '/',
'-max_total_time=5', '-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'temp-1337/subset')
], results.command)
self.assert_has_stats(results.stats)
def test_minimize(self):
"""Tests minimize."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
minimize_output_path = os.path.join(TEMP_DIR, 'minimized_testcase')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.minimize_testcase(target_path, [], testcase_path,
minimize_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(minimize_output_path))
with open(minimize_output_path) as f:
result = f.read()
self.assertEqual('A', result)
def test_cleanse(self):
"""Tests cleanse."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
cleanse_output_path = os.path.join(TEMP_DIR, 'cleansed_testcase')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.cleanse(target_path, [], testcase_path,
cleanse_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(cleanse_output_path))
with open(cleanse_output_path) as f:
result = f.read()
self.assertFalse(all(c == 'A' for c in result))
def test_analyze_dict(self):
"""Tests recommended dictionary analysis."""
test_helpers.patch(self, [
'clusterfuzz._internal.bot.fuzzers.dictionary_manager.DictionaryManager.'
'parse_recommended_dictionary_from_log_lines',
])
self.mock.parse_recommended_dictionary_from_log_lines.return_value = set([
'"USELESS_0"',
'"APPLE"',
'"USELESS_1"',
'"GINGER"',
'"USELESS_2"',
'"BEET"',
'"USELESS_3"',
])
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'analyze_dict_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
expected_recommended_dictionary = set([
'"APPLE"',
'"GINGER"',
'"BEET"',
])
self.assertIn(expected_recommended_dictionary,
self.mock.update_recommended_dictionary.call_args[0])
def test_fuzz_with_mutator_plugin(self):
"""Tests fuzzing with a mutator plugin."""
os.environ['MUTATOR_PLUGINS_DIR'] = os.path.join(TEMP_DIR,
'mutator-plugins')
# TODO(metzman): Remove the old binary and switch the test to the new one.
fuzz_target_name = 'test_fuzzer_old'
plugin_archive_name = (
'custom_mutator_plugin-libfuzzer_asan-test_fuzzer_old.zip')
# Call before setting up the plugin since this call will erase the directory
# the plugin is written to.
_, corpus_path = setup_testcase_and_corpus('empty', 'empty_corpus')
plugin_archive_path = os.path.join(DATA_DIR, plugin_archive_name)
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.MUTATOR_PLUGIN_STRATEGY])
self.mock._get_mutator_plugins_from_bucket.return_value = [ # pylint: disable=protected-access
plugin_archive_name
]
self.mock._download_mutator_plugin_archive.return_value = ( # pylint: disable=protected-access
plugin_archive_path)
custom_mutator_print_string = 'CUSTOM MUTATOR\n'
try:
target_path = engine_common.find_fuzzer_path(DATA_DIR, fuzz_target_name)
engine_impl = engine.Engine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
finally:
shutil.rmtree(os.environ['MUTATOR_PLUGINS_DIR'])
# custom_mutator_print_string gets printed before the custom mutator mutates
# a test case. Assert that the count is greater than 1 to ensure that the
# function didn't crash on its first execution (after printing).
self.assertGreater(results.logs.count(custom_mutator_print_string), 1)
def test_merge_reductions(self):
"""Tests that reduced testcases are merged back into the original corpus
without deleting the larger version."""
_, corpus_path = setup_testcase_and_corpus('empty', 'empty_corpus')
fuzz_target_name = 'analyze_dict_fuzzer'
test_helpers.patch(self, [
'clusterfuzz._internal.bot.fuzzers.libFuzzer.engine.Engine.'
'_create_merge_corpus_dir',
'clusterfuzz._internal.system.shell.get_directory_file_count',
])
self.mock.get_directory_file_count.side_effect = (
mock_get_directory_file_count)
minimal_unit_contents = 'APPLE'
minimal_unit_hash = '569bea285d70dda2218f89ef5454ea69fb5111ef'
nonminimal_unit_contents = 'APPLEO'
nonminimal_unit_hash = '07aef0e305db0779f3b52ab4dad975a1b737c461'
def mocked_create_merge_directory(_):
"""A mocked version of create_merge_directory that adds some interesting
files to the merge corpus and initial corpus."""
merge_directory_path = libfuzzer.create_corpus_directory('merge-corpus')
# Write the minimal unit to the new corpus directory.
new_corpus_directory_path = libfuzzer.create_corpus_directory('new')
minimal_unit_path = os.path.join(new_corpus_directory_path,
minimal_unit_hash)
with open(minimal_unit_path, 'w+') as file_handle:
file_handle.write(minimal_unit_contents)
# Write the nonminimal unit to the corpus directory.
nonminimal_unit_path = os.path.join(corpus_path, nonminimal_unit_hash)
with open(nonminimal_unit_path, 'w+') as file_handle:
file_handle.write(nonminimal_unit_contents)
return merge_directory_path
# pylint: disable=protected-access
self.mock._create_merge_corpus_dir.side_effect = (
mocked_create_merge_directory)
target_path = engine_common.find_fuzzer_path(DATA_DIR, fuzz_target_name)
engine_impl = engine.Engine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.arguments.append('-runs=10')
engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
# Verify that both the newly found minimal testcase and the nonminimal
# testcase are in the corpus.
self.assertIn(minimal_unit_hash, os.listdir(corpus_path))
self.assertIn(nonminimal_unit_hash, os.listdir(corpus_path))
def test_exit_failure_logged(self):
"""Test that we log when libFuzzer's exit code indicates it ran into an
error."""
test_helpers.patch(self, [
'clusterfuzz._internal.metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertIn(engine.ENGINE_ERROR_MESSAGE, args[0])
self.mock.log_error.side_effect = mocked_log_error
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'exit_fuzzer')
engine_impl = engine.Engine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.extra_env['EXIT_FUZZER_CODE'] = '1'
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assertEqual(1, self.mock.log_error.call_count)
self.assertEqual(1, len(results.crashes))
self.assertEqual(TEMP_DIR, os.path.dirname(results.crashes[0].input_path))
self.assertEqual(0, os.path.getsize(results.crashes[0].input_path))
@parameterized.parameterized.expand(['77', '27'])
def test_exit_target_bug_not_logged(self, exit_code):
"""Test that we don't log when exit code indicates bug found in target."""
test_helpers.patch(self, [
'clusterfuzz._internal.metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertNotIn(engine.ENGINE_ERROR_MESSAGE, args[0])
self.mock.log_error.side_effect = mocked_log_error
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'exit_fuzzer')
engine_impl = engine.Engine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.extra_env['EXIT_FUZZER_CODE'] = exit_code
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assertEqual(1, len(results.crashes))
self.assertEqual(TEMP_DIR, os.path.dirname(results.crashes[0].input_path))
self.assertEqual(0, os.path.getsize(results.crashes[0].input_path))
def test_fuzz_invalid_dict(self):
"""Tests fuzzing with an invalid dictionary (ParseDictionaryFile crash)."""
test_helpers.patch(self, [
'clusterfuzz._internal.metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertIn('Dictionary parsing failed (target=test_fuzzer, line=2).',
args[0])
self.mock.log_error.side_effect = mocked_log_error
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
invalid_dict_path = os.path.join(DATA_DIR, 'invalid.dict')
options.arguments.append('-dict=' + invalid_dict_path)
engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
@test_utils.integration
class UnshareIntegrationTests(IntegrationTests):
"""Unshare runner integration tests."""
def setUp(self):
super().setUp()
os.environ['USE_UNSHARE'] = 'True'
def compare_arguments(self, target_path, arguments, corpora_or_testcase,
actual):
"""Compare expected arguments."""
self.assertListEqual(actual, [
os.path.join(
environment.get_value('ROOT_DIR'), 'resources', 'platform', 'linux',
'unshare'), '-c', '-n', target_path
] + arguments + corpora_or_testcase)
@test_utils.integration
class MinijailIntegrationTests(IntegrationTests):
"""Minijail integration tests."""
def setUp(self):
IntegrationTests.setUp(self)
os.environ['USE_MINIJAIL'] = 'True'
self.crash_dir = '/temp'
def compare_arguments(self, target_path, arguments, corpora_or_testcase,
actual):
"""Overridden compare_arguments."""
def _to_chroot_path(path):
"""Convert to chroot path."""
return '/' + os.path.basename(path.rstrip('/'))
for i, argument in enumerate(arguments):
if not argument.startswith(constants.ARTIFACT_PREFIX_FLAG):
continue
arguments[i] = constants.ARTIFACT_PREFIX_FLAG + _to_chroot_path(
argument[len(constants.ARTIFACT_PREFIX_FLAG):]) + '/'
expected_arguments = [target_path] + arguments + [
_to_chroot_path(item) for item in corpora_or_testcase
]
# Ignore minijail arguments
self.assertListEqual(expected_arguments, actual[-len(expected_arguments):])
def test_exit_failure_logged(self):
"""Exit failure is not logged in minijail."""
@parameterized.parameterized.expand(['1', '77', '27'])
def test_exit_target_bug_not_logged(self, exit_code):
"""Test that we don't log when exit code indicates bug found in target."""
test_helpers.patch(self, [
'clusterfuzz._internal.metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertNotIn(engine.ENGINE_ERROR_MESSAGE, args[0])
self.mock.log_error.side_effect = mocked_log_error
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'exit_fuzzer')
engine_impl = engine.Engine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.extra_env['EXIT_FUZZER_CODE'] = exit_code
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assertEqual(1, len(results.crashes))
self.assertEqual(TEMP_DIR, os.path.dirname(results.crashes[0].input_path))
self.assertEqual(0, os.path.getsize(results.crashes[0].input_path))
@test_utils.integration
@test_utils.with_cloud_emulators('datastore')
class IntegrationTestsFuchsia(BaseIntegrationTest):
"""libFuzzer tests (Fuchsia)."""
def setUp(self):
BaseIntegrationTest.setUp(self)
self.temp_dir = tempfile.mkdtemp()
builds_dir = os.path.join(self.temp_dir, 'builds')
os.mkdir(builds_dir)
urls_dir = os.path.join(self.temp_dir, 'urls')
os.mkdir(urls_dir)
environment.set_value('BUILDS_DIR', builds_dir)
environment.set_value('BUILD_URLS_DIR', urls_dir)
environment.set_value('QUEUE_OVERRIDE', 'FUCHSIA')
environment.set_value('OS_OVERRIDE', 'FUCHSIA')
environment.set_value(
'RELEASE_BUILD_BUCKET_PATH',
'gs://clusterfuchsia-builds-test/libfuzzer/'
'fuchsia-([0-9]+).zip')
environment.set_value('UNPACK_ALL_FUZZ_TARGETS_AND_FILES', True)
test_helpers.patch(self, [
'clusterfuzz._internal.system.shell.clear_temp_directory',
])
def tearDown(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia test until build size reduced.')
def test_fuzzer_can_boot_and_run_with_corpus(self):
"""Tests running a single round of fuzzing on a Fuchsia target, using
a toy fuzzer that should crash very quickly.
Additionally, tests that pushing a corpus to the target works & produces
an expanded corpus."""
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
environment.set_value('FUZZ_TARGET', 'example-fuzzers/crash_fuzzer')
build_manager.setup_build()
_, corpus_path = setup_testcase_and_corpus('aaaa', 'fuchsia_corpus')
num_files_original = len(os.listdir(corpus_path))
engine_impl = engine.Engine()
options = engine_impl.prepare(corpus_path, 'example-fuzzers/crash_fuzzer',
DATA_DIR)
results = engine_impl.fuzz('example-fuzzers/crash_fuzzer', options,
TEMP_DIR, 20)
# If we don't get a crash, something went wrong.
self.assertIn('Test unit written to', results.logs)
# Check that the command was invoked with a corpus argument.
self.assertIn('data/corpus/new', results.command)
# Check that new units were added to the corpus.
num_files_new = len(os.listdir(os.path.join(TEMP_DIR, 'temp-1337/new')))
self.assertGreater(num_files_new, num_files_original)
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia tests until build size reduced.')
def test_fuzzer_can_boot_and_run_reproducer(self):
"""Tests running a testcase that should cause a fast, predictable crash."""
environment.set_value('FUZZ_TARGET', 'example-fuzzers/overflow_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
build_manager.setup_build()
testcase_path, _ = setup_testcase_and_corpus('fuchsia_crash',
'empty_corpus')
engine_impl = engine.Engine()
result = engine_impl.reproduce('example-fuzzers/overflow_fuzzer',
testcase_path,
['-timeout=25', '-rss_limit_mb=2560'], 30)
self.assertIn('ERROR: AddressSanitizer: heap-buffer-overflow on address',
result.output)
self.assertIn('Running: data/fuchsia_crash', result.output)
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia tests until build size reduced.')
def test_qemu_logs_returned_on_error(self):
"""Test running against a qemu that has died"""
test_helpers.patch(self, ['clusterfuzz._internal.metrics.logs.log_warn'])
# Pass-through logs just so we can see what's going on (but moving from
# log_warn to plain log to avoid creating a loop)
self.mock.log_warn.side_effect = logs.log
environment.set_value('FUZZ_TARGET', 'example-fuzzers/crash_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
build_manager.setup_build()
testcase_path, _ = setup_testcase_and_corpus('fuchsia_crash',
'empty_corpus')
engine_impl = engine.Engine()
# Check that it's up properly
results = engine_impl.reproduce('example-fuzzers/overflow_fuzzer',
testcase_path,
['-timeout=25', '-rss_limit_mb=2560'], 30)
self.assertEqual(0, results.return_code)
# Force termination
process_handler.terminate_processes_matching_names('qemu-system-x86_64')
# Try to fuzz against the dead qemu to trigger log dump (and automatic
# recovery behavior, when undercoat is disabled)
try:
engine_impl.reproduce('example-fuzzers/overflow_fuzzer', testcase_path,
['-timeout=25', '-rss_limit_mb=2560'], 30)
except:
# With undercoat, this is expected to dump logs but ultimately fail to run
if not environment.get_value('FUCHSIA_USE_UNDERCOAT'):
raise
# Check the logs for syslog presence
self.assertIn('{{{reset}}}', self.mock.log_warn.call_args[0][0])
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia tests until build size reduced.')
def test_minimize_testcase(self):
"""Tests running a testcase that should be able to minimize."""
environment.set_value('FUZZ_TARGET', 'example-fuzzers/crash_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
build_manager.setup_build()
testcase_path, _ = setup_testcase_and_corpus('fuchsia_overlong_crash',
'empty_corpus')
minimize_output_path = os.path.join(TEMP_DIR, 'output')
engine_impl = engine.Engine()
result = engine_impl.minimize_testcase('example-fuzzers/crash_fuzzer',
['-runs=1000000'], testcase_path,
minimize_output_path, 30)
with open(minimize_output_path) as f:
result = f.read()
self.assertEqual('HI!', result)
@test_utils.integration
@test_utils.with_cloud_emulators('datastore')
class IntegrationTestsAndroid(BaseIntegrationTest, android_helpers.AndroidTest):
"""libFuzzer tests (Android)."""
def setUp(self):
android_helpers.AndroidTest.setUp(self)
BaseIntegrationTest.setUp(self)
if android.settings.get_sanitizer_tool_name() != 'hwasan':
raise Exception('Device is not set up with HWASan.')
environment.set_value('BUILD_DIR', ANDROID_DATA_DIR)
environment.set_value('JOB_NAME', 'libfuzzer_hwasan_android_device')
environment.reset_current_memory_tool_options()
self.crash_dir = TEMP_DIR
self.adb_path = android.adb.get_adb_path()
self.hwasan_options = 'HWASAN_OPTIONS="%s"' % quote(
environment.get_value('HWASAN_OPTIONS'))
def device_path(self, local_path):
"""Return device path for a local path."""
return os.path.join(
android.constants.DEVICE_FUZZING_DIR,
os.path.relpath(local_path, environment.get_root_directory()))
def assert_has_stats(self, stats):
"""Asserts that libFuzzer stats are in output."""
self.assertIn('number_of_executed_units', stats)
self.assertIn('average_exec_per_sec', stats)
self.assertIn('new_units_added', stats)
self.assertIn('slowest_unit_time_sec', stats)
self.assertIn('peak_rss_mb', stats)
def test_single_testcase_crash(self):
"""Tests libfuzzer with a crashing testcase."""
testcase_path, _ = setup_testcase_and_corpus('crash', 'empty_corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'test_fuzzer')
result = engine_impl.reproduce(target_path, testcase_path,
['-timeout=60', '-rss_limit_mb=2560'], 65)
self.assertEqual([
self.adb_path, 'shell', self.hwasan_options,
self.device_path(target_path), '-timeout=60', '-rss_limit_mb=2560',
'-runs=100',
self.device_path(testcase_path)
], result.command)
self.assertIn(
'ERROR: HWAddressSanitizer: SEGV on unknown address 0x000000000000',
result.output)
@test_utils.slow
def test_fuzz_no_crash(self):
"""Tests fuzzing (no crash)."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.VALUE_PROFILE_STRATEGY])
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, ANDROID_DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assert_has_stats(results.stats)
self.assertEqual([
self.adb_path,
'shell',
self.hwasan_options,
self.device_path(target_path),
'-max_len=256',
'-timeout=25',
'-rss_limit_mb=2560',
'-use_value_profile=1',
'-dict=' + self.device_path(dict_path),
'-artifact_prefix=' + self.device_path(TEMP_DIR) + '/',
'-max_total_time=5',
'-print_final_stats=1',
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/new')),
self.device_path(os.path.join(TEMP_DIR, 'corpus')),
], results.command)
self.assertTrue(android.adb.file_exists(self.device_path(dict_path)))
self.assertEqual(0, len(results.crashes))
# New items should've been added to the corpus.
self.assertNotEqual(0, len(os.listdir(corpus_path)))
self.assertNotIn('HWAddressSanitizer:', results.logs)
self.assertIn('Logcat:', results.logs)
def test_fuzz_crash(self):
"""Tests fuzzing (crash)."""
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'always_crash_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, ANDROID_DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assert_has_stats(results.stats)
self.assertEqual([
self.adb_path,
'shell',
self.hwasan_options,
self.device_path(target_path),
'-max_len=100',
'-timeout=25',
'-rss_limit_mb=2560',
'-artifact_prefix=' + self.device_path(TEMP_DIR) + '/',
'-max_total_time=5',
'-print_final_stats=1',
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/new')),
self.device_path(os.path.join(TEMP_DIR, 'corpus')),
], results.command)
self.assertEqual(1, len(results.crashes))
self.assertTrue(os.path.exists(results.crashes[0].input_path))
self.assertEqual(TEMP_DIR, os.path.dirname(results.crashes[0].input_path))
self.assertEqual(results.logs, results.crashes[0].stacktrace)
self.assertListEqual([
'-rss_limit_mb=2560',
'-timeout=60',
], results.crashes[0].reproduce_args)
self.assertIn(
'Test unit written to {0}/crash-'.format(
self.device_path(self.crash_dir)), results.logs)
self.assertIn(
'ERROR: HWAddressSanitizer: SEGV on unknown address '
'0x000000000000', results.logs)
self.assertNotIn('Logcat:', results.logs)
def test_fuzz_from_subset(self):
"""Tests fuzzing from corpus subset."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.CORPUS_SUBSET_STRATEGY])
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, ANDROID_DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 5)
self.assertEqual([
self.adb_path,
'shell',
self.hwasan_options,
self.device_path(target_path),
'-max_len=256',
'-timeout=25',
'-rss_limit_mb=2560',
'-dict=' + self.device_path(dict_path),
'-artifact_prefix=' + self.device_path(TEMP_DIR) + '/',
'-max_total_time=5',
'-print_final_stats=1',
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/new')),
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/subset')),
], results.command)
self.assertTrue(android.adb.file_exists(self.device_path(dict_path)))
self.assert_has_stats(results.stats)
def test_minimize(self):
"""Tests minimize."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
minimize_output_path = os.path.join(TEMP_DIR, 'minimized_testcase')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.minimize_testcase(target_path, [], testcase_path,
minimize_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(minimize_output_path))
with open(minimize_output_path) as f:
result = f.read()
self.assertEqual('A', result)
def test_cleanse(self):
"""Tests cleanse."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
cleanse_output_path = os.path.join(TEMP_DIR, 'cleansed_testcase')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.cleanse(target_path, [], testcase_path,
cleanse_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(cleanse_output_path))
with open(cleanse_output_path) as f:
result = f.read()
self.assertFalse(all(c == 'A' for c in result))
def test_analyze_dict(self):
"""Tests recommended dictionary analysis."""
test_helpers.patch(self, [
'clusterfuzz._internal.bot.fuzzers.dictionary_manager.DictionaryManager.'
'parse_recommended_dictionary_from_log_lines',
])
self.mock.parse_recommended_dictionary_from_log_lines.return_value = set([
'"USELESS_0"',
'"APPLE"',
'"USELESS_1"',
'"GINGER"',
'"USELESS_2"',
'"BEET"',
'"USELESS_3"',
])
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.Engine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'analyze_dict_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
engine_impl.fuzz(target_path, options, TEMP_DIR, 5.0)
expected_recommended_dictionary = set([
'"APPLE"',
'"GINGER"',
'"BEET"',
])
self.assertIn(expected_recommended_dictionary,
self.mock.update_recommended_dictionary.call_args[0])
|
utils/common_utils.py | PrinceSJ/EigenDamage | 127 | 11067047 | import os
import time
import json
import logging
import torch
from pprint import pprint
from easydict import EasyDict as edict
def get_logger(name, logpath, filepath, package_files=[],
displaying=True, saving=True):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
log_path = logpath + name + time.strftime("-%Y%m%d-%H%M%S")
makedirs(log_path)
if saving:
info_file_handler = logging.FileHandler(log_path)
info_file_handler.setLevel(logging.INFO)
logger.addHandler(info_file_handler)
logger.info(filepath)
with open(filepath, 'r') as f:
logger.info(f.read())
for f in package_files:
logger.info(f)
with open(f, 'r') as package_f:
logger.info(package_f.read())
if displaying:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)
return logger
def makedirs(filename):
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
def str_to_list(src, delimiter, converter):
"""Conver a string to list.
"""
src_split = src.split(delimiter)
res = [converter(_) for _ in src_split]
return res
def get_config_from_json(json_file):
"""
Get the config from a json file
    :param json_file: path to the json config file
    :return: config (EasyDict namespace) and the raw config dictionary
"""
# parse the configurations from the config json file provided
with open(json_file, 'r') as config_file:
config_dict = json.load(config_file)
config = edict(config_dict)
return config, config_dict
def process_config(json_file):
"""Process a json file into a config file.
Where we can access the value using .xxx
Note: we will need to create a similar directory as the config file.
"""
config, _ = get_config_from_json(json_file)
paths = json_file.split('/')[1:-1]
summary_dir = ["./runs/pruning"] + paths + [config.exp_name, "summary/"]
ckpt_dir = ["./runs/pruning"] + paths + [config.exp_name, "checkpoint/"]
config.summary_dir = os.path.join(*summary_dir)
config.checkpoint_dir = os.path.join(*ckpt_dir)
return config
def try_contiguous(x):
if not x.is_contiguous():
x = x.contiguous()
return x
def try_cuda(x):
if torch.cuda.is_available():
x = x.cuda()
return x
def tensor_to_list(tensor):
if len(tensor.shape) == 1:
return [tensor[_].item() for _ in range(tensor.shape[0])]
else:
return [tensor_to_list(tensor[_]) for _ in range(tensor.shape[0])]
# =====================================================
# For learning rate schedule
# =====================================================
class StairCaseLRScheduler(object):
def __init__(self, start_at, interval, decay_rate):
self.start_at = start_at
self.interval = interval
self.decay_rate = decay_rate
def __call__(self, optimizer, iteration):
start_at = self.start_at
interval = self.interval
decay_rate = self.decay_rate
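        # Multiply the learning rate of every parameter group by decay_rate once
        # every `interval` iterations after start_at has been reached.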
if (start_at >= 0) \
and (iteration >= start_at) \
and (iteration + 1) % interval == 0:
for param_group in optimizer.param_groups:
param_group['lr'] *= decay_rate
print('[%d]Decay lr to %f' % (iteration, param_group['lr']))
@staticmethod
def get_lr(optimizer):
for param_group in optimizer.param_groups:
lr = param_group['lr']
return lr
class PresetLRScheduler(object):
"""Using a manually designed learning rate schedule rules.
"""
def __init__(self, decay_schedule):
# decay_schedule is a dictionary
# which is for specifying iteration -> lr
self.decay_schedule = decay_schedule
print('=> Using a preset learning rate schedule:')
pprint(decay_schedule)
self.for_once = True
def __call__(self, optimizer, iteration):
for param_group in optimizer.param_groups:
lr = self.decay_schedule.get(iteration, param_group['lr'])
param_group['lr'] = lr
@staticmethod
def get_lr(optimizer):
for param_group in optimizer.param_groups:
lr = param_group['lr']
return lr
# =======================================================
# For math computation
# =======================================================
def prod(l):
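    # Return the product of all entries when given a list; a scalar input is returned unchanged.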
val = 1
if isinstance(l, list):
for v in l:
val *= v
else:
val = val * l
return val |
chainer/functions/activation/elu.py | zaltoprofen/chainer | 3,705 | 11067063 | import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class ELU(function_node.FunctionNode):
"""Exponential Linear Unit."""
def __init__(self, alpha=1.0):
self.alpha = float(alpha)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward_cpu(self, inputs):
if self.alpha < 0:
self.retain_inputs((0,))
x, = inputs
y = x.copy()
negzero_indices = y <= 0
y[negzero_indices] = self.alpha * numpy.expm1(y[negzero_indices])
self.retain_outputs((0,))
return y,
def forward_gpu(self, inputs):
if self.alpha < 0:
self.retain_inputs((0,))
x, = inputs
y = cuda.elementwise(
'T x, T alpha', 'T y',
'y = x > 0 ? x : (T)(alpha * expm1(x))',
'elu_fwd')(x, self.alpha)
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
y, = self.get_retained_outputs()
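        # For a negative alpha, the sign of the output no longer identifies the
        # negative branch, so the retained input is used as the condition instead of y.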
if self.alpha < 0:
cond, = self.get_retained_inputs()
else:
cond = y
gy, = grad_outputs
return ELUGrad(self.alpha, cond.array).apply((y,))[0] * gy,
class ELUGrad(function_node.FunctionNode):
"""Exponential Linear Unit gradient function."""
def __init__(self, alpha, cond):
self.alpha = alpha
self.cond = cond
def forward_cpu(self, inputs):
y, = inputs
gx = utils.force_array(y + y.dtype.type(self.alpha))
gx[self.cond > 0] = 1
return gx,
def forward_gpu(self, inputs):
y, = inputs
gx = cuda.elementwise(
'T y, T alpha, T cond', 'T gx',
'gx = cond > 0 ? (T)1 : (T)(y + alpha)',
'elu_bwd')(y, self.alpha, self.cond)
return gx,
def backward(self, indexes, grad_outputs):
ggx, = grad_outputs
gy2 = ggx * (self.cond <= 0)
return gy2,
def elu(x, alpha=1.0):
"""Exponential Linear Unit function.
For a parameter :math:`\\alpha`, it is expressed as
.. math::
f(x) = \\left \\{ \\begin{array}{ll}
x & {\\rm if}~ x \\ge 0 \\\\
\\alpha (\\exp(x) - 1) & {\\rm if}~ x < 0,
\\end{array} \\right.
See: https://arxiv.org/abs/1511.07289
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
alpha (float): Parameter :math:`\\alpha`. Default is 1.0.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3]], np.float32)
>>> x
array([[-1., 0.],
[ 2., -3.]], dtype=float32)
>>> y = F.elu(x, alpha=1.)
>>> y.array
array([[-0.63212055, 0. ],
[ 2. , -0.95021296]], dtype=float32)
"""
return ELU(alpha=alpha).apply((x,))[0]
|
sfaira/data/dataloaders/loaders/d10_1038_s41586_019_1654_9/human_brain_2019_10x3v2sequencing_kanton_001.py | johnmous/sfaira | 110 | 11067078 | import anndata
import os
import scipy.io
import zipfile
import pandas
def load(data_dir, **kwargs):
cell_line_dict = {
'409b2': '409B2',
'H9': 'WA09',
'Wibj2': 'HPSI0214i-wibj_2',
'Sc102a1': 'SC102A-1',
'Kucg2': 'HPSI0214i-kucg_2',
'Hoik1': 'HPSI0314i-hoik_1',
'Sojd3': 'HPSI0314i-sojd_3',
}
fn = [
os.path.join(data_dir, "E-MTAB-7552.processed.3.zip"),
os.path.join(data_dir, "E-MTAB-7552.processed.1.zip"),
os.path.join(data_dir, "E-MTAB-7552.processed.7.zip")
]
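    # The three E-MTAB-7552 archives hold the count matrix, the gene annotation and
    # the per-cell metadata, which are combined into a single AnnData object below.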
with zipfile.ZipFile(fn[0]) as archive:
x = scipy.io.mmread(archive.open('human_cell_counts_GRCh38.mtx')).T.tocsr()
with zipfile.ZipFile(fn[1]) as archive:
var = pandas.read_csv(archive.open('genes_GRCh38.txt'), sep="\t", index_col=1, names=['ensembl', 'genetype'])
with zipfile.ZipFile(fn[2]) as archive:
obs = pandas.read_csv(archive.open('metadata_human_cells.tsv'), sep="\t", index_col=0)
adata = anndata.AnnData(X=x, var=var, obs=obs)
adata.obs["Line"] = [cell_line_dict[x] for x in adata.obs["Line"]]
return adata
|
runner/pose/open_pose_test.py | qrsforever/torchcv | 171 | 11067079 | <reponame>qrsforever/torchcv
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME> (<EMAIL>)
# Class Definition for Pose Estimator.
import math
import os
import cv2
import numpy as np
import torch
from scipy.ndimage.filters import gaussian_filter
from data.pose.data_loader import DataLoader
from lib.runner.blob_helper import BlobHelper
from lib.runner.runner_helper import RunnerHelper
from model.pose.model_manager import ModelManager
from lib.tools.helper.image_helper import ImageHelper
from lib.tools.helper.json_helper import JsonHelper
from lib.tools.util.logger import Logger as Log
from lib.tools.parser.pose_parser import PoseParser
from lib.tools.vis.pose_visualizer import PoseVisualizer
class OpenPoseTest(object):
def __init__(self, configer):
self.configer = configer
self.blob_helper = BlobHelper(configer)
self.pose_visualizer = PoseVisualizer(configer)
self.pose_parser = PoseParser(configer)
self.pose_model_manager = ModelManager(configer)
self.pose_data_loader = DataLoader(configer)
self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
self.pose_net = None
self._init_model()
def _init_model(self):
self.pose_net = self.pose_model_manager.get_pose_model()
self.pose_net = RunnerHelper.load_net(self, self.pose_net)
self.pose_net.eval()
def _get_blob(self, ori_image, scale=None):
assert scale is not None
image = self.blob_helper.make_input(image=ori_image, scale=scale)
b, c, h, w = image.size()
border_hw = [h, w]
if self.configer.exists('test', 'fit_stride'):
stride = self.configer.get('test', 'fit_stride')
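            # Zero-pad the right and bottom edges so that height and width become
            # multiples of the network stride.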
pad_w = 0 if (w % stride == 0) else stride - (w % stride) # right
pad_h = 0 if (h % stride == 0) else stride - (h % stride) # down
expand_image = torch.zeros((b, c, h + pad_h, w + pad_w)).to(image.device)
expand_image[:, :, 0:h, 0:w] = image
image = expand_image
return image, border_hw
def __test_img(self, image_path, json_path, raw_path, vis_path):
Log.info('Image Path: {}'.format(image_path))
ori_image = ImageHelper.read_image(image_path,
tool=self.configer.get('data', 'image_tool'),
mode=self.configer.get('data', 'input_mode'))
ori_width, ori_height = ImageHelper.get_size(ori_image)
ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
for scale in self.configer.get('test', 'scale_search')]
stride = self.configer.get('network', 'stride')
for i, scale in enumerate(multiplier):
image, border_hw = self._get_blob(ori_image, scale=scale)
with torch.no_grad():
paf_out_list, heatmap_out_list = self.pose_net(image)
paf_out = paf_out_list[-1]
heatmap_out = heatmap_out_list[-1]
# extract outputs, resize, and remove padding
heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
(ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
(ori_width, ori_height), interpolation=cv2.INTER_CUBIC)
heatmap_avg = heatmap_avg + heatmap / len(multiplier)
paf_avg = paf_avg + paf / len(multiplier)
all_peaks = self.__extract_heatmap_info(heatmap_avg)
special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)
image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
ImageHelper.save(image_canvas, vis_path)
ImageHelper.save(ori_img_bgr, raw_path)
Log.info('Json Save Path: {}'.format(json_path))
JsonHelper.save_file(json_dict, json_path)
def __get_info_tree(self, image_raw, subset, candidate):
json_dict = dict()
height, width, _ = image_raw.shape
json_dict['image_height'] = height
json_dict['image_width'] = width
object_list = list()
for n in range(len(subset)):
if subset[n][-1] < self.configer.get('res', 'num_threshold'):
continue
if subset[n][-2] / subset[n][-1] < self.configer.get('res', 'avg_threshold'):
continue
object_dict = dict()
object_dict['kpts'] = np.zeros((self.configer.get('data', 'num_kpts'), 3)).tolist()
for j in range(self.configer.get('data', 'num_kpts')):
index = subset[n][j]
if index == -1:
object_dict['kpts'][j][0] = -1
object_dict['kpts'][j][1] = -1
object_dict['kpts'][j][2] = -1
else:
object_dict['kpts'][j][0] = candidate[index.astype(int)][0]
object_dict['kpts'][j][1] = candidate[index.astype(int)][1]
object_dict['kpts'][j][2] = 1
object_dict['score'] = subset[n][-2]
object_list.append(object_dict)
json_dict['objects'] = object_list
return json_dict
def __extract_heatmap_info(self, heatmap_avg):
all_peaks = []
peak_counter = 0
for part in range(self.configer.get('data', 'num_kpts')):
map_ori = heatmap_avg[:, :, part]
map_gau = gaussian_filter(map_ori, sigma=3)
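            # Detect local maxima by comparing the smoothed map against its four
            # shifted neighbours and thresholding with the part confidence threshold.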
map_left = np.zeros(map_gau.shape)
map_left[1:, :] = map_gau[:-1, :]
map_right = np.zeros(map_gau.shape)
map_right[:-1, :] = map_gau[1:, :]
map_up = np.zeros(map_gau.shape)
map_up[:, 1:] = map_gau[:, :-1]
map_down = np.zeros(map_gau.shape)
map_down[:, :-1] = map_gau[:, 1:]
peaks_binary = np.logical_and.reduce(
(map_gau >= map_left, map_gau >= map_right, map_gau >= map_up,
map_gau >= map_down, map_gau > self.configer.get('res', 'part_threshold')))
peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]) # note reverse
peaks = list(peaks)
'''
del_flag = [0 for i in range(len(peaks))]
for i in range(len(peaks)):
if del_flag[i] == 0:
for j in range(i+1, len(peaks)):
if max(abs(peaks[i][0] - peaks[j][0]), abs(peaks[i][1] - peaks[j][1])) <= 6:
del_flag[j] = 1
new_peaks = list()
for i in range(len(peaks)):
if del_flag[i] == 0:
new_peaks.append(peaks[i])
peaks = new_peaks
'''
peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
ids = range(peak_counter, peak_counter + len(peaks))
peaks_with_score_and_id = [peaks_with_score[i] + (ids[i],) for i in range(len(ids))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
return all_peaks
def __extract_paf_info(self, img_raw, paf_avg, all_peaks):
connection_all = []
special_k = []
mid_num = self.configer.get('res', 'mid_point_num')
for k in range(len(self.configer.get('details', 'limb_seq'))):
score_mid = paf_avg[:, :, [k*2, k*2+1]]
candA = all_peaks[self.configer.get('details', 'limb_seq')[k][0] - 1]
candB = all_peaks[self.configer.get('details', 'limb_seq')[k][1] - 1]
nA = len(candA)
nB = len(candB)
if nA != 0 and nB != 0:
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + 1e-9
vec = np.divide(vec, norm)
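                        # Sample the part affinity field along the segment between the
                        # two candidate keypoints and score the connection by the average
                        # dot product with the unit limb vector.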
startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
np.linspace(candA[i][1], candB[j][1], num=mid_num))
startend = list(startend)
vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0]
for I in range(len(startend))])
vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1]
for I in range(len(startend))])
score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_with_dist_prior = sum(score_midpts) / len(score_midpts)
score_with_dist_prior += min(0.5 * img_raw.shape[0] / norm - 1, 0)
num_positive = len(np.nonzero(score_midpts > self.configer.get('res', 'limb_threshold'))[0])
criterion1 = num_positive > int(self.configer.get('res', 'limb_pos_ratio') * len(score_midpts))
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append(
[i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
connection = np.zeros((0, 5))
for c in range(len(connection_candidate)):
i, j, s = connection_candidate[c][0:3]
if i not in connection[:, 3] and j not in connection[:, 4]:
connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
if len(connection) >= min(nA, nB):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
return special_k, connection_all
def __get_subsets(self, connection_all, special_k, all_peaks):
# last number in each row is the total parts number of that person
# the second last number in each row is the score of the overall configuration
subset = -1 * np.ones((0, self.configer.get('data', 'num_kpts') + 2))
candidate = np.array([item for sublist in all_peaks for item in sublist])
for k in self.configer.get('details', 'mini_tree'):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(self.configer.get('details', 'limb_seq')[k]) - 1
for i in range(len(connection_all[k])): # = 1:size(temp,1)
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)): # 1:size(subset,1):
if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if (subset[j][indexB] != partBs[i]):
subset[j][indexB] = partBs[i]
subset[j][-1] += 1
subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
elif found == 2: # if found 2 and disjoint, merge them
j1, j2 = subset_idx
membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: # merge
subset[j1][:-2] += (subset[j2][:-2] + 1)
subset[j1][-2:] += subset[j2][-2:]
subset[j1][-2] += connection_all[k][i][2]
subset = np.delete(subset, j2, 0)
else: # as like found == 1
subset[j1][indexB] = partBs[i]
subset[j1][-1] += 1
subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
# if find no partA in the subset, create a new subset
elif not found:
row = -1 * np.ones(self.configer.get('data', 'num_kpts') + 2)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = 2
row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
subset = np.vstack([subset, row])
return subset, candidate
    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.pose_data_loader.get_trainloader()):
inputs = data_dict['img']
maskmap = data_dict['maskmap']
heatmap = data_dict['heatmap']
vecmap = data_dict['vecmap']
for j in range(inputs.size(0)):
count = count + 1
if count > 10:
exit(1)
Log.info(heatmap.size())
image_bgr = self.blob_helper.tensor2bgr(inputs[j])
mask_canvas = maskmap[j].repeat(3, 1, 1).numpy().transpose(1, 2, 0)
mask_canvas = (mask_canvas * 255).astype(np.uint8)
mask_canvas = cv2.resize(mask_canvas, (0, 0), fx=self.configer.get('network', 'stride'),
fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)
image_bgr = cv2.addWeighted(image_bgr, 0.6, mask_canvas, 0.4, 0)
heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
heatmap_avg = cv2.resize(heatmap_avg, (0, 0), fx=self.configer.get('network', 'stride'),
fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)
paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
paf_avg = cv2.resize(paf_avg, (0, 0), fx=self.configer.get('network', 'stride'),
fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)
self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
self.pose_visualizer.vis_paf(paf_avg, image_bgr)
all_peaks = self.__extract_heatmap_info(heatmap_avg)
special_k, connection_all = self.__extract_paf_info(image_bgr, paf_avg, all_peaks)
subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
json_dict = self.__get_info_tree(image_bgr, subset, candidate)
image_canvas = self.pose_parser.draw_points(image_bgr, json_dict)
image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
cv2.imshow('main', image_canvas)
cv2.waitKey()
|
tests/processtest.py | NAnnamalai/gramex | 130 | 11067096 | <gh_stars>100-1000
import os
import sys
def main():
'Print cwd and sys.argv'
sys.stderr.write('stderr starts\n')
sys.stdout.write('stdout starts\n')
sys.stdout.write('os.getcwd: %s\n' % os.path.abspath(os.getcwd()))
for index, arg in enumerate(sys.argv):
sys.stdout.write('sys.argv[%d]: %s\n' % (index, arg))
sys.stderr.write('stderr ends\n')
sys.stdout.write('stdout ends\n')
if __name__ == '__main__':
main()
|
palladium/tests/test_server.py | vishalbelsare/palladium | 528 | 11067105 | <filename>palladium/tests/test_server.py
from datetime import datetime
import io
import json
import math
from threading import Thread
from time import sleep
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
import dateutil.parser
from flask import request
import numpy as np
import pytest
import ujson
from werkzeug.exceptions import BadRequest
def dec(func):
def inner(*args, **kwargs):
"""dec"""
return func(*args, **kwargs) + '_decorated'
return inner
class TestPredictService:
@pytest.fixture
def PredictService(self):
from palladium.server import PredictService
return PredictService
def test_functional(self, PredictService, flask_app):
model = Mock()
model.threshold = 0.3
model.size = 10
# needed as hasattr would evaluate to True otherwise
del model.threshold2
del model.size2
model.predict.return_value = np.array(['class1'])
service = PredictService(
mapping=[
('sepal length', 'float'),
('sepal width', 'float'),
('petal length', 'float'),
('petal width', 'float'),
('color', 'str'),
('age', 'int'),
('active', 'bool'),
('austrian', 'bool'),
],
params=[
('threshold', 'float'), # default will be overwritten
('size', 'int'), # not provided, default value kept
('threshold2', 'float'), # will be used, no default value
('size2', 'int'), # not provided, no default value
])
with flask_app.test_request_context():
with patch('palladium.util.get_config') as get_config:
get_config.return_value = {
'service_metadata': {
'service_name': 'iris',
'service_version': '0.1'
}
}
request = Mock(
args=dict([
('sepal length', '5.2'),
('sepal width', '3.5'),
('petal length', '1.5'),
('petal width', '0.2'),
('color', 'purple'),
('age', '1'),
('active', 'True'),
('austrian', 'False'),
('threshold', '0.7'),
('threshold2', '0.8'),
]),
method='GET',
)
resp = service(model, request)
assert (model.predict.call_args[0][0] ==
np.array([[5.2, 3.5, 1.5, 0.2,
'purple', 1, True, False]], dtype='object')).all()
assert model.predict.call_args[1]['threshold'] == 0.7
assert model.predict.call_args[1]['size'] == 10
assert model.predict.call_args[1]['threshold2'] == 0.8
assert 'size2' not in model.predict.call_args[1]
assert resp.status_code == 200
expected_resp_data = {
"metadata": {
"status": "OK",
"error_code": 0,
"service_name": "iris",
"service_version": "0.1",
},
"result": "class1"
}
assert json.loads(resp.get_data(as_text=True)) == expected_resp_data
def test_bad_request(self, PredictService, flask_app):
predict_service = PredictService(mapping=[])
model = Mock()
request = Mock()
with patch.object(predict_service, 'do') as psd:
with flask_app.test_request_context():
bad_request = BadRequest()
bad_request.args = ('daniel',)
psd.side_effect = bad_request
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 400
assert resp_data == {
"metadata": {
"status": "ERROR",
"error_code": -1,
"error_message": "BadRequest: ('daniel',)"
}
}
def test_predict_error(self, PredictService, flask_app):
from palladium.interfaces import PredictError
predict_service = PredictService(mapping=[])
model = Mock()
request = Mock()
with patch.object(predict_service, 'do') as psd:
with flask_app.test_request_context():
psd.side_effect = PredictError("mymessage", 123)
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 500
assert resp_data == {
"metadata": {
"status": "ERROR",
"error_code": 123,
"error_message": "mymessage",
}
}
def test_generic_error(self, PredictService, flask_app):
predict_service = PredictService(mapping=[])
model = Mock()
request = Mock()
with patch.object(predict_service, 'do') as psd:
with flask_app.test_request_context():
psd.side_effect = KeyError("model")
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 500
assert resp_data == {
"metadata": {
"status": "ERROR",
"error_code": -1,
"error_message": "KeyError: 'model'",
}
}
def test_sample_from_data(self, PredictService):
predict_service = PredictService(
mapping=[
('name', 'str'),
('sepal width', 'int'),
],
)
model = Mock()
request_args = {'name': 'myflower', 'sepal width': 3}
sample = predict_service.sample_from_data(model, request_args)
assert sample[0] == 'myflower'
assert sample[1] == 3
def test_unwrap_sample_get(self, PredictService, flask_app):
predict_service = PredictService(
mapping=[('text', 'str')],
unwrap_sample=True,
)
model = Mock()
model.predict.return_value = np.array([1])
with flask_app.test_request_context():
request = Mock(
args=dict([
('text', 'Hi this is text'),
]),
method='GET',
)
resp = predict_service(model, request)
assert model.predict.call_args[0][0].ndim == 1
model.predict.assert_called_with(np.array(['Hi this is text']))
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 200
assert resp_data == {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": 1,
}
def test_unwrap_sample_post(self, PredictService, flask_app):
predict_service = PredictService(
mapping=[('text', 'str')],
unwrap_sample=True,
)
model = Mock()
model.predict.return_value = np.array([1, 2])
with flask_app.test_request_context():
request = Mock(
json=[
{'text': 'First piece of text'},
{'text': 'Second piece of text'},
],
method='POST',
mimetype='application/json',
)
resp = predict_service(model, request)
assert model.predict.call_args[0][0].ndim == 1
assert (
model.predict.call_args[0] ==
np.array(['First piece of text', 'Second piece of text'])
).all()
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 200
assert resp_data == {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": [1, 2],
}
def test_probas(self, PredictService, flask_app):
model = Mock()
model.predict_proba.return_value = np.array([[0.1, 0.5, math.pi]])
predict_service = PredictService(mapping=[], predict_proba=True)
with flask_app.test_request_context():
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 200
assert resp_data == {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": [0.1, 0.5, math.pi],
}
def test_post_request(self, PredictService, flask_app):
model = Mock()
model.predict.return_value = np.array([3, 2])
service = PredictService(
mapping=[
('sepal length', 'float'),
('sepal width', 'float'),
('petal length', 'float'),
('petal width', 'float'),
],
params=[
('threshold', 'float'),
],
)
request = Mock(
json=[
{
'sepal length': '5.2',
'sepal width': '3.5',
'petal length': '1.5',
'petal width': '0.2',
},
{
'sepal length': '5.7',
'sepal width': '4.0',
'petal length': '2.0',
'petal width': '0.7',
},
],
args=dict(threshold=1.0),
method='POST',
mimetype='application/json',
)
with flask_app.test_request_context():
resp = service(model, request)
assert (model.predict.call_args[0][0] == np.array([
[5.2, 3.5, 1.5, 0.2],
[5.7, 4.0, 2.0, 0.7],
],
dtype='object',
)).all()
assert model.predict.call_args[1]['threshold'] == 1.0
assert resp.status_code == 200
expected_resp_data = {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": [3, 2],
}
assert json.loads(resp.get_data(as_text=True)) == expected_resp_data
@pytest.yield_fixture
def mock_predict(self, monkeypatch):
def mock_predict(model_persister, predict_service):
return predict_service.entry_point
monkeypatch.setattr(
'palladium.server.predict', mock_predict)
yield mock_predict
def test_entry_point_not_set(
self, config, flask_app_test, flask_client, mock_predict):
from palladium.config import process_config
config['model_persister'] = Mock()
config['predict_service'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
}
# set default predict_decorators
config['predict_decorators'] = ['palladium.tests.test_server.dec']
with flask_app_test.test_request_context():
process_config(config)
resp1 = flask_client.get(
'predict?param=bla')
# decorated result: default predict_decorators is defined
assert resp1.get_data().decode('utf-8') == '/predict_decorated'
def test_entry_point_multiple(
self, config, flask_app_test, flask_client, mock_predict):
from palladium.config import process_config
config['model_persister'] = Mock()
config['my_predict_service'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict1',
}
config['my_predict_service2'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict2',
'decorator_list_name': 'predict_decorators2',
}
# only second predict service uses decorator list
config['predict_decorators2'] = ['palladium.tests.test_server.dec']
with flask_app_test.test_request_context():
process_config(config)
resp1 = flask_client.get(
'predict1?param=bla')
# no decorated result: default predict_decorators is not defined
assert resp1.get_data().decode('utf-8') == '/predict1'
resp2 = flask_client.get(
'predict2?param=bla')
# decorated result using predict_decorators2
assert resp2.get_data().decode('utf-8') == '/predict2_decorated'
def test_entry_point_multiple_conflict(
self, config, flask_app_test, flask_client, mock_predict):
from palladium.config import process_config
config['model_persister'] = Mock()
config['my_predict_service'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict1', # <--
}
config['my_predict_service2'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict1', # conflict: entry point exists
}
with pytest.raises(AssertionError):
with flask_app_test.test_request_context():
process_config(config)
class TestPredict:
@pytest.fixture
def predict(self):
from palladium.server import predict
return predict
def test_predict_functional(self, config, flask_app_test, flask_client):
from palladium.server import make_ujson_response
model_persister = config['model_persister'] = Mock()
predict_service = config['predict_service'] = Mock()
with flask_app_test.test_request_context():
from palladium.server import create_predict_function
create_predict_function(
'/predict', predict_service, 'predict_decorators', config)
predict_service.return_value = make_ujson_response(
'a', status_code=200)
model = model_persister.read()
resp = flask_client.get(
'predict?sepal length=1.0&sepal width=1.1&'
'petal length=0.777&petal width=5')
resp_data = json.loads(resp.get_data(as_text=True))
assert resp_data == 'a'
assert resp.status_code == 200
with flask_app_test.test_request_context():
predict_service.assert_called_with(model, request)
def test_unknown_exception(self, predict, flask_app):
model_persister = Mock()
model_persister.read.side_effect = KeyError('model')
with flask_app.test_request_context():
resp = predict(model_persister, Mock())
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 500
assert resp_data == {
"status": "ERROR",
"error_code": -1,
"error_message": "KeyError: 'model'",
}
class TestAliveFunctional:
def test_empty_process_state(self, config, flask_client):
config['service_metadata'] = {'hello': 'world'}
resp = flask_client.get('alive')
assert resp.status_code == 200
resp_data = json.loads(resp.get_data(as_text=True))
assert sorted(resp_data.keys()) == ['memory_usage',
'memory_usage_vms',
'palladium_version',
'process_metadata',
'service_metadata']
assert resp_data['service_metadata'] == config['service_metadata']
def test_filled_process_state(self, config, process_store, flask_client):
config['alive'] = {'process_store_required': ('model', 'data')}
before = datetime.now()
process_store['model'] = Mock(__metadata__={'hello': 'is it me'})
process_store['data'] = Mock(__metadata__={'bye': 'not you'})
after = datetime.now()
resp = flask_client.get('alive')
assert resp.status_code == 200
resp_data = json.loads(resp.get_data(as_text=True))
model_updated = dateutil.parser.parse(resp_data['model']['updated'])
data_updated = dateutil.parser.parse(resp_data['data']['updated'])
assert before < model_updated < after
assert resp_data['model']['metadata'] == {'hello': 'is it me'}
assert before < data_updated < after
assert resp_data['data']['metadata'] == {'bye': 'not you'}
def test_missing_process_state(self, config, process_store, flask_client):
config['alive'] = {'process_store_required': ('model', 'data')}
process_store['model'] = Mock(__metadata__={'hello': 'is it me'})
resp = flask_client.get('alive')
assert resp.status_code == 503
resp_data = json.loads(resp.get_data(as_text=True))
assert resp_data['model']['metadata'] == {'hello': 'is it me'}
assert resp_data['data'] == 'N/A'
class TestPredictStream:
@pytest.fixture
def PredictStream(self):
from palladium.server import PredictStream
return PredictStream
@pytest.fixture
def stream(self, config, PredictStream):
config['model_persister'] = Mock()
predict_service = config['predict_service'] = Mock()
predict_service.sample_from_data.side_effect = (
lambda model, data: data)
predict_service.params_from_data.side_effect = (
lambda model, data: data)
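        # The mocked predict service passes samples and params through unchanged,
        # so the tests can inspect exactly what the stream parsed from its input.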
return PredictStream()
def test_listen_direct_exit(self, stream):
io_in = io.StringIO()
io_out = io.StringIO()
io_err = io.StringIO()
stream_thread = Thread(
target=stream.listen(io_in, io_out, io_err))
stream_thread.start()
io_in.write('EXIT\n')
stream_thread.join()
io_out.seek(0)
io_err.seek(0)
assert len(io_out.read()) == 0
assert len(io_err.read()) == 0
assert stream.predict_service.predict.call_count == 0
def test_listen(self, stream):
io_in = io.StringIO()
io_out = io.StringIO()
io_err = io.StringIO()
lines = [
'[{"id": 1, "color": "blue", "length": 1.0}]\n',
'[{"id": 1, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
'[{"id": 1, "color": "blue", "length": 1.0}, {"id": 2, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
]
for line in lines:
io_in.write(line)
io_in.write('EXIT\n')
io_in.seek(0)
predict = stream.predict_service.predict
predict.side_effect = (
lambda model, samples, **params:
np.array([{'result': 1}] * len(samples))
)
stream_thread = Thread(
target=stream.listen(io_in, io_out, io_err))
stream_thread.start()
stream_thread.join()
io_out.seek(0)
io_err.seek(0)
assert len(io_err.read()) == 0
assert io_out.read() == (
('[{"result":1}]\n' * 2) + ('[{"result":1},{"result":1}]\n'))
assert predict.call_count == 3
# check if the correct arguments are passed to predict call
assert predict.call_args_list[0][0][1] == np.array([
{'id': 1, 'color': 'blue', 'length': 1.0}])
assert predict.call_args_list[1][0][1] == np.array([
{'id': 1, 'color': '{"a": 1, "b": 2}', 'length': 1.0}])
assert (predict.call_args_list[2][0][1] == np.array([
{'id': 1, 'color': 'blue', 'length': 1.0},
{'id': 2, 'color': '{"a": 1, "b": 2}', 'length': 1.0},
])).all()
# check if string representation of attribute can be converted to json
assert ujson.loads(predict.call_args_list[1][0][1][0]['color']) == {
"a": 1, "b": 2}
def test_predict_error(self, stream):
from palladium.interfaces import PredictError
io_in = io.StringIO()
io_out = io.StringIO()
io_err = io.StringIO()
line = '[{"hey": "1"}]\n'
io_in.write(line)
io_in.write('EXIT\n')
io_in.seek(0)
stream.predict_service.predict.side_effect = PredictError('error')
stream_thread = Thread(
target=stream.listen(io_in, io_out, io_err))
stream_thread.start()
stream_thread.join()
io_out.seek(0)
io_err.seek(0)
assert io_out.read() == '[]\n'
assert io_err.read() == (
"Error while processing input row: {}"
"<class 'palladium.interfaces.PredictError'>: "
"error (-1)\n".format(line))
assert stream.predict_service.predict.call_count == 1
def test_predict_params(self, config, stream):
from palladium.server import PredictService
line = '[{"length": 1.0, "width": 1.0, "turbo": "true"}]'
model = Mock()
model.predict.return_value = np.array([[{'class': 'a'}]])
model.turbo = False
model.magic = False
stream.model = model
mapping = [
('length', 'float'),
('width', 'float'),
]
params = [
('turbo', 'bool'), # will be set by request args
('magic', 'bool'), # default value will be used
]
stream.predict_service = PredictService(
mapping=mapping,
params=params,
)
expected = [{'class': 'a'}]
result = stream.process_line(line)
assert result == expected
assert model.predict.call_count == 1
assert (model.predict.call_args[0][0] == np.array([[1.0, 1.0]])).all()
assert model.predict.call_args[1]['turbo'] is True
assert model.predict.call_args[1]['magic'] is False
class TestList:
@pytest.fixture
def list(self):
from palladium.server import list
return list
def test_it(self, config, process_store, flask_client):
mp = config['model_persister'] = Mock()
mp.list_models.return_value = ['one', 'two']
mp.list_properties.return_value = {'hey': 'there'}
resp = flask_client.get('list')
assert resp.status_code == 200
resp_data = json.loads(resp.get_data(as_text=True))
assert resp_data == {
'models': ['one', 'two'],
'properties': {'hey': 'there'},
}
class TestFitFunctional:
@pytest.fixture
def fit(self):
from palladium.server import fit
return fit
@pytest.fixture
def jobs(self, process_store):
jobs = process_store['process_metadata'].setdefault('jobs', {})
yield jobs
jobs.clear()
def test_it(self, fit, config, jobs, flask_app):
dsl, model, model_persister = Mock(), Mock(), Mock()
del model.cv_results_
X, y = Mock(), Mock()
dsl.return_value = X, y
config['dataset_loader_train'] = dsl
config['model'] = model
config['model_persister'] = model_persister
with flask_app.test_request_context(method='POST'):
resp = fit()
sleep(0.05)
resp_json = json.loads(resp.get_data(as_text=True))
job = jobs[resp_json['job_id']]
assert job['status'] == 'finished'
assert job['info'] == str(model)
@pytest.mark.parametrize('args, args_expected', [
(
{'persist': '1', 'activate': '0', 'evaluate': 't'},
{'persist': True, 'activate': False, 'evaluate': True},
),
(
{'persist_if_better_than': '0.234'},
{'persist_if_better_than': 0.234},
),
])
def test_pass_args(self, fit, flask_app, args, args_expected):
with patch('palladium.server.fit_base') as fit_base:
fit_base.__name__ = 'mock'
with flask_app.test_request_context(method='POST', data=args):
fit()
sleep(0.02)
assert fit_base.call_args == call(**args_expected)
class TestUpdateModelCacheFunctional:
@pytest.fixture
def update_model_cache(self):
from palladium.server import update_model_cache
return update_model_cache
@pytest.fixture
def jobs(self, process_store):
jobs = process_store['process_metadata'].setdefault('jobs', {})
yield jobs
jobs.clear()
def test_success(self, update_model_cache, config, jobs, flask_app):
model_persister = Mock()
config['model_persister'] = model_persister
with flask_app.test_request_context(method='POST'):
resp = update_model_cache()
sleep(0.02)
resp_json = json.loads(resp.get_data(as_text=True))
job = jobs[resp_json['job_id']]
assert job['status'] == 'finished'
assert job['info'] == repr(model_persister.update_cache())
def test_unavailable(self, update_model_cache, config, jobs, flask_app):
model_persister = Mock()
del model_persister.update_cache
config['model_persister'] = model_persister
with flask_app.test_request_context(method='POST'):
resp = update_model_cache()
assert resp.status_code == 503
class TestActivateFunctional:
@pytest.fixture
def activate(self):
from palladium.server import activate
return activate
@pytest.fixture
def activate_base_mock(self, monkeypatch):
func = Mock()
monkeypatch.setattr('palladium.server.activate_base', func)
return func
def test_success(self, activate, activate_base_mock, config, flask_app):
model_persister = Mock(
list_models=lambda: {'be': 'first'},
list_properties=lambda: {'be': 'twice'},
)
config['model_persister'] = model_persister
with flask_app.test_request_context(
method='POST',
data={'model_version': 123},
):
resp = activate()
assert resp.status_code == 200
assert resp.json == {
'models': {'be': 'first'},
'properties': {'be': 'twice'},
}
def test_lookuperror(self, activate, activate_base_mock, flask_app):
activate_base_mock.side_effect = LookupError
with flask_app.test_request_context(
method='POST',
data={'model_version': 123},
):
resp = activate()
assert resp.status_code == 503
def _test_add_url_rule_func():
return b'A OK'
class TestAddUrlRule:
@pytest.fixture
def add_url_rule(self):
from palladium.server import add_url_rule
return add_url_rule
def test_it(self, add_url_rule, flask_client):
add_url_rule(
'/okay',
view_func='palladium.tests.test_server._test_add_url_rule_func',
)
resp = flask_client.get('/okay')
assert resp.data == b'A OK'
|
indy_node/test/anon_creds/test_incorrect_revoc_reg_def.py | Rob-S/indy-node | 627 | 11067122 | <reponame>Rob-S/indy-node<gh_stars>100-1000
import json
import pytest
from indy_common.constants import CRED_DEF_ID, CLAIM_DEF_SCHEMA_REF, CLAIM_DEF_SIGNATURE_TYPE, \
CLAIM_DEF_TAG, VALUE, ISSUANCE_TYPE, REVOKED, PREV_ACCUM, ISSUANCE_BY_DEFAULT
from indy_common.state.domain import make_state_path_for_claim_def
from indy_node.test.anon_creds.conftest import build_revoc_reg_entry_for_given_revoc_reg_def
from plenum.common.exceptions import RequestNackedException
from plenum.test.helper import sdk_sign_request_from_dict, sdk_send_and_check
def test_incorrect_revoc_reg_def(looper,
txnPoolNodeSet,
sdk_wallet_steward,
sdk_pool_handle,
send_claim_def,
build_revoc_def_by_default):
_, author_did = sdk_wallet_steward
claim_def_req = send_claim_def[0]
revoc_reg = build_revoc_def_by_default
revoc_reg['operation'][CRED_DEF_ID] = \
make_state_path_for_claim_def(author_did,
str(claim_def_req['operation'][CLAIM_DEF_SCHEMA_REF]),
claim_def_req['operation'][CLAIM_DEF_SIGNATURE_TYPE],
claim_def_req['operation'][CLAIM_DEF_TAG]
).decode()
# test incorrect ISSUANCE_TYPE
revoc_reg['operation'][VALUE][ISSUANCE_TYPE] = "incorrect_type"
revoc_req = sdk_sign_request_from_dict(looper, sdk_wallet_steward, revoc_reg['operation'])
with pytest.raises(RequestNackedException, match='unknown value'):
sdk_send_and_check([json.dumps(revoc_req)], looper, txnPoolNodeSet, sdk_pool_handle)
# test correct ISSUANCE_TYPE
revoc_reg['operation'][VALUE][ISSUANCE_TYPE] = ISSUANCE_BY_DEFAULT
revoc_req = sdk_sign_request_from_dict(looper, sdk_wallet_steward, revoc_reg['operation'])
sdk_send_and_check([json.dumps(revoc_req)], looper, txnPoolNodeSet, sdk_pool_handle)
# send revoc_reg_entry to check that revoc_reg_def ordered correctly
rev_reg_entry = build_revoc_reg_entry_for_given_revoc_reg_def(revoc_req)
rev_reg_entry[VALUE][REVOKED] = [1, 2, 3, 4, 5]
del rev_reg_entry[VALUE][PREV_ACCUM]
rev_entry_req = sdk_sign_request_from_dict(looper, sdk_wallet_steward, rev_reg_entry)
sdk_send_and_check([json.dumps(rev_entry_req)], looper, txnPoolNodeSet, sdk_pool_handle)
|
helpdesk/migrations/0026_kbitem_attachments.py | AmatorAVG/django-helpdesk-atoria | 789 | 11067123 | <gh_stars>100-1000
# Generated by Django 2.0.5 on 2019-03-07 20:30
from django.db import migrations, models
import django.db.models.deletion
import helpdesk.models
class Migration(migrations.Migration):
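    # Adds a dedicated attachment model for knowledge base items and renames the
    # generic Attachment model to FollowUpAttachment.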
dependencies = [
('helpdesk', '0025_queue_dedicated_time'),
]
operations = [
migrations.CreateModel(
name='KBIAttachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(max_length=1000, upload_to=helpdesk.models.attachment_path, verbose_name='File')),
('filename', models.CharField(max_length=1000, verbose_name='Filename')),
('mime_type', models.CharField(max_length=255, verbose_name='MIME Type')),
('size', models.IntegerField(help_text='Size of this file in bytes', verbose_name='Size')),
('kbitem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='helpdesk.KBItem', verbose_name='Knowledge base item')),
],
options={
'verbose_name': 'Attachment',
'verbose_name_plural': 'Attachments',
'ordering': ('filename',),
'abstract': False,
},
),
migrations.RenameModel(
old_name='Attachment',
new_name='FollowUpAttachment',
),
]
|
atcoder/abc058/b.py | Ashindustry007/competitive-programming | 506 | 11067131 | <gh_stars>100-1000
#!/usr/bin/env python3
# https://abc058.contest.atcoder.jp/tasks/abc058_b
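# Interleave the two strings: characters of o take the odd positions (1-indexed)
# and characters of e the even ones; o may be one character longer than e.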
o = input()
e = input()
x = []
for i in range(len(e)):
x.append(o[i])
x.append(e[i])
if len(o) > len(e): x.append(o[-1])
print(''.join(x))
|
etl/parsers/etw/Microsoft_Windows_WER_SystemErrorReporting.py | IMULMUL/etl-parser | 104 | 11067135 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-WER-SystemErrorReporting
GUID : abce23e7-de45-4366-8631-84fa6c525952
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
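# Each class below binds an (ETW provider GUID, event id, version) triple to a
# construct Struct that describes the layout of the event payload.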
@declare(guid=guid("abce23e7-de45-4366-8631-84fa6c525952"), event_id=1000, version=0)
class Microsoft_Windows_WER_SystemErrorReporting_1000_0(Etw):
pattern = Struct(
"param1" / WString
)
@declare(guid=guid("abce23e7-de45-4366-8631-84fa6c525952"), event_id=1001, version=0)
class Microsoft_Windows_WER_SystemErrorReporting_1001_0(Etw):
pattern = Struct(
"param1" / WString,
"param2" / WString,
"param3" / WString
)
@declare(guid=guid("abce23e7-de45-4366-8631-84fa6c525952"), event_id=1018, version=0)
class Microsoft_Windows_WER_SystemErrorReporting_1018_0(Etw):
pattern = Struct(
"param1" / WString,
"param2" / WString
)
|
test/test_ssr_check.py | centaur679/ShadowSocksShare | 2,341 | 11067142 | <reponame>centaur679/ShadowSocksShare
#!/usr/bin/env python3
from ssshare import *
import threading
from ssshare.ss import crawler, ssr_check
import requests
def test2():
for i in range(30):
data = requests.get('http://laptop.pythonic.life:8080/json').text
print('data', i)
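        # Strip empty SSR-specific fields (obfs/protocol params) from the JSON
        # before testing the server.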
data = data.replace('"obfs": "",', '').replace('"protocol_param": "",', '').replace('"obfs_param": "",', '').replace('"protocol": "",', '')
w = ssr_check.test_socks_server(str_json=data)
        print('>>>>>>> result:', w)
if w is True:
print(data)
elif w == -1:
print(data)
def test3():
data = crawler.main()
for i in data:
print(i['info'])
for j in i['data']:
w = ssr_check.test_socks_server(str_json=j['json'])
            print('>>>>>>> result:', w)
if w is True:
print(j['json'])
elif w == -1:
print(j['json'])
def test4():
data = crawler.main(debug=['no_validate'])
data = ssr_check.validate(data)
for i in data:
print(i['info'])
for j in i['data']:
print(j['status'])
print('----------- Test: child thread ----------')
t = threading.Thread(target=test4)
t.start()
t.join()
|
qt__pyqt__pyside__pyqode/QWebEngine__runJavaScript__sync__click_on_element/main.py | DazEB2/SimplePyScripts | 117 | 11067148 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtCore import QUrl, QEventLoop
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
def run_js_code(page: QWebEnginePage, code: str) -> object:
loop = QEventLoop()
result_value = {
'value': None
}
def _on_callback(result: object):
result_value['value'] = result
loop.quit()
page.runJavaScript(code, _on_callback)
loop.exec()
return result_value['value']
with open('../QWebEngine__append_custom_javascript__jQuery/js/jquery-3.1.1.min.js') as f:
jquery_text = f.read()
jquery_text += "\nvar qt = { 'jQuery': jQuery.noConflict(true) };"
app = QApplication([])
view = QWebEngineView()
view.load(QUrl('https://гибдд.рф/request_main'))
def _on_load_finished(ok: bool):
page = view.page()
print(page.url().toString())
page.runJavaScript(jquery_text)
result = run_js_code(page, "document.title")
print('run_java_script:', result)
# Клик на флажок "С информацией ознакомлен"
run_js_code(page, """qt.jQuery('input[name="agree"]').click();""")
# Клик на кнопку "Подать обращение"
run_js_code(page, """qt.jQuery('button.u-form__sbt').click();""")
print()
view.loadProgress.connect(lambda value: view.setWindowTitle('{} ({}%)'.format(view.url().toString(), value)))
view.loadFinished.connect(_on_load_finished)
mw = QMainWindow()
mw.setCentralWidget(view)
mw.resize(500, 500)
mw.show()
app.exec()
|
blackmamba/lib/future/__init__.py | oz90210/blackmamba | 2,151 | 11067199 | """
future: Easy, safe support for Python 2/3 compatibility
=======================================================
``future`` is the missing compatibility layer between Python 2 and Python
3. It allows you to use a single, clean Python 3.x-compatible codebase to
support both Python 2 and Python 3 with minimal overhead.
It is designed to be used as follows::
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import (
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
followed by predominantly standard, idiomatic Python 3 code that then runs
similarly on Python 2.6/2.7 and Python 3.3+.
The imports have no effect on Python 3. On Python 2, they shadow the
corresponding builtins, which normally have different semantics on Python 3
versus 2, to provide their Python 3 semantics.
Standard library reorganization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``future`` supports the standard library reorganization (PEP 3108) through the
following Py3 interfaces:
>>> # Top-level packages with Py3 names provided on Py2:
>>> import html.parser
>>> import queue
>>> import tkinter.dialog
>>> import xmlrpc.client
>>> # etc.
>>> # Aliases provided for extensions to existing Py2 module names:
>>> from future.standard_library import install_aliases
>>> install_aliases()
>>> from collections import Counter, OrderedDict # backported to Py2.6
>>> from collections import UserDict, UserList, UserString
>>> import urllib.request
>>> from itertools import filterfalse, zip_longest
>>> from subprocess import getoutput, getstatusoutput
Automatic conversion
--------------------
An included script called `futurize
<http://python-future.org/automatic_conversion.html>`_ aids in converting
code (from either Python 2 or Python 3) to code compatible with both
platforms. It is similar to ``python-modernize`` but goes further in
providing Python 3 compatibility through the use of the backported types
and builtin functions in ``future``.
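For example, a conservative first pass over a module typically looks like this
(the module path below is just a placeholder)::
    $ futurize --stage1 mypackage/mymodule.py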
Documentation
-------------
See: http://python-future.org
Credits
-------
:Author: <NAME>
:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
Ltd, Singapore. http://pythoncharmers.com
:Others: See docs/credits.rst or http://python-future.org/credits.html
Licensing
---------
Copyright 2013-2016 Python Charmers Pty Ltd, Australia.
The software is distributed under an MIT licence. See LICENSE.txt.
"""
__title__ = 'future'
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2013-2016 Python Charmers Pty Ltd'
__ver_major__ = 0
__ver_minor__ = 16
__ver_patch__ = 0
__ver_sub__ = ''
__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__,
__ver_patch__, __ver_sub__)
|
examples/using_sprites_pyglet.py | conductiveIT/pymunk-1 | 670 | 11067234 | <filename>examples/using_sprites_pyglet.py
"""This example is a clone of the using_sprites example with the difference
that it uses pyglet instead of pygame to showcase sprite drawing.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import math
import random
from typing import List
import pyglet
import pymunk
from pymunk import Vec2d
window = pyglet.window.Window(width=600, height=600)
fps_display = pyglet.window.FPSDisplay(window)
logo_img = pyglet.resource.image("pymunk_logo_googlecode.png")
logo_img.anchor_x = logo_img.width / 2
logo_img.anchor_y = logo_img.height / 2
logos: List[pymunk.Shape] = []
batch = pyglet.graphics.Batch()
### Physics stuff
space = pymunk.Space()
space.gravity = Vec2d(0.0, -900.0)
### Static line
static_lines = [
pymunk.Segment(space.static_body, (11.0, 280.0), (407.0, 246.0), 0.0),
pymunk.Segment(space.static_body, (407.0, 246.0), (407.0, 343.0), 0.0),
]
for l in static_lines:
l.friction = 0.5
space.add(*static_lines)
@window.event
def on_key_press(symbol, modifiers):
if symbol == pyglet.window.key.P:
pyglet.image.get_buffer_manager().get_color_buffer().save(
"using_sprites_pyglet.png"
)
@window.event
def on_draw():
window.clear()
fps_display.draw()
for line in static_lines:
body = line.body
pv1 = body.position + line.a.rotated(body.angle)
pv2 = body.position + line.b.rotated(body.angle)
pyglet.graphics.draw(
2,
pyglet.gl.GL_LINES,
("v2f", (pv1.x, pv1.y, pv2.x, pv2.y)),
("c3f", (0.8, 0.8, 0.8) * 2),
)
batch.draw()
# debug draw
for logo_sprite in logos:
ps = logo_sprite.shape.get_vertices()
ps = [p.rotated(logo_sprite.body.angle) + logo_sprite.body.position for p in ps]
n = len(ps)
ps = [c for p in ps for c in p]
pyglet.graphics.draw(
n, pyglet.gl.GL_LINE_LOOP, ("v2f", ps), ("c3f", (1, 0, 0) * n)
)
def update(dt):
dt = 1.0 / 60.0 # override dt to keep physics simulation stable
space.step(dt)
for sprite in logos:
# We need to rotate the image 180 degrees because we have y pointing
# up in pymunk coords.
sprite.rotation = math.degrees(-sprite.body.angle) + 180
sprite.position = sprite.body.position.x, sprite.body.position.y
def spawn_logo(dt):
x = random.randint(20, 400)
y = 500
angle = random.random() * math.pi
vs = [(-23, 26), (23, 26), (0, -26)]
mass = 10
moment = pymunk.moment_for_poly(mass, vs)
body = pymunk.Body(mass, moment)
shape = pymunk.Poly(body, vs)
shape.friction = 0.5
body.position = x, y
body.angle = angle
space.add(body, shape)
sprite = pyglet.sprite.Sprite(logo_img, batch=batch)
sprite.shape = shape
sprite.body = body
logos.append(sprite)
pyglet.clock.schedule_interval(update, 1 / 60.0)
pyglet.clock.schedule_once(spawn_logo, 0.1)
pyglet.clock.schedule_interval(spawn_logo, 10 / 6.0)
pyglet.app.run()
|
skrf/media/tests/test_all_construction.py | dxxx9/scikit-rf | 379 | 11067236 | """
This module tests that every media class can construct
all of the general circuit components.
"""
import unittest
from scipy.constants import *
import skrf as rf
from skrf.media import Freespace, CPW, RectangularWaveguide, DistributedCircuit
class MediaTestCase():
"""Base class, contains tests for all media."""
def test_gamma(self):
self.media.gamma
def test_Z0_value(self):
self.media.Z0
def test_match(self):
self.media.match()
def test_load(self):
self.media.load(1)
def test_short(self):
self.media.short()
def test_open(self):
self.media.open()
def test_capacitor(self):
self.media.capacitor(1)
def test_inductor(self):
self.media.inductor(1)
def test_impedance_mismatch(self):
self.media.impedance_mismatch(1, 2)
def test_tee(self):
self.media.tee()
def test_splitter(self):
self.media.splitter(4)
def test_thru(self):
self.media.thru()
def test_line(self):
self.media.line(1)
def test_delay_load(self):
self.media.delay_load(1,2)
def test_delay_short(self):
self.media.delay_short(1)
def test_delay_open(self):
self.media.delay_open(1)
def test_shunt_delay_load(self):
self.media.shunt_delay_load(1,1)
def test_shunt_delay_short(self):
self.media.shunt_delay_short(1)
def test_shunt_delay_open(self):
self.media.shunt_delay_open(1)
def test_shunt_capacitor(self):
self.media.shunt_capacitor(1)
def test_shunt_inductor(self):
self.media.shunt_inductor(1)
class FreespaceTestCase(MediaTestCase, unittest.TestCase):
def setUp(self):
self.frequency = rf.Frequency(75,110,101,'ghz')
self.media = Freespace(self.frequency)
def test_Z0_value(self):
self.assertEqual(round(\
self.media.Z0[0].real), 377)
class CPWTestCase(MediaTestCase, unittest.TestCase):
def setUp(self):
self.frequency = rf.Frequency(75,110,101,'ghz')
self.media = CPW(\
frequency=self.frequency,
w=10e-6,
s=5e-6,
ep_r=11.7,
t=1e-6,
rho=22e-9)
class RectangularWaveguideTestCase(MediaTestCase, unittest.TestCase):
def setUp(self):
self.frequency = rf.Frequency(75,110,101,'ghz')
self.media = RectangularWaveguide(\
frequency=self.frequency,
a=100*mil,
)
class DistributedCircuitTestCase(MediaTestCase, unittest.TestCase):
def setUp(self):
self.frequency = rf.Frequency(75,110,101,'ghz')
self.media = DistributedCircuit(\
frequency=self.frequency,
L=1,C=1,R=0,G=0
)
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTests([\
loader.loadTestsFromTestCase(FreespaceTestCase),
loader.loadTestsFromTestCase(CPWTestCase),
loader.loadTestsFromTestCase(RectangularWaveguideTestCase),
loader.loadTestsFromTestCase(DistributedCircuitTestCase),
])
#suite = unittest.TestLoader().loadTestsFromTestCase(FreespaceTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
|
BitTornado/Types/tests/test_primitives.py | alahoo/BitTornado | 116 | 11067263 | <reponame>alahoo/BitTornado
import unittest
import random
import math
from ..primitives import FixedLengthBytes, SixBytes, TwentyBytes, \
UnsignedInt, UnsignedShort
class FixedLengthTests(unittest.TestCase):
def test_fixedlengthbytes(self):
self.assertRaises(NotImplementedError, FixedLengthBytes)
self.assertRaises(NotImplementedError, FixedLengthBytes, b'')
def test_sixbytes(self):
self.assertRaises(ValueError, SixBytes, b'')
self.assertEqual(SixBytes(), b'\x00' * 6)
self.assertEqual(SixBytes(b'abcdef'), b'abcdef')
def test_twentybytes(self):
self.assertRaises(ValueError, TwentyBytes, b'')
self.assertEqual(TwentyBytes(), b'\x00' * 20)
self.assertEqual(TwentyBytes(b'a' * 20), b'a' * 20)
class UnsignedIntTests(unittest.TestCase):
def test_create(self):
self.assertRaises(OverflowError, UnsignedInt, -1)
for i in range(1, 30):
UnsignedInt('1' * i)
UnsignedInt.from_bytes(b'\x01' * i, 'big')
def test_bytelength(self):
for _ in range(10):
x = UnsignedInt(random.randrange(2**128))
self.assertGreaterEqual(x.byte_length() * 8, x.bit_length())
self.assertLess((x.byte_length() - 1) * 8, x.bit_length())
def test_bytestring(self):
for _ in range(10):
x = UnsignedInt(random.randrange(2**128))
self.assertEqual(len(x.bytestring()), x.byte_length())
self.assertEqual(int.from_bytes(x.bytestring(), 'big'), x)
class BoundedMixin:
def test_create(self):
self.assertRaises(OverflowError, self.cls, -1)
self.assertRaises(OverflowError, self.cls, 2 ** self.cls.bits)
for _ in range(10):
self.cls(random.randrange(2 ** self.cls.bits))
def test_bytelength(self):
for _ in range(10):
x = self.cls(random.randrange(2 ** self.cls.bits))
self.assertEqual(x.byte_length(), int(math.ceil(x.bits / 8.0)))
self.assertLessEqual(x.bit_length(), x.bits)
def test_bytestring(self):
for _ in range(10):
x = self.cls(random.randrange(2 ** self.cls.bits))
self.assertEqual(self.cls.from_bytes(x.bytestring(), 'big'), x)
class UnsignedShortTests(unittest.TestCase, BoundedMixin):
cls = UnsignedShort
class OddBoundedTests(unittest.TestCase, BoundedMixin):
class cls(UnsignedInt):
bits = 7
|
ros/third_party/lib/python2.7/dist-packages/rospkg/manifest.py | numberen/apollo-platform | 742 | 11067269 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for processing 'manifest' files, i.e. manifest.xml and
stack.xml.
"""
import os
import sys
import xml.dom.minidom as dom
from .common import MANIFEST_FILE, PACKAGE_FILE, STACK_FILE
# stack.xml and manifest.xml have the same internal tags right now
REQUIRED = ['license']
ALLOWXHTML = ['description']
OPTIONAL = ['author', 'logo', 'url', 'brief', 'description', 'status',
'notes', 'depend', 'rosdep', 'export', 'review',
'versioncontrol', 'platform', 'version', 'rosbuild2',
'catkin']
VALID = REQUIRED + OPTIONAL
class InvalidManifest(Exception):
pass
def _get_nodes_by_name(n, name):
return [t for t in n.childNodes if t.nodeType == t.ELEMENT_NODE and t.tagName == name]
def _check_optional(name, allowXHTML=False, merge_multiple=False):
"""
Validator for optional elements.
:raise: :exc:`InvalidManifest` If validation fails
"""
def check(n, filename):
n = _get_nodes_by_name(n, name)
if len(n) > 1 and not merge_multiple:
raise InvalidManifest("Invalid manifest file [%s]: must have a single '%s' element"%(filename, name))
if n:
values = []
for child in n:
if allowXHTML:
values.append(''.join([x.toxml() for x in child.childNodes]))
else:
values.append(_get_text(child.childNodes).strip())
return ', '.join(values)
return check
def _check_required(name, allowXHTML=False, merge_multiple=False):
"""
Validator for required elements.
:raise: :exc:`InvalidManifest` If validation fails
"""
def check(n, filename):
n = _get_nodes_by_name(n, name)
if not n:
return ''
if len(n) != 1 and not merge_multiple:
raise InvalidManifest("Invalid manifest file: must have only one '%s' element"%name)
values = []
for child in n:
if allowXHTML:
values.append(''.join([x.toxml() for x in child.childNodes]))
else:
values.append(_get_text(child.childNodes).strip())
return ', '.join(values)
return check
def _check_platform(n, filename):
"""
Validator for manifest platform.
:raise: :exc:`InvalidManifest` If validation fails
"""
platforms = _get_nodes_by_name(n, 'platform')
try:
vals = [(p.attributes['os'].value, p.attributes['version'].value, p.getAttribute('notes')) for p in platforms]
except KeyError as e:
raise InvalidManifest("<platform> tag is missing required '%s' attribute"%str(e))
return [Platform(*v) for v in vals]
def _check_depends(type_, n, filename):
"""
Validator for manifest depends.
:raise: :exc:`InvalidManifest` If validation fails
"""
nodes = _get_nodes_by_name(n, 'depend')
# TDS 20110419: this is a hack.
# rosbuild2 has a <depend thirdparty="depname"/> tag,
# which is confusing this subroutine with
# KeyError: 'package'
# for now, explicitly don't consider thirdparty depends
depends = [e.attributes for e in nodes if 'thirdparty' not in e.attributes.keys()]
try:
depend_names = [d[type_].value for d in depends]
except KeyError:
raise InvalidManifest("Invalid manifest file [%s]: depends is missing '%s' attribute"%(filename, type_))
return [Depend(name, type_) for name in depend_names]
def _check_rosdeps(n, filename):
"""
Validator for stack rosdeps.
:raises: :exc:`InvalidManifest` If validation fails
"""
try:
nodes = _get_nodes_by_name(n, 'rosdep')
rosdeps = [e.attributes for e in nodes]
names = [d['name'].value for d in rosdeps]
return [RosDep(n) for n in names]
except KeyError:
raise InvalidManifest("invalid rosdep tag in [%s]"%(filename))
def _attrs(node):
attrs = {}
for k in node.attributes.keys():
attrs[k] = node.attributes.get(k).value
return attrs
def _check_exports(n, filename):
ret_val = []
for e in _get_nodes_by_name(n, 'export'):
elements = [c for c in e.childNodes if c.nodeType == c.ELEMENT_NODE]
ret_val.extend([Export(t.tagName, _attrs(t), _get_text(t.childNodes)) for t in elements])
return ret_val
def _check(name, merge_multiple=False):
"""
Generic validator for text-based tags.
"""
if name in REQUIRED:
if name in ALLOWXHTML:
return _check_required(name, True, merge_multiple)
return _check_required(name, merge_multiple=merge_multiple)
elif name in OPTIONAL:
if name in ALLOWXHTML:
return _check_optional(name, True, merge_multiple)
return _check_optional(name, merge_multiple=merge_multiple)
class Export(object):
"""
Manifest 'export' tag
"""
def __init__(self, tag, attrs, str):
"""
Create new export instance.
:param tag: name of the XML tag
@type tag: str
:param attrs: dictionary of XML attributes for this export tag
@type attrs: dict
:param str: string value contained by tag, if any
@type str: str
"""
self.tag = tag
self.attrs = attrs
self.str = str
def get(self, attr):
"""
:returns: value of attribute or ``None`` if attribute not set, ``str``
"""
return self.attrs.get(attr, None)
class Platform(object):
"""
Manifest 'platform' tag
"""
__slots__ = ['os', 'version', 'notes']
def __init__(self, os_, version, notes=None):
"""
Create new depend instance.
:param os_: OS name. must be non-empty, ``str``
:param version: OS version. must be non-empty, ``str``
:param notes: (optional) notes about platform support, ``str``
"""
if not os_:
raise ValueError("bad 'os' attribute")
if not version:
raise ValueError("bad 'version' attribute")
self.os = os_
self.version = version
self.notes = notes
def __str__(self):
return "%s %s"%(self.os, self.version)
def __repr__(self):
return "%s %s"%(self.os, self.version)
def __eq__(self, obj):
"""
Override equality test. notes *are* considered in the equality test.
"""
if not isinstance(obj, Platform):
return False
return self.os == obj.os and self.version == obj.version and self.notes == obj.notes
def __hash__(self):
"""
:returns: an integer, which must be the same for two equal instances.
Since __eq__ is defined, Python 3 requires that this class also provide a __hash__ method.
"""
return hash(self.os) ^ hash(self.version) ^ hash(self.notes)
class Depend(object):
"""
Manifest 'depend' tag
"""
__slots__ = ['name', 'type']
def __init__(self, name, type_):
"""
Create new depend instance.
:param name: dependency name (e.g. package/stack). Must be non-empty
@type name: str
:param type_: dependency type, e.g. 'package', 'stack'. Must be non-empty.
@type type_: str
@raise ValueError: if parameters are invalid
"""
if not name:
raise ValueError("bad '%s' attribute"%(type_))
if not type_:
raise ValueError("type_ must be specified")
self.name = name
self.type = type_
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __eq__(self, obj):
if not isinstance(obj, Depend):
return False
return self.name == obj.name and self.type == obj.type
def __hash__(self):
"""
:returns: an integer, which must be the same for two equal instances.
Since __eq__ is defined, Python 3 requires that this class also provide a __hash__ method.
"""
return hash(self.name) ^ hash(self.type)
class RosDep(object):
"""
Manifest 'rosdep' tag
"""
__slots__ = ['name',]
def __init__(self, name):
"""
Create new rosdep instance.
:param name: dependency name. Must be non-empty. ``str``
"""
if not name:
raise ValueError("bad 'name' attribute")
self.name = name
class Manifest(object):
"""
Object representation of a ROS manifest file (``manifest.xml`` and ``stack.xml``)
"""
__slots__ = ['description', 'brief', \
'author', 'license', 'license_url', 'url', \
'depends', 'rosdeps','platforms',\
'exports', 'version',\
'status', 'notes',\
'unknown_tags', 'type', 'filename',\
'is_catkin']
def __init__(self, type_='package', filename=None, is_catkin=False):
"""
:param type: `'package'` or `'stack'`
:param filename: location of manifest file. Necessary if
converting ``${prefix}`` in ``<export>`` values, ``str``.
"""
self.description = self.brief = self.author = \
self.license = self.license_url = \
self.url = self.status = \
self.version = self.notes = ''
self.depends = []
self.rosdeps = []
self.exports = []
self.platforms = []
self.is_catkin = is_catkin
self.type = type_
self.filename = filename
# store unrecognized tags during parsing
self.unknown_tags = []
def get_export(self, tag, attr, convert=True):
"""
:param tag: Name of XML tag to retrieve, ``str``
:param attr: Name of XML attribute to retrieve from tag, ``str``
:param convert: If ``True``, interpret variables (e.g. ``${prefix}``) export values.
:returns: exports that match the specified tag and attribute, e.g. 'python', 'path'. ``[str]``
"""
vals = [e.get(attr) for e in self.exports if e.tag == tag if e.get(attr) is not None]
if convert:
if not self.filename:
raise ValueError("cannot convert export values when filename for Manifest is not set")
prefix = os.path.dirname(self.filename)
vals = [v.replace('${prefix}', prefix) for v in vals]
return vals
def _get_text(nodes):
"""
DOM utility routine for getting contents of text nodes
"""
return "".join([n.data for n in nodes if n.nodeType == n.TEXT_NODE])
_static_rosdep_view = None
def parse_manifest_file(dirpath, manifest_name, rospack=None):
"""
Parse manifest file (package, stack). Type will be inferred from manifest_name.
:param dirpath: directory of manifest file, ``str``
:param manifest_name: ``MANIFEST_FILE`` or ``STACK_FILE``, ``str``
:param rospack: a RosPack instance to identify local packages as ROS packages
:returns: return :class:`Manifest` instance, populated with parsed fields
:raises: :exc:`InvalidManifest`
:raises: :exc:`IOError`
"""
filename = os.path.join(dirpath, manifest_name)
if not os.path.isfile(filename):
# hack for backward compatibility
package_filename = os.path.join(dirpath, PACKAGE_FILE)
if not os.path.isfile(package_filename):
raise IOError("Invalid/non-existent manifest file: %s" % filename)
manifest = Manifest(filename=filename, is_catkin=True)
# extract all information from package.xml
from catkin_pkg.package import parse_package
p = parse_package(package_filename)
# put these into manifest
manifest.description = p.description
manifest.author = ', '.join([('Maintainer: %s' % str(m)) for m in p.maintainers] + [str(a) for a in p.authors])
manifest.license = ', '.join(p.licenses)
if p.urls:
manifest.url = str(p.urls[0])
manifest.version = p.version
for export in p.exports:
manifest.exports.append(Export(export.tagname, export.attributes, export.content))
# split ros and system dependencies (using rosdep)
try:
from rosdep2.rospack import init_rospack_interface, is_ros_package, is_system_dependency, is_view_empty
global _static_rosdep_view
# initialize rosdep view once
if _static_rosdep_view is None:
_static_rosdep_view = init_rospack_interface()
if is_view_empty(_static_rosdep_view):
sys.stderr.write("the rosdep view is empty: call 'sudo rosdep init' and 'rosdep update'\n")
_static_rosdep_view = False
if _static_rosdep_view:
depends = set([])
rosdeps = set([])
for d in (p.buildtool_depends + p.build_depends + p.run_depends + p.test_depends):
if (rospack and d.name in rospack.list()) or is_ros_package(_static_rosdep_view, d.name):
depends.add(d.name)
if is_system_dependency(_static_rosdep_view, d.name):
rosdeps.add(d.name)
for name in depends:
manifest.depends.append(Depend(name, 'package'))
for name in rosdeps:
manifest.rosdeps.append(RosDep(name))
except ImportError:
pass
return manifest
with open(filename, 'r') as f:
return parse_manifest(manifest_name, f.read(), filename)
def parse_manifest(manifest_name, string, filename='string'):
"""
Parse manifest string contents.
:param manifest_name: ``MANIFEST_FILE`` or ``STACK_FILE``, ``str``
:param string: manifest.xml contents, ``str``
:param filename: full file path for debugging, ``str``
:returns: return parsed :class:`Manifest`
"""
if manifest_name == MANIFEST_FILE:
type_ = 'package'
elif manifest_name == STACK_FILE:
type_ = 'stack'
try:
d = dom.parseString(string)
except Exception as e:
raise InvalidManifest("[%s] invalid XML: %s"%(filename, e))
m = Manifest(type_, filename)
p = _get_nodes_by_name(d, type_)
if len(p) != 1:
raise InvalidManifest("manifest [%s] must have a single '%s' element"%(filename, type_))
p = p[0]
m.description = _check('description')(p, filename)
m.brief = ''
try:
tag = _get_nodes_by_name(p, 'description')[0]
m.brief = tag.getAttribute('brief') or ''
except:
# means that 'description' tag is missing
pass
m.depends = _check_depends(type_, p, filename)
m.rosdeps = _check_rosdeps(p, filename)
m.platforms = _check_platform(p, filename)
m.exports = _check_exports(p, filename)
m.license = _check('license')(p, filename)
m.license_url = ''
try:
tag = _get_nodes_by_name(p, 'license')[0]
m.license_url = tag.getAttribute('url') or ''
except:
pass #manifest is missing required 'license' tag
m.status='unreviewed'
try:
tag = _get_nodes_by_name(p, 'review')[0]
m.status = tag.getAttribute('status') or ''
except:
pass #manifest is missing optional 'review status' tag
m.notes = ''
try:
tag = _get_nodes_by_name(p, 'review')[0]
m.notes = tag.getAttribute('notes') or ''
except:
pass #manifest is missing optional 'review notes' tag
m.author = _check('author', True)(p, filename)
m.url = _check('url')(p, filename)
m.version = _check('version')(p, filename)
# do some validation on what we just parsed
if type_ == 'stack':
if m.exports:
raise InvalidManifest("stack manifests are not allowed to have exports")
if m.rosdeps:
raise InvalidManifest("stack manifests are not allowed to have rosdeps")
m.is_catkin = bool(_get_nodes_by_name(p, 'catkin')) or bool(_get_nodes_by_name(p, 'name'))
# store unrecognized tags
m.unknown_tags = [e for e in p.childNodes if e.nodeType == e.ELEMENT_NODE and e.tagName not in VALID]
return m
|
build/android/pylib/utils/reraiser_thread.py | iplo/Chain | 231 | 11067288 | <reponame>iplo/Chain
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Thread and ThreadGroup that reraise exceptions on the main thread."""
import logging
import sys
import threading
import time
import traceback
import watchdog_timer
class TimeoutError(Exception):
"""Module-specific timeout exception."""
pass
def LogThreadStack(thread):
"""Log the stack for the given thread.
Args:
thread: a threading.Thread instance.
"""
stack = sys._current_frames()[thread.ident]
logging.critical('*' * 80)
logging.critical('Stack dump for thread \'%s\'', thread.name)
logging.critical('*' * 80)
for filename, lineno, name, line in traceback.extract_stack(stack):
logging.critical('File: "%s", line %d, in %s', filename, lineno, name)
if line:
logging.critical(' %s', line.strip())
logging.critical('*' * 80)
class ReraiserThread(threading.Thread):
"""Thread class that can reraise exceptions."""
def __init__(self, func, args=[], kwargs={}, name=None):
"""Initialize thread.
Args:
func: callable to call on a new thread.
args: list of positional arguments for callable, defaults to empty.
kwargs: dictionary of keyword arguments for callable, defaults to empty.
name: thread name, defaults to Thread-N.
"""
super(ReraiserThread, self).__init__(name=name)
self.daemon = True
self._func = func
self._args = args
self._kwargs = kwargs
self._exc_info = None
def ReraiseIfException(self):
"""Reraise exception if an exception was raised in the thread."""
if self._exc_info:
raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
#override
def run(self):
"""Overrides Thread.run() to add support for reraising exceptions."""
try:
self._func(*self._args, **self._kwargs)
except:
self._exc_info = sys.exc_info()
raise
class ReraiserThreadGroup(object):
"""A group of ReraiserThread objects."""
def __init__(self, threads=[]):
"""Initialize thread group.
Args:
threads: a list of ReraiserThread objects; defaults to empty.
"""
self._threads = threads
def Add(self, thread):
"""Add a thread to the group.
Args:
thread: a ReraiserThread object.
"""
self._threads.append(thread)
def StartAll(self):
"""Start all threads."""
for thread in self._threads:
thread.start()
def _JoinAll(self, watcher=watchdog_timer.WatchdogTimer(None)):
"""Join all threads without stack dumps.
Reraises exceptions raised by the child threads and supports breaking
immediately on exceptions raised on the main thread.
Args:
watcher: Watchdog object providing timeout, by default waits forever.
"""
alive_threads = self._threads[:]
while alive_threads:
for thread in alive_threads[:]:
if watcher.IsTimedOut():
raise TimeoutError('Timed out waiting for %d of %d threads.' %
(len(alive_threads), len(self._threads)))
# Allow the main thread to periodically check for interrupts.
thread.join(0.1)
if not thread.isAlive():
alive_threads.remove(thread)
# All threads are allowed to complete before reraising exceptions.
for thread in self._threads:
thread.ReraiseIfException()
def JoinAll(self, watcher=watchdog_timer.WatchdogTimer(None)):
"""Join all threads.
Reraises exceptions raised by the child threads and supports breaking
immediately on exceptions raised on the main thread. Unfinished threads'
stacks will be logged on watchdog timeout.
Args:
watcher: Watchdog object providing timeout, by default waits forever.
"""
try:
self._JoinAll(watcher)
except TimeoutError:
for thread in (t for t in self._threads if t.isAlive()):
LogThreadStack(thread)
raise
|
models/allennlp_multi_head_similarity.py | zeta1999/tsalib | 241 | 11067304 | #Original file : https://github.com/allenai/allennlp/blob/master/allennlp/modules/similarity_functions/multiheaded.py
# The annotations in the `forward` function are sufficient to explain the module's functionality
import sys
sys.path.append('../')
from tsalib import dim_vars
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.common.checks import ConfigurationError
from allennlp.modules.similarity_functions.similarity_function import SimilarityFunction
from allennlp.modules.similarity_functions.dot_product import DotProductSimilarity
@SimilarityFunction.register("multiheaded")
class MultiHeadedSimilarity(SimilarityFunction):
"""
This similarity function uses multiple "heads" to compute similarity. That is, we take the
input tensors and project them into a number of new tensors, and compute similarities on each
of the projected tensors individually. The result here has one more dimension than a typical
similarity function.
For example, say we have two input tensors, both of shape ``(batch_size, sequence_length,
100)``, and that we want 5 similarity heads. We'll project these tensors with a ``100x100``
matrix, then split the resultant tensors to have shape ``(batch_size, sequence_length, 5,
20)``. Then we call a wrapped similarity function on the result (by default just a dot
product), giving a tensor of shape ``(batch_size, sequence_length, 5)``.
Parameters
----------
num_heads : ``int``
The number of similarity heads to compute.
tensor_1_dim : ``int``
The dimension of the first tensor described above. This is ``tensor.size()[-1]`` - the
length of the vector `before` the multi-headed projection. We need this so we can build
the weight matrix correctly.
tensor_1_projected_dim : ``int``, optional
The dimension of the first tensor `after` the multi-headed projection, `before` we split
into multiple heads. This number must be divisible evenly by ``num_heads``. If not given,
we default to ``tensor_1_dim``.
tensor_2_dim : ``int``, optional
The dimension of the second tensor described above. This is ``tensor.size()[-1]`` - the
length of the vector `before` the multi-headed projection. We need this so we can build
the weight matrix correctly. If not given, we default to ``tensor_1_dim``.
tensor_2_projected_dim : ``int``, optional
The dimension of the second tensor `after` the multi-headed projection, `before` we split
into multiple heads. This number must be divisible evenly by ``num_heads``. If not given,
we default to ``tensor_2_dim``.
internal_similarity : ``SimilarityFunction``, optional
The ``SimilarityFunction`` to call on the projected, multi-headed tensors. The default is
to use a dot product.
"""
def __init__(self,
num_heads: int,
tensor_1_dim: int,
tensor_1_projected_dim: int = None,
tensor_2_dim: int = None,
tensor_2_projected_dim: int = None,
internal_similarity: SimilarityFunction = DotProductSimilarity()) -> None:
super(MultiHeadedSimilarity, self).__init__()
self.num_heads = num_heads
self._internal_similarity = internal_similarity
tensor_1_projected_dim = tensor_1_projected_dim or tensor_1_dim
tensor_2_dim = tensor_2_dim or tensor_1_dim
tensor_2_projected_dim = tensor_2_projected_dim or tensor_2_dim
if tensor_1_projected_dim % num_heads != 0:
raise ConfigurationError("Projected dimension not divisible by number of heads: %d, %d"
% (tensor_1_projected_dim, num_heads))
if tensor_2_projected_dim % num_heads != 0:
raise ConfigurationError("Projected dimension not divisible by number of heads: %d, %d"
% (tensor_2_projected_dim, num_heads))
# tsalib dim vars defined locally (to minimize changes from original implementation)
# better: define and store them in the config dictionary and use everywhere
self.D1, self.D2, self.D1p, self.D2p = dim_vars('D1:{0} D2:{1} D1p:{2} D2p:{3}'
.format(tensor_1_dim, tensor_2_dim, tensor_1_projected_dim, tensor_2_projected_dim))
# original impl
self._tensor_1_projection = Parameter(torch.Tensor(tensor_1_dim, tensor_1_projected_dim))
self._tensor_2_projection = Parameter(torch.Tensor(tensor_2_dim, tensor_2_projected_dim))
# with tsalib:
self._tensor_1_projection: (self.D1, self.D1p) = Parameter(torch.Tensor(self.D1, self.D1p))
self._tensor_2_projection: (self.D2, self.D2p) = Parameter(torch.Tensor(self.D2, self.D2p))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self._tensor_1_projection)
torch.nn.init.xavier_uniform_(self._tensor_2_projection)
def forward_old(self, tensor_1: 'b,t,d1', tensor_2: 'b,t,d2') :
# This is the original `forward` implementation
# note the shape 'surgery' below
H = self.num_heads
        B, T = dim_vars(f'Batch(b):{tensor_1.size(0)} T(t):{tensor_1.size(1)}')
D1, D2, D1p, D2p = self.D1, self.D2, self.D1p, self.D2p
projected_tensor_1: (B, T, D1p) = torch.matmul(tensor_1, self._tensor_1_projection)
projected_tensor_2: (B, T, D2p) = torch.matmul(tensor_2, self._tensor_2_projection)
# Here we split the last dimension of the tensors from (..., projected_dim) to
# (..., num_heads, projected_dim / num_heads), using tensor.view().
last_dim_size = projected_tensor_1.size(-1) // H
new_shape = list(projected_tensor_1.size())[:-1] + [H, last_dim_size]
split_tensor_1: (B, T, H, D1p // H) = projected_tensor_1.view(*new_shape)
last_dim_size = projected_tensor_2.size(-1) // H
new_shape = list(projected_tensor_2.size())[:-1] + [H, last_dim_size]
split_tensor_2: (B, T, H, D2p // H) = projected_tensor_2.view(*new_shape)
# And then we pass this off to our internal similarity function. Because the similarity
# functions don't care what dimension their input has, and only look at the last dimension,
# we don't need to do anything special here. It will just compute similarity on the
# projection dimension for each head, returning a tensor of shape (..., num_heads).
ret : (B, T, H) = self._internal_similarity(split_tensor_1, split_tensor_2)
return ret
@overrides
def forward(self, tensor_1: 'b,t,d1', tensor_2: 'b,t,d2') :
# Cleaner implementation with tsalib
#B, T, H defined locally here (to minimize changes to original implementation)
# better: define and store them in the config dictionary and use everywhere
        B, T, H = dim_vars(f'Batch(b):{tensor_1.size(0)} T(t):{tensor_1.size(1)} H(h):{self.num_heads}')
D1, D2, D1p, D2p = self.D1, self.D2, self.D1p, self.D2p
projected_tensor_1: (B, T, D1p) = torch.matmul(tensor_1, self._tensor_1_projection)
projected_tensor_2: (B, T, D2p) = torch.matmul(tensor_2, self._tensor_2_projection)
split_tensor_1 = projected_tensor_1.view(B, T, H, D1p // H)
split_tensor_2 = projected_tensor_2.view(B, T, H, D2p // H)
# And then we pass this off to our internal similarity function. Because the similarity
# functions don't care what dimension their input has, and only look at the last dimension,
# we don't need to do anything special here. It will just compute similarity on the
# projection dimension for each head, returning a tensor of shape (..., num_heads).
ret : (B, T, H) = self._internal_similarity(split_tensor_1, split_tensor_2)
return ret |
Flaircounting/flaircounting.py | zatherz/reddit | 444 | 11067308 | <gh_stars>100-1000
#/u/GoldenSights
import praw
import time
import sqlite3
import datetime
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "Cinemasins"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
PRINTFILE = "userflair.txt"
#The file where the flairs will be shown
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
WAITS = str(WAIT)
lastwikiupdate = 0
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS users(NAME TEXT, FLAIR TEXT)')
print('Loaded Completed table')
sql.commit()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def scan():
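    # Scan recent submissions and comments, record every author's flair in the
    # SQLite table, then rewrite PRINTFILE with per-flair counts and a name list.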
print('Scanning ' + SUBREDDIT)
subreddit = r.get_subreddit(SUBREDDIT)
posts = []
posts += subreddit.get_new(limit=MAXPOSTS)
posts += subreddit.get_comments(limit=MAXPOSTS)
for post in posts:
try:
pauthor = post.author.name
try:
pflair = post.author_flair_text
if pflair is not None:
cur.execute('SELECT * FROM users WHERE NAME=?', [pauthor])
fetched = cur.fetchone()
if not fetched:
cur.execute('INSERT INTO users VALUES(?, ?)', [pauthor, pflair])
print('New user flair: ' + pauthor + ' : ' + pflair)
else:
oldflair = fetched[1]
if pflair != oldflair:
cur.execute('UPDATE users SET FLAIR=? WHERE NAME=?', [pflair, pauthor])
print('Updating user flair: ' + pauthor + ' : ' + pflair)
sql.commit()
else:
print(post.id, "No flair")
except AttributeError:
print(post.id, "No flair")
except AttributeError:
print(post.id, "Author is deleted")
flairfile = open(PRINTFILE, 'w')
cur.execute('SELECT * FROM users')
fetch = cur.fetchall()
fetch.sort(key=lambda x: x[0])
flaircounts = {}
for item in fetch:
itemflair = item[1]
if itemflair not in flaircounts:
flaircounts[itemflair] = 1
else:
flaircounts[itemflair] += 1
print('FLAIR: NO. OF USERS WITH THAT FLAIR', file=flairfile)
presorted = []
for flairkey in flaircounts:
presorted.append(flairkey + ': ' + str(flaircounts[flairkey]))
presorted.sort()
for flair in presorted:
print(flair, file=flairfile)
print('\n\n', file=flairfile)
print('NAME: USER\'S FLAIR', file=flairfile)
for user in fetch:
print(user[0] + ': ' + user[1], file=flairfile)
flairfile.close()
while True:
try:
scan()
    except Exception as e:
print("Error:", e)
sql.commit()
print('Running again in ' + str(WAIT) + ' seconds')
time.sleep(WAIT) |
Data Structures/Priority Queue/Python/priority_queue.py | strangestroad/interview-techdev-guide | 320 | 11067310 | from heapq import heapify, heappop, heappush
class PriorityQueue(dict):
def __init__(self,*args,**kwargs):
super(PriorityQueue,self).__init__(*args,**kwargs)
self._rebuild_heap()
def _rebuild_heap(self):
self._heap=[(v,k) for k,v in self.items()]
heapify(self._heap)
def smallest(self):
heap=self._heap
v,k=heap[0]
while k not in self or self[k]!=v:
heappop(heap)
v,k=heap[0]
return k
def pop_smallest(self):
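        # Pop and return the key with the smallest priority. Stale heap entries
        # (keys that were deleted or re-prioritised) are skipped lazily.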
heap=self._heap
v,k=heappop(heap)
while k not in self or self[k]!=v:
v,k=heappop(heap)
del self[k]
return k
def __setitem__(self,key,val):
super(PriorityQueue,self).__setitem__(key,val)
if len(self._heap)<2*len(self):
heappush(self._heap,(val,key))
else:
self._rebuild_heap()
def setdefault(self,key,val):
if key not in self:
self[key]=val
return val
return self[key]
def update(self,*args,**kwargs):
super(PriorityQueue,self).update(*args,**kwargs)
self._rebuild_heap()
def sorted_iter(self):
while self:
yield self.pop_smallest()
if __name__ == "__main__":
queue = PriorityQueue()
# queue stores data similar to a dictonary
queue[5] = 0 # here 0 is priority
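    # Illustrative continuation: add two more keys and drain the queue in
    # ascending priority order (prints [5, 3, 7]).
    queue[3] = 1
    queue[7] = 2
    print(list(queue.sorted_iter()))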
|
wagtail/tests/urls.py | brownaa/wagtail | 8,851 | 11067311 | <reponame>brownaa/wagtail<gh_stars>1000+
from django.http import HttpResponse
from django.urls import include, path
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.api.v2.router import WagtailAPIRouter
from wagtail.api.v2.views import PagesAPIViewSet
from wagtail.contrib.sitemaps import Sitemap
from wagtail.contrib.sitemaps import views as sitemaps_views
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from wagtail.documents.api.v2.views import DocumentsAPIViewSet
from wagtail.images import urls as wagtailimages_urls
from wagtail.images.api.v2.views import ImagesAPIViewSet
from wagtail.images.tests import urls as wagtailimages_test_urls
from wagtail.tests.testapp import urls as testapp_urls
from wagtail.tests.testapp.models import EventSitemap
api_router = WagtailAPIRouter('wagtailapi_v2')
api_router.register_endpoint('pages', PagesAPIViewSet)
api_router.register_endpoint('images', ImagesAPIViewSet)
api_router.register_endpoint('documents', DocumentsAPIViewSet)
urlpatterns = [
path('admin/', include(wagtailadmin_urls)),
path('documents/', include(wagtaildocs_urls)),
path('testimages/', include(wagtailimages_test_urls)),
path('images/', include(wagtailimages_urls)),
path('api/main/', api_router.urls),
path('sitemap.xml', sitemaps_views.sitemap),
path('sitemap-index.xml', sitemaps_views.index, {
'sitemaps': {'pages': Sitemap, 'events': EventSitemap(request=None)},
'sitemap_url_name': 'sitemap',
}),
path('sitemap-<str:section>.xml', sitemaps_views.sitemap, name='sitemap'),
path('testapp/', include(testapp_urls)),
path('fallback/', lambda: HttpResponse('ok'), name='fallback'),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
path('', include(wagtail_urls)),
]
|
ch11-程序性能检测及优化/11.getTickCount.py | makelove/OpenCV-Python-Tutorial | 2,875 | 11067342 | <filename>ch11-程序性能检测及优化/11.getTickCount.py
# -*- coding: utf-8 -*-
import cv2
import numpy as np
'''
Measuring program performance with OpenCV.
'''
img1 = cv2.imread('../data/ml.jpg')
e1 = cv2.getTickCount()
for i in range(5, 49, 2):
img1 = cv2.medianBlur(img1, i)
e2 = cv2.getTickCount()
t = (e2 - e1) / cv2.getTickFrequency()  # getTickFrequency() gives the clock frequency, i.e. ticks per second
print(t) # 0.034773332
# Result I got is 0.521107655 seconds
|
research/object_detection/dataset_tools/seq_example_util.py | akshit-protonn/models | 82,518 | 11067346 | <gh_stars>1000+
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility for object detection tf.train.SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
def context_float_feature(ndarray):
"""Converts a numpy float array to a context float feature.
Args:
ndarray: A numpy float array.
Returns:
A context float feature.
"""
feature = tf.train.Feature()
for val in ndarray:
feature.float_list.value.append(val)
return feature
def context_int64_feature(ndarray):
"""Converts a numpy array to a context int64 feature.
Args:
ndarray: A numpy int64 array.
Returns:
A context int64 feature.
"""
feature = tf.train.Feature()
for val in ndarray:
feature.int64_list.value.append(val)
return feature
def context_bytes_feature(ndarray):
"""Converts a numpy bytes array to a context bytes feature.
Args:
ndarray: A numpy bytes array.
Returns:
A context bytes feature.
"""
feature = tf.train.Feature()
for val in ndarray:
if isinstance(val, np.ndarray):
val = val.tolist()
feature.bytes_list.value.append(tf.compat.as_bytes(val))
return feature
def sequence_float_feature(ndarray):
"""Converts a numpy float array to a sequence float feature.
Args:
ndarray: A numpy float array.
Returns:
A sequence float feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
feature = feature_list.feature.add()
if row.size:
feature.float_list.value[:] = row
return feature_list
def sequence_int64_feature(ndarray):
"""Converts a numpy int64 array to a sequence int64 feature.
Args:
ndarray: A numpy int64 array.
Returns:
A sequence int64 feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
feature = feature_list.feature.add()
if row.size:
feature.int64_list.value[:] = row
return feature_list
def sequence_bytes_feature(ndarray):
"""Converts a bytes float array to a sequence bytes feature.
Args:
ndarray: A numpy bytes array.
Returns:
A sequence bytes feature.
"""
feature_list = tf.train.FeatureList()
for row in ndarray:
if isinstance(row, np.ndarray):
row = row.tolist()
feature = feature_list.feature.add()
if row:
row = [tf.compat.as_bytes(val) for val in row]
feature.bytes_list.value[:] = row
return feature_list
def sequence_strings_feature(strings):
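  # Unlike the ndarray helpers above, this expects a list of Python strings and
  # stores each one as a single UTF-8 bytes feature per step.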
new_str_arr = []
for single_str in strings:
new_str_arr.append(tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[single_str.encode('utf8')])))
return tf.train.FeatureList(feature=new_str_arr)
def boxes_to_box_components(bboxes):
"""Converts a list of numpy arrays (boxes) to box components.
Args:
bboxes: A numpy array of bounding boxes.
Returns:
Bounding box component lists.
"""
ymin_list = []
xmin_list = []
ymax_list = []
xmax_list = []
for bbox in bboxes:
if bbox != []: # pylint: disable=g-explicit-bool-comparison
bbox = np.array(bbox).astype(np.float32)
ymin, xmin, ymax, xmax = np.split(bbox, 4, axis=1)
else:
ymin, xmin, ymax, xmax = [], [], [], []
ymin_list.append(np.reshape(ymin, [-1]))
xmin_list.append(np.reshape(xmin, [-1]))
ymax_list.append(np.reshape(ymax, [-1]))
xmax_list.append(np.reshape(xmax, [-1]))
return ymin_list, xmin_list, ymax_list, xmax_list
def make_sequence_example(dataset_name,
video_id,
encoded_images,
image_height,
image_width,
image_format=None,
image_source_ids=None,
timestamps=None,
is_annotated=None,
bboxes=None,
label_strings=None,
detection_bboxes=None,
detection_classes=None,
detection_scores=None,
use_strs_for_source_id=False,
context_features=None,
context_feature_length=None,
context_features_image_id_list=None):
"""Constructs tf.SequenceExamples.
Args:
dataset_name: String with dataset name.
video_id: String with video id.
encoded_images: A [num_frames] list (or numpy array) of encoded image
frames.
image_height: Height of the images.
image_width: Width of the images.
image_format: Format of encoded images.
image_source_ids: (Optional) A [num_frames] list of unique string ids for
each image.
timestamps: (Optional) A [num_frames] list (or numpy array) array with image
timestamps.
is_annotated: (Optional) A [num_frames] list (or numpy array) array
in which each element indicates whether the frame has been annotated
(1) or not (0).
bboxes: (Optional) A list (with num_frames elements) of [num_boxes_i, 4]
numpy float32 arrays holding boxes for each frame.
label_strings: (Optional) A list (with num_frames_elements) of [num_boxes_i]
numpy string arrays holding object string labels for each frame.
detection_bboxes: (Optional) A list (with num_frames elements) of
[num_boxes_i, 4] numpy float32 arrays holding prediction boxes for each
frame.
detection_classes: (Optional) A list (with num_frames_elements) of
[num_boxes_i] numpy int64 arrays holding predicted classes for each frame.
detection_scores: (Optional) A list (with num_frames_elements) of
[num_boxes_i] numpy float32 arrays holding predicted object scores for
each frame.
use_strs_for_source_id: (Optional) Whether to write the source IDs as
strings rather than byte lists of characters.
context_features: (Optional) A list or numpy array of features to use in
Context R-CNN, of length num_context_features * context_feature_length.
context_feature_length: (Optional) The length of each context feature, used
for reshaping.
context_features_image_id_list: (Optional) A list of image ids of length
num_context_features corresponding to the context features.
Returns:
A tf.train.SequenceExample.
"""
num_frames = len(encoded_images)
image_encoded = np.expand_dims(encoded_images, axis=-1)
if timestamps is None:
timestamps = np.arange(num_frames)
image_timestamps = np.expand_dims(timestamps, axis=-1)
# Context fields.
context_dict = {
'example/dataset_name': context_bytes_feature([dataset_name]),
'clip/start/timestamp': context_int64_feature([image_timestamps[0][0]]),
'clip/end/timestamp': context_int64_feature([image_timestamps[-1][0]]),
'clip/frames': context_int64_feature([num_frames]),
'image/channels': context_int64_feature([3]),
'image/height': context_int64_feature([image_height]),
'image/width': context_int64_feature([image_width]),
'clip/media_id': context_bytes_feature([video_id])
}
# Sequence fields.
feature_list = {
'image/encoded': sequence_bytes_feature(image_encoded),
'image/timestamp': sequence_int64_feature(image_timestamps),
}
# Add optional fields.
if image_format is not None:
context_dict['image/format'] = context_bytes_feature([image_format])
if image_source_ids is not None:
if use_strs_for_source_id:
feature_list['image/source_id'] = sequence_strings_feature(
image_source_ids)
else:
feature_list['image/source_id'] = sequence_bytes_feature(image_source_ids)
if bboxes is not None:
bbox_ymin, bbox_xmin, bbox_ymax, bbox_xmax = boxes_to_box_components(bboxes)
feature_list['region/bbox/xmin'] = sequence_float_feature(bbox_xmin)
feature_list['region/bbox/xmax'] = sequence_float_feature(bbox_xmax)
feature_list['region/bbox/ymin'] = sequence_float_feature(bbox_ymin)
feature_list['region/bbox/ymax'] = sequence_float_feature(bbox_ymax)
if is_annotated is None:
is_annotated = np.ones(num_frames, dtype=np.int64)
is_annotated = np.expand_dims(is_annotated, axis=-1)
feature_list['region/is_annotated'] = sequence_int64_feature(is_annotated)
if label_strings is not None:
feature_list['region/label/string'] = sequence_bytes_feature(
label_strings)
if detection_bboxes is not None:
det_bbox_ymin, det_bbox_xmin, det_bbox_ymax, det_bbox_xmax = (
boxes_to_box_components(detection_bboxes))
feature_list['predicted/region/bbox/xmin'] = sequence_float_feature(
det_bbox_xmin)
feature_list['predicted/region/bbox/xmax'] = sequence_float_feature(
det_bbox_xmax)
feature_list['predicted/region/bbox/ymin'] = sequence_float_feature(
det_bbox_ymin)
feature_list['predicted/region/bbox/ymax'] = sequence_float_feature(
det_bbox_ymax)
if detection_classes is not None:
feature_list['predicted/region/label/index'] = sequence_int64_feature(
detection_classes)
if detection_scores is not None:
feature_list['predicted/region/label/confidence'] = sequence_float_feature(
detection_scores)
if context_features is not None:
context_dict['image/context_features'] = context_float_feature(
context_features)
if context_feature_length is not None:
context_dict['image/context_feature_length'] = context_int64_feature(
context_feature_length)
if context_features_image_id_list is not None:
context_dict['image/context_features_image_id_list'] = (
context_bytes_feature(context_features_image_id_list))
context = tf.train.Features(feature=context_dict)
feature_lists = tf.train.FeatureLists(feature_list=feature_list)
sequence_example = tf.train.SequenceExample(
context=context,
feature_lists=feature_lists)
return sequence_example
|
modules/xlsxwriter/sharedstrings.py | noraj/Kvasir | 194 | 11067401 | ###############################################################################
#
# SharedStrings - A class for writing the Excel XLSX sharedStrings file.
#
# Copyright 2013, <NAME>, <EMAIL>
#
# Standard packages.
import re
# Package imports.
from . import xmlwriter
class SharedStrings(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX sharedStrings file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(SharedStrings, self).__init__()
self.string_table = None
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the sst element.
self._write_sst()
# Write the sst strings.
self._write_sst_strings()
# Close the sst tag.
self._xml_end_tag('sst')
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_sst(self):
# Write the <sst> element.
xmlns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main'
attributes = [
('xmlns', xmlns),
('count', self.string_table.count),
('uniqueCount', self.string_table.unique_count),
]
self._xml_start_tag('sst', attributes)
def _write_sst_strings(self):
# Write the sst string elements.
for string in (self.string_table._get_strings()):
self._write_si(string)
def _write_si(self, string):
# Write the <si> element.
attributes = []
# TODO: Fix control char encoding when unit test is ported.
# Excel escapes control characters with _xHHHH_ and also escapes any
# literal strings of that type by encoding the leading underscore.
# So "\0" -> _x0000_ and "_x0000_" -> _x005F_x0000_.
# The following substitutions deal with those cases.
# Escape the escape.
# string =~ s/(_x[0-9a-fA-F]{4}_)/_x005F1/g
# Convert control character to the _xHHHH_ escape.
# string =~ s/([\x00-\x08\x0B-\x1F])/sprintf "_x04X_", ord(1)/eg
# Add attribute to preserve leading or trailing whitespace.
        if re.search(r'^\s', string) or re.search(r'\s$', string):
attributes.append(('xml:space', 'preserve'))
# Write any rich strings without further tags.
if re.search('^<r>', string) and re.search('</r>$', string):
self._xml_rich_si_element(string)
else:
self._xml_si_element(string, attributes)
# A metadata class to store Excel strings between worksheets.
class SharedStringTable(object):
"""
A class to track Excel shared strings between worksheets.
"""
def __init__(self):
self.count = 0
self.unique_count = 0
self.string_table = {}
self.string_array = []
def _get_shared_string_index(self, string):
"""" Get the index of the string in the Shared String table. """
if string not in self.string_table:
# String isn't already stored in the table so add it.
index = self.unique_count
self.string_table[string] = index
self.count += 1
self.unique_count += 1
return index
else:
# String exists in the table.
index = self.string_table[string]
self.count += 1
return index
def _get_shared_string(self, index):
"""" Get a shared string from the index. """
return self.string_array[index]
def _sort_string_data(self):
"""" Sort the shared string data and convert from dict to list. """
self.string_array = sorted(self.string_table,
key=self.string_table.__getitem__)
self.string_table = {}
def _get_strings(self):
"""" Return the sorted string list. """
return self.string_array
|
tests/parsers/olecf.py | roshanmaskey/plaso | 1,253 | 11067405 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the OLE Compound Files (OLECF) parser."""
import unittest
from plaso.containers import sessions
from plaso.containers import warnings
from plaso.lib import definitions
from plaso.parsers import olecf
from plaso.parsers import olecf_plugins # pylint: disable=unused-import
from tests.parsers import test_lib
class OLECFParserTest(test_lib.ParserTestCase):
"""Tests for the OLE Compound Files (OLECF) parser."""
# pylint: disable=protected-access
def testEnablePlugins(self):
"""Tests the EnablePlugins function."""
parser = olecf.OLECFParser()
number_of_plugins = len(parser._plugin_classes)
parser.EnablePlugins([])
self.assertEqual(len(parser._plugins), 0)
parser.EnablePlugins(parser.ALL_PLUGINS)
    # Subtract 1 for the default plugin.
self.assertEqual(len(parser._plugins), number_of_plugins - 1)
parser.EnablePlugins(['olecf_document_summary'])
self.assertEqual(len(parser._plugins), 1)
def testParse(self):
"""Tests the Parse function."""
parser = olecf.OLECFParser()
storage_writer = self._ParseFile(['Document.doc'], parser)
# OLE Compound File information:
# Version : 3.62
# Sector size : 512
# Short sector size : 64
self.assertEqual(storage_writer.number_of_events, 9)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'olecf:item',
'date_time': '2013-05-16 02:29:49.7850000',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[8], expected_event_values)
session = sessions.Session()
storage_writer = self._CreateStorageWriter()
parser_mediator = self._CreateParserMediator(session, storage_writer)
parser = olecf.OLECFParser()
parser.ParseFileObject(parser_mediator, None)
self.assertEqual(storage_writer.number_of_events, 0)
self.assertEqual(storage_writer.number_of_extraction_warnings, 1)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
generator = storage_writer.GetAttributeContainers(
warnings.ExtractionWarning.CONTAINER_TYPE)
test_warnings = list(generator)
test_warning = test_warnings[0]
self.assertIsNotNone(test_warning)
expected_message = (
'unable to open file with error: pyolecf_file_open_file_object: ')
self.assertTrue(test_warning.message.startswith(expected_message))
if __name__ == '__main__':
unittest.main()
|
test/unit/reductions/conftest.py | alliesaizan/fairlearn | 1,142 | 11067431 | <gh_stars>1000+
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
from test.unit.input_convertors import ensure_list, ensure_series
def is_invalid_transformation(**kwargs):
A_two_dim = kwargs["A_two_dim"]
transform = kwargs["transformA"]
if A_two_dim and transform in [ensure_list, ensure_series]:
return True
return False
|
tumblr/tumblr.py | czahoi/crawler-py | 127 | 11067434 | # -*- coding=utf-8 -*-
"""
Multi-threaded tumblr download script.
Features:
    1. Download videos from multiple users
    2. Multi-threaded downloads
    3. Automatically removes dead/invalid videos
- Compatible with Python 2.7 and above
- Compatibility on Windows has not been tested
- Install the dependency:
    pip install requests
- Edit the list of tumblr usernames at the end of the script.
  Example:
    names=['username1','username2']
- Run the script:
    python tumblr.py
"""
from threading import Thread
import requests
import re
import os
import sys
if sys.version_info[0]==2:
py3=False
import Queue
else:
py3=True
import queue as Queue
import time
download_path='/root/tumblr/download'
if not os.path.exists(download_path):
os.mkdir(download_path)
link_path='/root/tumblr/jiexi'
if not os.path.exists(link_path):
os.mkdir(link_path)
api_url='http://%s.tumblr.com/api/read?&num=50&start='
UQueue=Queue.Queue()
def getpost(uid,queue):
url='http://%s.tumblr.com/api/read?&num=50'%uid
page=requests.get(url).text
try:
total=re.findall('<posts start="0" total="(.*?)">',page)[0]
total=int(total)
a=[i*50 for i in range(1000) if i*50-total<0]
ul=api_url%uid
for i in a:
queue.put(ul+str(i))
except Exception as e:
print(u'geting posts from {} error:{}'.format(uid,e))
return False
extractpicre = re.compile(r'(?<=<photo-url max-width="1280">).+?(?=</photo-url>)',flags=re.S) #search for url of maxium size of a picture, which starts with '<photo-url max-width="1280">' and ends with '</photo-url>'
extractvideore=re.compile('source src=".*?/tumblr_(.*?)" type="video/mp4"')
video_links = []
pic_links = []
vhead = 'https://vt.tumblr.com/tumblr_%s.mp4'
class Consumer(Thread):
def __init__(self, l_queue):
super(Consumer,self).__init__()
self.queue = l_queue
def run(self):
global video_links,pic_links
while 1:
link = self.queue.get()
try:
t=time.time()
content = requests.get(link,timeout=10).text
t2=time.time()
videos = extractvideore.findall(content)
t3=time.time()
video_links.extend([vhead % v for v in videos])
pic_links.extend(extractpicre.findall(content))
t4=time.time()
print('{} cost total {}s; get content {}s; get video {}s;get pictures {}s\n;'.format(link,round(t4-t,1),round(t2-t,1),round(t3-t2,1),round(t4-t3,1)))
print('video length:{};pictures length:{}'.format(len(video_links),len(pic_links)))
except Exception as e:
print('url: {} parse failed {}'.format(link,e))
if self.queue.empty():
break
class Downloader(Thread):
"""docstring for Downloader"""
def __init__(self, queue):
super(Downloader, self).__init__()
self.queue = queue
def run(self):
while 1:
info=self.queue.get()
url=info['url']
path=info['path']
try:
r=requests.get(url,stream=True,timeout=10)
with open(path,'wb') as f:
for chunk in r.iter_content(chunk_size=1024*1024):
if chunk:
f.write(chunk)
print(u'download {} success'.format(path))
except:
print(u'download {} fail'.format(path))
if self.queue.empty():
break
def write(name):
global video_links,pic_links
videos=list(set([i.replace('/480','').replace('.mp4.mp4','.mp4') for i in video_links]))
pictures=list(set(pic_links))
pic_path=os.path.join(link_path,'%s_pictures.txt'%name)
vid_path=os.path.join(link_path,'%s_videos.txt'%name)
with open(pic_path,'w') as f:
for i in pictures:
try:
f.write(u'{}\n'.format(i))
except Exception as e:
print('write fail!')
with open(vid_path,'w') as f:
for i in videos:
try:
f.write(u'{}\n'.format(i))
except Exception as e:
                print(i)
print('write fail!')
def download_from_text(name,d_type):
if d_type=='0':
print(u"无需下载")
elif d_type=='1':
pic_path=os.path.join(link_path,'%s_pictures.txt'%name)
vid_path=os.path.join(link_path,'%s_videos.txt'%name)
        print(u'Start downloading videos')
download(name,vid_path)
        print(u'Start downloading pictures')
download(name,pic_path)
elif d_type=='2':
vid_path=os.path.join(link_path,'%s_videos.txt'%name)
        print(u'Start downloading videos')
download(name,vid_path)
else:
pic_path=os.path.join(link_path,'%s_pictures.txt'%name)
        print(u'Start downloading pictures')
download(name,pic_path)
def download(username,filename,thread_num=10,threshold=1000):
    type_=re.findall(r'([^/]*?)_(pictures|videos)\.txt',filename)[0][1]
queue=Queue.Queue()
u_path=os.path.join(download_path,username)
r_path=os.path.join(u_path,type_)
if not os.path.exists(u_path):
os.mkdir(u_path)
if not os.path.exists(r_path):
os.mkdir(r_path)
with open(filename) as f:
links=[i.strip() for i in f.readlines()]
for link in links:
name=os.path.basename(link)
filepath=os.path.join(r_path,name)
if not os.path.exists(filepath):
queue.put(dict(url=link,path=filepath))
###download
tasks=[]
for i in range(min(thread_num,queue.qsize())):
t=Downloader(queue)
t.start()
tasks.append(t)
for t in tasks:
t.join()
##remove invalid video
invalidno=0
files=[os.path.join(r_path,i) for i in os.listdir(r_path)]
for file in files:
if os.path.getsize(file)<=threshold:
os.remove(file)
invalidno+=1
    print(u'From {}: removed {} files smaller than {} bytes'.format(r_path,invalidno,threshold))
def main(names):
print(u"解析完毕后是否下载?\n 0. 不下载; 1. 全部下载; 2. 仅下载视频; 3. 仅下载图片")
if py3:
d_type=input()
else:
d_type=raw_input()
for name in names:
global video_links,pic_links
video_links = []
pic_links = []
a=getpost(name,UQueue)
if a!=False:
task=[]
for i in range(min(5,UQueue.qsize())):
t=Consumer(UQueue)
t.start()
task.append(t)
for t in task:
t.join()
write(name)
print(u"解析完毕,请查看同目录下的文件")
            ## download
download_from_text(name,d_type)
if __name__=='__main__':
    names=[]  # list of tumblr usernames to download
main(names)
|
3d_segmentation/torch/unet_evaluation_array.py | tommydino93/tutorials | 535 | 11067443 | <filename>3d_segmentation/torch/unet_evaluation_array.py<gh_stars>100-1000
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
from glob import glob
import nibabel as nib
import numpy as np
import torch
from torch.utils.data import DataLoader
from monai import config
from monai.data import ImageDataset, create_test_image_3d, decollate_batch
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.networks.nets import UNet
from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, SaveImage, ScaleIntensity, EnsureType
def main(tempdir):
config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
print(f"generating synthetic data to {tempdir} (this may take a while)")
for i in range(5):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
# define transforms for image and segmentation
imtrans = Compose([ScaleIntensity(), AddChannel(), EnsureType()])
segtrans = Compose([AddChannel(), EnsureType()])
val_ds = ImageDataset(images, segs, transform=imtrans, seg_transform=segtrans, image_only=False)
# sliding window inference for one image at every iteration
val_loader = DataLoader(val_ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)
post_trans = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold_values=True)])
saver = SaveImage(output_dir="./output", output_ext=".nii.gz", output_postfix="seg")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = UNet(
spatial_dims=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
model.load_state_dict(torch.load("best_metric_model_segmentation3d_array.pth"))
model.eval()
with torch.no_grad():
for val_data in val_loader:
val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
# define sliding window size and batch size for windows inference
roi_size = (96, 96, 96)
sw_batch_size = 4
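            # (sliding_window_inference tiles each volume into roi_size patches,
            # runs sw_batch_size patches through the model at a time, and
            # stitches the patch predictions back into a full-size output)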
val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
val_labels = decollate_batch(val_labels)
meta_data = decollate_batch(val_data[2])
# compute metric for current iteration
dice_metric(y_pred=val_outputs, y=val_labels)
for val_output, data in zip(val_outputs, meta_data):
saver(val_output, data)
# aggregate the final mean dice result
print("evaluation metric:", dice_metric.aggregate().item())
# reset the status
dice_metric.reset()
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tempdir:
main(tempdir)
|
custom_components/nuki_ng/__init__.py | gasecki/Home-Assistant_Config | 163 | 11067465 | from __future__ import annotations
from .nuki import NukiCoordinator
from .constants import DOMAIN, PLATFORMS
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import service
# from homeassistant.helpers import device_registry
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
)
import logging
OPENER_TYPE = 1
LOCK_TYPE = 0
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
data = entry.as_dict()["data"]
_LOGGER.debug(f"async_setup_entry: {data}")
coordinator = NukiCoordinator(hass, entry, data)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = coordinator
for p in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, p))
return True
async def async_unload_entry(hass: HomeAssistant, entry):
await hass.data[DOMAIN][entry.entry_id].unload()
for p in PLATFORMS:
await hass.config_entries.async_forward_entry_unload(entry, p)
hass.data[DOMAIN].pop(entry.entry_id)
return True
async def async_setup(hass: HomeAssistant, config) -> bool:
hass.data[DOMAIN] = dict()
async def async_reboot(call):
for entry_id in await service.async_extract_config_entry_ids(hass, call):
await hass.data[DOMAIN][entry_id].do_reboot()
async def async_fwupdate(call):
for entry_id in await service.async_extract_config_entry_ids(hass, call):
await hass.data[DOMAIN][entry_id].do_fwupdate()
async def async_delete_callback(call):
for entry_id in await service.async_extract_config_entry_ids(hass, call):
await hass.data[DOMAIN][entry_id].do_delete_callback(call.data.get("callback"))
hass.services.async_register(DOMAIN, "bridge_reboot", async_reboot)
hass.services.async_register(DOMAIN, "bridge_fwupdate", async_fwupdate)
hass.services.async_register(
DOMAIN, "bridge_delete_callback", async_delete_callback)
return True
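# Illustrative service call from Home Assistant YAML (assumes DOMAIN resolves
# to "nuki_ng"; the target below is a placeholder):
#
#   service: nuki_ng.bridge_reboot
#   target:
#     device_id: <your bridge device>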
class NukiEntity(CoordinatorEntity):
def __init__(self, coordinator, device_id: str):
super().__init__(coordinator)
self.device_id = device_id
def set_id(self, prefix: str, suffix: str):
self.id_prefix = prefix
self.id_suffix = suffix
def set_name(self, name: str):
self._attr_name_suffix = name
@property
def name_suffix(self):
return self._attr_name_suffix
@property
def get_name(self):
return "Nuki %s" % (self.data.get("name", self.device_id))
@property
def name(self) -> str:
return "%s %s" % (self.get_name, self.name_suffix)
@property
def unique_id(self) -> str:
return "nuki-%s-%s" % (self.device_id, self.id_suffix)
@property
def available(self):
if "nukiId" not in self.data:
return False
return super().available
@property
def data(self) -> dict:
return self.coordinator.device_data(self.device_id)
@property
def last_state(self) -> dict:
return self.data.get("lastKnownState", {})
@property
def model(self) -> str:
if self.coordinator.is_lock(self.device_id):
return "Nuki Smart Lock"
if self.coordinator.is_opener(self.device_id):
return "Nuki Opener"
@property
def device_info(self):
return {
"identifiers": {("id", self.device_id)},
"name": self.get_name,
"manufacturer": "Nuki",
"model": self.model,
"sw_version": self.data.get("firmwareVersion"),
"via_device": (
"id",
self.coordinator.info_data().get("ids", {}).get("hardwareId")
)
}
class NukiBridge(CoordinatorEntity):
def set_id(self, suffix: str):
self.id_suffix = suffix
def set_name(self, name: str):
self.name_suffix = name
@property
def name(self) -> str:
return "Nuki Bridge %s" % (self.name_suffix)
@property
def unique_id(self) -> str:
return "nuki-bridge-%s-%s" % (self.get_id, self.id_suffix)
@property
def data(self) -> dict:
return self.coordinator.data.get("bridge_info", {})
@property
def get_id(self):
return self.data.get("ids", {}).get("hardwareId")
@property
def device_info(self):
model = "Hardware Bridge" if self.data.get(
"bridgeType", 1) else "Software Bridge"
versions = self.data.get("versions", {})
return {
"identifiers": {("id", self.get_id)},
"name": "Nuki Bridge",
"manufacturer": "Nuki",
"model": model,
"sw_version": versions.get("firmwareVersion"),
}
|
python3/pracmln/mln/inference/mcmc.py | seba90/pracmln | 123 | 11067484 | # -*- coding: utf-8 -*-
#
# Markov Logic Networks
#
# (C) 2012-2015 by <NAME>
# 2006-2011 by <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import random
from dnutils import logs
from .infer import Inference
from ..util import fstr
from ..constants import ALL
logger = logs.getlogger(__name__)
class MCMCInference(Inference):
"""
Abstract super class for Markov chain Monte Carlo-based inference.
"""
def __init__(self, mrf, queries=ALL, **params):
Inference.__init__(self, mrf, queries, **params)
def random_world(self, evidence=None):
"""
Get a random possible world, taking the evidence into account.
"""
if evidence is None:
world = list(self.mrf.evidence)
else:
world = list(evidence)
for var in self.mrf.variables:
evdict = var.value2dict(var.evidence_value(world))
valuecount = var.valuecount(evdict)
if valuecount > 1:
# get a random value of the variable
validx = random.randint(0, valuecount - 1)
value = [v for _, v in var.itervalues(evdict)][validx]
var.setval(value, world)
return world
class Chain:
"""
Represents the state of a Markov Chain.
"""
def __init__(self, infer, queries):
self.queries = queries
self.soft_evidence = None
self.steps = 0
self.truths = [0] * len(self.queries)
self.converged = False
self.lastresult = 10
self.infer = infer
# copy the current evidence as this chain's state
# initialize remaining variables randomly (but consistently with the evidence)
self.state = infer.random_world()
def update(self, state):
self.steps += 1
self.state = state
# keep track of counts for queries
for i, q in enumerate(self.queries):
self.truths[i] += q(self.state)
# check if converged !!! TODO check for all queries
if self.steps % 50 == 0:
result = self.results()[0]
diff = abs(result - self.lastresult)
if diff < 0.001:
self.converged = True
self.lastresult = result
# keep track of counts for soft evidence
if self.soft_evidence is not None:
for se in self.soft_evidence:
self.softev_counts[se["expr"]] += se["formula"](self.state)
def set_soft_evidence(self, soft_evidence):
self.soft_evidence = soft_evidence
self.softev_counts = {}
for se in soft_evidence:
if 'formula' not in se:
formula = self.infer.mrf.mln.logic.parse_formula(se['expr'])
se['formula'] = formula.ground(self.infer.mrf, {})
se['expr'] = fstr(se['formula'])
self.softev_counts[se["expr"]] = se["formula"](self.state)
def soft_evidence_frequency(self, formula):
if self.steps == 0: return 0
return float(self.softev_counts[fstr(formula)]) / self.steps
def results(self):
results = []
for i in range(len(self.queries)):
results.append(float(self.truths[i]) / self.steps)
return results
class ChainGroup:
def __init__(self, infer):
self.chains = []
self.infer = infer
def chain(self, chain):
self.chains.append(chain)
def results(self):
chains = float(len(self.chains))
queries = self.chains[0].queries
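        # Each chain's estimate for query q_i is truths[i] / steps, i.e. the
        # fraction of sampled worlds in which q_i held; below we average those
        # per-chain estimates and compute their population variance.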
# compute average
results = [0.0] * len(queries)
for chain in self.chains:
cr = chain.results()
for i in range(len(queries)):
results[i] += cr[i] / chains
# compute variance
var = [0.0 for i in range(len(queries))]
for chain in self.chains:
cr = chain.results()
for i in range(len(self.chains[0].queries)):
var[i] += (cr[i] - results[i]) ** 2 / chains
return dict([(str(q), p) for q, p in zip(queries, results)]), var
def avgtruth(self, formula):
""" returns the fraction of chains in which the given formula is currently true """
t = 0.0
for c in self.chains:
t += formula(c.state)
return t / len(self.chains)
# def write(self, short=False):
# if len(self.chains) > 1:
# for i in range(len(self.infer.queries)):
# self.infer.additionalQueryInfo[i] = "[%d x %d steps, sd=%.3f]" % (len(self.chains), self.chains[0].steps, sqrt(self.var[i]))
# self.inferObject._writeResults(sys.stdout, self.results, shortOutput)
|
glue/core/tests/test_coordinates.py | HPLegion/glue | 550 | 11067487 | <filename>glue/core/tests/test_coordinates.py
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from glue.core.tests.test_state import clone
from glue.tests.helpers import requires_astropy
from ..coordinate_helpers import (axis_label, world_axis,
pixel2world_single_axis, dependent_axes)
from ..coordinates import (coordinates_from_header, IdentityCoordinates,
WCSCoordinates, AffineCoordinates,
header_from_string)
@requires_astropy
class TestWcsCoordinates(object):
def default_header(self):
from astropy.io import fits
hdr = fits.Header()
hdr['NAXIS'] = 2
hdr['CRVAL1'] = 0
hdr['CRVAL2'] = 5
hdr['CRPIX1'] = 250
hdr['CRPIX2'] = 187.5
hdr['CTYPE1'] = 'GLON-TAN'
hdr['CTYPE2'] = 'GLAT-TAN'
hdr['CD1_1'] = -0.0166666666667
hdr['CD1_2'] = 0.
hdr['CD2_1'] = 0.
hdr['CD2_2'] = 0.01666666666667
return hdr
def test_pixel2world_scalar(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
x, y = 250., 187.5
result = coord.pixel_to_world_values(x, y)
expected = 359.9832692105993601, 5.0166664867400375
assert_allclose(result[0], expected[0])
assert_allclose(result[1], expected[1])
def test_pixel2world_different_input_types(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
x, y = 250, 187.5
result = coord.pixel_to_world_values(x, y)
expected = 359.9832692105993601, 5.0166664867400375
assert_allclose(result[0], expected[0])
assert_allclose(result[1], expected[1])
def test_pixel2world_list(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
x, y = [250, 250], [187.5, 187.5]
result = coord.pixel_to_world_values(x, y)
expected = ([359.9832692105993601, 359.9832692105993601],
[5.0166664867400375, 5.0166664867400375])
        for i in range(0, 2):
for r, e in zip(result[i], expected[i]):
assert_allclose(r, e)
def test_pixel2world_numpy(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
x, y = np.array([250, 250]), np.array([187.5, 187.5])
result = coord.pixel_to_world_values(x, y)
expected = (np.array([359.9832692105993601, 359.9832692105993601]),
np.array([5.0166664867400375, 5.0166664867400375]))
np.testing.assert_array_almost_equal(result[0], expected[0])
np.testing.assert_array_almost_equal(result[1], expected[1])
def test_world2pixel_numpy(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
x, y = np.array([0, 0]), np.array([0, 0])
expected = (np.array([249.0000000000000284, 249.0000000000000284]),
np.array([-114.2632689899972434, -114.2632689899972434]))
result = coord.world_to_pixel_values(x, y)
np.testing.assert_array_almost_equal(result[0], expected[0], 3)
np.testing.assert_array_almost_equal(result[1], expected[1], 3)
def test_world2pixel_list(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
x, y = [0, 0], [0, 0]
expected = ([249.0000000000000284, 249.0000000000000284],
[-114.2632689899972434, -114.2632689899972434])
result = coord.world_to_pixel_values(x, y)
        for i in range(0, 2):
for r, e in zip(result[i], expected[i]):
assert_allclose(r, e)
def test_world2pixel_scalar(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
expected = 249.0000000000000284, -114.2632689899972434
x, y = 0, 0
result = coord.world_to_pixel_values(x, y)
assert_allclose(result[0], expected[0], 3)
assert_allclose(result[1], expected[1], 3)
def test_world2pixel_mismatched_input(self):
coord = WCSCoordinates(self.default_header())
x, y = 0., [0.]
expected = coord.world_to_pixel_values(x, y[0])
result = coord.world_to_pixel_values(x, y)
assert_allclose(result[0], expected[0])
assert_allclose(result[1], expected[1])
def test_pixel2world_mismatched_input(self):
coord = WCSCoordinates(self.default_header())
x, y = [250.], 187.5
expected = coord.pixel_to_world_values(x[0], y)
result = coord.pixel_to_world_values(x, y)
assert_allclose(result[0], expected[0])
assert_allclose(result[1], expected[1])
def test_axis_label(self):
hdr = self.default_header()
coord = WCSCoordinates(hdr)
assert axis_label(coord, 0) == 'Galactic Latitude'
assert axis_label(coord, 1) == 'Galactic Longitude'
@requires_astropy
def test_world_axis_wcs():
from astropy.io import fits
hdr = fits.Header()
hdr['NAXIS'] = 2
hdr['CRVAL1'] = 0
hdr['CRVAL2'] = 5
hdr['CRPIX1'] = 2
hdr['CRPIX2'] = 1
hdr['CTYPE1'] = 'XOFFSET'
hdr['CTYPE2'] = 'YOFFSET'
hdr['CD1_1'] = -2.
hdr['CD1_2'] = 0.
hdr['CD2_1'] = 0.
hdr['CD2_2'] = 2.
data = np.ones((3, 4))
coord = WCSCoordinates(hdr)
# pixel_axis and world_axis are in WCS order
assert_allclose(world_axis(coord, data, pixel_axis=0, world_axis=0), [2, 0, -2, -4])
assert_allclose(world_axis(coord, data, pixel_axis=1, world_axis=1), [5, 7, 9])
class TestCoordinatesFromHeader(object):
def test_2d_nowcs(self):
hdr = {"NAXIS": 2}
coord = coordinates_from_header(hdr)
assert type(coord) == IdentityCoordinates
assert coord.pixel_n_dim == 2
assert coord.world_n_dim == 2
def test_2d(self):
hdr = header_from_string(HDR_2D_VALID)
coord = coordinates_from_header(hdr)
assert type(coord) == WCSCoordinates
def test_3d_nowcs(self):
hdr = HDR_3D_VALID_NOWCS
coord = coordinates_from_header(header_from_string(hdr))
assert type(coord) == IdentityCoordinates
assert coord.pixel_n_dim == 3
assert coord.world_n_dim == 3
def test_3d(self):
hdr = header_from_string(HDR_3D_VALID_WCS)
coord = coordinates_from_header(hdr)
assert type(coord) == WCSCoordinates
def test_nod(self):
hdr = 0
coord = coordinates_from_header(hdr)
assert type(coord) == IdentityCoordinates
HDR_2D_VALID = """
SIMPLE = T / Written by IDL: Wed Jul 27 10:01:47 2011
BITPIX = -32 / number of bits per data pixel
NAXIS = 2 / number of data axes
NAXIS1 = 501 / length of data axis 1
NAXIS2 = 376 / length of data axis 2
EXTEND = T / FITS dataset may contain extensions
RADESYS = 'FK5 ' / Frame of reference
CRVAL1 = 0. / World coordinate 1 at reference point
CRVAL2 = 5. / World coordinate 2 at reference point
CRPIX1 = 250.000 / Pixel coordinate 1 at reference point
CRPIX2 = 187.500 / Pixel coordinate 2 at reference point
CTYPE1 = 'GLON-TAN' / Projection type
CTYPE2 = 'GLAT-TAN' / Projection type
CUNIT1 = 'deg ' / Unit used for axis 1
CUNIT2 = 'deg ' / Unit used for axis 2
CD1_1 = -0.016666667 / Pixel trasformation matrix
CD1_2 = 0.
CD2_1 = 0.
CD2_2 = 0.016666667
"""
HDR_3D_VALID_NOWCS = """SIMPLE = T / Written by IDL: Fri Mar 18 11:58:30 2011
BITPIX = -32 / Number of bits per data pixel
NAXIS = 3 / Number of data axes
NAXIS1 = 128 /
NAXIS2 = 128 /
NAXIS3 = 128 /
"""
HDR_3D_VALID_WCS = """SIMPLE = T / Written by IDL: Thu Jul 7 15:37:21 2011
BITPIX = -32 / Number of bits per data pixel
NAXIS = 3 / Number of data axes
NAXIS1 = 82 /
NAXIS2 = 82 /
NAXIS3 = 248 /
DATE = '2011-07-07' / Creation UTC (CCCC-MM-DD) date of FITS header
COMMENT FITS (Flexible Image Transport System) format is defined in 'Astronomy
COMMENT and Astrophysics', volume 376, page 359; bibcode 2001A&A...376..359H
CTYPE1 = 'RA---CAR' /
CTYPE2 = 'DEC--CAR' /
CTYPE3 = 'VELO-LSR' /
CRVAL1 = 55.3500 /
CRPIX1 = 41.5000 /
CDELT1 = -0.00638888900000 /
CRVAL2 = 31.8944 /
CRPIX2 = 41.5000 /
CDELT2 = 0.00638888900000 /
CRVAL3 = -9960.07902777 /
CRPIX3 = -102.000 /
CDELT3 = 66.4236100000 /
"""
@requires_astropy
def test_coords_preserve_shape_2d():
coord = coordinates_from_header(header_from_string(HDR_2D_VALID))
x = np.zeros(12)
y = np.zeros(12)
result = coord.pixel_to_world_values(x, y)
for r in result:
assert r.shape == x.shape
result = coord.world_to_pixel_values(x, y)
for r in result:
assert r.shape == x.shape
x.shape = (4, 3)
y.shape = (4, 3)
result = coord.pixel_to_world_values(x, y)
for r in result:
assert r.shape == x.shape
result = coord.world_to_pixel_values(x, y)
for r in result:
assert r.shape == x.shape
x.shape = (2, 2, 3)
y.shape = (2, 2, 3)
result = coord.pixel_to_world_values(x, y)
for r in result:
assert r.shape == x.shape
result = coord.world_to_pixel_values(x, y)
for r in result:
assert r.shape == x.shape
@requires_astropy
def test_coords_preserve_shape_3d():
coord = coordinates_from_header(header_from_string(HDR_3D_VALID_NOWCS))
x = np.zeros(12)
y = np.zeros(12)
z = np.zeros(12)
result = coord.pixel_to_world_values(x, y, z)
for r in result:
assert r.shape == x.shape
result = coord.world_to_pixel_values(x, y, z)
for r in result:
assert r.shape == x.shape
x.shape = (4, 3)
y.shape = (4, 3)
z.shape = (4, 3)
result = coord.pixel_to_world_values(x, y, z)
for r in result:
assert r.shape == x.shape
result = coord.world_to_pixel_values(x, y, z)
for r in result:
assert r.shape == x.shape
x.shape = (2, 2, 3)
y.shape = (2, 2, 3)
z.shape = (2, 2, 3)
result = coord.pixel_to_world_values(x, y, z)
for r in result:
assert r.shape == x.shape
result = coord.world_to_pixel_values(x, y, z)
for r in result:
assert r.shape == x.shape
def test_world_axis_units():
coord = coordinates_from_header(header_from_string(HDR_3D_VALID_WCS))
assert coord.world_axis_units[0] == 'deg'
assert coord.world_axis_units[1] == 'deg'
assert coord.world_axis_units[2] in ['m s-1', 'm.s**-1']
def test_dependent_axes_non_diagonal_pc():
# Fix a bug that occurred when non-diagonal PC elements
# were present in the WCS - in that case all other axes
# were returned as dependent axes even if this wasn't
# the case.
coord = WCSCoordinates(naxis=3)
coord.wcs.ctype = 'HPLN-TAN', 'HPLT-TAN', 'Time'
coord.wcs.crval = 1, 1, 1
coord.wcs.crpix = 1, 1, 1
coord.wcs.cd = [[0.9, 0.1, 0], [-0.1, 0.9, 0], [0, 0, 1]]
# Remember that the axes numbers below are reversed compared
# to the WCS order above.
assert_equal(dependent_axes(coord, 0), [0])
assert_equal(dependent_axes(coord, 1), [1, 2])
assert_equal(dependent_axes(coord, 2), [1, 2])
def test_pixel2world_single_axis():
# Regression test for a bug in pixel2world_single_axis which was due to
# incorrect indexing order (WCS vs Numpy)
coord = WCSCoordinates(naxis=3)
coord.wcs.ctype = 'HPLN-TAN', 'HPLT-TAN', 'Time'
coord.wcs.crval = 1, 1, 1
coord.wcs.crpix = 1, 1, 1
coord.wcs.cd = [[0.9, 0.1, 0], [-0.1, 0.9, 0], [0, 0, 1]]
x = np.array([0.2, 0.4, 0.6])
y = np.array([0.3, 0.6, 0.9])
z = np.array([0.5, 0.5, 0.5])
assert_allclose(pixel2world_single_axis(coord, x, y, z, world_axis=0), [1.21004705, 1.42012044, 1.63021455])
assert_allclose(pixel2world_single_axis(coord, x, y, z, world_axis=1), [1.24999002, 1.499947, 1.74985138])
assert_allclose(pixel2world_single_axis(coord, x, y, z, world_axis=2), [1.5, 1.5, 1.5])
def test_affine():
matrix = np.array([[2, 3, -1], [1, 2, 2], [0, 0, 1]])
coords = AffineCoordinates(matrix)
assert axis_label(coords, 1) == 'World 1'
assert axis_label(coords, 0) == 'World 0'
assert coords.world_axis_units[1] == ''
assert coords.world_axis_units[0] == ''
# First the scalar case
xp, yp = 1, 2
xw, yw = coords.pixel_to_world_values(xp, yp)
assert_allclose(xw, 2 * 1 + 3 * 2 - 1)
assert_allclose(yw, 1 * 1 + 2 * 2 + 2)
assert np.ndim(xw) == 0
assert np.ndim(yw) == 0
xpc, ypc = coords.world_to_pixel_values(xw, yw)
assert_allclose(xpc, 1)
assert_allclose(ypc, 2)
assert np.ndim(xpc) == 0
assert np.ndim(ypc) == 0
# Next the array case
xp = np.array([1, 2, 3])
yp = np.array([2, 3, 4])
xw, yw = coords.pixel_to_world_values(xp, yp)
assert_allclose(xw, 2 * xp + 3 * yp - 1)
assert_allclose(yw, 1 * xp + 2 * yp + 2)
xpc, ypc = coords.world_to_pixel_values(xw, yw)
assert_allclose(xpc, [1, 2, 3])
assert_allclose(ypc, [2, 3, 4])
# Check that serialization/deserialization works
coords2 = clone(coords)
xw, yw = coords2.pixel_to_world_values(xp, yp)
assert_allclose(xw, 2 * xp + 3 * yp - 1)
assert_allclose(yw, 1 * xp + 2 * yp + 2)
xpc, ypc = coords.world_to_pixel_values(xw, yw)
assert_allclose(xpc, [1, 2, 3])
assert_allclose(ypc, [2, 3, 4])
def test_affine_labels_units():
matrix = np.array([[2, 3, -1], [1, 2, 2], [0, 0, 1]])
coords = AffineCoordinates(matrix, units=['km', 'km'], labels=['xw', 'yw'])
assert axis_label(coords, 1) == 'Xw'
assert axis_label(coords, 0) == 'Yw'
assert coords.world_axis_units[1] == 'km'
assert coords.world_axis_units[0] == 'km'
coords2 = clone(coords)
assert axis_label(coords2, 1) == 'Xw'
assert axis_label(coords2, 0) == 'Yw'
assert coords2.world_axis_units[1] == 'km'
assert coords2.world_axis_units[0] == 'km'
def test_affine_invalid():
matrix = np.array([[2, 3, -1], [1, 2, 2], [0, 0, 1]])
with pytest.raises(ValueError) as exc:
AffineCoordinates(matrix[0])
assert exc.value.args[0] == 'Affine matrix should be two-dimensional'
with pytest.raises(ValueError) as exc:
AffineCoordinates(matrix[:-1])
assert exc.value.args[0] == 'Affine matrix should be square'
with pytest.raises(ValueError) as exc:
AffineCoordinates(matrix, labels=['a', 'b', 'c'])
assert exc.value.args[0] == 'Expected 2 labels, got 3'
with pytest.raises(ValueError) as exc:
AffineCoordinates(matrix, units=['km', 'km', 'km'])
assert exc.value.args[0] == 'Expected 2 units, got 3'
matrix[-1] = 1
with pytest.raises(ValueError) as exc:
AffineCoordinates(matrix)
assert exc.value.args[0] == 'Last row of matrix should be zeros and a one'
def test_affine_highd():
# Test AffineCoordinates when higher dimensional objects are transformed
matrix = np.array([[2, 3, -1], [1, 2, 2], [0, 0, 1]])
coords = AffineCoordinates(matrix)
xp = np.ones((2, 4, 1, 2, 5))
yp = np.ones((2, 4, 1, 2, 5))
xw, yw = coords.pixel_to_world_values(xp, yp)
xpc, ypc = coords.world_to_pixel_values(xw, yw)
assert_allclose(xp, xpc)
assert_allclose(yp, ypc)
|
python/glow/logging/__init__.py | bcajes/glow | 214 | 11067499 | from .hlseventlogger import *
|
toollib/tcli/commands/_sync.py | atpuxiner/toollib | 113 | 11067528 | """
@author axiner
@version v1.0.0
@created 2022/5/2 9:35
@abstract
@description
@history
"""
from toollib.decorator import sys_required
from toollib.tcli.base import BaseCmd
from toollib.tcli.commands.plugins.sync import execute
from toollib.tcli.option import Options, Arg
class Cmd(BaseCmd):
def __init__(self):
super().__init__()
def add_options(self):
options = Options(
name='sync',
desc='文件同步',
optional={
self.sync: [
Arg('-s', '--src', required=True, type=str, help='源'),
Arg('-d', '--dest', required=True, type=str, help='目标'),
Arg('-i', '--ip', required=True, type=str, help='ip'),
Arg('-u', '--user', required=True, type=str, help='用户'),
Arg('-p', '--port', default=22, type=int, help='端口'),
Arg('--suffix', type=str, help='后缀'),
]}
)
return options
    @sys_required(r'centos|\.el\d', errmsg='centos|el')
def sync(self):
src = self.parse_args.src
dest = self.parse_args.dest
ip = self.parse_args.ip
user = self.parse_args.user
port = self.parse_args.port
suffix = self.parse_args.suffix
execute(src, dest, ip, user, port, suffix)
|
arelle/ViewWinFactList.py | hamscher/Arelle | 292 | 11067529 | <reponame>hamscher/Arelle
'''
Created on Oct 5, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import ViewWinTree, ModelDtsObject, XbrlConst
from arelle.ModelRelationshipSet import ModelRelationshipSet
from arelle.ModelDtsObject import ModelResource
from arelle.ModelInstanceObject import ModelFact
def viewFacts(modelXbrl, tabWin, lang=None):
modelXbrl.modelManager.showStatus(_("viewing facts"))
view = ViewFactList(modelXbrl, tabWin, lang)
view.treeView["columns"] = ("sequence", "contextID", "unitID", "decimals", "precision", "language", "footnoted", "value")
view.treeView.column("#0", width=200, anchor="w")
view.treeView.heading("#0", text=_("Label"))
view.treeView.column("sequence", width=40, anchor="e", stretch=False)
view.treeView.heading("sequence", text=_("Seq"))
view.treeView.column("contextID", width=100, anchor="w", stretch=False)
view.treeView.heading("contextID", text="contextRef")
view.treeView.column("unitID", width=75, anchor="w", stretch=False)
view.unitDisplayID = False # start displaying measures
view.treeView.heading("unitID", text="Unit")
view.treeView.column("decimals", width=50, anchor="center", stretch=False)
view.treeView.heading("decimals", text=_("Dec"))
view.treeView.column("precision", width=50, anchor="w", stretch=False)
view.treeView.heading("precision", text=_("Prec"))
view.treeView.column("language", width=36, anchor="w", stretch=False)
view.treeView.heading("language",text=_("Lang"))
view.treeView.column("footnoted", width=18, anchor="center", stretch=False)
view.treeView.heading("footnoted",text=_("Fn"))
view.treeView.column("value", width=200, anchor="w", stretch=False)
view.treeView.heading("value", text=_("Value"))
view.treeView["displaycolumns"] = ("sequence", "contextID", "unitID", "decimals", "precision", \
"language", "footnoted", "value")
view.footnotesRelationshipSet = ModelRelationshipSet(modelXbrl, "XBRL-footnotes")
view.blockSelectEvent = 1
view.blockViewModelObject = 0
view.view()
view.treeView.bind("<<TreeviewSelect>>", view.treeviewSelect, '+')
view.treeView.bind("<Enter>", view.treeviewEnter, '+')
view.treeView.bind("<Leave>", view.treeviewLeave, '+')
# intercept menu click before pops up to set the viewable tuple (if tuple clicked)
view.treeView.bind( view.modelXbrl.modelManager.cntlr.contextMenuClick, view.setViewTupleChildMenuItem, '+' )
menu = view.contextMenu()
if menu is not None:
view.menu.insert_cascade(0, label=_("View Tuple Children"), underline=0, command=view.viewTuplesGrid)
view.menu.entryconfigure(0, state='disabled')
view.menuAddExpandCollapse()
view.menuAddClipboard()
view.menuAddLangs()
view.menuAddLabelRoles(includeConceptName=True)
view.menuAddUnitDisplay()
class ViewFactList(ViewWinTree.ViewTree):
def __init__(self, modelXbrl, tabWin, lang):
super(ViewFactList, self).__init__(modelXbrl, tabWin, "Fact List", True, lang)
def setViewTupleChildMenuItem(self, event=None):
if event is not None and self.menu is not None:
#self.menu.delete(0, 0) # remove old filings
menuRow = self.treeView.identify_row(event.y) # this is the object ID
modelFact = self.modelXbrl.modelObject(menuRow)
if modelFact is not None and modelFact.isTuple:
self.menu.entryconfigure(0, state='normal')
self.viewedTupleId = menuRow
else:
self.menu.entryconfigure(0, state='disabled')
self.viewedTupleId = None
def viewTuplesGrid(self):
from arelle.ViewWinTupleGrid import viewTuplesGrid
viewTuples = viewTuplesGrid(self.modelXbrl, self.tabWin, self.viewedTupleId, self.lang)
self.modelXbrl.modelManager.showStatus(_("Ready..."), clearAfter=2000)
viewTuples.select() # bring new grid to foreground
def view(self):
self.id = 1
self.tag_has = {}
self.clearTreeView()
self.setColumnsSortable(initialSortCol="sequence")
self.viewFacts(self.modelXbrl.facts, "", 1)
def viewFacts(self, modelFacts, parentNode, n):
for modelFact in modelFacts:
try:
concept = modelFact.concept
lang = ""
if concept is not None:
lbl = concept.label(self.labelrole, lang=self.lang, linkroleHint=XbrlConst.defaultLinkRole)
objectIds = (modelFact.objectId(),concept.objectId())
if concept.baseXsdType in ("string", "normalizedString"):
lang = modelFact.xmlLang
else:
lbl = (modelFact.qname or modelFact.prefixedName) # defective inline facts may have no qname
objectIds = (modelFact.objectId())
node = self.treeView.insert(parentNode, "end", modelFact.objectId(self.id),
text=lbl,
tags=("odd" if n & 1 else "even",))
for tag in objectIds:
self.tag_has.setdefault(tag,[]).append(node)
self.treeView.set(node, "sequence", str(self.id))
if concept is not None and not modelFact.concept.isTuple:
self.treeView.set(node, "contextID", modelFact.contextID)
if modelFact.unitID:
self.treeView.set(node, "unitID", modelFact.unitID if self.unitDisplayID else modelFact.unit.value)
self.treeView.set(node, "decimals", modelFact.decimals)
self.treeView.set(node, "precision", modelFact.precision)
self.treeView.set(node, "language", lang)
if self.footnotesRelationshipSet.fromModelObject(modelFact):
self.treeView.set(node, "footnoted", "*")
self.treeView.set(node, "value", modelFact.effectiveValue.strip())
self.id += 1;
n += 1
self.viewFacts(modelFact.modelTupleFacts, node, n)
except AttributeError: # not a fact or no concept
pass
except:
raise # reraise error (debug stop here to see what's happening)
def getToolTip(self, tvRowId, tvColId):
# override tool tip when appropriate
if tvColId == "#7": # footnote column
try:
modelFact = self.modelXbrl.modelObject(tvRowId) # this is a fact object
footnoteRels = self.footnotesRelationshipSet.fromModelObject(modelFact)
if footnoteRels:
fns = []
for i, footnoteRel in enumerate(footnoteRels):
modelObject = footnoteRel.toModelObject
if isinstance(modelObject, ModelResource):
fns.append("Footnote {}: {}".format(
i+1,
modelObject.viewText()))
elif isinstance(modelObject, ModelFact):
fns.append("Footnoted fact {}: {} context: {} value: {}".format(
i+1,
modelObject.qname,
modelObject.contextID,
modelObject.value))
return "\n".join(fns)
else:
return None
except (AttributeError, KeyError):
pass
return None
def treeviewEnter(self, *args):
self.blockSelectEvent = 0
def treeviewLeave(self, *args):
self.blockSelectEvent = 1
def treeviewSelect(self, *args):
if self.blockSelectEvent == 0 and self.blockViewModelObject == 0:
self.blockViewModelObject += 1
self.modelXbrl.viewModelObject(self.treeView.selection()[0])
self.blockViewModelObject -= 1
def viewModelObject(self, modelObject):
if self.blockViewModelObject == 0:
self.blockViewModelObject += 1
try:
if isinstance(modelObject, ModelDtsObject.ModelRelationship):
conceptId = modelObject.toModelObject.objectId()
else:
conceptId = modelObject.objectId()
#items = self.treeView.tag_has(conceptId)
items = self.tag_has.get(conceptId,[])
if len(items) > 0 and self.treeView.exists(items[0]):
self.treeView.see(items[0])
self.treeView.selection_set(items[0])
except (AttributeError, KeyError):
self.treeView.selection_set(())
self.blockViewModelObject -= 1
|
models/modbus-sungrow-sg8kd.py | johschmitz/ModbusTCP2MQTT | 130 | 11067536 | read_register = {
"5003": "daily_power_yield_0.01", # Wh
"5004": "total_power_yield_100", # MWh
"5008": "internal_temp_10", # C
"5011": "pv1_voltage_10", # V
"5012": "pv1_current_10", # A
"5013": "pv2_voltage_10", # V
"5014": "pv2_current_10", # A
"5017": "total_pv_power", # W
"5019": "grid_voltage_10", # V
"5022": "inverter_current_10", # A
"5031": "total_active_power", # W
"5036": "grid_frequency_10" # Hz
}
holding_register = {
"5000": "year",
"5001": "month",
"5002": "day",
"5003": "hour",
"5004": "minute",
"5005": "second"
}
scan = """{
"read": [
{
"start": "5000",
"range": "100"
},
{
"start": "5100",
"range": "50"
}
],
"holding": [
{
"start": "4999",
"range": "10"
}
]
}"""
# Match Modbus registers to pvoutput api fields
# Reference: https://pvoutput.org/help/api_specification.html#add-status-service
pvoutput = {
"Energy Generation": "daily_power_yield",
"Power Generation": "total_active_power",
"Temperature": "internal_temp",
"Voltage": "grid_voltage"
}
|
src/visitpy/visit_utils/tests/visit_test.py | visit-dav/vis | 226 | 11067551 | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: visit_test.py
author: <NAME> <<EMAIL>>
created: 4/9/2010
description:
Provides a decorator that allows us to skip visit related tests,
when the module is used outside of visit.
"""
import sys
def visit_test(fn):
"""
Decorator that skips tests that require visit if
we aren't running in the cli.
"""
def run_fn(*args):
if "visit" in list(sys.modules.keys()):
return fn(*args)
else:
print("[VisIt module not found, skipping test that requires VisIt]")
return None
return run_fn
def pyside_test(fn):
"""
Decorator that skips tests that require visit if
we aren't running in the cli.
"""
def run_fn(*args):
if "PySide2.QtCore" in list(sys.modules.keys()):
return fn(*args)
else:
print("[PySide not found, skipping test that requires PySide]")
return None
return run_fn
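# Illustrative usage (hypothetical test function):
#
#   @visit_test
#   def test_plot_pipeline():
#       import visit
#       ...  # body only runs when the 'visit' module has already been imported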
|
buildroot/support/testing/tests/package/test_python_smmap2.py | bramkragten/operating-system | 349 | 11067569 | from tests.package.test_python import TestPythonPackageBase
class TestPythonPy2Smmap2(TestPythonPackageBase):
__test__ = True
config = TestPythonPackageBase.config + \
"""
BR2_PACKAGE_PYTHON=y
BR2_PACKAGE_PYTHON_SMMAP2=y
"""
sample_scripts = ["tests/package/sample_python_smmap2.py"]
class TestPythonPy3Smmap2(TestPythonPackageBase):
__test__ = True
config = TestPythonPackageBase.config + \
"""
BR2_PACKAGE_PYTHON3=y
BR2_PACKAGE_PYTHON_SMMAP2=y
"""
sample_scripts = ["tests/package/sample_python_smmap2.py"]
|
growler/__init__.py | pyGrowler/Growler | 806 | 11067571 | <reponame>pyGrowler/Growler
#
# growler/__init__.py
#
# flake8: noqa
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A general purpose asynchronous framework, supporting the asynchronous
primitives (async/await) introduced in Python 3.5.
The original goal was to serve http, and while this capability is still
built-in (see growler.http), the structure of Growler allows for a
larger set of capabilities.
To get started, import `Growler` from this package and create an
instance (customarily named 'app'). Add functionality to the app object
via the 'use' method decorator over your functions. These functions
may be asynchronous, and must accept a request and response object.
These are called (in the same order as 'use'd) upon a client connection.
Growler does not include its own server or event loop, but provides a
standard asynchronous interface to be used with an event loop of the user's
choosing. Python includes its own event-loop package, asyncio, which
works fine with Growler. The asyncio interface is located in
`growler.aio`; this is merely a convenience for quick startup, and asyncio is
not required (or even imported) unless the user wants to.
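A minimal sketch of that pattern (the handler body and the `send_html` call
are illustrative assumptions, not prescribed here):

    from growler import App

    app = App("hello")

    @app.use
    async def hello(req, res):
        res.send_html("<h1>Hello, world!</h1>")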
"""
from .__meta__ import (
version as __version__,
author as __author__,
date as __date__,
copyright as __copyright__,
license as __license__,
)
from .application import (
Application,
GrowlerStopIteration,
)
from .routing import (
Router,
RouterMeta,
routerclass,
get_routing_attributes,
MiddlewareChain,
)
# growler module self reference `from growler import growler, App`
import sys
growler = sys.modules[__name__]
del sys
# alias Application
Growler = App = Application
__all__ = [
"App",
"Growler",
"Application",
"Router",
]
|