ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars) |
---|---|---|
py | 7dfd664f9ff8141ae35c7ba2946d87e2535f36e8 | from typing import Optional
from volcengine_ml_platform.annotation.annotation import Annotation
from volcengine_ml_platform.annotation.annotation import get_annotation_section
from volcengine_ml_platform.annotation.annotation import get_data_section
class TextEntitySetAnnotation(Annotation):
def __init__(self, manifest_file: Optional[str] = None):
Annotation.__init__(self, manifest_file)
def _get_url(self, manifest_line):
data = get_data_section(manifest_line)
return data["TextUrl"]
def extract_annotation(self, manifest_line):
annotation = get_annotation_section(manifest_line)
label_result = []
for result in annotation["Result"]:
labels = self._get_labels(result)
text_selector = result["Text"]
label_result.append(
{"labels": labels, "text_selector": text_selector},
)
return label_result
|
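For orientation, the following standalone sketch mirrors the output shape of `extract_annotation` for one manifest line, assuming a hypothetical annotation section whose `Result` entries carry a label list and a `Text` selector (the real manifest schema and the `_get_labels` helper live in the base `Annotation` class and are not shown above):

# Hypothetical illustration only; the field names inside "Result" are assumptions.
annotation_section = {
    "Result": [
        {"Labels": ["PERSON"], "Text": {"Start": 0, "End": 5}},
    ]
}
label_result = [
    {"labels": r.get("Labels"), "text_selector": r["Text"]}
    for r in annotation_section["Result"]
]
print(label_result)
# -> [{'labels': ['PERSON'], 'text_selector': {'Start': 0, 'End': 5}}]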
py | 7dfd66963d0cbcc14ee20ea4b5813e227f02f9ff | from __future__ import print_function, absolute_import, division
import unittest
import os
import random
import time
import tensorflow as tf
import numpy as np
from datetime import datetime
from timeit import default_timer
from parameterized import parameterized
from . import run_on_rpc_and_gpu, run_on_sessions, run_on_devices, assertAllClose, tfDistributedEndpointOrSkip
from . import networks
from .lib import tfhelper
from .lib.datasets import fake_data_ex
def run_superres(sess, input_data, batch_size=100, isEval=False):
batch_size = tfhelper.batch_size_from_env(batch_size)
print("{}: Using batch size {}".format(datetime.now(), batch_size))
input_images, target_images = input_data(batch_size=batch_size)
model = networks.SuperRes(input_images, target_images, batch_size=batch_size)
model.build_model(isEval=isEval)
eval_interval = os.environ.get('SALUS_TFBENCH_EVAL_INTERVAL', '0.1')
eval_rand_factor = os.environ.get('SALUS_TFBENCH_EVAL_RAND_FACTOR', '5')
eval_block = os.environ.get('SALUS_TFBENCH_EVAL_BLOCK', 'true')
if eval_block != 'true':
raise ValueError("SALUS_TFBENCH_EVAL_BLOCK=false is not supported")
salus_marker = tf.no_op(name="salus_main_iter")
losses = []
with tfhelper.initialized_scope(sess) as coord:
speeds = []
JCT = default_timer()
for i in range(tfhelper.iteration_num_from_env()):
if coord.should_stop():
break
print("{}: Start running step {}".format(datetime.now(), i))
if isEval:
start_time = default_timer()
loss_value, _ = sess.run([model.g_loss, salus_marker])
end_time = default_timer()
else:
start_time = default_timer()
_, loss_value, _ = sess.run([model.g_optim, model.g_loss, salus_marker])
end_time = default_timer()
duration = end_time - start_time
examples_per_sec = batch_size / duration
sec_per_batch = float(duration)
speeds.append(sec_per_batch)
fmt_str = '{}: step {}, loss = {:.2f} ({:.1f} examples/sec; {:.3f} sec/batch)'
print(fmt_str.format(datetime.now(), i, loss_value, examples_per_sec, sec_per_batch))
losses.append(loss_value)
if isEval and eval_rand_factor != '0':
factor = 1
if eval_rand_factor != "1":
factor = random.randint(1, int(eval_rand_factor))
time.sleep(float(eval_interval) * factor)
JCT = default_timer() - JCT
print('Training time is %.3f sec' % JCT)
print('Average: %.3f sec/batch' % np.average(speeds))
if len(speeds) > 1:
print('First iteration: %.3f sec/batch' % speeds[0])
print('Average excluding first iteration: %.3f sec/batch' % np.average(speeds[1:]))
return losses
class TestSuperRes(unittest.TestCase):
def _config(self, isEval=False, **kwargs):
KB = 1024
MB = 1024 * KB
if isEval:
memusages = {
1: (137 * MB, 1 * MB),
5: (150 * MB, 1 * MB),
10: (166 * MB, 1 * MB),
}
else:
memusages = {
32: (252.79296875 * MB, 17.503280639648438 * MB),
64: (500 * MB, 31.690780639648438 * MB),
128: (992.9807167053223 * MB, 60.44078063964844 * MB),
}
batch_size = kwargs.get('batch_size', 100)
config = tf.ConfigProto()
config.allow_soft_placement = True
config.salus_options.resource_map.temporary['MEMORY:GPU'] = memusages[batch_size][0]
config.salus_options.resource_map.persistant['MEMORY:GPU'] = memusages[batch_size][1]
config.salus_options.resource_map.temporary['MEMORY:GPU0'] = memusages[batch_size][0]
config.salus_options.resource_map.persistant['MEMORY:GPU0'] = memusages[batch_size][1]
return config
def _get_func(self, batch_size, isEval=False):
def func():
def input_data(batch_size):
variable_specs = [
([32, 32, 3], {'dtype': tf.float32}, 'images'),
([128, 128, 3], {'dtype': tf.float32}, 'targets'),
]
input_images, target_images = fake_data_ex(batch_size, variable_specs=variable_specs)
return input_images, target_images
sess = tf.get_default_session()
return run_superres(sess, input_data, batch_size=batch_size, isEval=isEval)
return func
@parameterized.expand([(1,), (5,), (10,)])
def test_gpu_eval(self, batch_size):
config = self._config(batch_size=batch_size, isEval=True)
config.allow_soft_placement = True
run_on_devices(self._get_func(batch_size, isEval=True), '/device:GPU:0', config=config)
@parameterized.expand([(32,), (64,), (128,)])
def test_gpu(self, batch_size):
config = self._config(batch_size=batch_size)
config.allow_soft_placement = True
run_on_devices(self._get_func(batch_size), '/device:GPU:0', config=config)
@parameterized.expand([(1,), (5,), (10,)])
def test_distributed_eval(self, batch_size):
run_on_sessions(self._get_func(batch_size, isEval=True),
tfDistributedEndpointOrSkip(),
dev='/job:tfworker/device:GPU:0',
config=self._config(batch_size=batch_size, isEval=True))
@parameterized.expand([(32,), (64,), (128,)])
def test_distributed(self, batch_size):
run_on_sessions(self._get_func(batch_size),
tfDistributedEndpointOrSkip(),
dev='/job:tfworker/device:GPU:0',
config=self._config(batch_size=batch_size))
@parameterized.expand([(64,)])
@unittest.skip("No need to run on CPU")
def test_cpu_eval(self, batch_size):
run_on_devices(self._get_func(batch_size, isEval=True), '/device:CPU:0',
config=self._config(batch_size=batch_size, isEval=True))
@parameterized.expand([(64,)])
@unittest.skip("No need to run on CPU")
def test_cpu(self, batch_size):
run_on_devices(self._get_func(batch_size), '/device:CPU:0',
config=self._config(batch_size=batch_size))
@parameterized.expand([(1,), (5,), (10,)])
def test_rpc_eval(self, batch_size):
run_on_sessions(self._get_func(batch_size, isEval=True),
'zrpc://tcp://127.0.0.1:5501', dev='/device:GPU:0',
config=self._config(batch_size=batch_size, isEval=True))
@parameterized.expand([(32,), (64,), (128,)])
def test_rpc(self, batch_size):
run_on_sessions(self._get_func(batch_size),
'zrpc://tcp://127.0.0.1:5501', dev='/device:GPU:0',
config=self._config(batch_size=batch_size))
@parameterized.expand([(64,)])
def test_correctness(self, batch_size):
config = self._config(batch_size=batch_size)
config.allow_soft_placement = True
actual, expected = run_on_rpc_and_gpu(self._get_func(batch_size), config=config)
assertAllClose(actual, expected)
if __name__ == '__main__':
unittest.main()
|
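The eval path in `run_superres` throttles itself between iterations using environment variables. A minimal, self-contained sketch of that sleep calculation (the `tfhelper` and `networks` helpers are assumed to come from the surrounding test package and are not reproduced here):

import os
import random

# Mirrors the eval-mode throttling in run_superres above: no sleep when the
# random factor is '0', otherwise eval_interval scaled by a random factor.
eval_interval = os.environ.get('SALUS_TFBENCH_EVAL_INTERVAL', '0.1')
eval_rand_factor = os.environ.get('SALUS_TFBENCH_EVAL_RAND_FACTOR', '5')

sleep_seconds = 0.0
if eval_rand_factor != '0':
    factor = 1
    if eval_rand_factor != '1':
        factor = random.randint(1, int(eval_rand_factor))
    sleep_seconds = float(eval_interval) * factor
print(sleep_seconds)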
py | 7dfd67130f4dc611725402f1986b7a98c24dd479 | #game/__init__.py
from . import game
__all__ = ['game']
|
py | 7dfd68b3d8d4ff4230efaf46cf75f660a4de764f | import re
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils.encoding import iri_to_uri
from funfactory.urlresolvers import reverse
from tower import ugettext_lazy as _lazy
from mozillians.common.helpers import redirect
LOGIN_MESSAGE = _lazy(u'You must be logged in to continue.')
GET_VOUCHED_MESSAGE = _lazy(u'You must be vouched to continue.')
class StrongholdMiddleware(object):
"""Keep unvouched users out, unless explicitly allowed in.
Inspired by https://github.com/mgrouchy/django-stronghold/
"""
def __init__(self):
self.exceptions = getattr(settings, 'STRONGHOLD_EXCEPTIONS', [])
def process_view(self, request, view_func, view_args, view_kwargs):
for view_url in self.exceptions:
if re.match(view_url, request.path):
return None
allow_public = getattr(view_func, '_allow_public', None)
if allow_public:
return None
if not request.user.is_authenticated():
messages.warning(request, LOGIN_MESSAGE)
return (login_required(view_func, login_url=reverse('phonebook:home'))
(request, *view_args, **view_kwargs))
if request.user.userprofile.is_vouched:
return None
allow_unvouched = getattr(view_func, '_allow_unvouched', None)
if allow_unvouched:
return None
messages.error(request, GET_VOUCHED_MESSAGE)
return redirect('phonebook:home')
@contextmanager
def safe_query_string(request):
"""Turn the QUERY_STRING into a unicode- and ascii-safe string.
We need unicode so it can be combined with a reversed URL, but it
has to be ascii to go in a Location header. iri_to_uri seems like
a good compromise.
"""
qs = request.META['QUERY_STRING']
try:
request.META['QUERY_STRING'] = iri_to_uri(qs)
yield
finally:
request.META['QUERY_STRING'] = qs
|
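The middleware above lets individual views opt out via `_allow_public` / `_allow_unvouched` attributes on the view function. A minimal sketch of what such an opt-out decorator could look like (a hypothetical helper written for illustration; mozillians ships its own decorators for this):

def allow_public(view_func):
    """Mark a view as reachable by anonymous users; StrongholdMiddleware checks this flag."""
    view_func._allow_public = True
    return view_func

@allow_public
def about(request):
    ...  # a public page that anonymous and unvouched users may see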
py | 7dfd6a17bc82b70bc9778fe63d97f9389308eba6 | #
from providers import ExtRemotingProvider, ExtPollingProvider
from store import ExtDirectStore
from crud import ExtDirectCRUD
from decorators import remoting, polling, crud
|
py | 7dfd6a40fb3ade619c0ca2e290f8065cc7c5f9fc | #!/usr/bin/python
# Copyright (c) 2019 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ftd_install
short_description: Installs FTD pkg image on the firewall
description:
- Provisioning module for FTD devices that installs ROMMON image (if needed) and
FTD pkg image on the firewall.
- Can be used with `httpapi` and `local` connection types. The `httpapi` connection is preferred;
the `local` connection should be used only when the device cannot be accessed via
REST API.
version_added: "2.8"
requirements: [ "python >= 3.5", "firepower-kickstart" ]
notes:
- Requires `firepower-kickstart` library that should be installed separately and requires Python >= 3.5.
- On localhost, Ansible can still be run with Python >= 2.7, but the interpreter for this particular module must be
Python >= 3.5.
- The Python interpreter for the module can be overridden via the `ansible_python_interpreter` variable.
author: "Cisco Systems, Inc. (@annikulin)"
options:
device_hostname:
description:
- Hostname of the device as it appears in the prompt (e.g., 'firepower-5516').
required: true
type: str
device_username:
description:
- Username to log in to the device.
- Defaulted to 'admin' if not specified.
required: false
type: str
default: admin
device_password:
description:
- Password to log in to the device.
required: true
type: str
device_sudo_password:
description:
- Root password for the device. If not specified, `device_password` is used.
required: false
type: str
device_new_password:
description:
- New device password to set after image installation.
- If not specified, current password from `device_password` property is reused.
- Not applicable for ASA5500-X series devices.
required: false
type: str
device_ip:
description:
- Device IP address of management interface.
- If not specified and the connection is `httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_gateway:
description:
- Device gateway of management interface.
- If not specified and the connection is `httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_netmask:
description:
- Device netmask of management interface.
- If not specified and the connection is `httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
device_model:
description:
- Platform model of the device (e.g., 'Cisco ASA5506-X Threat Defense').
- If not specified and the connection is `httpapi`, the module tries to fetch the device model via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
choices:
- Cisco ASA5506-X Threat Defense
- Cisco ASA5508-X Threat Defense
- Cisco ASA5516-X Threat Defense
- Cisco Firepower 2110 Threat Defense
- Cisco Firepower 2120 Threat Defense
- Cisco Firepower 2130 Threat Defense
- Cisco Firepower 2140 Threat Defense
dns_server:
description:
- DNS IP address of management interface.
- If not specified and the connection is `httpapi`, the module tries to fetch the existing value via REST API.
- For 'local' connection type, this parameter is mandatory.
required: false
type: str
console_ip:
description:
- IP address of a terminal server.
- Used to set up an SSH connection to the device's console port through the terminal server.
required: true
type: str
console_port:
description:
- Device's port on a terminal server.
required: true
type: str
console_username:
description:
- Username to log in to a terminal server.
required: true
type: str
console_password:
description:
- Password to log in to a terminal server.
required: true
type: str
rommon_file_location:
description:
- Path to the boot (ROMMON) image on TFTP server.
- Only TFTP is supported.
required: true
type: str
image_file_location:
description:
- Path to the FTD pkg image on the server to be downloaded.
- FTP, SCP, SFTP, TFTP, or HTTP protocols are usually supported, but may depend on the device model.
required: true
type: str
image_version:
description:
- Version of FTD image to be installed.
- Helps to compare target and current FTD versions to prevent unnecessary reinstalls.
required: true
type: str
force_install:
description:
- Forces the FTD image to be installed even when the same version is already installed on the firewall.
- By default, the module stops execution when the target version is installed in the device.
required: false
type: bool
default: false
search_domains:
description:
- Search domains delimited by comma.
- Defaulted to 'cisco.com' if not specified.
required: false
type: str
default: cisco.com
"""
EXAMPLES = """
- name: Install image v6.3.0 on FTD 5516
ftd_install:
device_hostname: firepower
device_password: pass
device_ip: 192.168.0.1
device_netmask: 255.255.255.0
device_gateway: 192.168.0.254
dns_server: 8.8.8.8
console_ip: 10.89.0.0
console_port: 2004
console_username: console_user
console_password: console_pass
rommon_file_location: 'tftp://10.89.0.11/installers/ftd-boot-9.10.1.3.lfbff'
image_file_location: 'https://10.89.0.11/installers/ftd-6.3.0-83.pkg'
image_version: 6.3.0-83
"""
RETURN = """
msg:
description: The message saying whether the image was installed or explaining why the installation failed.
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.ftd.configuration import BaseConfigurationResource, ParamName
from ansible.module_utils.network.ftd.device import assert_kick_is_installed, FtdPlatformFactory, FtdModel
from ansible.module_utils.network.ftd.operation import FtdOperations, get_system_info
REQUIRED_PARAMS_FOR_LOCAL_CONNECTION = ['device_ip', 'device_netmask', 'device_gateway', 'device_model', 'dns_server']
def main():
fields = dict(
device_hostname=dict(type='str', required=True),
device_username=dict(type='str', required=False, default='admin'),
device_password=dict(type='str', required=True, no_log=True),
device_sudo_password=dict(type='str', required=False, no_log=True),
device_new_password=dict(type='str', required=False, no_log=True),
device_ip=dict(type='str', required=False),
device_netmask=dict(type='str', required=False),
device_gateway=dict(type='str', required=False),
device_model=dict(type='str', required=False, choices=FtdModel.supported_models()),
dns_server=dict(type='str', required=False),
search_domains=dict(type='str', required=False, default='cisco.com'),
console_ip=dict(type='str', required=True),
console_port=dict(type='str', required=True),
console_username=dict(type='str', required=True),
console_password=dict(type='str', required=True, no_log=True),
rommon_file_location=dict(type='str', required=True),
image_file_location=dict(type='str', required=True),
image_version=dict(type='str', required=True),
force_install=dict(type='bool', required=False, default=False)
)
module = AnsibleModule(argument_spec=fields)
assert_kick_is_installed(module)
use_local_connection = module._socket_path is None
if use_local_connection:
check_required_params_for_local_connection(module, module.params)
platform_model = module.params['device_model']
check_that_model_is_supported(module, platform_model)
else:
connection = Connection(module._socket_path)
resource = BaseConfigurationResource(connection, module.check_mode)
system_info = get_system_info(resource)
platform_model = module.params['device_model'] or system_info['platformModel']
check_that_model_is_supported(module, platform_model)
check_that_update_is_needed(module, system_info)
check_management_and_dns_params(resource, module.params)
ftd_platform = FtdPlatformFactory.create(platform_model, module.params)
ftd_platform.install_ftd_image(module.params)
module.exit_json(changed=True,
msg='Successfully installed FTD image %s on the firewall device.' % module.params["image_version"])
def check_required_params_for_local_connection(module, params):
missing_params = [k for k, v in iteritems(params) if k in REQUIRED_PARAMS_FOR_LOCAL_CONNECTION and v is None]
if missing_params:
message = "The following parameters are mandatory when the module is used with 'local' connection: %s." % \
', '.join(sorted(missing_params))
module.fail_json(msg=message)
def check_that_model_is_supported(module, platform_model):
if platform_model not in FtdModel.supported_models():
module.fail_json(msg="Platform model '%s' is not supported by this module." % platform_model)
def check_that_update_is_needed(module, system_info):
target_ftd_version = module.params["image_version"]
if not module.params["force_install"] and target_ftd_version == system_info['softwareVersion']:
module.exit_json(changed=False, msg="FTD already has %s version of software installed." % target_ftd_version)
def check_management_and_dns_params(resource, params):
if not all([params['device_ip'], params['device_netmask'], params['device_gateway']]):
management_ip = resource.execute_operation(FtdOperations.GET_MANAGEMENT_IP_LIST, {})['items'][0]
params['device_ip'] = params['device_ip'] or management_ip['ipv4Address']
params['device_netmask'] = params['device_netmask'] or management_ip['ipv4NetMask']
params['device_gateway'] = params['device_gateway'] or management_ip['ipv4Gateway']
if not params['dns_server']:
dns_setting = resource.execute_operation(FtdOperations.GET_DNS_SETTING_LIST, {})['items'][0]
dns_server_group_id = dns_setting['dnsServerGroup']['id']
dns_server_group = resource.execute_operation(FtdOperations.GET_DNS_SERVER_GROUP,
{ParamName.PATH_PARAMS: {'objId': dns_server_group_id}})
params['dns_server'] = dns_server_group['dnsServers'][0]['ipAddress']
if __name__ == '__main__':
main()
|
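To make the 'local' connection requirement above concrete, this standalone sketch reproduces the `check_required_params_for_local_connection` logic with a plain dictionary instead of an `AnsibleModule` (placeholder values; the real module reports the error via `fail_json`):

REQUIRED = ['device_ip', 'device_netmask', 'device_gateway', 'device_model', 'dns_server']

params = {
    'device_ip': '192.168.0.1',
    'device_netmask': None,   # missing
    'device_gateway': None,   # missing
    'device_model': 'Cisco ASA5516-X Threat Defense',
    'dns_server': '8.8.8.8',
}

missing = sorted(k for k in REQUIRED if params.get(k) is None)
if missing:
    print("The following parameters are mandatory when the module is used "
          "with 'local' connection: %s." % ', '.join(missing))
# -> ... with 'local' connection: device_gateway, device_netmask.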
py | 7dfd6b1ed79425914ba621f779c227649ca6cadd | #!/usr/bin/env python3
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
# 1. Genesis height is 104. Current height is 102.
# 2. Send tx1 and tx2 that are valid before Genesis (multiple ELSEs). They are accepted to mempool. (tx2: SCRIPT_GENESIS flag is off)
# 3. Generate an empty block. Height is 103. Mempool is cleared.
# 4. Send tx1 and tx2 again. Tx2 should not be accepted to mempool (Genesis rules).
# 5. Send tx1 and tx2 again in block. Block should be rejected. (tx2: SCRIPT_GENESIS flag is on)
###
# 6. Current height is 103.
# 7. Send tx3 and tx4 that are valid after Genesis (disabled opcodes in unexecuted branches). They are accepted to mempool. (tx4: SCRIPT_GENESIS flag is on)
# 8. Invalidate block 103. Mempool is cleared.
# 9. Send tx3 again and generate new block with tx3 in it (height is now 103).
# 10. Send tx4 again. It should not be accepted to mempool because tx3 (UTXO) is pre-Genesis.
# 11. Send tx4 in block. Block should be rejected. (tx4: SCRIPT_GENESIS flag is on)
###
# 12. Current height is 103.
# 13. Generate new block with tx5 and tx6 that are valid after genesis. It is on genesis height (104).
# 14. Invalidate block on height 104. tx5 and tx6 are in mempool.
# 15. Invalidate block on height 103. tx5 and tx6 are deleted from mempool.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.script import *
from test_framework.blocktools import create_transaction, create_block, create_coinbase
from test_framework.util import assert_equal
from test_framework.comptool import TestInstance
from test_framework.mininode import msg_tx, msg_block
from time import sleep
def add_tx_to_block(block, txs):
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
block.solve()
class BSVGenesisMempoolScriptCache(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.genesisactivationheight = 104
self.extra_args = [['-whitelist=127.0.0.1', '-genesisactivationheight=%d' % self.genesisactivationheight]]
def run_test(self):
self.test.run()
def get_tests(self):
# shorthand for functions
block = self.chain.next_block
node = self.nodes[0]
self.chain.set_genesis_hash( int(node.getbestblockhash(), 16) )
# Create a new block
block(0)
self.chain.save_spendable_output()
yield self.accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(101):
block(5000 + i)
test.blocks_and_transactions.append([self.chain.tip, True])
self.chain.save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(100):
out.append(self.chain.get_spendable_output())
########## SCENARIO 1
assert_equal(node.getblock(node.getbestblockhash())['height'], self.genesisactivationheight - 2)
# Create and send tx1 and tx2 that are valid before genesis.
tx1 = create_transaction(out[0].tx, out[0].n, b'', 100000, CScript([OP_IF, OP_0, OP_ELSE, OP_1, OP_ELSE, OP_2, OP_ENDIF]))
tx2 = create_transaction(tx1, 0, CScript([OP_0]), 1, CScript([OP_TRUE]))
self.test.connections[0].send_message(msg_tx(tx1))
self.test.connections[0].send_message(msg_tx(tx2))
# wait for transaction processing
sleep(1)
# Both transactions are accepted.
assert_equal(True, tx1.hash in node.getrawmempool())
assert_equal(True, tx2.hash in node.getrawmempool())
# Generate an empty block, height is then 103 and mempool is cleared.
block103 = block(1, spend=out[1])
yield self.accepted()
assert_equal(node.getblock(node.getbestblockhash())['height'], self.genesisactivationheight - 1)
assert_equal(len(node.getrawmempool()), 0)
# Send transactions tx1 and tx2 once again, this time with Genesis rules (mempool height is 104).
self.test.connections[0].send_message(msg_tx(tx1))
self.test.connections[0].send_message(msg_tx(tx2))
# wait for transaction processing
sleep(1)
# Tx2 should not be valid anymore.
assert_equal(len(node.getrawmempool()), 1)
assert_equal(True, tx1.hash in node.getrawmempool())
assert_equal(False, tx2.hash in node.getrawmempool())
# Now send tx1 and tx2 again, but this time in block. Block should be rejected.
block = create_block(int("0x" + node.getbestblockhash(), 16), create_coinbase(height=1, outputValue=25))
add_tx_to_block(block, [tx1,tx2])
rejected_blocks = []
def on_reject(conn, msg):
if (msg.message == b'block'):
rejected_blocks.append(msg)
assert_equal(msg.reason, b'blk-bad-inputs')
self.test.connections[0].cb.on_reject = on_reject
self.test.connections[0].send_message(msg_block(block))
sleep(1)
assert_equal(rejected_blocks[0].data, block.sha256)
assert_equal(False, block.hash == node.getbestblockhash())
########## SCENARIO 2
assert_equal(node.getblock(node.getbestblockhash())['height'], self.genesisactivationheight - 1)
# Create and send tx3 and tx4 that are valid after genesis.
tx3 = create_transaction(out[2].tx, out[2].n, b'', 100000, CScript([OP_IF, OP_2, OP_2MUL, OP_ENDIF, OP_1]))
tx4 = create_transaction(tx3, 0, CScript([OP_0]), 1, CScript([OP_TRUE]))
self.test.connections[0].send_message(msg_tx(tx3))
self.test.connections[0].send_message(msg_tx(tx4))
# wait for transaction processing
sleep(1)
# Both transactions are accepted.
assert_equal(True, tx3.hash in node.getrawmempool())
assert_equal(True, tx4.hash in node.getrawmempool())
# Invalidate block --> we are then at state before Genesis. Mempool is cleared.
node.invalidateblock(format(block103.sha256, 'x'))
assert_equal(False, tx3.hash in node.getrawmempool())
assert_equal(False, tx4.hash in node.getrawmempool())
assert_equal(node.getblock(node.getbestblockhash())['height'], self.genesisactivationheight - 2)
# Send tx3 again, this time in pre-genesis rules. It is accepted to mempool.
self.test.connections[0].send_message(msg_tx(tx3))
sleep(1)
assert_equal(True, tx3.hash in node.getrawmempool())
# Generate a block (height 103) with tx3 in it.
node.generate(1)
assert_equal(node.getblock(node.getbestblockhash())['height'], self.genesisactivationheight - 1)
assert_equal(True, tx3.hash in node.getblock(node.getbestblockhash())['tx'])
# Send tx4 again, again with Genesis rules. It should not be accepted to mempool.
self.test.connections[0].send_message(msg_tx(tx4))
sleep(1)
assert_equal(len(node.getrawmempool()), 0)
# Send tx4 again, this time in block. Block should be rejected.
block = create_block(int("0x" + node.getbestblockhash(), 16), create_coinbase(height=1, outputValue=25))
add_tx_to_block(block, [tx4])
self.test.connections[0].send_message(msg_block(block))
sleep(1)
assert_equal(rejected_blocks[1].data, block.sha256)
assert_equal(False, block.hash == node.getbestblockhash())
########## SCENARIO 3
assert_equal(node.getblock(node.getbestblockhash())['height'], self.genesisactivationheight - 1)
# Generate a block (height 104) with tx5 and tx6 (valid after genesis).
tx5 = create_transaction(out[3].tx, out[3].n, b'', 100000, CScript([OP_IF, OP_2, OP_2MUL, OP_ENDIF, OP_1]))
tx6 = create_transaction(tx5, 0, CScript([OP_0]), 1, CScript([OP_TRUE]))
blockGenesis = create_block(int("0x" + node.getbestblockhash(), 16), create_coinbase(height=1, outputValue=25))
add_tx_to_block(blockGenesis, [tx5, tx6])
self.test.connections[0].send_message(msg_block(blockGenesis))
sleep(1)
assert_equal(True, tx5.hash in node.getblock(node.getbestblockhash())['tx'])
assert_equal(True, tx6.hash in node.getblock(node.getbestblockhash())['tx'])
# Invalidate block 104. tx5 and tx6 are now in the mempool.
node.invalidateblock(format(blockGenesis.sha256, 'x'))
assert_equal(True, tx5.hash in node.getrawmempool())
assert_equal(True, tx6.hash in node.getrawmempool())
assert_equal(node.getblock(node.getbestblockhash())['height'], self.genesisactivationheight - 1)
# Invalidate block 103. tx5 and tx6 are not in mempool anymore.
node.invalidateblock(node.getbestblockhash())
assert_equal(False, tx5.hash in node.getrawmempool())
assert_equal(False, tx6.hash in node.getrawmempool())
if __name__ == '__main__':
BSVGenesisMempoolScriptCache().main()
|
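The scenarios above all turn on one rule: a transaction entering the mempool is validated as if it were in the next block, so with `-genesisactivationheight=104` the Genesis script rules start applying to mempool acceptance as soon as the chain tip is at height 103. A small sketch of that bookkeeping (an illustration of the comments at the top of the test, not part of the framework):

GENESIS_ACTIVATION_HEIGHT = 104

def mempool_rules(tip_height):
    # Mempool transactions are validated at the height of the next block.
    return "genesis" if tip_height + 1 >= GENESIS_ACTIVATION_HEIGHT else "pre-genesis"

print(mempool_rules(102))  # -> pre-genesis  (scenario 1: tx1 and tx2 accepted)
print(mempool_rules(103))  # -> genesis      (scenario 1: tx2 no longer accepted)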
py | 7dfd6b95b6f9ae9e97ac113fb382ff725b7bd1cc | '''
Created on Nov. 23, 2017
@author Andrew Habib
'''
import json
import os
from collections import OrderedDict, namedtuple
from xml.etree import cElementTree as ET
class DataReader(object):
def __init__(self, data_paths):
self.data_paths = data_paths
def __iter__(self):
for data_path in self.data_paths:
name = os.path.split(data_path)[1]
with open(data_path, 'r') as file:
content = file.readlines()
yield name, content
class XmlReader(object):
def __init__(self, data_paths):
self.data_paths = data_paths
def __iter__(self):
for data_path in self.data_paths:
name = os.path.split(data_path)[1]
with open(data_path, 'r') as file:
yield name.replace('.xml', ''), ET.iterparse(file)
class JsonReader(object):
def __init__(self, data_path):
self.data_path = data_path
def __iter__(self):
with open(self.data_path, 'r') as file:
entries = json.load(file)
for entry in entries:
yield entry
class JsonDataReader(object):
def __init__(self, data_paths):
self.data_paths = data_paths
def __iter__(self):
for data_path in self.data_paths:
name = os.path.split(data_path)[1]
if os.path.getsize(data_path) < 1:
yield name, None
else:
with open(data_path, 'r') as file:
entries = json.load(file)
for entry in entries:
yield name, entry
def load_json_list(json_file):
json_list = []
for entry in JsonReader(json_file):
json_list.append(entry)
return json_list
def get_list_of_uniq_jsons(lst):
uniq = []
for new in lst:
found = False
for old in uniq:
if new == old:
found = True
break
if not found:
uniq.append(new)
return uniq
class PrettyDict(dict):
def __str__(self):
return "{" + ", ".join("%r: %r\n" % (key, self[key]) for key in sorted(self)) + "}"
__repr__ = __str__
class ErrorproneMsg(object):
keys = [' Proj',
'Class',
' Type',
' Cat',
' Msg',
' Code',
' Mark',
' Line']
def __init__(self, proj, cls, typ, cat, msg, code, mark, line):
self.proj = proj
self.cls = cls
self.typ = typ
self.cat = cat
self.msg = msg
self.code = code
self.mark = mark
self.line = int(line)
self.values = [self.proj, self.cls, self.typ, self.cat,
self.msg, self.code, self.mark, self.line]
def __str__(self):
return("\n" + "\n".join(k + ": " + str(v) for (k, v) in zip(ErrorproneMsg.keys, self.values)) + "\n")
__repr__ = __str__
class SpotbugsMsg(object):
keys = [' Proj',
' Class',
' Cat',
' Abbrev',
' Type',
'Priority',
' Rank',
' Msg',
' Method',
' Field',
' Lines']
def __init__(self, proj, cls, cat, abbrev, typ, prio, rank, msg, mth, field, lines):
self.proj = proj
self.cls = cls
self.cat = cat
self.abbrev = abbrev
self.typ = typ
self.prio = prio
self.rank = rank
self.msg = msg
self.mth = mth
self.field = field
# `lines` may be a list of tuples during serialization or a list of lists during
# deserialization, so construct the namedtuples here instead of receiving them
# from outside; that way it also works during deserialization.
self.lines = []
for l in lines:
self.lines.append(SpotbugsSrcline(int(l[0]), int(l[1]), l[2]))
self.values = [self.proj, self.cls, self.cat, self.abbrev, self.typ, self.prio,
self.rank, self.msg, self.mth, self.field, self.lines]
def __str__(self):
return("\n" + "\n".join(k + ": " + str(v) for (k, v) in zip(SpotbugsMsg.keys, self.values)) + "\n")
__repr__ = __str__
def unrollLines(self):
lines = []
for l in self.lines:
lines.extend(range(l.start, l.end + 1))
return list(set(lines))
SpotbugsSrcline = namedtuple('SpotbugsSrcline', ['start', 'end', 'role'])
'''
InferIssue and InferBugTrace are slightly modified to cope
with the new json format in Infer 0.15.0
'''
class InferIssue(object):
# keys = ['bug_class', 'kind', 'bug_type', 'qualifier', 'severity', 'visibility', 'line',
# 'column', 'procedure', 'procedure_id', 'procedure_start_line', 'file', 'bug_trace',
# 'key', 'qualifier_tags', 'hash', 'bug_type_hum']
keys = ['bug_class', 'kind', 'bug_type', 'qualifier', 'severity', 'visibility', 'line',
'column', 'procedure', 'procedure_id', 'procedure_start_line', 'file', 'bug_trace',
'key', 'node_key', 'hash', 'bug_type_hum']
def __init__(self, bug_class, kind, bug_type, qualifier, severity, visibility,
line, column, procedure, procedure_id, procedure_start_line,
file, bug_trace, key, qualifier_tags, hashh, bug_type_hum):
self.bug_class = bug_class
self.kind = kind
self.bug_type = bug_type
self.qualifier = qualifier
self.severity = severity
self.visibility = visibility
self.line = line
self.column = column
self.procedure = procedure
self.procedure_id = procedure_id
self.procedure_start_line = procedure_start_line
self.file = file
self.bug_trace = []
for t in bug_trace:
self.bug_trace.append(InferBugTrace(*list(t[k] for k in InferBugTrace.keys)))
self.key = key
# self.qualifier_tags = qualifier_tags
self.hashh = hashh
self.bug_type_hum = bug_type_hum
self.values = [self.bug_class, self.kind, self.bug_type, self.qualifier,
self.severity, self.visibility, self.line, self.column,
self.procedure, self.procedure_id, self.procedure_start_line,
self.file, self.bug_trace, self.key,
# self.qualifier_tags,
self.hashh, self.bug_type_hum]
def __str__(self):
return("\n" + "\n".join(k + ": " + str(v) for (k, v) in zip(InferIssue.keys, self.values)) + "\n")
__repr__ = __str__
class InferBugTrace(object):
# keys = ['level', 'filename', 'line_number', 'column_number', 'description', 'node_tags']
keys = ['level', 'filename', 'line_number', 'column_number', 'description']
# def __init__(self, level, filename, line, column, desc, tags):
def __init__(self, level, filename, line, column, desc):
self.level = level
self.filename = filename
self.line = line
self.column = column
self.desc = desc
# self.tags = tags
# self.values = [self.level, self.filename, self.line, self.column, self.desc, self.tags]
self.values = [self.level, self.filename, self.line, self.column, self.desc]
def __str__(self):
return("\n" + "\n".join(k + ": " + str(v) for (k, v) in zip(InferBugTrace.keys, self.values)) + "\n")
__repr__ = __str__
class InferMsg(object):
keys = [' Proj',
' Class',
' Bug_Class',
' Kind',
' Bug_Type',
' Msg',
' Severity',
'Visibility',
' Lines',
' Procedure']
def __init__(self, proj, cls, bug_class, kind, bug_type, msg,
severity, visibility, lines, procedure):
self.proj = proj
self.cls = cls
self.bug_class = bug_class
self.kind = kind
self.bug_type = bug_type
self.msg = msg
self.severity = severity
self.visibility = visibility
self.lines = lines
self.procedure = procedure
self.values = [self.proj, self.cls, self.bug_class, self.kind, self.bug_type, self.msg,
self.severity, self.visibility, self.lines, self.procedure]
def __str__(self):
return("\n" + "\n".join(k + ": " + str(v) for (k, v) in zip(InferMsg.keys, self.values)))
__repr__ = __str__
class FileDiff(object):
keys = ['Project: ',
' Class: ',
' Lines: ']
def __init__(self, proj, cls, lines):
self.proj = proj
self.cls = cls
self.lines = set(int(i) for i in lines)
self.values = [self.proj, self.cls, self.lines]
def __str__(self):
return("\n" + "\n".join(k + str(v) for (k, v) in zip(FileDiff.keys, self.values)) + "\n")
__repr__ = __str__
class CustomEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ErrorproneMsg):
return OrderedDict(zip(ErrorproneMsg.keys, o.values))
elif isinstance(o, InferIssue):
return OrderedDict(zip(InferIssue.keys, o.values))
elif isinstance(o, InferMsg):
return OrderedDict(zip(InferMsg.keys, o.values))
elif isinstance(o, SpotbugsMsg):
return OrderedDict(zip(SpotbugsMsg.keys, o.values))
elif isinstance(o, FileDiff):
return OrderedDict(zip(FileDiff.keys, o.values))
elif isinstance(o, set):
return list(o)
else:
json.JSONEncoder.default(self, o)
def load_parsed_diffs(diffs_file):
diffs_ = []
for diff in JsonReader(diffs_file):
inst = FileDiff(*list(diff[k] for k in FileDiff.keys))
diffs_.append(inst)
return diffs_
def load_parsed_ep(ep_file):
ep_res_ = []
for msg in JsonReader(ep_file):
inst = ErrorproneMsg(*list(msg[k] for k in ErrorproneMsg.keys))
ep_res_.append(inst)
return ep_res_
def load_parsed_sb(sb_file):
sb_res_ = []
for msg in JsonReader(sb_file):
inst = SpotbugsMsg(*list(msg[k] for k in SpotbugsMsg.keys))
sb_res_.append(inst)
return sb_res_
def load_parsed_inf(inf_file):
inf_res_ = []
for msg in JsonReader(inf_file):
inst = InferMsg(*list(msg[k] for k in InferMsg.keys))
inf_res_.append(inst)
return inf_res_
def find_msg_by_proj_and_cls(proj, cls, msgs):
found_messages = []
for m in msgs:
if m.proj == proj and m.cls == cls:
found_messages.append(m)
return found_messages
LineMatchesToMessages = namedtuple('LineMatchesToMessages', ['lines', 'messages'])
def get_cls_name_from_file_path(cls_path):
cls = None
if '/com/' in cls_path:
cls = 'com.' + cls_path.split('/com/')[1].replace('/', '.').replace('.java', '')
elif '/org/' in cls_path:
cls = 'org.' + cls_path.split('/org/')[1].replace('/', '.').replace('.java', '')
return cls
def prepare_tool(path, proj):
proj_dir = os.path.join(path, proj)
with open(os.path.join(proj_dir, 'prop-source-dir')) as file:
proj_src = file.read()
proj_src = os.path.join(proj_dir, proj_src)
with open(os.path.join(proj_dir, 'prop-compile-path')) as file:
proj_cp = file.read()
with open(os.path.join(proj_dir, 'prop-buggy-classes')) as file:
proj_buggy_classes = file.read().splitlines()
try:
with open(os.path.join(proj_dir, 'prop-exclude-classes')) as file:
proj_exclude_classes = file.read().splitlines()
except IOError:
proj_exclude_classes = []
proj_buggy_classes = set(proj_buggy_classes) - set(proj_exclude_classes)
proj_buggy_files = map(lambda f: os.path.join(proj_src, f.replace('.', '/') + '.java'), proj_buggy_classes)
try:
with open(os.path.join(proj_dir, 'prop-javac-options')) as file:
proj_javac_opts = file.read()
except IOError:
proj_javac_opts = ""
return proj_src, proj_cp, proj_javac_opts, proj_buggy_files, proj_buggy_classes
NO_WARNING = "NO_WARNING"
|
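`SpotbugsMsg.unrollLines` above flattens the reported `(start, end, role)` source-line ranges into a deduplicated list of line numbers. A standalone sketch of the same expansion with hypothetical ranges:

ranges = [(3, 5, 'SOURCE_LINE'), (10, 10, 'SOURCE_LINE'), (4, 6, 'SOURCE_LINE')]

lines = []
for start, end, _role in ranges:
    lines.extend(range(start, end + 1))
print(sorted(set(lines)))  # -> [3, 4, 5, 6, 10]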
py | 7dfd6ca683d09ec51c3af3afe4bbe8ca318ab830 | """
Constant Definitions
"""
import os
from enum import IntEnum
import igibson
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
class ViewerMode(IntEnum):
NAVIGATION = 0
MANIPULATION = 1
PLANNING = 2
class SimulatorMode(IntEnum):
GUI_INTERACTIVE = 1
GUI_NON_INTERACTIVE = 2
HEADLESS = 3
HEADLESS_TENSOR = 4
VR = 5
class SemanticClass(IntEnum):
BACKGROUND = 0
ROBOTS = 1
USER_ADDED_OBJS = 2
SCENE_OBJS = 3
class ShadowPass(IntEnum):
NO_SHADOW = 0
HAS_SHADOW_RENDER_SHADOW = 1
HAS_SHADOW_RENDER_SCENE = 2
class OccupancyGridState(object):
OBSTACLES = 0.0
UNKNOWN = 0.5
FREESPACE = 1.0
# PyBullet-related
class PyBulletSleepState(IntEnum):
AWAKE = 1
PYBULLET_BASE_LINK_INDEX = -1
# BEHAVIOR-related
FLOOR_SYNSET = "floor.n.01"
NON_SAMPLEABLE_OBJECTS = []
non_sampleable_category_txt = os.path.join(igibson.ig_dataset_path, "metadata/non_sampleable_categories.txt")
if os.path.isfile(non_sampleable_category_txt):
with open(non_sampleable_category_txt) as f:
NON_SAMPLEABLE_OBJECTS = [FLOOR_SYNSET] + [line.strip() for line in f.readlines()]
MAX_TASK_RELEVANT_OBJS = 50
TASK_RELEVANT_OBJS_OBS_DIM = 9
AGENT_POSE_DIM = 6
UNDER_OBJECTS = [
"breakfast_table",
"coffee_table",
"console_table",
"desk",
"gaming_table",
"pedestal_table",
"pool_table",
"stand",
"armchair",
"chaise_longue",
"folding_chair",
"highchair",
"rocking_chair",
"straight_chair",
"swivel_chair",
"bench",
]
hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
light_modulation_map_filename = os.path.join(
igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
)
background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
NamedRenderingPresets = {
"NO_PBR": MeshRendererSettings(enable_pbr=False, enable_shadow=False),
"PBR_NOSHADOW": MeshRendererSettings(enable_pbr=True, enable_shadow=True),
"PBR_SHADOW_MSAA": MeshRendererSettings(enable_pbr=True, enable_shadow=True, msaa=True),
"NO_PBR_OPT": MeshRendererSettings(enable_pbr=False, enable_shadow=False, optimized=True),
"PBR_NOSHADOW_OPT": MeshRendererSettings(enable_pbr=True, enable_shadow=True, optimized=True),
"PBR_SHADOW_MSAA_OPT": MeshRendererSettings(enable_pbr=True, enable_shadow=True, msaa=True, optimized=True),
"HQ_WITH_BG_OPT": MeshRendererSettings(
env_texture_filename=hdr_texture,
env_texture_filename2=hdr_texture2,
env_texture_filename3=background_texture,
light_modulation_map_filename=light_modulation_map_filename,
enable_shadow=True,
msaa=True,
light_dimming_factor=1.0,
optimized=True,
),
"VISUAL_RL": MeshRendererSettings(enable_pbr=True, enable_shadow=False, msaa=False, optimized=True),
"PERCEPTION": MeshRendererSettings(
env_texture_filename=hdr_texture,
env_texture_filename2=hdr_texture2,
env_texture_filename3=background_texture,
light_modulation_map_filename=light_modulation_map_filename,
enable_shadow=True,
msaa=True,
light_dimming_factor=1.0,
optimized=True,
),
}
AVAILABLE_MODALITIES = ("rgb", "normal", "3d", "seg", "optical_flow", "scene_flow", "ins_seg")
MAX_INSTANCE_COUNT = 1024
MAX_CLASS_COUNT = 512
# Encodings
RAW_ENCODING = 0
COPY_RECTANGLE_ENCODING = 1
RRE_ENCODING = 2
CORRE_ENCODING = 4
HEXTILE_ENCODING = 5
ZLIB_ENCODING = 6
TIGHT_ENCODING = 7
ZLIBHEX_ENCODING = 8
ZRLE_ENCODING = 16
# 0xffffff00 to 0xffffffff tight options
PSEUDO_CURSOR_ENCODING = -239
# Keycodes
KEY_BackSpace = 0xFF08
KEY_Tab = 0xFF09
KEY_Return = 0xFF0D
KEY_Escape = 0xFF1B
KEY_Insert = 0xFF63
KEY_Delete = 0xFFFF
KEY_Home = 0xFF50
KEY_End = 0xFF57
KEY_PageUp = 0xFF55
KEY_PageDown = 0xFF56
KEY_Left = 0xFF51
KEY_Up = 0xFF52
KEY_Right = 0xFF53
KEY_Down = 0xFF54
KEY_F1 = 0xFFBE
KEY_F2 = 0xFFBF
KEY_F3 = 0xFFC0
KEY_F4 = 0xFFC1
KEY_F5 = 0xFFC2
KEY_F6 = 0xFFC3
KEY_F7 = 0xFFC4
KEY_F8 = 0xFFC5
KEY_F9 = 0xFFC6
KEY_F10 = 0xFFC7
KEY_F11 = 0xFFC8
KEY_F12 = 0xFFC9
KEY_F13 = 0xFFCA
KEY_F14 = 0xFFCB
KEY_F15 = 0xFFCC
KEY_F16 = 0xFFCD
KEY_F17 = 0xFFCE
KEY_F18 = 0xFFCF
KEY_F19 = 0xFFD0
KEY_F20 = 0xFFD1
KEY_ShiftLeft = 0xFFE1
KEY_ShiftRight = 0xFFE2
KEY_ControlLeft = 0xFFE3
KEY_ControlRight = 0xFFE4
KEY_MetaLeft = 0xFFE7
KEY_MetaRight = 0xFFE8
KEY_AltLeft = 0xFFE9
KEY_AltRight = 0xFFEA
KEY_Scroll_Lock = 0xFF14
KEY_Sys_Req = 0xFF15
KEY_Num_Lock = 0xFF7F
KEY_Caps_Lock = 0xFFE5
KEY_Pause = 0xFF13
KEY_Super_L = 0xFFEB
KEY_Super_R = 0xFFEC
KEY_Hyper_L = 0xFFED
KEY_Hyper_R = 0xFFEE
KEY_KP_0 = 0xFFB0
KEY_KP_1 = 0xFFB1
KEY_KP_2 = 0xFFB2
KEY_KP_3 = 0xFFB3
KEY_KP_4 = 0xFFB4
KEY_KP_5 = 0xFFB5
KEY_KP_6 = 0xFFB6
KEY_KP_7 = 0xFFB7
KEY_KP_8 = 0xFFB8
KEY_KP_9 = 0xFFB9
KEY_KP_Enter = 0xFF8D
KEY_ForwardSlash = 0x002F
KEY_BackSlash = 0x005C
KEY_SpaceBar = 0x0020
# TODO: build this programmatically?
KEYMAP = {
"bsp": KEY_BackSpace,
"tab": KEY_Tab,
"return": KEY_Return,
"enter": KEY_Return,
"esc": KEY_Escape,
"ins": KEY_Insert,
"delete": KEY_Delete,
"del": KEY_Delete,
"home": KEY_Home,
"end": KEY_End,
"pgup": KEY_PageUp,
"pgdn": KEY_PageDown,
"ArrowLeft": KEY_Left,
"left": KEY_Left,
"ArrowUp": KEY_Up,
"up": KEY_Up,
"ArrowRight": KEY_Right,
"right": KEY_Right,
"ArrowDown": KEY_Down,
"down": KEY_Down,
"slash": KEY_BackSlash,
"bslash": KEY_BackSlash,
"fslash": KEY_ForwardSlash,
"spacebar": KEY_SpaceBar,
"space": KEY_SpaceBar,
"sb": KEY_SpaceBar,
"f1": KEY_F1,
"f2": KEY_F2,
"f3": KEY_F3,
"f4": KEY_F4,
"f5": KEY_F5,
"f6": KEY_F6,
"f7": KEY_F7,
"f8": KEY_F8,
"f9": KEY_F9,
"f10": KEY_F10,
"f11": KEY_F11,
"f12": KEY_F12,
"f13": KEY_F13,
"f14": KEY_F14,
"f15": KEY_F15,
"f16": KEY_F16,
"f17": KEY_F17,
"f18": KEY_F18,
"f19": KEY_F19,
"f20": KEY_F20,
"lshift": KEY_ShiftLeft,
"shift": KEY_ShiftLeft,
"rshift": KEY_ShiftRight,
"lctrl": KEY_ControlLeft,
"ctrl": KEY_ControlLeft,
"rctrl": KEY_ControlRight,
"lmeta": KEY_MetaLeft,
"meta": KEY_MetaLeft,
"rmeta": KEY_MetaRight,
"lalt": KEY_AltLeft,
"alt": KEY_AltLeft,
"ralt": KEY_AltRight,
"scrlk": KEY_Scroll_Lock,
"sysrq": KEY_Sys_Req,
"numlk": KEY_Num_Lock,
"caplk": KEY_Caps_Lock,
"pause": KEY_Pause,
"lsuper": KEY_Super_L,
"super": KEY_Super_L,
"rsuper": KEY_Super_R,
"lhyper": KEY_Hyper_L,
"hyper": KEY_Hyper_L,
"rhyper": KEY_Hyper_R,
"kp0": KEY_KP_0,
"kp1": KEY_KP_1,
"kp2": KEY_KP_2,
"kp3": KEY_KP_3,
"kp4": KEY_KP_4,
"kp5": KEY_KP_5,
"kp6": KEY_KP_6,
"kp7": KEY_KP_7,
"kp8": KEY_KP_8,
"kp9": KEY_KP_9,
"kpenter": KEY_KP_Enter,
}
|
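The `KEYMAP` above is written out by hand, and the `TODO` comment asks whether it could be built programmatically. A sketch of how the regular function-key and keypad entries could be generated, relying only on the contiguous keysym values already defined in this file (KEY_F1 = 0xFFBE, KEY_KP_0 = 0xFFB0):

# Hypothetical generation of the regular KEYMAP entries; the irregular aliases
# ("bsp", "esc", "lshift", ...) would still need to be spelled out by hand.
generated = {}
generated.update({"f%d" % i: 0xFFBE + (i - 1) for i in range(1, 21)})   # f1..f20
generated.update({"kp%d" % i: 0xFFB0 + i for i in range(10)})           # kp0..kp9

assert generated["f12"] == 0xFFC9 and generated["kp9"] == 0xFFB9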
py | 7dfd6d18600b7ead6fdcbafc398f16ec1fd0d47c | """
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import concurrent
import logging
from google.cloud.sql.connector.instance_connection_manager import (
InstanceConnectionManager,
IPTypes,
)
from google.cloud.sql.connector.utils import generate_keys
from google.auth.credentials import Credentials
from threading import Thread
from typing import Any, Dict, Optional
logger = logging.getLogger(name=__name__)
_default_connector = None
class Connector:
"""A class to configure and create connections to Cloud SQL instances.
:type ip_type: IPTypes
:param ip_type:
The IP type (public or private) used to connect. IP types
can be either IPTypes.PUBLIC or IPTypes.PRIVATE.
:type enable_iam_auth: bool
:param enable_iam_auth:
Enables IAM based authentication (Postgres only).
:type timeout: int
:param timeout:
The time limit for a connection before raising a TimeoutError.
:type credentials: google.auth.credentials.Credentials
:param credentials:
Credentials object used to authenticate connections to Cloud SQL server.
If not specified, Application Default Credentials are used.
"""
def __init__(
self,
ip_type: IPTypes = IPTypes.PUBLIC,
enable_iam_auth: bool = False,
timeout: int = 30,
credentials: Optional[Credentials] = None,
) -> None:
self._loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()
self._thread: Thread = Thread(target=self._loop.run_forever, daemon=True)
self._thread.start()
self._keys: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(
generate_keys(), self._loop
)
self._instances: Dict[str, InstanceConnectionManager] = {}
# set default params for connections
self._timeout = timeout
self._enable_iam_auth = enable_iam_auth
self._ip_type = ip_type
self._credentials = credentials
def connect(
self, instance_connection_string: str, driver: str, **kwargs: Any
) -> Any:
"""Prepares and returns a database connection object and starts a
background thread to refresh the certificates and metadata.
:type instance_connection_string: str
:param instance_connection_string:
A string containing the GCP project name, region name, and instance
name separated by colons.
Example: example-proj:example-region-us6:example-instance
:type driver: str
:param driver:
A string representing the driver to connect with. Supported drivers are
pymysql, pg8000, and pytds.
:param kwargs:
Pass in any driver-specific arguments needed to connect to the Cloud
SQL instance.
:rtype: Connection
:returns:
A DB-API connection to the specified Cloud SQL instance.
"""
# Initiate event loop and run in background thread.
#
# Create an InstanceConnectionManager object from the connection string.
# The InstanceConnectionManager should verify arguments.
#
# Use the InstanceConnectionManager to establish an SSL Connection.
#
# Return a DBAPI connection
if instance_connection_string in self._instances:
icm = self._instances[instance_connection_string]
else:
enable_iam_auth = kwargs.pop("enable_iam_auth", self._enable_iam_auth)
icm = InstanceConnectionManager(
instance_connection_string,
driver,
self._keys,
self._loop,
self._credentials,
enable_iam_auth,
)
self._instances[instance_connection_string] = icm
if "ip_types" in kwargs:
ip_type = kwargs.pop("ip_types")
logger.warning(
"Deprecation Warning: Parameter `ip_types` is deprecated and may be removed"
"in a future release. Please use `ip_type` instead."
)
else:
ip_type = kwargs.pop("ip_type", self._ip_type)
if "timeout" in kwargs:
return icm.connect(driver, ip_type, **kwargs)
elif "connect_timeout" in kwargs:
timeout = kwargs["connect_timeout"]
else:
timeout = self._timeout
try:
return icm.connect(driver, ip_type, timeout, **kwargs)
except Exception as e:
# with any other exception, we attempt a force refresh, then throw the error
icm.force_refresh()
raise (e)
def connect(instance_connection_string: str, driver: str, **kwargs: Any) -> Any:
"""Uses a Connector object with default settings and returns a database
connection object with a background thread to refresh the certificates and metadata.
For more advanced configurations, callers should instantiate Connector on their own.
:type instance_connection_string: str
:param instance_connection_string:
A string containing the GCP project name, region name, and instance
name separated by colons.
Example: example-proj:example-region-us6:example-instance
:type driver: str
:param driver:
A string representing the driver to connect with. Supported drivers are
pymysql, pg8000, and pytds.
:param kwargs:
Pass in any driver-specific arguments needed to connect to the Cloud
SQL instance.
:rtype: Connection
:returns:
A DB-API connection to the specified Cloud SQL instance.
"""
global _default_connector
if _default_connector is None:
_default_connector = Connector()
return _default_connector.connect(instance_connection_string, driver, **kwargs)
|
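A minimal usage sketch for the `Connector` class above, following its `connect` docstring (the instance connection string and database credentials below are placeholders, and the import path is assumed to be the package's public one):

from google.cloud.sql.connector import Connector  # assumed public import path

connector = Connector()
conn = connector.connect(
    "example-proj:example-region-us6:example-instance",  # project:region:instance
    "pymysql",                                            # supported drivers: pymysql, pg8000, pytds
    user="my-user",
    password="my-password",
    db="my-db",
)
with conn.cursor() as cursor:
    cursor.execute("SELECT 1")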
py | 7dfd6d331fe997340d626183b44c13c69363821c | from __future__ import annotations
from typing import TYPE_CHECKING, Optional, cast
from .enums import ChannelType
from .messageable import Messageable
from .permissions import ChannelPermissions
if TYPE_CHECKING:
from .message import Message
from .role import Role
from .server import Server
from .state import State
from .types import Channel as ChannelPayload
from .types import DMChannel as DMChannelPayload
from .types import Group as GroupDMChannelPayload
from .types import SavedMessages as SavedMessagesPayload
from .types import TextChannel as TextChannelPayload
from .types import VoiceChannel as VoiceChannelPayload
from .user import User
__all__ = ("Channel",)
class Channel:
"""Base class for all channels
Attributes
-----------
id: :class:`str`
The id of the channel
channel_type: ChannelType
The type of the channel
server: Optional[:class:`Server`]
The server the channel is part of
"""
__slots__ = ("state", "id", "channel_type", "server_id")
def __init__(self, data: ChannelPayload, state: State):
self.state = state
self.id = data["_id"]
self.channel_type = ChannelType(data["channel_type"])
self.server_id = ""
@property
def server(self) -> Optional[Server]:
return self.state.get_server(self.server_id) if self.server_id else None
def _update(self):
pass
class SavedMessageChannel(Channel, Messageable):
"""The Saved Message Channel"""
def __init__(self, data: SavedMessagesPayload, state: State):
super().__init__(data, state)
class DMChannel(Channel, Messageable):
"""A DM channel"""
def __init__(self, data: DMChannelPayload, state: State):
super().__init__(data, state)
class GroupDMChannel(Channel, Messageable):
__slots__ = ("recipients", "name", "owner", "permissions")
"""A group DM channel"""
def __init__(self, data: GroupDMChannelPayload, state: State):
super().__init__(data, state)
self.recipients = [state.get_user(user_id) for user_id in data["recipients"]]
self.name = data["name"]
self.owner = state.get_user(data["owner"])
if perms := data.get("permissions"):
self.permissions = ChannelPermissions._from_value(perms)
def _update(self, *, name: Optional[str] = None, recipients: Optional[list[str]] = None):
if name:
self.name = name
if recipients:
self.recipients = [self.state.get_user(user_id) for user_id in recipients]
async def set_default_permissions(self, permissions: ChannelPermissions) -> None:
"""Sets the default permissions for a group.
Parameters
-----------
permissions: :class:`ChannelPermissions`
The new default group permissions
"""
await self.state.http.set_channel_default_permissions(self.id, permissions.value)
class TextChannel(Channel, Messageable):
__slots__ = ("name", "description", "last_message_id", "server_id", "default_permissions", "role_permissions")
"""A text channel"""
def __init__(self, data: TextChannelPayload, state: State):
super().__init__(data, state)
self.server_id = data["server"]
self.name = data["name"]
self.description = data.get("description")
last_message_id = data.get("last_message")
self.last_message_id = last_message_id
if perms := data.get("default_permissions"):
self.default_permissions = ChannelPermissions._from_value(perms)
if role_perms := data.get("role_permissions"):
self.role_permissions = {role_id: ChannelPermissions._from_value(perms) for role_id, perms in role_perms.items()}
def _get_channel_id(self) -> str:
return self.id
@property
def last_message(self) -> Message:
return self.state.get_message(self.last_message_id)
def _update(self, *, name: Optional[str] = None, description: Optional[str] = None):
if name:
self.name = name
if description:
self.description = description
async def set_default_permissions(self, permissions: ChannelPermissions) -> None:
"""Sets the default permissions for a channel.
Parameters
-----------
permissions: :class:`ChannelPermissions`
The new default channel permissions
"""
await self.state.http.set_channel_default_permissions(self.id, permissions.value)
async def set_role_permissions(self, role: Role, permissions: ChannelPermissions) -> None:
"""Sets the permissions for a role in a channel.
Parameters
-----------
permissions: :class:`ChannelPermissions`
The new channel permissions
"""
await self.state.http.set_channel_role_permissions(self.id, role.id, permissions.value)
class VoiceChannel(Channel):
"""A voice channel"""
def __init__(self, data: VoiceChannelPayload, state: State):
super().__init__(data, state)
self.server_id = data["server"]
self.name = data["name"]
self.description = data.get("description")
if perms := data.get("default_permissions"):
self.default_permissions = ChannelPermissions._from_value(perms)
if role_perms := data.get("role_permissions"):
self.role_permissions = {role_id: ChannelPermissions._from_value(perms) for role_id, perms in role_perms.items()}
def _update(self, *, name: Optional[str] = None, description: Optional[str] = None):
if name:
self.name = name
if description:
self.description = description
async def set_default_permissions(self, permissions: ChannelPermissions) -> None:
"""Sets the default permissions for a voice channel.
Parameters
-----------
permissions: :class:`ChannelPermissions`
The new default channel permissions
"""
await self.state.http.set_channel_default_permissions(self.id, permissions.value)
async def set_role_permissions(self, role: Role, permissions: ChannelPermissions) -> None:
"""Sets the permissions for a role in a voice channel
Parameters
-----------
permissions: :class:`ChannelPermissions`
The new channel permissions
"""
await self.state.http.set_channel_role_permissions(self.id, role.id, permissions.value)
def channel_factory(data: ChannelPayload, state: State) -> Channel:
if data["channel_type"] == "SavedMessage":
return SavedMessageChannel(data, state)
elif data["channel_type"] == "DirectMessage":
return DMChannel(data, state)
elif data["channel_type"] == "Group":
return GroupDMChannel(data, state)
elif data["channel_type"] == "TextChannel":
return TextChannel(data, state)
elif data["channel_type"] == "VoiceChannel":
return VoiceChannel(data, state)
else:
raise Exception
|
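`channel_factory` above dispatches purely on the `channel_type` string in the payload. For orientation, here is a hypothetical minimal payload for a text channel containing only the keys that `Channel.__init__` and `TextChannel.__init__` actually read (all values are placeholders):

text_channel_payload = {
    "_id": "01CHANNELID",
    "channel_type": "TextChannel",
    "server": "01SERVERID",
    "name": "general",
    "description": "General chat",   # optional
    "last_message": None,            # optional
}
# channel_factory(text_channel_payload, state) would construct a TextChannel,
# where `state` is the library's State object holding the HTTP client and caches.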
py | 7dfd6f866ba4c9c924034f99eb57ff9341148f4a | from sqlalchemy import create_engine, desc, case, cast, func, or_, and_, Column, ForeignKey
from sqlalchemy import String, Integer, Float, DateTime, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import sessionmaker, aliased
import logging
import datetime
log = logging.getLogger(__name__)
Base = declarative_base()
# https://www.blog.pythonlibrary.org/2010/09/10/sqlalchemy-connecting-to-pre-existing-databases/
# http://docs.sqlalchemy.org/en/latest/core/reflection.html
# http://docs.sqlalchemy.org/en/latest/orm/tutorial.html
# http://docs.sqlalchemy.org/en/latest/core/type_basics.html#generic-types
# noinspection PyPep8
class SaltyDB:
def __init__(self, conn_str, elo_stake=0.05, echo=False):
self.elo_stake = elo_stake
self.engine = create_engine(conn_str, echo=echo)
Base.metadata.create_all(self.engine)
Session = sessionmaker(bind=self.engine)
self.session = Session()
def add_ai_logreg_model(self, serialized):
new_model = AILogregModel(betas=serialized)
self.session.add(new_model)
self.session.commit()
log.info('Saved LogReg model: %s' % new_model)
return new_model # this might have issues with threads
def get_best_logreg_model(self, min_bets=0):
q = self.session.query(AILogregModel)
q = q.filter(AILogregModel.won_bets + AILogregModel.lost_bets >= min_bets)
q = q.order_by(desc(AILogregModel.won_bets_pct))
return q.first()
def add_fight(self, p1name, p2name, winner, mode):
if p1name == p2name:
log.warning('Self fight detected. Ignoring. %s' % p1name)
return
p1 = self.get_or_add_fighter(p1name)
p2 = self.get_or_add_fighter(p2name)
if winner == 1:
self.increment_wins(p1.guid, p2.elo if p2 else 0)
self.increment_losses(p2.guid)
elif winner == 2:
self.increment_losses(p1.guid)
self.increment_wins(p2.guid, p1.elo if p1 else 0)
else:
raise RuntimeError("Winner must be in [1, 2]: %s" % winner)
new_fight = Fight(p1=p1.guid, p2=p2.guid, winner=winner, mode=mode)
self.session.add(new_fight)
self.session.commit()
log.info('Fight recorded %s' % new_fight)
return new_fight.guid # TODO: return whole fight
# TODO: refactor to not need this - just keep the session object and update
def increment_session_wins(self, session_guid):
session = self.session.query(Session).filter(Session.guid == session_guid).first()
session.won_bets += 1
self.session.commit()
def increment_model_wins(self, model_guid):
model = self.session.query(AILogregModel).filter(AILogregModel.guid == model_guid).first()
model.won_bets += 1
self.session.commit()
def increment_session_losses(self, session_guid):
session = self.session.query(Session).filter(Session.guid == session_guid).first()
session.lost_bets += 1
self.session.commit()
def increment_model_losses(self, model_guid):
model = self.session.query(AILogregModel).filter(AILogregModel.guid == model_guid).first()
model.lost_bets += 1
self.session.commit()
# returns newly created fighter
def add_fighter(self, name):
new_fighter = Fighter(name=name)
self.session.add(new_fighter)
self.session.commit()
log.info('Fighter added %s' % new_fighter)
return new_fighter
def get_or_add_fighter(self, name):
fighter = self.get_fighter_by_name(name)
if not fighter:
fighter = self.add_fighter(name)
return fighter
    # fighter lookup helpers (by guid or by name)
def get_fighter_by_guid(self, guid):
fighter = self.session.query(Fighter).filter(Fighter.guid == guid).first()
return fighter
def get_fighter_by_name(self, name):
fighter = self.session.query(Fighter).filter(Fighter.name == name).first()
return fighter
# def get_fights(self, p1_guid, p2_guid):
# fighters = [p1_guid, p2_guid]
# fights = self.session.query(Fight).filter(Fight.p1 in fighters and Fight.p2 in fighters).all()
# return fights
def get_fights(self, guid):
fights = self.session.query(Fight).filter(or_(Fight.p1 == guid, Fight.p2 == guid)).all()
return fights
# get p1's wins against p2. includes where #s reversed
# TODO: refactor to just return the number instead of full list (get_n_wins_against)
def get_wins_against(self, p1_guid, p2_guid):
wins = self.session.query(Fight).filter(or_(
and_(Fight.p1 == p1_guid, Fight.p2 == p2_guid, Fight.winner == 1),
and_(Fight.p1 == p2_guid, Fight.p2 == p1_guid, Fight.winner == 2)
)).all()
return wins
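    # Hedged sketch (added, not part of the original class): the refactor suggested in
    # the TODO above -- count the matching rows in SQL instead of materializing the
    # full list of Fight objects.
    def get_n_wins_against(self, p1_guid, p2_guid):
        return self.session.query(func.count(Fight.guid)).filter(or_(
            and_(Fight.p1 == p1_guid, Fight.p2 == p2_guid, Fight.winner == 1),
            and_(Fight.p1 == p2_guid, Fight.p2 == p1_guid, Fight.winner == 2)
        )).scalar()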
def increment_wins(self, fighter_guid, enemy_elo):
fighter = self.get_fighter_by_guid(fighter_guid)
fighter.wins += 1
fighter.elo = fighter.elo + (self.elo_stake * enemy_elo)
self.session.commit()
log.info('Incremented wins: %s' % fighter)
def increment_losses(self, fighter_guid):
fighter = self.get_fighter_by_guid(fighter_guid)
fighter.losses += 1
fighter.elo = fighter.elo - (self.elo_stake * fighter.elo)
self.session.commit()
log.info('Incremented losses: %s' % fighter)
def start_session(self, balance):
if balance is None:
raise TypeError('Balance cannot be None')
open_sessions = self.session.query(Session).filter(Session.end_ts == None).all()
if len(open_sessions) > 0:
raise OpenSessionError('A session is already open!', len(open_sessions))
new_session = Session(start_balance=balance)
self.session.add(new_session)
self.session.commit()
log.info('Session started: %s' % new_session)
return new_session
# does nothing if there are no open sessions, ends only the most recent session
def end_session(self, balance):
if balance is None:
raise TypeError('Balance cannot be None')
open_sessions = self.session.query(Session).filter(Session.end_ts == None).all()
        last_fight = self.session.query(func.max(Fight.time)).first()
        last_fight = last_fight[0] if last_fight and last_fight[0] else datetime.datetime.utcnow()
for session in open_sessions:
session.end_ts = last_fight
session.end_balance = balance
self.session.commit()
if len(open_sessions) == 0:
log.info('No sessions to close')
if len(open_sessions) > 1:
log.warning('More than one session closed: %s' % [session.guid for session in open_sessions])
last_session_guid = self.session.query(func.max(Session.guid)).subquery()
last_session = self.session.query(Session).filter(Session.guid == last_session_guid).first()
return last_session
# TODO: this is causing oom errors in the kernel :(
# only need p1elo, p2elo, p1winsvp2, p2winsvp1, p1winpct, p2winpct, winner
def get_training_data(self, test_mode=False, test_limit=100):
log.info('Generating training data, this may take a while...')
f = aliased(Fight, name='f')
p1 = aliased(Fighter, name='p1')
p2 = aliased(Fighter, name='p2')
p1winsvp2 = self.session.query(func.count(1)).filter(or_(
and_(Fight.p1 == f.p1, Fight.p2 == f.p2, Fight.winner == 1),
and_(Fight.p1 == f.p2, Fight.p2 == f.p1, Fight.winner == 2)
)).label('p1winsvp2')
p2winsvp1 = self.session.query(func.count(1)).filter(or_(
and_(Fight.p1 == f.p1, Fight.p2 == f.p2, Fight.winner == 2),
and_(Fight.p1 == f.p2, Fight.p2 == f.p1, Fight.winner == 1)
)).label('p2winsvp1')
fights = self.session.query(f.winner, p1.elo, p2.elo, p1winsvp2, p2winsvp1, p1.winpct, p2.winpct)
fights = fights.join(p1, f.p1==p1.guid).join(p2, f.p2==p2.guid)
if test_mode:
fights = fights.limit(test_limit)
return [{
'elo_diff': p1elo - p2elo,
'wins_diff': p1winsvp2 - p2winsvp1,
'win_pct_diff': p1winpct - p2winpct,
'winner': winner - 1 # -1 to put in range [0,1]
} for winner, p1elo, p2elo, p1winsvp2, p2winsvp1, p1winpct, p2winpct in fights.all()]
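    # Hedged note (added, not in the original code): one common way to tame the OOM
    # mentioned in the TODO above is to stream the joined rows instead of calling
    # .all(), for example:
    #
    #     for row in fights.yield_per(1000):
    #         winner, p1elo, p2elo, p1winsvp2, p2winsvp1, p1winpct, p2winpct = row
    #         ...
    #
    # Query.yield_per() keeps only a batch of rows in memory at a time.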
class Fighter(Base):
__tablename__ = 'fighters'
guid = Column(Integer, primary_key=True)
name = Column(String, nullable=False, unique=True)
elo = Column(Float, nullable=False, default=100)
wins = Column(Integer, nullable=False, default=0)
losses = Column(Integer, nullable=False, default=0)
def __repr__(self):
return '<Fighter ({guid}): {name}>'.format(
guid = self.guid,
name = self.name
)
@hybrid_property
def winpct(self):
if self.wins + self.losses == 0:
return 50.0
# noinspection PyTypeChecker
return float(self.wins) / (self.wins + self.losses) * 100
@winpct.expression
def winpct(self):
return case(
[(self.wins + self.losses == 0, 50.0)],
else_ =cast(self.wins, Float) / (self.wins + self.losses) * 100
)
class Fight(Base):
__tablename__ = 'fights'
guid = Column(Integer, primary_key=True)
p1 = Column(Integer, ForeignKey('fighters.guid'), nullable=False)
p2 = Column(Integer, ForeignKey('fighters.guid'), nullable=False)
winner = Column(Integer, nullable=False)
time = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
mode = Column(String)
def __repr__(self):
        return '<Fight ({guid}): {time} - {p1} vs. {p2}, winner: {winner}>'.format(
guid = self.guid,
time = self.time,
p1 = self.p1,
p2 = self.p2,
winner = self.winner
)
class Session(Base):
__tablename__ = 'sessions'
guid = Column(Integer, primary_key=True)
start_ts = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
end_ts = Column(DateTime)
start_balance = Column(Integer, nullable=False)
end_balance = Column(Integer)
won_bets = Column(Integer, nullable=False, default=0) # TODO: can we get rid of won/lost count once bets table is implemented?
lost_bets = Column(Integer, nullable=False, default=0)
def __repr__(self):
return '<Session ({guid}): {start} - {end}>'.format(
guid = self.guid,
start = self.start_ts,
end = self.end_ts
)
class Bet(Base):
__tablename__ = 'bets'
guid = Column(Integer, primary_key=True)
fight = Column(Integer, ForeignKey('fights.guid'), nullable=False)
session = Column(Integer, ForeignKey('sessions.guid'), nullable=False)
# model = Column(Integer, ForeignKey('ai_logreg_models.guid'), nullable=False) # TODO: add model column
# on = Column(Integer, ForeignKey('fighters.guid'), nullable=False) # TODO: add on column
amount = Column(Integer, nullable=False)
won = Column(Boolean)
pre_balance = Column(Integer, nullable=False)
profit = Column(Integer)
def __repr__(self):
return '<Bet ({guid}): {wonlost} profit: {profit}>'.format(
guid = self.guid,
wonlost = 'won' if self.won else 'lost',
profit = self.profit
)
class AILogregModel(Base):
__tablename__ = 'ai_logreg_models'
guid = Column(Integer, primary_key=True)
betas = Column(String, nullable=False)
won_bets = Column(Integer, nullable=False, default=0) # TODO: can we get rid of won/lost count once bets table is implemented?
lost_bets = Column(Integer, nullable=False, default=0)
def __repr__(self):
return '<AILogregModel ({guid}): {nfights} - {winpct}%>'.format(
guid = self.guid,
nfights = self.won_bets + self.lost_bets,
winpct = None if self.won_bets + self.lost_bets == 0 else self.won_bets / (self.won_bets + self.lost_bets) * 100
)
@hybrid_property
def won_bets_pct(self):
if self.won_bets + self.lost_bets == 0:
return 0.0
# noinspection PyTypeChecker
return float(self.won_bets) / (self.won_bets + self.lost_bets) * 100.0
# TODO: this doesn't work
@won_bets_pct.expression
def won_bets_pct(self):
return case(
[(self.won_bets + self.lost_bets == 0, 0.0)],
else_ =cast(self.won_bets, Float) / (self.won_bets + self.lost_bets) * 100.0
)
class OpenSessionError(RuntimeError):
def __init__(self, message, open_sessions):
super().__init__(message, open_sessions)
self.message = message
self.open_sessions = open_sessions
if __name__ == '__main__':
    db = SaltyDB('sqlite:///salt.db', echo=True)
db.get_training_data(True)
pass
|
py | 7dfd7100990c6012e3e2b4b53edacf675f1a6694 | from flask import g, Flask
import gorbin_tools2
app = Flask(__name__)
gt = gorbin_tools2.mongo_tools(g)
if __name__ == '__main__':
with app.app_context():
#Configure database
gt.remake_files('yes')
gt.remake_users('yes')
#add default admin user
gt.add_user(login = 'admin', pas = gorbin_tools2.hash('admin00'), email = '[email protected]', status='admin')
|
py | 7dfd712254641852bd1570638a19a10d73eacc13 | o1, o2, o3 = map(int, input().split())
if 1 <= o1 <= 1000 and 1 <= o2 <= 1000 and 1 <= o3 <= 1000 :
if o1 == o2 or o1 == o3 or o2 == o3 or o1 == o2+o3 or o2 == o1+o3 or o3 == o1+o2:
print("S")
else:
print("N")
else:
print("N") |
py | 7dfd71269c2b2a8783164c9c4d32e08beaccb5dd | # encoding: utf-8
from __future__ import unicode_literals, print_function
import json
import os
import re
import sys
from tempfile import NamedTemporaryFile
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import warnings
import twitter
import responses
from responses import GET, POST
warnings.filterwarnings('ignore', category=DeprecationWarning)
DEFAULT_URL = re.compile(r'https?://.*\.twitter.com/1\.1/.*')
class ErrNull(object):
""" Suppress output of tests while writing to stdout or stderr. This just
takes in data and does nothing with it.
"""
def write(self, data):
pass
class ApiTest(unittest.TestCase):
def setUp(self):
self.api = twitter.Api(
consumer_key='test',
consumer_secret='test',
access_token_key='test',
access_token_secret='test',
sleep_on_rate_limit=False,
chunk_size=500 * 1024)
self.base_url = 'https://api.twitter.com/1.1'
self._stderr = sys.stderr
sys.stderr = ErrNull()
def tearDown(self):
sys.stderr = self._stderr
pass
def testApiSetUp(self):
self.assertRaises(
twitter.TwitterError,
lambda: twitter.Api(consumer_key='test'))
def testSetAndClearCredentials(self):
api = twitter.Api()
api.SetCredentials(consumer_key='test',
consumer_secret='test',
access_token_key='test',
access_token_secret='test')
self.assertEqual(api._consumer_key, 'test')
self.assertEqual(api._consumer_secret, 'test')
self.assertEqual(api._access_token_key, 'test')
self.assertEqual(api._access_token_secret, 'test')
api.ClearCredentials()
self.assertFalse(all([
api._consumer_key,
api._consumer_secret,
api._access_token_key,
api._access_token_secret
]))
@responses.activate
def testApiRaisesAuthErrors(self):
responses.add(GET, DEFAULT_URL, body='')
api = twitter.Api()
api.SetCredentials(consumer_key='test',
consumer_secret='test',
access_token_key='test',
access_token_secret='test')
api._Api__auth = None
self.assertRaises(twitter.TwitterError, lambda: api.GetFollowers())
@responses.activate
def testAppOnlyAuth(self):
responses.add(method=POST,
url='https://api.twitter.com/oauth2/token',
body='{"token_type":"bearer","access_token":"testing"}')
api = twitter.Api(
consumer_key='test',
consumer_secret='test',
application_only_auth=True)
self.assertEqual(api._bearer_token['access_token'], "testing")
@responses.activate
def testGetHelpConfiguration(self):
with open('testdata/get_help_configuration.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetHelpConfiguration()
self.assertEqual(resp.get('short_url_length_https'), 23)
@responses.activate
def testGetShortUrlLength(self):
with open('testdata/get_help_configuration.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetShortUrlLength()
self.assertEqual(resp, 23)
resp = self.api.GetShortUrlLength(https=True)
self.assertEqual(resp, 23)
@responses.activate
def testGetTrendsCurrent(self):
with open('testdata/get_trends_current.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetTrendsCurrent()
self.assertTrue(type(resp[0]) is twitter.Trend)
@responses.activate
def testGetHomeTimeline(self):
with open('testdata/get_home_timeline.json') as f:
resp_data = f.read()
responses.add(
GET, 'https://api.twitter.com/1.1/statuses/home_timeline.json?tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetHomeTimeline()
status = resp[0]
self.assertEqual(type(status), twitter.Status)
self.assertEqual(status.id, 674674925823787008)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetHomeTimeline(count='literally infinity'))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetHomeTimeline(count=4000))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetHomeTimeline(max_id='also infinity'))
self.assertRaises(twitter.TwitterError,
lambda: self.api.GetHomeTimeline(
since_id='still infinity'))
@responses.activate
def testGetHomeTimelineWithExclusions(self):
with open('testdata/get_home_timeline.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
self.assertTrue(self.api.GetHomeTimeline(count=100,
trim_user=True,
max_id=674674925823787008))
@responses.activate
def testGetUserTimelineByUserID(self):
with open('testdata/get_user_timeline.json') as f:
resp_data = f.read()
responses.add(responses.GET, DEFAULT_URL, body=resp_data, status=200)
resp = self.api.GetUserTimeline(user_id=673483)
self.assertTrue(type(resp[0]) is twitter.Status)
self.assertTrue(type(resp[0].user) is twitter.User)
self.assertEqual(resp[0].user.id, 673483)
@responses.activate
def testGetUserTimelineByScreenName(self):
with open('testdata/get_user_timeline.json') as f:
resp_data = f.read()
responses.add(
GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetUserTimeline(screen_name='dewitt')
self.assertEqual(resp[0].id, 675055636267298821)
self.assertTrue(resp)
@responses.activate
def testGetRetweets(self):
with open('testdata/get_retweets.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetRetweets(statusid=397)
self.assertTrue(type(resp[0]) is twitter.Status)
self.assertTrue(type(resp[0].user) is twitter.User)
@responses.activate
def testGetRetweetsCount(self):
with open('testdata/get_retweets_count.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetRetweets(statusid=312, count=63)
        self.assertEqual(len(resp), 63)
@responses.activate
def testGetRetweeters(self):
with open('testdata/get_retweeters.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetRetweeters(status_id=397)
self.assertTrue(type(resp) is list)
self.assertTrue(type(resp[0]) is int)
@responses.activate
def testGetBlocks(self):
with open('testdata/get_blocks_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/blocks/list.json?cursor=-1&stringify_ids=False&include_entities=False&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
with open('testdata/get_blocks_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/blocks/list.json?cursor=1524574483549312671&stringify_ids=False&include_entities=False&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetBlocks()
self.assertTrue(
isinstance(resp, list),
"Expected resp type to be list, got {0}".format(type(resp)))
self.assertTrue(
isinstance(resp[0], twitter.User),
"Expected type of first obj in resp to be twitter.User, got {0}".format(
type(resp[0])))
self.assertEqual(
len(resp), 2,
"Expected len of resp to be 2, got {0}".format(len(resp)))
self.assertEqual(
resp[0].screen_name, 'RedScareBot',
"Expected screen_name of 1st blocked user to be RedScareBot, was {0}".format(
resp[0].screen_name))
        self.assertEqual(
            resp[1].screen_name, 'RedScareBot',
            "Expected screen_name of 2nd blocked user to be RedScareBot, was {0}".format(
                resp[1].screen_name))
@responses.activate
def testGetBlocksPaged(self):
with open('testdata/get_blocks_1.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
ncur, pcur, resp = self.api.GetBlocksPaged(cursor=1524574483549312671)
self.assertTrue(
isinstance(resp, list),
"Expected list, got {0}".format(type(resp)))
self.assertTrue(
isinstance(resp[0], twitter.User),
"Expected twitter.User, got {0}".format(type(resp[0])))
self.assertEqual(
len(resp), 1,
"Expected len of resp to be 1, got {0}".format(len(resp)))
self.assertEqual(
resp[0].screen_name, 'RedScareBot',
"Expected username of blocked user to be RedScareBot, got {0}".format(
resp[0].screen_name))
@responses.activate
def testGetBlocksIDs(self):
with open('testdata/get_blocks_ids_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/blocks/ids.json?cursor=-1&stringify_ids=False&include_entities=True&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
with open('testdata/get_blocks_ids_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/blocks/ids.json?cursor=1524566179872860311&stringify_ids=False&include_entities=True&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetBlocksIDs()
self.assertTrue(
isinstance(resp, list),
"Expected list, got {0}".format(type(resp)))
self.assertTrue(
isinstance(resp[0], int),
"Expected list, got {0}".format(type(resp)))
self.assertEqual(
len(resp), 2,
"Expected len of resp to be 2, got {0}".format(len(resp)))
@responses.activate
def testGetBlocksIDsPaged(self):
with open('testdata/get_blocks_ids_1.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
_, _, resp = self.api.GetBlocksIDsPaged(cursor=1524566179872860311)
self.assertTrue(
isinstance(resp, list),
"Expected list, got {0}".format(type(resp)))
self.assertTrue(
isinstance(resp[0], int),
"Expected list, got {0}".format(type(resp)))
self.assertEqual(
len(resp), 1,
"Expected len of resp to be 1, got {0}".format(len(resp)))
@responses.activate
def testGetFriendIDs(self):
# First request for first 5000 friends
with open('testdata/get_friend_ids_0.json') as f:
resp_data = f.read()
responses.add(
GET,
'https://api.twitter.com/1.1/friends/ids.json?count=5000&cursor=-1&stringify_ids=False&screen_name=EricHolthaus&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
# Second (last) request for remaining friends
with open('testdata/get_friend_ids_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/friends/ids.json?stringify_ids=False&count=5000&cursor=1417903878302254556&screen_name=EricHolthaus&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetFriendIDs(screen_name='EricHolthaus')
self.assertTrue(type(resp) is list)
self.assertEqual(len(resp), 6452)
self.assertTrue(type(resp[0]) is int)
# Error checking
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetFriendIDs(total_count='infinity'))
@responses.activate
def testGetFriendIDsPaged(self):
with open('testdata/get_friend_ids_0.json') as f:
resp_data = f.read()
responses.add(responses.GET, DEFAULT_URL, body=resp_data, status=200)
ncursor, pcursor, resp = self.api.GetFriendIDsPaged(screen_name='EricHolthaus')
self.assertLessEqual(len(resp), 5000)
self.assertTrue(ncursor)
self.assertFalse(pcursor)
@responses.activate
def testGetFriendsPaged(self):
with open('testdata/get_friends_paged.json') as f:
resp_data = f.read()
responses.add(responses.GET, DEFAULT_URL, body=resp_data, status=200)
ncursor, pcursor, resp = self.api.GetFriendsPaged(screen_name='codebear', count=200)
self.assertEqual(ncursor, 1494734862149901956)
self.assertEqual(pcursor, 0)
self.assertEqual(len(resp), 200)
self.assertTrue(type(resp[0]) is twitter.User)
@responses.activate
def testGetFriendsPagedUID(self):
with open('testdata/get_friends_paged_uid.json') as f:
resp_data = f.read()
responses.add(responses.GET, DEFAULT_URL, body=resp_data, status=200)
ncursor, pcursor, resp = self.api.GetFriendsPaged(user_id=12, count=200)
self.assertEqual(ncursor, 1510410423140902959)
self.assertEqual(pcursor, 0)
self.assertEqual(len(resp), 200)
self.assertTrue(type(resp[0]) is twitter.User)
@responses.activate
def testGetFriendsAdditionalParams(self):
with open('testdata/get_friends_paged_additional_params.json') as f:
resp_data = f.read()
responses.add(responses.GET, DEFAULT_URL, body=resp_data, status=200)
ncursor, pcursor, resp = self.api.GetFriendsPaged(user_id=12,
count=200,
skip_status=True,
include_user_entities=True)
self.assertEqual(ncursor, 1510492845088954664)
self.assertEqual(pcursor, 0)
self.assertEqual(len(resp), 200)
self.assertTrue(type(resp[0]) is twitter.User)
@responses.activate
def testGetFriends(self):
"""
This is tedious, but the point is to add a responses endpoint for
each call that GetFriends() is going to make against the API and
have it return the appropriate json data.
"""
cursor = -1
for i in range(0, 5):
with open('testdata/get_friends_{0}.json'.format(i)) as f:
resp_data = f.read()
endpoint = 'https://api.twitter.com/1.1/friends/list.json?count=200&tweet_mode=compat&include_user_entities=True&screen_name=codebear&skip_status=False&cursor={0}'.format(cursor)
responses.add(GET, endpoint, body=resp_data, match_querystring=True)
cursor = json.loads(resp_data)['next_cursor']
resp = self.api.GetFriends(screen_name='codebear')
self.assertEqual(len(resp), 819)
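    # Hedged helper sketch (added, not part of the original suite): the cursor-walking
    # registration in testGetFriends above could be factored out for reuse. The
    # endpoint and file templates are placeholders supplied by the caller, not real
    # fixtures from this repository.
    def _register_paged_responses(self, endpoint_template, file_template, pages):
        cursor = -1
        for i in range(pages):
            with open(file_template.format(i)) as f:
                resp_data = f.read()
            responses.add(GET, endpoint_template.format(cursor),
                          body=resp_data, match_querystring=True)
            cursor = json.loads(resp_data)['next_cursor']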
@responses.activate
def testGetFriendsWithLimit(self):
with open('testdata/get_friends_0.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetFriends(screen_name='codebear', total_count=200)
self.assertEqual(len(resp), 200)
def testFriendsErrorChecking(self):
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetFriends(screen_name='jack',
total_count='infinity'))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetFriendsPaged(screen_name='jack',
count='infinity'))
@responses.activate
def testGetFollowersIDs(self):
# First request for first 5000 followers
with open('testdata/get_follower_ids_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/followers/ids.json?tweet_mode=compat&cursor=-1&stringify_ids=False&count=5000&screen_name=GirlsMakeGames',
body=resp_data,
match_querystring=True,
status=200)
# Second (last) request for remaining followers
with open('testdata/get_follower_ids_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/followers/ids.json?tweet_mode=compat&count=5000&screen_name=GirlsMakeGames&cursor=1482201362283529597&stringify_ids=False',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetFollowerIDs(screen_name='GirlsMakeGames')
self.assertTrue(type(resp) is list)
self.assertEqual(len(resp), 7885)
self.assertTrue(type(resp[0]) is int)
# Error checking
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetFollowerIDs(total_count='infinity'))
@responses.activate
def testGetFollowers(self):
# First request for first 200 followers
with open('testdata/get_followers_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'{base_url}/followers/list.json?tweet_mode=compat&include_user_entities=True&count=200&screen_name=himawari8bot&skip_status=False&cursor=-1'.format(
base_url=self.api.base_url),
body=resp_data,
match_querystring=True,
status=200)
# Second (last) request for remaining followers
with open('testdata/get_followers_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'{base_url}/followers/list.json?tweet_mode=compat&include_user_entities=True&skip_status=False&count=200&screen_name=himawari8bot&cursor=1516850034842747602'.format(
base_url=self.api.base_url),
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetFollowers(screen_name='himawari8bot')
self.assertTrue(type(resp) is list)
self.assertTrue(type(resp[0]) is twitter.User)
self.assertEqual(len(resp), 335)
@responses.activate
def testGetFollowersPaged(self):
with open('testdata/get_followers_0.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
ncursor, pcursor, resp = self.api.GetFollowersPaged(screen_name='himawari8bot')
self.assertTrue(type(resp) is list)
self.assertTrue(type(resp[0]) is twitter.User)
self.assertEqual(len(resp), 200)
@responses.activate
def testGetFollowerIDsPaged(self):
with open('testdata/get_follower_ids_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/followers/ids.json?tweet_mode=compat&count=5000&stringify_ids=False&cursor=-1&screen_name=himawari8bot',
body=resp_data,
match_querystring=True,
status=200)
ncursor, pcursor, resp = self.api.GetFollowerIDsPaged(
screen_name='himawari8bot')
self.assertTrue(type(resp) is list)
self.assertTrue(type(resp[0]) is int)
self.assertEqual(len(resp), 5000)
with open('testdata/get_follower_ids_stringify.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/followers/ids.json?tweet_mode=compat&count=5000&stringify_ids=True&user_id=12&cursor=-1',
body=resp_data,
match_querystring=True,
status=200)
ncursor, pcursor, resp = self.api.GetFollowerIDsPaged(
user_id=12,
stringify_ids=True)
self.assertTrue(type(resp) is list)
if sys.version_info.major >= 3:
self.assertTrue(type(resp[0]) is str)
else:
self.assertTrue(type(resp[0]) is unicode)
self.assertEqual(len(resp), 5000)
@responses.activate
def testUsersLookup(self):
with open('testdata/users_lookup.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.UsersLookup(user_id=[718443])
self.assertTrue(type(resp) is list)
self.assertEqual(len(resp), 1)
user = resp[0]
self.assertTrue(type(user) is twitter.User)
self.assertEqual(user.screen_name, 'kesuke')
self.assertEqual(user.id, 718443)
@responses.activate
def testGetUser(self):
with open('testdata/get_user.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetUser(user_id=718443)
self.assertTrue(type(resp) is twitter.User)
self.assertEqual(resp.screen_name, 'kesuke')
self.assertEqual(resp.id, 718443)
@responses.activate
def testGetFavorites(self):
with open('testdata/get_favorites.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetFavorites(user_id=12, count=1, since_id=10, max_id=200)
self.assertTrue(type(resp) is list)
fav = resp[0]
self.assertEqual(fav.id, 677180133447372800)
self.assertIn("Extremely", fav.text)
@responses.activate
def testGetMentions(self):
with open('testdata/get_mentions.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetMentions(count=1)
self.assertTrue(type(resp) is list)
self.assertTrue([type(mention) is twitter.Status for mention in resp])
self.assertEqual(resp[0].id, 676148312349609985)
@responses.activate
def testGetListTimeline(self):
with open('testdata/get_list_timeline.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetListTimeline(list_id=None,
slug='space-bots',
owner_screen_name='inky')
self.assertTrue(type(resp) is list)
self.assertTrue([type(status) is twitter.Status for status in resp])
self.assertEqual(resp[0].id, 693191602957852676)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetListTimeline(
list_id=None,
slug=None,
owner_id=None))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetListTimeline(
list_id=None,
slug=None,
owner_screen_name=None))
@responses.activate
def testPostUpdate(self):
with open('testdata/post_update.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/statuses/update.json',
body=resp_data,
status=200)
post = self.api.PostUpdate(
status="blah Longitude coordinate of the tweet in degrees.")
self.assertTrue(type(post) is twitter.Status)
self.assertEqual(
post.text, "blah Longitude coordinate of the tweet in degrees.")
self.assertTrue(post.geo is None)
self.assertEqual(post.user.screen_name, 'notinourselves')
@responses.activate
def testPostUpdateExtraParams(self):
with open('testdata/post_update_extra_params.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/statuses/update.json',
body=resp_data,
status=200)
post = self.api.PostUpdate(
status="Not a dupe. Longitude coordinate of the tweet in degrees.",
in_reply_to_status_id=681496308251754496,
latitude=37.781157,
longitude=-122.398720,
place_id="1",
display_coordinates=True,
trim_user=True)
self.assertEqual(post.in_reply_to_status_id, 681496308251754496)
self.assertIsNotNone(post.coordinates)
@responses.activate
def testVerifyCredentials(self):
with open('testdata/verify_credentials.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.VerifyCredentials()
self.assertEqual(type(resp), twitter.User)
self.assertEqual(resp.name, 'notinourselves')
@responses.activate
def testVerifyCredentialsIncludeEmail(self):
with open('testdata/get_verify_credentials_include_email.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.VerifyCredentials(skip_status=True, include_email=True)
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.email, '[email protected]')
@responses.activate
def testUpdateBanner(self):
responses.add(
POST,
'{0}/account/update_profile_banner.json'.format(self.api.base_url),
body=b'',
status=201
)
resp = self.api.UpdateBanner(image='testdata/168NQ.jpg')
self.assertTrue(resp)
@responses.activate
def testUpdateBanner422Error(self):
responses.add(
POST,
'{0}/account/update_profile_banner.json'.format(self.api.base_url),
body=b'',
status=422
)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.UpdateBanner(image='testdata/168NQ.jpg')
)
try:
self.api.UpdateBanner(image='testdata/168NQ.jpg')
except twitter.TwitterError as e:
self.assertTrue("The image could not be resized or is too large." in str(e))
@responses.activate
def testUpdateBanner400Error(self):
responses.add(
POST,
'{0}/account/update_profile_banner.json'.format(self.api.base_url),
body=b'',
status=400
)
try:
self.api.UpdateBanner(image='testdata/168NQ.jpg')
except twitter.TwitterError as e:
self.assertTrue("Image data could not be processed" in str(e))
@responses.activate
def testGetMemberships(self):
with open('testdata/get_memberships.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetMemberships()
self.assertTrue(type(resp) is list)
self.assertTrue([type(lst) is twitter.List for lst in resp])
self.assertEqual(resp[0].id, 210635540)
@responses.activate
def testGetListsList(self):
with open('testdata/get_lists_list.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/list.json?tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListsList()
self.assertTrue(type(resp) is list)
self.assertTrue([type(lst) is twitter.List for lst in resp])
self.assertEqual(resp[0].id, 189643778)
with open('testdata/get_lists_list_screen_name.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/list.json?tweet_mode=compat&screen_name=inky',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListsList(screen_name='inky')
self.assertTrue(type(resp) is list)
self.assertTrue([type(lst) is twitter.List for lst in resp])
self.assertEqual(resp[0].id, 224581495)
with open('testdata/get_lists_list_user_id.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/list.json?tweet_mode=compat&user_id=13148',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListsList(user_id=13148)
self.assertTrue(type(resp) is list)
self.assertTrue([type(lst) is twitter.List for lst in resp])
self.assertEqual(resp[0].id, 224581495)
@responses.activate
def testGetLists(self):
with open('testdata/get_lists.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetLists()
self.assertTrue(resp)
lst = resp[0]
self.assertEqual(lst.id, 229581524)
self.assertTrue(type(lst), twitter.List)
self.assertEqual(lst.full_name, "@notinourselves/test")
self.assertEqual(lst.slug, "test")
@responses.activate
def testGetListMembers(self):
with open('testdata/get_list_members_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/members.json?count=100&include_entities=False&skip_status=False&list_id=93527328&cursor=-1&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
with open('testdata/get_list_members_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/members.json?list_id=93527328&skip_status=False&include_entities=False&count=100&tweet_mode=compat&cursor=4611686020936348428',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListMembers(list_id=93527328)
self.assertTrue(type(resp[0]) is twitter.User)
self.assertEqual(resp[0].id, 4048395140)
@responses.activate
def testGetListMembersPaged(self):
with open('testdata/get_list_members_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/members.json?count=100&include_entities=True&cursor=4611686020936348428&list_id=93527328&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListMembersPaged(list_id=93527328, cursor=4611686020936348428)
self.assertTrue([isinstance(u, twitter.User) for u in resp])
with open('testdata/get_list_members_extra_params.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/members.json?count=100&tweet_mode=compat&cursor=4611686020936348428&list_id=93527328&skip_status=True&include_entities=False',
body=resp_data,
match_querystring=True,
status=200)
_, _, resp = self.api.GetListMembersPaged(list_id=93527328,
cursor=4611686020936348428,
skip_status=True,
include_entities=False,
count=100)
self.assertFalse(resp[0].status)
@responses.activate
    def testGetListTimelineParams(self):
with open('testdata/get_list_timeline.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/statuses.json?&list_id=229581524&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListTimeline(list_id=229581524)
self.assertTrue(type(resp[0]) is twitter.Status)
with open('testdata/get_list_timeline_max_since.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/statuses.json?owner_screen_name=notinourselves&slug=test&max_id=692980243339071488&tweet_mode=compat&since_id=692829211019575296',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListTimeline(slug='test',
owner_screen_name='notinourselves',
max_id=692980243339071488,
since_id=692829211019575296)
self.assertTrue([isinstance(s, twitter.Status) for s in resp])
self.assertEqual(len(resp), 7)
self.assertTrue([s.id >= 692829211019575296 for s in resp])
self.assertTrue([s.id <= 692980243339071488 for s in resp])
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetListTimeline(slug='test'))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetListTimeline())
# 4012966701
with open('testdata/get_list_timeline_count_rts_ent.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/statuses.json?include_rts=False&count=13&tweet_mode=compat&include_entities=False&slug=test&owner_id=4012966701',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetListTimeline(slug='test',
owner_id=4012966701,
count=13,
include_entities=False,
include_rts=False)
self.assertEqual(len(resp), 13)
@responses.activate
def testCreateList(self):
with open('testdata/post_create_list.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/create.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.CreateList(
name='test2',
mode='private',
description='test for python-twitter')
self.assertEqual(resp.id, 233452137)
self.assertEqual(resp.description, 'test for python-twitter')
self.assertEqual(resp.mode, 'private')
@responses.activate
def testDestroyList(self):
with open('testdata/post_destroy_list.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/destroy.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroyList(list_id=233452137)
self.assertEqual(resp.id, 233452137)
self.assertEqual(resp.member_count, 0)
@responses.activate
def testCreateSubscription(self):
with open('testdata/post_create_subscription.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/subscribers/create.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.CreateSubscription(list_id=225486809)
self.assertEqual(resp.id, 225486809)
self.assertEqual(resp.name, 'my-bots')
@responses.activate
def testDestroySubscription(self):
with open('testdata/post_destroy_subscription.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/subscribers/destroy.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroySubscription(list_id=225486809)
self.assertEqual(resp.id, 225486809)
self.assertEqual(resp.name, 'my-bots')
@responses.activate
def testShowSubscription(self):
# User not a subscriber to the list.
with open('testdata/get_show_subscription_not_subscriber.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/subscribers/show.json?tweet_mode=compat&user_id=4040207472&list_id=189643778',
body=resp_data,
match_querystring=True,
status=200)
try:
self.api.ShowSubscription(list_id=189643778, user_id=4040207472)
except twitter.TwitterError as e:
self.assertIn(
"The specified user is not a subscriber of this list.",
str(e.message))
# User is a subscriber to list
with open('testdata/get_show_subscription.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/subscribers/show.json?list_id=189643778&tweet_mode=compat&screen_name=__jcbl__',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.ShowSubscription(list_id=189643778,
screen_name='__jcbl__')
self.assertEqual(resp.id, 372018022)
self.assertEqual(resp.screen_name, '__jcbl__')
self.assertTrue(resp.status)
# User is subscriber, using extra params
with open('testdata/get_show_subscription_extra_params.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/subscribers/show.json?include_entities=True&tweet_mode=compat&list_id=18964377&skip_status=True&screen_name=__jcbl__',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.ShowSubscription(list_id=18964377,
screen_name='__jcbl__',
include_entities=True,
skip_status=True)
self.assertFalse(resp.status)
@responses.activate
def testGetSubscriptions(self):
with open('testdata/get_get_subscriptions.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetSubscriptions()
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0].name, 'space bots')
@responses.activate
def testGetSubscriptionsSN(self):
with open('testdata/get_get_subscriptions_uid.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetSubscriptions(screen_name='inky')
self.assertEqual(len(resp), 20)
self.assertTrue([isinstance(l, twitter.List) for l in resp])
@responses.activate
    def testGetMembershipsScreenName(self):
with open('testdata/get_get_memberships.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/memberships.json?count=20&cursor=-1&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetMemberships()
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0].name, 'my-bots')
with open('testdata/get_get_memberships_himawari8bot.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/lists/memberships.json?count=20&cursor=-1&screen_name=himawari8bot&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetMemberships(screen_name='himawari8bot')
self.assertEqual(len(resp), 20)
self.assertTrue([isinstance(lst, twitter.List) for lst in resp])
@responses.activate
def testCreateListsMember(self):
with open('testdata/post_create_lists_member.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/members/create.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.CreateListsMember(list_id=229581524, user_id=372018022)
self.assertTrue(isinstance(resp, twitter.List))
self.assertEqual(resp.name, 'test')
self.assertEqual(resp.member_count, 2)
@responses.activate
def testCreateListsMemberMultiple(self):
with open('testdata/post_create_lists_member_multiple.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/members/create_all.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.CreateListsMember(list_id=229581524,
user_id=[372018022, 4040207472])
self.assertTrue(isinstance(resp, twitter.List))
self.assertEqual(resp.name, 'test')
self.assertEqual(resp.member_count, 3)
@responses.activate
def testDestroyListsMember(self):
with open('testdata/post_destroy_lists_member.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/members/destroy.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroyListsMember(list_id=229581524, user_id=372018022)
self.assertTrue(isinstance(resp, twitter.List))
self.assertEqual(resp.name, 'test')
self.assertEqual(resp.member_count, 1)
@responses.activate
def testDestroyListsMemberMultiple(self):
with open('testdata/post_destroy_lists_member_multiple.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/lists/members/destroy_all.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroyListsMember(list_id=229581524,
user_id=[372018022, 4040207472])
self.assertEqual(resp.member_count, 0)
self.assertEqual(resp.name, 'test')
self.assertTrue(isinstance(resp, twitter.List))
@responses.activate
def testPostUpdateWithMedia(self):
# API will first make a POST request to upload the file.
with open('testdata/post_upload_media_simple.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://upload.twitter.com/1.1/media/upload.json',
body=resp_data,
match_querystring=True,
status=200)
# Then the POST request to post a status with the media id attached.
with open('testdata/post_update_media_id.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/statuses/update.json?media_ids=697007311538229248',
body=resp_data,
match_querystring=True,
status=200)
# Local file
resp = self.api.PostUpdate(media='testdata/168NQ.jpg', status='test')
self.assertEqual(697007311538229248, resp.AsDict()['media'][0]['id'])
self.assertEqual(resp.text, "hi this is a test for media uploads with statuses https://t.co/FHgqb6iLOX")
# File object
with open('testdata/168NQ.jpg', 'rb') as f:
resp = self.api.PostUpdate(media=[f], status='test')
self.assertEqual(697007311538229248, resp.AsDict()['media'][0]['id'])
self.assertEqual(resp.text, "hi this is a test for media uploads with statuses https://t.co/FHgqb6iLOX")
# Media ID as int
resp = self.api.PostUpdate(media=697007311538229248, status='test')
# Media ID as list of ints
resp = self.api.PostUpdate(media=[697007311538229248], status='test')
responses.add(
POST,
"https://api.twitter.com/1.1/statuses/update.json?media_ids=697007311538229248,697007311538229249",
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.PostUpdate(
media=[697007311538229248, 697007311538229249], status='test')
@responses.activate
def testLookupFriendship(self):
with open('testdata/get_friendships_lookup_none.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/friendships/lookup.json?user_id=12&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
responses.add(
responses.GET,
'https://api.twitter.com/1.1/friendships/lookup.json?user_id=12,6385432&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
responses.add(
responses.GET,
'https://api.twitter.com/1.1/friendships/lookup.json?screen_name=jack&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
responses.add(
responses.GET,
'https://api.twitter.com/1.1/friendships/lookup.json?screen_name=jack,dickc&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.LookupFriendship(user_id=12)
self.assertTrue(isinstance(resp, list))
self.assertTrue(isinstance(resp[0], twitter.UserStatus))
self.assertEqual(resp[0].following, False)
self.assertEqual(resp[0].followed_by, False)
# If any of the following produce an unexpected result, the test will
# fail on a request to a URL that hasn't been set by responses:
test_user = twitter.User(id=12, screen_name='jack')
test_user2 = twitter.User(id=6385432, screen_name='dickc')
resp = self.api.LookupFriendship(screen_name='jack')
resp = self.api.LookupFriendship(screen_name=['jack'])
resp = self.api.LookupFriendship(screen_name=test_user)
resp = self.api.LookupFriendship(screen_name=[test_user, test_user2])
resp = self.api.LookupFriendship(user_id=12)
resp = self.api.LookupFriendship(user_id=[12])
resp = self.api.LookupFriendship(user_id=test_user)
resp = self.api.LookupFriendship(user_id=[test_user, test_user2])
self.assertRaises(
twitter.TwitterError,
lambda: self.api.LookupFriendship())
@responses.activate
def testLookupFriendshipMute(self):
with open('testdata/get_friendships_lookup_muting.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.LookupFriendship(screen_name='dickc')
self.assertEqual(resp[0].blocking, False)
self.assertEqual(resp[0].muting, True)
@responses.activate
def testLookupFriendshipBlockMute(self):
with open('testdata/get_friendships_lookup_muting_blocking.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.LookupFriendship(screen_name='dickc')
self.assertEqual(resp[0].muting, True)
self.assertEqual(resp[0].blocking, True)
@responses.activate
def testPostMediaMetadata(self):
responses.add(
POST,
'https://upload.twitter.com/1.1/media/metadata/create.json',
body=b'',
status=200)
resp = self.api.PostMediaMetadata(media_id=718561981427396608, alt_text='test')
# At the moment, all we can do is test if the method call works. The response
# body should be blank, with a 200 status on success.
self.assertTrue(resp)
@responses.activate
def testGetStatusWithExtAltText(self):
with open('testdata/get_status_ext_alt.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetStatus(status_id=724441953534877696)
self.assertEqual(resp.media[0].ext_alt_text, "\u201cJon Snow is dead.\u2026\u201d from \u201cGAME OF THRONES SEASON 6 EPISODES\u201d by HBO PR.")
@responses.activate
def testGetStatus(self):
with open('testdata/get_status.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetStatus(status_id=397)
self.assertTrue(type(resp) is twitter.Status)
self.assertEqual(resp.id, 397)
self.assertEqual(resp.user.id, 12)
self.assertFalse(resp != resp)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetStatus(status_id='test'))
@responses.activate
def testGetStatusExtraParams(self):
with open('testdata/get_status_extra_params.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetStatus(status_id=397, trim_user=True, include_entities=False)
self.assertFalse(resp.user.screen_name)
@responses.activate
def testGetStatuses(self):
with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
with open('testdata/get_statuses.1.json') as f:
resp_data = f.read()
rsps.add(GET, DEFAULT_URL, body=resp_data)
with open('testdata/get_statuses.2.json') as f:
resp_data = f.read()
rsps.add(GET, DEFAULT_URL, body=resp_data)
with open('testdata/get_statuses.ids.txt') as f:
status_ids = [int(l) for l in f]
resp = self.api.GetStatuses(status_ids)
self.assertTrue(type(resp) is list)
self.assertEqual(set(respitem.id for respitem in resp), set(status_ids))
self.assertFalse(resp != resp)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetStatuses(['test']))
@responses.activate
def testGetStatusesMap(self):
with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
with open('testdata/get_statuses.map.1.json') as f:
resp_data = f.read()
rsps.add(GET, DEFAULT_URL, body=resp_data)
with open('testdata/get_statuses.map.2.json') as f:
resp_data = f.read()
rsps.add(GET, DEFAULT_URL, body=resp_data)
with open('testdata/get_statuses.ids.txt') as f:
status_ids = [int(l) for l in f]
resp = self.api.GetStatuses(status_ids, map=True)
self.assertTrue(type(resp) is dict)
self.assertTrue(all([resp.get(status_id) for status_id in status_ids]))
self.assertFalse(resp != resp)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetStatuses(['test'], map=True))
@responses.activate
def testGetStatusOembed(self):
with open('testdata/get_status_oembed.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/statuses/oembed.json?tweet_mode=compat&id=397',
body=resp_data,
match_querystring=True,
status=200)
responses.add(
responses.GET,
'https://api.twitter.com/1.1/statuses/oembed.json?tweet_mode=compat&url=https://twitter.com/jack/statuses/397',
body=resp_data,
match_querystring=True,
status=200)
resp_id = self.api.GetStatusOembed(status_id=397)
self.assertEqual(resp_id['url'], 'https://twitter.com/jack/statuses/397')
self.assertEqual(resp_id['provider_url'], 'https://twitter.com')
self.assertEqual(resp_id['provider_name'], 'Twitter')
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetStatusOembed(status_id='test'))
resp_url = self.api.GetStatusOembed(url="https://twitter.com/jack/statuses/397")
self.assertEqual(resp_id, resp_url)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetStatusOembed(status_id=None, url=None))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetStatusOembed(status_id=397, align='test'))
@responses.activate
def testGetMutes(self):
# First iteration of the loop to get all the user's mutes
with open('testdata/get_mutes_users_list_loop_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/mutes/users/list.json?cursor=-1&stringify_ids=False&include_entities=True&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
        # Last iteration of that loop.
with open('testdata/get_mutes_users_list_loop_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/mutes/users/list.json?cursor=1535206520056388207&stringify_ids=False&include_entities=True&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetMutes(include_entities=True)
self.assertEqual(len(resp), 82)
self.assertTrue(isinstance(resp[0], twitter.User))
@responses.activate
def testGetMutesIDs(self):
# First iteration of the loop to get all the user's mutes
with open('testdata/get_mutes_users_ids_loop_0.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/mutes/users/ids.json?cursor=-1&stringify_ids=False&include_entities=True&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
        # Last iteration of that loop.
with open('testdata/get_mutes_users_ids_loop_1.json') as f:
resp_data = f.read()
responses.add(
responses.GET,
'https://api.twitter.com/1.1/mutes/users/ids.json?cursor=1535206520056565155&stringify_ids=False&include_entities=True&skip_status=False&tweet_mode=compat',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.GetMutesIDs()
self.assertEqual(len(resp), 82)
self.assertTrue(isinstance(resp[0], int))
@responses.activate
def testCreateBlock(self):
with open('testdata/post_blocks_create.json') as f:
resp_data = f.read()
responses.add(
POST,
DEFAULT_URL,
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.CreateBlock(screen_name='jack')
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.screen_name, 'jack')
resp = self.api.CreateBlock(user_id=12)
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.id, 12)
@responses.activate
def testDestroyBlock(self):
with open('testdata/post_blocks_destroy.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/blocks/destroy.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroyBlock(screen_name='jack')
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.screen_name, 'jack')
resp = self.api.DestroyBlock(user_id=12)
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.id, 12)
@responses.activate
def testCreateMute(self):
with open('testdata/post_mutes_users_create.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/mutes/users/create.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.CreateMute(screen_name='jack')
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.screen_name, 'jack')
resp = self.api.CreateMute(user_id=12)
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.id, 12)
@responses.activate
def testDestroyMute(self):
with open('testdata/post_mutes_users_destroy.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/mutes/users/destroy.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroyMute(screen_name='jack')
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.screen_name, 'jack')
resp = self.api.DestroyMute(user_id=12)
self.assertTrue(isinstance(resp, twitter.User))
self.assertEqual(resp.id, 12)
@responses.activate
def testMuteBlockParamsAndErrors(self):
# Basic type/error checking
self.assertRaises(
twitter.TwitterError,
lambda: self.api.CreateMute(user_id='test'))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.CreateMute())
with open('testdata/post_mutes_users_create_skip_status.json') as f:
resp_data = f.read()
responses.add(
POST,
'https://api.twitter.com/1.1/mutes/users/create.json',
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.CreateMute(screen_name='jack', skip_status=True)
self.assertTrue(isinstance(resp, twitter.User))
self.assertFalse(resp.status)
@responses.activate
def testPostUploadMediaChunkedInit(self):
with open('testdata/post_upload_chunked_INIT.json') as f:
resp_data = f.read()
responses.add(POST, DEFAULT_URL, body=resp_data, status=200)
with open('testdata/corgi.gif', 'rb') as fp:
resp = self.api._UploadMediaChunkedInit(fp)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(resp[0], 737956420046356480)
@responses.activate
def testPostUploadMediaChunkedAppend(self):
media_fp, filename, _, _ = twitter.twitter_utils.parse_media_file(
'testdata/corgi.gif')
responses.add(POST, DEFAULT_URL, body='', status=200)
resp = self.api._UploadMediaChunkedAppend(media_id=737956420046356480,
media_fp=media_fp,
filename=filename)
self.assertEqual(len(responses.calls), 7)
self.assertTrue(resp)
@responses.activate
def testPostUploadMediaChunkedAppendNonASCIIFilename(self):
media_fp, filename, _, _ = twitter.twitter_utils.parse_media_file(
'testdata/corgi.gif')
filename = "عَرَبِيّ"
responses.add(responses.POST, DEFAULT_URL, body='', status=200)
resp = self.api._UploadMediaChunkedAppend(media_id=737956420046356480,
media_fp=media_fp,
filename=filename)
self.assertEqual(len(responses.calls), 7)
@responses.activate
def testPostUploadMediaChunkedFinalize(self):
with open('testdata/post_upload_chunked_FINAL.json') as f:
resp_data = f.read()
responses.add(POST, DEFAULT_URL, body=resp_data, status=200)
resp = self.api._UploadMediaChunkedFinalize(media_id=737956420046356480)
self.assertEqual(len(responses.calls), 1)
self.assertTrue(resp)
@responses.activate
def testGetUserSuggestionCategories(self):
with open('testdata/get_user_suggestion_categories.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetUserSuggestionCategories()
self.assertTrue(type(resp[0]) is twitter.Category)
@responses.activate
def testGetUserSuggestion(self):
with open('testdata/get_user_suggestion.json') as f:
resp_data = f.read()
responses.add(responses.GET, DEFAULT_URL, body=resp_data, status=200)
category = twitter.Category(name='Funny', slug='funny', size=20)
resp = self.api.GetUserSuggestion(category=category)
self.assertTrue(type(resp[0]) is twitter.User)
@responses.activate
def testGetUserTimeSinceMax(self):
with open('testdata/get_user_timeline_sincemax.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetUserTimeline(user_id=12, since_id=757782013914951680, max_id=758097930670645248)
self.assertEqual(len(resp), 6)
@responses.activate
def testGetUserTimelineCount(self):
with open('testdata/get_user_timeline_count.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetUserTimeline(user_id=12, count=63)
self.assertEqual(len(resp), 63)
@responses.activate
def testDestroyStatus(self):
with open('testdata/post_destroy_status.json') as f:
resp_data = f.read()
responses.add(
POST,
DEFAULT_URL,
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroyStatus(status_id=746507834003578880)
self.assertTrue(isinstance(resp, twitter.models.Status))
self.assertEqual(resp.id, 746507834003578880)
@responses.activate
def testCreateFavorite(self):
with open('testdata/post_create_favorite.json') as f:
resp_data = f.read()
responses.add(POST, DEFAULT_URL, body=resp_data, status=200)
resp = self.api.CreateFavorite(status_id=757283981683412992)
self.assertEqual(resp.id, 757283981683412992)
status = twitter.models.Status(id=757283981683412992)
resp = self.api.CreateFavorite(status)
self.assertEqual(resp.id, 757283981683412992)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.CreateFavorite(status=None, status_id=None))
@responses.activate
def testDestroyFavorite(self):
with open('testdata/post_destroy_favorite.json') as f:
resp_data = f.read()
responses.add(POST, DEFAULT_URL, body=resp_data, status=200)
resp = self.api.DestroyFavorite(status_id=757283981683412992)
self.assertEqual(resp.id, 757283981683412992)
status = twitter.models.Status(id=757283981683412992)
resp = self.api.DestroyFavorite(status)
self.assertEqual(resp.id, 757283981683412992)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.DestroyFavorite(status=None, status_id=None))
@responses.activate
def testPostDirectMessage(self):
with open('testdata/post_post_direct_message.json') as f:
resp_data = f.read()
responses.add(
POST,
DEFAULT_URL,
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.PostDirectMessage(text="test message", user_id=372018022)
self.assertEqual(resp.text, "test message")
self.assertEqual(resp.sender_id, "4012966701")
self.assertEqual(resp.recipient_id, "372018022")
self.assertTrue(resp._json)
@responses.activate
def testDestroyDirectMessage(self):
with open('testdata/post_destroy_direct_message.json') as f:
resp_data = f.read()
responses.add(
POST,
DEFAULT_URL,
body=resp_data,
match_querystring=True,
status=200)
resp = self.api.DestroyDirectMessage(message_id=761517675243679747)
@responses.activate
def testShowFriendship(self):
with open('testdata/get_show_friendship.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.ShowFriendship(source_user_id=4012966701, target_user_id=372018022)
self.assertTrue(resp['relationship']['target'].get('following', None))
resp = self.api.ShowFriendship(source_screen_name='notinourselves', target_screen_name='__jcbl__')
self.assertRaises(
twitter.TwitterError,
lambda: self.api.ShowFriendship(source_screen_name='notinourselves')
)
self.assertRaises(
twitter.TwitterError,
lambda: self.api.ShowFriendship(target_screen_name='__jcbl__')
)
@responses.activate
@patch('twitter.api.Api.UploadMediaChunked')
def test_UploadSmallVideoUsesChunkedData(self, mocker):
responses.add(POST, DEFAULT_URL, body='{}')
video = NamedTemporaryFile(suffix='.mp4')
video.write(b'10' * 1024)
video.seek(0, 0)
resp = self.api.PostUpdate('test', media=video)
assert os.path.getsize(video.name) <= 1024 * 1024
assert isinstance(resp, twitter.Status)
assert twitter.api.Api.UploadMediaChunked.called
@responses.activate
def test_post_retweet(self):
with open('testdata/post_retweet.json') as f:
resp_data = f.read()
responses.add(POST, DEFAULT_URL, body=resp_data)
resp = self.api.PostRetweet(status_id=967413349473574913, trim_user=True)
assert resp
assert resp.id == 967465567773839360
self.assertRaises(
twitter.TwitterError,
lambda: self.api.PostRetweet(status_id=0))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.PostRetweet(status_id='asdf'))
@responses.activate
def test_get_retweets_of_me(self):
with open('testdata/get_retweets_of_me.json') as f:
resp_data = f.read()
responses.add(GET, DEFAULT_URL, body=resp_data)
resp = self.api.GetRetweetsOfMe(
count=1,
since_id=0,
max_id=100,
trim_user=True,
include_entities=True,
include_user_entities=True)
assert resp
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetRetweetsOfMe(count=200))
self.assertRaises(
twitter.TwitterError,
lambda: self.api.GetRetweetsOfMe(count='asdf'))
@responses.activate
def test_incoming_friendships(self):
with open('testdata/get_incoming_friendships.json') as f:
responses.add(GET, DEFAULT_URL, f.read())
resp = self.api.IncomingFriendship(cursor=1, stringify_ids=True)
assert resp
assert isinstance(resp, list)
assert resp[0] == 12
@responses.activate
def test_outgoing_friendships(self):
with open('testdata/get_outgoing_friendships.json') as f:
responses.add(GET, DEFAULT_URL, f.read())
resp = self.api.OutgoingFriendship(cursor=1, stringify_ids=True)
assert resp
assert isinstance(resp, list)
assert resp[0] == 12
@responses.activate
def test_update_profile(self):
with open('testdata/update_profile.json') as f:
responses.add(POST, DEFAULT_URL, f.read())
resp = self.api.UpdateProfile(
name='jeremy',
location='philly',
profileURL='example.com',
description='test',
profile_link_color='#e35',
include_entities=True,
skip_status=True)
assert resp
assert isinstance(resp, twitter.User)
|
py | 7dfd71e1e393fd32ea1e0bfed51277bd56aa4ca0 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from shared.datastore.entity_util import to_entity
class Subscription(object):
@classmethod
def to_entity(cls, properties, name, parent=None):
return to_entity(
'Subscription',
properties,
name=name,
parent=parent,
include_in_indexes=('date',),
)
class SubscriptionEvent(object):
@classmethod
def to_entity(cls, properties, name=None, parent=None):
return to_entity(
'SubscriptionEvent',
properties,
name=name,
parent=parent,
include_in_indexes=('date',),
)
@classmethod
def hash_name(cls, *args):
if len(args) == 0:
raise TypeError("Expected non-zero-length hash_name args")
hash_string = '-'.join([str(arg) for arg in args])
return hashlib.sha1(hash_string.encode()).hexdigest()
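# Illustrative example (sketch, not part of the original module): hash_name joins
# its arguments with '-' before hashing, so
#     SubscriptionEvent.hash_name('strava', 12345)
# is equivalent to hashlib.sha1(b'strava-12345').hexdigest().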
|
py | 7dfd71e2c48f858adddf3e83935f82eec3e90b58 | """
Module for entities implemented using the
switch platform (https://www.home-assistant.io/integrations/switch/).
"""
from __future__ import annotations
import logging
from typing import Any, cast
import hahomematic.central_unit as hm_central
from hahomematic.const import HM_ARG_ON_TIME, TYPE_ACTION, HmPlatform
import hahomematic.device as hm_device
from hahomematic.entity import GenericEntity, GenericSystemVariable
from hahomematic.helpers import SystemVariableData
PARAM_ON_TIME = "ON_TIME"
_LOGGER = logging.getLogger(__name__)
class HmSwitch(GenericEntity[bool]):
"""
Implementation of a switch.
This is a default platform that gets automatically generated.
"""
def __init__(
self,
device: hm_device.HmDevice,
unique_id: str,
channel_address: str,
paramset_key: str,
parameter: str,
parameter_data: dict[str, Any],
):
super().__init__(
device=device,
unique_id=unique_id,
channel_address=channel_address,
paramset_key=paramset_key,
parameter=parameter,
parameter_data=parameter_data,
platform=HmPlatform.SWITCH,
)
@property
def value(self) -> bool | None:
"""Get the value of the entity."""
if self._type == TYPE_ACTION:
return False
return self._value
async def turn_on(self, **kwargs: dict[str, Any] | None) -> None:
"""Turn the switch on."""
if HM_ARG_ON_TIME in kwargs:
on_time = float(cast(float, kwargs[HM_ARG_ON_TIME]))
await self.set_on_time_value(on_time=on_time)
await self.send_value(True)
async def turn_off(self) -> None:
"""Turn the switch off."""
await self.send_value(False)
async def set_on_time_value(self, on_time: float) -> None:
"""Set the on time value in seconds."""
await self._client.set_value_by_paramset_key(
channel_address=self.channel_address,
paramset_key=self._paramset_key,
parameter=PARAM_ON_TIME,
value=float(on_time),
)
class HmSysvarSwitch(GenericSystemVariable):
"""
Implementation of a sysvar switch entity.
"""
def __init__(self, central: hm_central.CentralUnit, data: SystemVariableData):
"""Initialize the entity."""
super().__init__(central=central, data=data, platform=HmPlatform.HUB_SWITCH)
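# Illustrative call pattern (sketch, not part of the original module; assumes an
# initialized HmSwitch instance `switch` and that HM_ARG_ON_TIME == "on_time"):
#     await switch.turn_on(on_time=30.0)  # writes ON_TIME=30 s, then sends True
#     await switch.turn_off()             # sends False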
|
py | 7dfd71eaa60149ed871390509279b83fb8a8cde6 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-13 09:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('atol', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='receipt',
name='retried_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='Дата повторной инициализации чека в системе оператора'),
),
migrations.AlterField(
model_name='receipt',
name='status',
field=models.CharField(choices=[('created', 'Ожидает инициации в системе оператора'), ('initiated', 'Иницирован в системе оператора'), ('retried', 'Повторно иницирован в системе оператора'), ('received', 'Получен от оператора'), ('no_email_phone', 'Отсутствует email/phone'), ('failed', 'Ошибка')], default='created', max_length=16, verbose_name='Статус чека'),
),
]
|
py | 7dfd7330c7710fdf9f840e4ab493d92205bcdae2 | def tablify(rows):
if not rows:
return [u""]
cols = len(rows[0])
col_widths = [0] * cols
for row in rows:
for pos, col in enumerate(row):
col_widths[pos] = max(col_widths[pos], len(unicode(col)))
buf = []
for row in rows:
cur_row_buf = []
for pos, col in enumerate(row):
cur_row_buf.append(unicode(col).ljust(col_widths[pos] + 2))
buf.append(u"".join(cur_row_buf))
return buf
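# Illustrative example (sketch; the module targets Python 2, hence `unicode`):
#     tablify([(1, u"alpha"), (22, u"b")])
# returns two lines in which each column is left-justified to the width of its
# widest entry plus two spaces of padding, e.g. u"22  b      " for the second row.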
|
py | 7dfd73aaa2e4e693549ff0dcaa29e85a478c2385 | import torch.nn as nn
from . import base
from . import functional as F
from ..base.modules import Activation
class JaccardLoss(base.Loss):
def __init__(self, eps=1., activation=None, ignore_channels=None, **kwargs):
super().__init__(**kwargs)
self.eps = eps
self.activation = Activation(activation)
self.ignore_channels = ignore_channels
def forward(self, y_pr, y_gt):
y_pr = self.activation(y_pr)
return 1 - F.jaccard(
y_pr, y_gt,
eps=self.eps,
threshold=None,
ignore_channels=self.ignore_channels,
)
class DiceLoss(base.Loss):
def __init__(self, eps=1., beta=1., activation=None, ignore_channels=None, class_intervals=None, **kwargs):
super().__init__(**kwargs)
self.eps = eps
self.beta = beta
self.activation = Activation(activation)
self.ignore_channels = ignore_channels
self.class_intervals = class_intervals
def forward(self, y_pr, y_gt):
y_pr = self.activation(y_pr)
return 1 - F.f_score(
y_pr, y_gt,
beta=self.beta,
eps=self.eps,
threshold=None,
ignore_channels=self.ignore_channels,
class_intervals=self.class_intervals,
)
class L1Loss(nn.L1Loss, base.Loss):
pass
class MSELoss(nn.MSELoss, base.Loss):
pass
class CrossEntropyLoss(nn.CrossEntropyLoss, base.Loss):
def __init__(self, eps=1e-6, activation=None, ignore_channels=None, class_intervals=None, **kwargs):
super().__init__(**kwargs)
self.eps = eps
self.activation = Activation(activation)
self.ignore_channels = ignore_channels
self.class_intervals = class_intervals
def forward(self, y_pr, y_gt):
y_pr = self.activation(y_pr)
loss = F.CE(y_pr, y_gt, eps=self.eps, threshold=None, ignore_channels=self.ignore_channels, class_intervals=self.class_intervals)
return loss
class NLLLoss(nn.NLLLoss, base.Loss):
pass
class BCELoss(nn.BCELoss, base.Loss):
def __init__(self, eps=1e-6, activation=None, ignore_channels=None, **kwargs):
super().__init__(**kwargs)
self.eps = eps
self.activation = Activation(activation)
self.ignore_channels = ignore_channels
def forward(self, y_pr, y_gt):
y_pr = self.activation(y_pr)
loss = F.BCE(y_pr, y_gt, eps=self.eps, threshold=None, ignore_channels=self.ignore_channels)
return loss
class BCEWithLogitsLoss(nn.BCEWithLogitsLoss, base.Loss):
pass
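# Illustrative usage sketch (not part of the original module; assumes binary masks
# and that the Activation helper accepts 'sigmoid'):
#     criterion = DiceLoss(activation='sigmoid')
#     loss = criterion(model_logits, target_masks)  # 1 - F-score (Dice for beta=1)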
|
py | 7dfd73e3b1e34372e16093c69dd0c3970fdf8063 | #!/usr/bin/env python
import os, sys, glob, subprocess, textwrap, setuptools
try:
# Git version extraction logic designed to be compatible with both semver and PEP 440
version = subprocess.check_output(["git", "describe", "--tags", "--match", "v*.*.*"]).decode()
version = version.strip("v\n").replace("-", "+", 1).replace("-", ".")
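    # e.g. a `git describe` result like "v1.2.3-4-gabcdef" becomes "1.2.3+4.gabcdef"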
except Exception:
version = "0.0.0"
setuptools.setup(
name="aegea",
version=version,
url="https://github.com/kislyuk/aegea",
license=open("LICENSE.md").readline().strip(),
author="Andrey Kislyuk",
author_email="[email protected]",
description="Amazon Web Services Operator Interface",
long_description=open("README.rst").read(),
install_requires=[
"boto3 >= 1.16.43, < 2",
"argcomplete >= 1.9.5, < 2",
"paramiko >= 2.4.2, < 3",
"requests >= 2.18.4, < 3",
"tweak >= 1.0.3, < 2",
"keymaker >= 1.1.0, < 2",
"pyyaml >= 3.12, < 5.4",
"python-dateutil >= 2.6.1, < 3",
"babel >= 2.4.0, < 3",
"ipwhois >= 1.1.0, < 2",
"uritemplate >= 3.0.0, < 4",
"awscli >= 1.18.203, < 2",
"chalice >= 1.21.7, < 2"
],
extras_require={
':python_version == "2.7"': [
"enum34 >= 1.1.6, < 2",
"ipaddress >= 1.0.19, < 2",
"subprocess32 >= 3.2.7, < 4"
]
},
tests_require=[
"coverage",
"flake8",
"mypy"
],
packages=setuptools.find_packages(exclude=["test"]),
scripts=glob.glob("scripts/*"),
platforms=["MacOS X", "Posix"],
test_suite="test",
include_package_data=True
)
|
py | 7dfd74d3904415335bf289487c6cc221cf66d5da | config = {
"randomforest_classification": {
'n_estimators': 200,
'random_state': 42,
},
"randomforest_regression": {
'n_estimators': 200,
'random_state': 42,
},
"adaboost_classification":{
'n_estimators': 200,
'learning_rate': 0.1,
'random_state': 42
},
"adaboost_regression":{
'n_estimators': 200,
'learning_rate': 0.1,
'random_state': 42
},
"gradientboost_classification":{
'n_estimators': 200,
'learning_rate': 0.1,
'subsample': 0.9,
'random_state': 42,
'verbose': -1
},
"gradientboost_regression":{
'n_estimators': 200,
'learning_rate': 0.1,
'subsample': 0.9,
'random_state': 42,
'verbose': -1
},
"svm_classification":{
'C': 10000.0,
'kernel': 'rbf',
'gamma': 0.1,
'tol': 1e-3,
'probability': True,
'shrinking': True,
'class_weight': 'balanced',
'random_state': 42,
},
"svm_regression":{
'C': 10000.0,
'kernel': 'rbf',
'gamma': 0.1,
'tol': 1e-3,
'probability': True,
'shrinking': True
},
"lr_classification":{
'C': 10000.0,
'class_weight': 'balanced',
'solver': 'lbfgs',
'random_state': 42,
'verbose': -1
},
"ridge_regression":{
'alpha': 0.1,
'normalize': False,
'solver': 'lbfgs',
'random_state': 42
},
"xgboost_classification":{
'objective': 'multi:softmax',
'eval_metric': 'logloss',
'num_class': 4,
'n_estimators': 30,
'max_depth': 4,
'subsample': 0.8,
'gamma': 0.01,
'eta': 0.01
},
"xgboost_regression":{
'objective': 'reg:squarederror',
'eval_metric': 'rmse',
'n_estimators': 30,
'learning_rate': 0.3,
'max_depth': 4,
'subsample': 0.8,
'gamma': 0.01,
'eta': 0.01
},
"lightgbm_classification":{
'boosting_type': 'gbdt',
'objective': 'multiclass',
'num_class': 4,
'metric': 'multi_logloss',
'verbose': -1,
'num_leaves': 64,
'subsample': 0.7,
'colsample_bytree': 0.7,
'subsample_freq': 1,
'learning_rate': 0.01,
'n_jobs': -1,
'device': 'cpu',
'num_boost_round': 30,
'early_stopping_round': 10,
'verbosity': 1
},
"lightgbm_regression":{
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'multi_logloss',
'verbose': -1,
'num_leaves': 64,
'subsample': 0.7,
'colsample_bytree': 0.7,
'subsample_freq': 1,
'learning_rate': 0.01,
'n_jobs': -1,
'device': 'cpu',
'num_boost_round': 30,
'early_stopping_round': 10,
'verbosity': 1
}
} |
py | 7dfd752537d958bac3edcbfcd30d880addf98b3b | from abc import ABCMeta, abstractmethod
from collections import namedtuple
import warnings
import torch
from torch import nn
from cnn_finetune.shims import no_grad_variable
from cnn_finetune.utils import default, product
ModelInfo = namedtuple(
'ModelInfo',
['input_space', 'input_size', 'input_range', 'mean', 'std']
)
# Global registry which is used to track wrappers for all model names.
MODEL_REGISTRY = {}
class ModelRegistryMeta(type):
"""Metaclass that registers all model names defined in model_names property
of a descendant class in the global MODEL_REGISTRY.
"""
def __new__(mcls, name, bases, namespace, **kwargs):
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
if 'model_names' in namespace:
for model_name in namespace['model_names']:
# If model_name is already registered,
# override the wrapper definition and display a warning.
if model_name in MODEL_REGISTRY:
current_class = "<class '{module}.{qualname}'>".format(
module=namespace['__module__'],
qualname=namespace['__qualname__'],
)
warnings.warn(
"{current_class} redefined model_name '{model_name}'"
"that was already registered by "
"{previous_class}".format(
current_class=current_class,
model_name=model_name,
previous_class=MODEL_REGISTRY[model_name]
)
)
MODEL_REGISTRY[model_name] = cls
return cls
class ModelWrapperMeta(ABCMeta, ModelRegistryMeta):
"""An intermediate class that allows usage of both
ABCMeta and ModelRegistryMeta simultaneously
"""
pass
class ModelWrapperBase(nn.Module, metaclass=ModelWrapperMeta):
"""Base class for all wrappers. To create a new wrapper you should
subclass it and add model names that are supported by the wrapper to
the model_names property. Those model names will be automatically
registered in the global MODEL_REGISTRY upon class initialization.
"""
# If True an output of .features() call will be converted
# to a tensor of shape [B, C * H * W].
flatten_features_output = True
def __init__(self, *, model_name, num_classes, pretrained, dropout_p, pool,
classifier_factory, use_original_classifier, input_size,
original_model_state_dict):
super().__init__()
if num_classes < 1:
raise ValueError('num_classes should be greater or equal to 1')
if use_original_classifier and classifier_factory:
raise ValueError(
'You can\'t use classifier_factory when '
'use_original_classifier is set to True'
)
self.check_args(
model_name=model_name,
num_classes=num_classes,
dropout_p=dropout_p,
pretrained=pretrained,
pool=pool,
classifier_fn=classifier_factory,
use_original_classifier=use_original_classifier,
input_size=input_size,
)
self.model_name = model_name
self.num_classes = num_classes
self.pretrained = pretrained
original_model = self.get_original_model()
if original_model_state_dict is not None:
original_model.load_state_dict(original_model_state_dict)
self._features = self.get_features(original_model)
self.dropout = nn.Dropout(p=dropout_p) if dropout_p else None
self.pool = self.get_pool() if pool is default else pool
self.input_size = input_size
self.original_model_info = self.get_original_model_info(original_model)
if input_size:
classifier_in_features = self.calculate_classifier_in_features(
original_model
)
else:
classifier_in_features = self.get_classifier_in_features(
original_model
)
if use_original_classifier:
classifier = self.get_original_classifier(original_model)
else:
if classifier_factory:
classifier = classifier_factory(
classifier_in_features, num_classes,
)
else:
classifier = self.get_classifier(
classifier_in_features, num_classes
)
self._classifier = classifier
@abstractmethod
def get_original_model(self):
# Should return a model that will be later passed to
# methods that will construct a model for fine-tuning.
pass
@abstractmethod
def get_features(self, original_model):
# Should return an instance of nn.Module that will be used as
# a feature extractor.
pass
@abstractmethod
def get_classifier_in_features(self, original_model):
        # Should return the number of input features for the classifier
        # for the case when the default pooling layer is being used.
pass
def get_original_model_info(self, original_model):
# Should return an instance of ModelInfo.
return None
def calculate_classifier_in_features(self, original_model):
# Runs forward pass through feature extractor to get
# the number of input features for classifier.
with no_grad_variable(torch.zeros(1, 3, *self.input_size)) as input_var:
# Set model to the eval mode so forward pass
# won't affect BatchNorm statistics.
original_model.eval()
output = original_model.features(input_var)
if self.pool is not None:
output = self.pool(output)
original_model.train()
return product(output.size()[1:])
def check_args(self, **kwargs):
# Allows additional arguments checking by model wrappers.
pass
def get_pool(self):
# Returns default pooling layer for model. May return None to
# indicate absence of pooling layer in a model.
return nn.AdaptiveAvgPool2d(1)
def get_classifier(self, in_features, num_classes):
return nn.Linear(in_features, self.num_classes)
def get_original_classifier(self, original_model):
raise NotImplementedError()
def features(self, x):
return self._features(x)
def classifier(self, x):
return self._classifier(x)
def forward(self, x):
x = self.features(x)
if self.pool is not None:
x = self.pool(x)
if self.dropout is not None:
x = self.dropout(x)
if self.flatten_features_output:
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_model(
model_name,
num_classes,
pretrained=False,
dropout_p=None,
pool=default,
classifier_factory=None,
use_original_classifier=False,
input_size=None,
original_model_state_dict=None,
):
"""
Args:
model_name (str): Name of the model.
num_classes (int): Number of classes for the classifier.
pretrained (bool, optional) If True uses ImageNet weights for the
original model.
dropout_p (float, optional) Dropout probability.
pool (nn.Module or None, optional) Custom pooling layer.
classifier_factory (callable, optional) Allows creating a custom
classifier instead of using nn.Linear. Should be a callable
that takes the number of input features and the number of classes
as arguments and returns a classifier module.
use_original_classifier (bool, optional) If True uses classifier from
the original model.
input_size (tuple, optional) Input size of images that will be
fed into the network. Should be a tuple containing (width, height)
in pixels. Required for architectures that use fully-connected
layers such as AlexNet or VGG.
original_model_state_dict (dict, optional): Dict containing
parameters for the original model.
"""
if model_name not in MODEL_REGISTRY:
raise ValueError(
'model_name {model_name} not found. '
'Available model_name values: {model_names}'.format(
model_name=model_name,
model_names=', '.join(MODEL_REGISTRY.keys())
)
)
wrapper = MODEL_REGISTRY[model_name]
return wrapper(
model_name=model_name,
num_classes=num_classes,
pretrained=pretrained,
dropout_p=dropout_p,
pool=pool,
classifier_factory=classifier_factory,
use_original_classifier=use_original_classifier,
input_size=input_size,
original_model_state_dict=original_model_state_dict,
)
|
py | 7dfd759a1f80652791d76bf350656b52c6efe0a9 | import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import cv2
import numpy as np
MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)
parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored (e.g., python main.py --ignore person book)
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU (e.g., python main.py --set-class-iou person 0.7)
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
args = parser.parse_args()
'''
0,0 ------> x (width)
|
| (Left,Top)
| *_________
| | |
| |
y |_________|
(height) *
(Right,Bottom)
'''
# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
args.ignore = []
specific_iou_flagged = False
if args.set_class_iou is not None:
specific_iou_flagged = True
# make sure that the cwd() is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
# if there are no images then no animation can be shown
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
IMG_PATH_OUT = os.path.join(os.getcwd(), 'output')
if os.path.exists(IMG_PATH):
for dirpath, dirnames, files in os.walk(IMG_PATH):
if not files:
# no image files found
args.no_animation = True
else:
args.no_animation = True
# show the animation (uses OpenCV, imported above) unless the user chose --no-animation
show_animation = False
if not args.no_animation:
show_animation = True
"""
Create a ".temp_files/" and "output/" directory
"""
TEMP_FILES_PATH = ".temp_files"
if not os.path.exists(TEMP_FILES_PATH): # if it doesn't exist already
os.makedirs(TEMP_FILES_PATH)
output_files_path = "output"
if os.path.exists(output_files_path): # if it exists already
# reset the output directory
shutil.rmtree(output_files_path)
os.makedirs(output_files_path)
def error(msg):
print(msg)
sys.exit(0)
def file_lines_to_list(path):
# open txt file lines to a list
with open(path) as f:
content = f.readlines()
# remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
return content
def resize_image(img):
img_size = img.shape
im_size_min = np.min(img_size[0:2])
im_size_max = np.max(img_size[0:2])
im_scale = float(600) / float(im_size_min)
if np.round(im_scale * im_size_max) > 1200:
im_scale = float(1200) / float(im_size_max)
new_h = int(img_size[0] * im_scale)
new_w = int(img_size[1] * im_scale)
new_h = new_h if new_h // 16 == 0 else (new_h // 16 + 1) * 16
new_w = new_w if new_w // 16 == 0 else (new_w // 16 + 1) * 16
re_im = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
return re_im, (new_h / img_size[0], new_w / img_size[1])
"""
ground-truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
def get_GT_data():
# get a list with the ground-truth files
ground_truth_files_list = glob.glob(GT_PATH + '/*.txt')
detection_files_list = glob.glob(DR_PATH + '/*.txt')
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
counter_images_per_class = {}
gt_files = []
for txt_file in ground_truth_files_list:
# for txt_file in detection_files_list:
#print(txt_file)
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
lines_list = file_lines_to_list(txt_file)
# create ground-truth dictionary
bounding_boxes = []
is_difficult = False
array_lines = []
# img = cv2.imread(os.path.join(IMG_PATH, (file_id.split('_')[1] + ".jpg")))
img = cv2.imread(os.path.join(IMG_PATH, (file_id + ".jpg")))
# img1, (rh, rw) = resize_image(img)
# img2 = cv2.resize(img1, None, None, fx=1.0 / rh, fy=1.0 / rw, interpolation=cv2.INTER_LINEAR)
for line in lines_list:
try:
print(line)
# xmin, ymin, _, _, xmax, ymax, _, _, original_text = line.split(',', 8)
xmin, ymin, _, _, xmax, ymax, _, _ = line.split(',', 8)
cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 0, 0), 2)
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error(error_msg)
bbox = xmin + " " + ymin + " " + xmax + " " +ymax
bounding_boxes.append({"bbox":bbox, "used":False})
# dump bounding_boxes into a ".json" file
cv2.imwrite(os.path.join(IMG_PATH_OUT, (file_id + ".jpg")), img[:, :, ::-1])
new_temp_file = TEMP_FILES_PATH + os.sep + file_id + "_ground_truth.json"
gt_files.append(new_temp_file)
with open(new_temp_file, 'w') as outfile:
json.dump(bounding_boxes, outfile)
return gt_files
if __name__ == '__main__':
get_GT_data()
|
py | 7dfd76da9ffda01acaf43dc2d9c40916b195fa0e | import pytest
from pytest_bdd import given, when, then, scenario
@pytest.mark.parametrize(
['start', 'eat', 'left'],
[(12, 5, 7)])
@scenario(
'parametrized.feature',
'Parametrized given, when, thens',
)
def test_parametrized(request, start, eat, left):
"""Test parametrized scenario."""
@pytest.fixture(params=[1, 2])
def foo_bar(request):
return 'bar' * request.param
@pytest.mark.parametrize(
['start', 'eat', 'left'],
[(12, 5, 7)])
@scenario(
'parametrized.feature',
'Parametrized given, when, thens',
)
def test_parametrized_with_other_fixtures(request, start, eat, left, foo_bar):
"""Test parametrized scenario, but also with other parametrized fixtures."""
@given('there are <start> cucumbers')
def start_cucumbers(start):
return dict(start=start)
@when('I eat <eat> cucumbers')
def eat_cucumbers(start_cucumbers, start, eat):
start_cucumbers['eat'] = eat
@then('I should have <left> cucumbers')
def should_have_left_cucumbers(start_cucumbers, start, eat, left):
assert start - eat == left
assert start_cucumbers['start'] == start
assert start_cucumbers['eat'] == eat
|
py | 7dfd77c975ce0bd8934251e0898f8fcb0f36a26c |
def chr_is_hex(c):
return (c >= '0' and c <= '9') or (c >= 'a' and c <= 'f') or (c >= 'A' and c <= 'F')
def chr_is_oct(c):
return c >= '0' and c <= '7'
def chr_is_bin(c):
return c == '0' or c == '1'
def chr_is_identifier_start(c):
return c.isalpha() or c in '_'
def chr_is_identifier(c):
return c.isalnum() or c in '_'
class AsmReader:
def __init__(self, text='', filename=''):
self.text = text
self.lineno = 1
self.filename = filename
def feed(self, text):
self.text += text
def syntax_error(self, message):
read_len = len(self.line_start) - len(self.text)
next_line = self.line_start.find('\n')
line = self.line_start[:(next_line if next_line > -1 else len(self.line_start) - 1)]
raise SyntaxError(message, (self.filename, self.lineno, read_len, line))
def __iter__(self):
return self
def __next__(self):
if not self.text:
raise StopIteration()
char = self.next_interesting_character()
while char == ';' or char == '\n':
if char == ';':
self.skip_comment()
if self.text:
self.read('\n')
self.lineno += 1
char = self.next_interesting_character()
if char == '\n':
self.skip(1)
char = self.next_interesting_character()
self.lineno += 1
self.line_start = self.text
if char == '':
return ('eof', None)
if char == '.':
self.skip(1)
const = self.next_constant()
self.end_of_line()
return const
elif char == '@':
self.skip(1)
ent_local = self.next_entity_local()
self.end_of_line()
return ent_local
elif char == '_':
self.skip(1)
return self.next_local_label()
elif char == '#':
self.skip(1)
directive = self.next_directive()
self.end_of_line()
return directive
else:
text = self.text
# TODO there should be a better way to do this
try:
return ('label', self.read_label())
except SyntaxError as e:
self.text = text
instr = self.next_instruction()
self.end_of_line()
return instr
def next_instruction(self):
instr = self.read_symbol().upper()
if self.head == '.':
instr += self.read('.') + self.read_symbol().upper()
whitespace = self.skip_whitespace()
if self.text and self.head not in '\n;' and not whitespace:
self.syntax_error('Expected newline, got %r' % self.head)
operands = []
if instr in ['CMD', 'TEST']: # special case
operands.append(self.read_at_least_once(lambda c: c != '\n', 'non-newline'))
return ('instruction', (instr, operands))
first = True
while self.text and self.head not in '\n;':
if not first:
self.read(',')
self.skip_whitespace()
operands.append(self.read_ref())
self.skip_whitespace()
first = False
return ('instruction', (instr, operands))
def next_constant(self):
name = self.read_symbol()
self.read_whitespace()
value = self.read_ref()
return ('const', (name, value))
def next_entity_local(self):
name = self.read_symbol()
specific = ''
if self.head == ' ':
self.read_whitespace()
specific = self.read_at_least_once(lambda c: c != '\n', 'non-newline')
return ('entity_local', (name, specific))
def next_local_label(self):
return ('local_label', self.read_label())
def next_directive(self):
name = self.read_symbol()
self.read_whitespace()
value = self.read_at_least_once(lambda c: c != '\n', 'non-newline')
return ('directive', (name, value))
def read_whitespace(self):
self.read_at_least_once(lambda c: c in ' \t', 'whitespace')
def read_ref(self):
head = self.head
if head == '#':
self.skip(1)
return ('literal', self.read_number())
elif head.isnumeric():
return ('address', self.read_number())
elif head == '"':
return ('string', self.read_string())
else:
return ('symbol', self.read_symbol())
def read_number(self):
mul = -1 if self.head == '-' else 1
if mul == -1: # Read negative sign
self.skip(1)
if self.head == '0':
type = self.peek()
if type == 'x':
self.skip(2)
return mul*int(self.read_at_least_once(chr_is_hex, 'hex char'), 16)
elif type == 'b':
self.skip(2)
return mul*int(self.read_at_least_once(chr_is_bin, 'bin char'), 2)
elif type == 'o':
self.skip(2)
return mul*int(self.read_at_least_once(chr_is_oct, 'oct char'), 8)
# fall through to read as decimal number
return mul*int(self.read_at_least_once(str.isdecimal, 'decimal char'))
def read_string(self):
self.read('"')
string = ''
while True:
string += self.read_while(lambda c: c not in '\n\\"')
if self.head == '\n':
self.syntax_error('Unterminated string')
elif self.head == '\\':
self.skip(1)
if self.head == 'n':
string += '\n'
elif self.head == '"':
string += '"'
else:
self.syntax_error('Invalid escape %r' % self.head)
self.skip(1)
else:
break
self.read('"')
return string
def read_label(self):
name = self.read_symbol()
self.read(':')
return name
def read_symbol(self):
symb = self.read(chr_is_identifier_start, 'start of identifier')
symb += self.read_while(chr_is_identifier)
return symb
def read(self, cond, desc=''):
head = self.head
test = cond(head) if callable(cond) else head == cond
if test:
self.skip(1)
return head
if not desc:
desc = '<unknown expectation>'
self.syntax_error('Expected %s, got %r' % (desc if callable(cond) else repr(cond), head))
def read_any(self, options):
return self.read(lambda c: c in options, 'any of %s' % list(options))
def read_at_least_once(self, cond, desc=''):
val = self.read(cond, desc)
val += self.read_while(cond)
return val
def read_while(self, cond):
ptr = 0
while ptr < len(self.text) and cond(self.text[ptr]):
ptr += 1
val = self.text[:ptr]
self.skip(ptr)
return val
def peek(self):
return self.text[1] if len(self.text) > 1 else ''
def skip(self, n):
if n >= len(self.text):
self.text = ''
else:
self.text = self.text[n:]
def skip_comment(self):
ptr = 0
while ptr < len(self.text) and self.text[ptr] != '\n':
ptr += 1
self.skip(ptr)
def skip_whitespace(self):
return self.read_while(lambda c: c in ' \t')
def next_interesting_character(self):
self.skip_whitespace()
return self.head
def end_of_line(self):
self.skip_whitespace()
if self.text:
old = self.text
self.read_any('\n;')
self.text = old # don't read, only peek
head = property(lambda self: self.text[0] if self.text else '')
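# Illustrative usage sketch (not part of the original module):
#     reader = AsmReader(".myconst #42\n", filename="<demo>")
#     next(reader)  # -> ('const', ('myconst', ('literal', 42)))
#     next(reader)  # -> ('eof', None)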
|
py | 7dfd787c64063a44b3f3c9566612a1d3849f2620 | from zc.recipe import egg
import zc.buildout
class WSGI(object):
""" provide a buildout wsgi entrypoint"""
template = """%(relative_paths_setup)s
import sys
sys.path[0:0] = [
%(path)s,
]
from rctk.wsgi import application
"""
def __init__(self, buildout, name, options):
self.buildout, self.name, self.options = buildout, name, options
python = options.get('python', buildout['buildout']['python'])
options['executable'] = buildout[python]['executable']
options['bin-directory'] = buildout['buildout']['bin-directory']
options.setdefault('eggs', 'zc.recipe.egg')
self.zcegg = egg.Egg(buildout, options['recipe'], options)
def install(self):
"""installer"""
options = self.options
reqs, ws = self.zcegg.working_set([options['recipe']])
_script_template = zc.buildout.easy_install.script_template
zc.buildout.easy_install.script_template = zc.buildout.easy_install.script_header + self.template
scripts = zc.buildout.easy_install.scripts(
[(self.name, options['recipe']+'.ctl', 'run')],
ws,
options['executable'],
options['bin-directory'],
arguments = [],
)
zc.buildout.easy_install.script_template = _script_template
return scripts
def update(self):
"""updater"""
self.install()
|
py | 7dfd78900dadae8fd48706d6851b484e69363581 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Memory usage check."""
import time
from threading import Thread
from typing import Any, List, Tuple, Union
import click
from aea.protocols.base import Message
from aea.registries.resources import Resources
from aea.skills.base import Handler
from benchmark.checks.utils import SyncedGeneratorConnection # noqa: I100
from benchmark.checks.utils import (
get_mem_usage_in_mb,
make_agent,
make_envelope,
make_skill,
multi_run,
number_of_runs_deco,
output_format_deco,
print_results,
wait_for_condition,
)
from packages.fetchai.protocols.default.message import DefaultMessage
class TestHandler(Handler):
"""Dummy handler to handle messages."""
SUPPORTED_PROTOCOL = DefaultMessage.protocol_id
def setup(self) -> None:
"""Noop setup."""
def teardown(self) -> None:
"""Noop teardown."""
def handle(self, message: Message) -> None:
"""Handle incoming message."""
self.context.outbox.put(make_envelope(message.to, message.sender))
def run(duration: int, runtime_mode: str) -> List[Tuple[str, Union[int, float]]]:
"""Check memory usage."""
# pylint: disable=import-outside-toplevel,unused-import
# import manually due to some lazy imports in decision_maker
import aea.decision_maker.default # noqa: F401
connection = SyncedGeneratorConnection.make()
resources = Resources()
resources.add_connection(connection)
agent = make_agent(runtime_mode=runtime_mode, resources=resources)
agent.resources.add_skill(make_skill(agent, handlers={"test": TestHandler}))
t = Thread(target=agent.start, daemon=True)
t.start()
wait_for_condition(lambda: agent.is_running, timeout=5)
connection.enable()
time.sleep(duration)
connection.disable()
mem_usage = get_mem_usage_in_mb()
agent.stop()
t.join(5)
rate = connection.count_in / duration
return [
("envelopes received", connection.count_in),
("envelopes sent", connection.count_out),
("rate (envelopes/second)", rate),
("mem usage (Mb)", mem_usage),
]
@click.command()
@click.option("--duration", default=3, help="Run time in seconds.")
@click.option(
"--runtime_mode", default="async", help="Runtime mode: async or threaded."
)
@number_of_runs_deco
@output_format_deco
def main(
duration: int, runtime_mode: str, number_of_runs: int, output_format: str
) -> Any:
"""Run test."""
parameters = {
"Duration(seconds)": duration,
"Runtime mode": runtime_mode,
"Number of runs": number_of_runs,
}
def result_fn() -> List[Tuple[str, Any, Any, Any]]:
return multi_run(
int(number_of_runs),
run,
(duration, runtime_mode),
)
return print_results(output_format, parameters, result_fn)
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
|
py | 7dfd78aaa9f0997f312e633a54c3b874b80be7e5 | #!/usr/bin/python3
import os
import sys
if sys.version_info[0] < 3: raise Exception("Python 3 or a more recent version is required.")
import json
import subprocess
average_num_times = 3
max_accum_time = 60 # don't take too long on a test - stop averaging once the accumulated time exceeds this many seconds
with open('tests.json') as data_file:
data = json.load(data_file)
def runBench(prog):
result = subprocess.Popen(prog.split(), stdout = subprocess.PIPE).communicate()[0]
result = result.splitlines()
for line in result:
line = line.decode("utf-8")
if line.startswith("Time running "):
return str(line.rsplit(' ', 1)[-1])
return ""
call = 'python ./bench.py'
the_os = 'linux'
if os.name == "nt":
call = 'python bench.py'
the_os = 'windows'
f = open('results.txt', 'w')
for test in ['header', 'asserts', 'runtime']:
print( '\n************** ' + test + '\n')
f.write('\n************** ' + test + '\n')
f.flush()
for framework in ['doctest', 'catch']:
print( '== ' + framework + '\n')
f.write('== ' + framework + '\n')
f.flush()
for config in data['compilers'][the_os]:
for curr in data[test][1]:
if curr[0] == framework or curr[0] == "any":
command = call + data[test][0] + config + curr[1] + (' --catch' if framework == 'catch' else '')
print(command)
accum = float(0)
num_times = 0
for i in range(0, average_num_times):
res = float(runBench(command))
print(res)
accum += res
num_times += 1
if accum > max_accum_time:
break
average = "{:7.2f}".format(round(accum / num_times, 2))
print("AVERAGE: " + average)
f.write(average + " | ")
f.flush()
f.write("\n")
f.flush()
f.close()
|
py | 7dfd78b11187e418f3e8208f75b4e2fa2b4c2001 | """Various utilities."""
import numpy
import numpy.linalg
from math import sqrt
def eval_expr_simple(expr, kparam):  # pylint: disable=too-many-return-statements
"""
    To evaluate expressions that only require kparams and not a, b, c, ...
"""
if expr == "0":
return 0.0
if expr == "1/2":
return 1.0 / 2.0
if expr == "1":
return 1.0
if expr == "-1/2":
return -1.0 / 2.0
if expr == "1/4":
return 1.0 / 4.0
if expr == "3/8":
return 3.0 / 8.0
if expr == "3/4":
return 3.0 / 4.0
if expr == "5/8":
return 5.0 / 8.0
if expr == "1/3":
return 1.0 / 3.0
try:
return kparam[expr]
except KeyError as exc:
raise ValueError(
"Asking for evaluation of symbol '{}' in "
"eval_expr_simple but this has not been defined or not "
"yet computed".format(str(exc))
)
def extend_kparam(kparam):
"""
Extend the list of kparam with also expressions like :math:`1-x`, ...
:param kparam: a dictionary where the key is the expression as a string and
the value is the numerical value
:return: a similar dictionary, extended with simple expressions
"""
kparam_extended = {}
for key, val in kparam.items():
kparam_extended[key] = val
kparam_extended["-{}".format(key)] = -val
kparam_extended["1-{}".format(key)] = 1.0 - val
kparam_extended["-1+{}".format(key)] = -1.0 + val
kparam_extended["1/2-{}".format(key)] = 1.0 / 2.0 - val
kparam_extended["1/2+{}".format(key)] = 1.0 / 2.0 + val
return kparam_extended
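# Illustrative example (sketch): extend_kparam({'X': 0.2}) also defines
# '-X' -> -0.2, '1-X' -> 0.8, '-1+X' -> -0.8, '1/2-X' -> 0.3 and '1/2+X' -> 0.7.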
def eval_expr( # pylint: disable=too-many-return-statements,unused-argument
expr, a, b, c, cosalpha, cosbeta, cosgamma, kparam
):
r"""
Given a string expression as a function of the parameters ``a``, ``b``, ``c`` (lengths of the
cell lattice vectors) and ``cosalpha``, ``cosbeta``, ``cosgamma`` (the cosines of the three
angles between lattice vectors) returns the numerical value of the expression.
:param a: length of the first lattice vector
:param b: length of the second lattice vector
:param c: length of the third lattice vector
:param cosalpha: cosine of the :math:`\alpha` angle (between lattice vectors 2 and 3)
:param cosbeta: cosine of the :math:`\beta` angle (between lattice vectors 1 and 3)
:param cosgamma: cosine of the :math:`\gamma` angle (between lattice vectors 1 and 2)
:param kparam: a dictionary that associates the value to expressions as a function
of the ``a, b, c, cosalpha, cosbeta, cosgamma`` parameters
:return: the value of the expression for the given values of the cell parameters
.. note:: To evaluate expressions, I hardcode a table of existing expressions in the
DB rather than parsing the string (to avoid additional dependencies and
avoid the use of ``eval``).
"""
from math import sqrt
# sinalpha = sqrt(1.0 - cosalpha ** 2)
sinbeta = sqrt(1.0 - cosbeta ** 2)
# singamma = sqrt(1.0 - cosgamma ** 2)
try:
if expr == "(a*a/b/b+(1+a/c*cosbeta)/sinbeta/sinbeta)/4":
return (a * a / b / b + (1.0 + a / c * cosbeta) / sinbeta / sinbeta) / 4.0
if expr == "1-Z*b*b/a/a":
Z = kparam["Z"]
return 1.0 - Z * b * b / a / a
if expr == "1/2-2*Z*c*cosbeta/a":
Z = kparam["Z"]
return 1.0 / 2.0 - 2.0 * Z * c * cosbeta / a
if expr == "E/2+a*a/4/b/b+a*c*cosbeta/2/b/b":
E = kparam["E"]
return E / 2.0 + a * a / 4.0 / b / b + a * c * cosbeta / 2.0 / b / b
if expr == "2*F-Z":
F = kparam["F"]
Z = kparam["Z"]
return 2.0 * F - Z
if expr == "c/2/a/cosbeta*(1-4*U+a*a*sinbeta*sinbeta/b/b)":
U = kparam["U"]
return (
c
/ 2.0
/ a
/ cosbeta
* (1.0 - 4.0 * U + a * a * sinbeta * sinbeta / b / b)
)
if expr == "-1/4+W/2-Z*c*cosbeta/a":
W = kparam["W"]
Z = kparam["Z"]
return -1.0 / 4.0 + W / 2.0 - Z * c * cosbeta / a
if expr == "(2+a/c*cosbeta)/4/sinbeta/sinbeta":
return (2.0 + a / c * cosbeta) / 4.0 / sinbeta / sinbeta
if expr == "3/4-b*b/4/a/a/sinbeta/sinbeta":
return 3.0 / 4.0 - b * b / 4.0 / a / a / sinbeta / sinbeta
if expr == "S-(3/4-S)*a*cosbeta/c":
S = kparam["S"]
return S - (3.0 / 4.0 - S) * a * cosbeta / c
if expr == "(1+a*a/b/b)/4":
return (1.0 + a * a / b / b) / 4.0
if expr == "-a*c*cosbeta/2/b/b":
return -a * c * cosbeta / 2.0 / b / b
if expr == "1+Z-2*M":
Z = kparam["Z"]
M = kparam["M"]
return 1.0 + Z - 2.0 * M
if expr == "X-2*D":
X = kparam["X"]
D = kparam["D"]
return X - 2 * D
if expr == "(1+a/c*cosbeta)/2/sinbeta/sinbeta":
return (1.0 + a / c * cosbeta) / 2.0 / sinbeta / sinbeta
if expr == "1/2+Y*c*cosbeta/a":
Y = kparam["Y"]
return 1.0 / 2.0 + Y * c * cosbeta / a
if expr == "a*a/4/c/c":
return a * a / 4.0 / c / c
if expr == "5/6-2*D":
D = kparam["D"]
return 5.0 / 6.0 - 2.0 * D
if expr == "1/3+D":
D = kparam["D"]
return 1.0 / 3.0 + D
if expr == "1/6-c*c/9/a/a":
return 1.0 / 6.0 - c * c / 9.0 / a / a
if expr == "1/2-2*Z":
Z = kparam["Z"]
return 1.0 / 2.0 - 2.0 * Z
if expr == "1/2+Z":
Z = kparam["Z"]
return 1.0 / 2.0 + Z
if expr == "(1+b*b/c/c)/4":
return (1.0 + b * b / c / c) / 4.0
if expr == "(1+c*c/b/b)/4":
return (1.0 + c * c / b / b) / 4.0
if expr == "(1+b*b/a/a)/4":
return (1.0 + b * b / a / a) / 4.0
if expr == "(1+a*a/b/b-a*a/c/c)/4":
return (1.0 + a * a / b / b - a * a / c / c) / 4.0
if expr == "(1+a*a/b/b+a*a/c/c)/4":
return (1.0 + a * a / b / b + a * a / c / c) / 4.0
if expr == "(1+c*c/a/a-c*c/b/b)/4":
return (1.0 + c * c / a / a - c * c / b / b) / 4.0
if expr == "(1+c*c/a/a+c*c/b/b)/4":
return (1.0 + c * c / a / a + c * c / b / b) / 4.0
if expr == "(1+b*b/a/a-b*b/c/c)/4":
return (1.0 + b * b / a / a - b * b / c / c) / 4.0
if expr == "(1+c*c/b/b-c*c/a/a)/4":
return (1.0 + c * c / b / b - c * c / a / a) / 4.0
if expr == "(1+a*a/c/c)/4":
return (1.0 + a * a / c / c) / 4.0
if expr == "(b*b-a*a)/4/c/c":
return (b * b - a * a) / 4.0 / c / c
if expr == "(a*a+b*b)/4/c/c":
return (a * a + b * b) / 4.0 / c / c
if expr == "(1+c*c/a/a)/4":
return (1.0 + c * c / a / a) / 4.0
if expr == "(c*c-b*b)/4/a/a":
return (c * c - b * b) / 4.0 / a / a
if expr == "(b*b+c*c)/4/a/a":
return (b * b + c * c) / 4.0 / a / a
if expr == "(a*a-c*c)/4/b/b":
return (a * a - c * c) / 4.0 / b / b
if expr == "(c*c+a*a)/4/b/b":
return (c * c + a * a) / 4.0 / b / b
if expr == "a*a/2/c/c":
return a * a / 2.0 / c / c
raise ValueError(
"Unknown expression, define a new case:\n"
' elif expr == "{0}":\n'
" return {0}".format(expr)
)
except KeyError as exc:
raise ValueError(
"Asking for evaluation of symbol '{}' but this has "
"not been defined or not yet computed".format(str(exc))
)
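# Illustrative example (sketch): for a=2, b=3, c=4 and right angles (all cosines 0),
#     eval_expr("a*a/2/c/c", 2, 3, 4, 0.0, 0.0, 0.0, {})
# evaluates the hardcoded branch a*a/2/c/c and returns 0.125.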
def check_spglib_version():
"""
Check the SPGLIB version and raise a ValueError if the version is
older than 1.9.4.
    Also raises a warning if the user has a version of SPGLIB that is
older than 1.13, because before then there were some bugs (e.g.
wrong treatment of oI, see e.g. issue )
Return the spglib module.
"""
try:
import spglib
except ImportError:
raise ValueError(
"spglib >= 1.9.4 is required for the creation "
"of the k-paths, but it could not be imported"
)
try:
version = spglib.__version__
    except AttributeError:
version = "1.8.0" # or older, version was introduced only recently
try:
version_pieces = [int(_) for _ in version.split(".")]
if len(version_pieces) < 3:
raise ValueError
except ValueError:
raise ValueError("Unable to parse version number")
if tuple(version_pieces[:2]) < (1, 9):
raise ValueError("Invalid spglib version, need >= 1.9.4")
if version_pieces[:2] == (1, 9) and version_pieces[2] < 4:
raise ValueError("Invalid spglib version, need >= 1.9.4")
if tuple(version_pieces[:2]) < (1, 13):
import warnings
warnings.warn(
"You have a version of SPGLIB older than 1.13, "
"please consider upgrading to 1.13 or later since some bugs "
"have been fixed",
RuntimeWarning,
)
return spglib
def get_cell_params(cell):
r"""
Return (a,b,c,cosalpha,cosbeta,cosgamma) given a :math:`3\times 3` cell
    .. note:: Rows are vectors: ``v1 = cell[0]``, ``v2 = cell[1]``, ``v3 = cell[2]``
"""
v1, v2, v3 = numpy.array(cell)
a = sqrt(sum(v1 ** 2))
b = sqrt(sum(v2 ** 2))
c = sqrt(sum(v3 ** 2))
cosalpha = numpy.dot(v2, v3) / b / c
cosbeta = numpy.dot(v1, v3) / a / c
cosgamma = numpy.dot(v1, v2) / a / b
return (a, b, c, cosalpha, cosbeta, cosgamma)
def get_reciprocal_cell_rows(real_space_cell):
r"""
Given the cell in real space (3x3 matrix, vectors as rows,
return the reciprocal-space cell where again the G vectors are
rows, i.e. satisfying
``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`,
where :math:`I` is the :math:`3\times 3` identity matrix.
:return: the :math:`3\times 3` list of reciprocal lattice vectors where each row is
one vector.
"""
reciprocal_space_columns = 2.0 * numpy.pi * numpy.linalg.inv(real_space_cell)
return (reciprocal_space_columns.T).tolist()
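# Illustrative example (sketch): for a cubic cell of side 2,
#     get_reciprocal_cell_rows([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
# returns a diagonal matrix with entries pi (~3.14159), since 2*pi/2 = pi.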
def get_real_cell_from_reciprocal_rows(reciprocal_space_rows):
r"""
Given the cell in reciprocal space (3x3 matrix, G vectors as rows,
return the real-space cell where again the R vectors are
rows, i.e. satisfying
``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`,
where :math:`I` is the :math:`3\times 3` identity matrix.
.. note:: This is actually the same as :py:func:`get_reciprocal_cell_rows`.
:return: the :math:`3\times 3` list of real lattice vectors where each row is
one vector.
"""
real_space_columns = 2.0 * numpy.pi * numpy.linalg.inv(reciprocal_space_rows)
return (real_space_columns.T).tolist()
def get_path_data(ext_bravais):
"""
Given an extended Bravais symbol among those defined in the HPKOT paper
(only first three characters, like cF1), return the points and the
suggested path.
:param ext_bravais: a string among the allowed etended Bravais lattices
defined in HPKOT.
:return: a tuple ``(kparam_def, points_def, path)`` where the
first element is the list with the definition of the
k-point parameters, the second is the dictionary with the
definition of the k-points, and the third is the list
with the suggested paths.
.. note:: ``kparam_def`` has to be a list and not a dictionary
because the order matters (later k-parameters can be defined
in terms of previous ones)
"""
import os
# Get the data from the band_data folder
this_folder = os.path.split(os.path.abspath(__file__))[0]
folder = os.path.join(this_folder, "band_path_data", ext_bravais)
path_file = os.path.join(folder, "path.txt")
points_file = os.path.join(folder, "points.txt")
kparam_file = os.path.join(folder, "k_vector_parameters.txt")
with open(kparam_file) as f:
kparam_raw = [_.split() for _ in f.readlines() if _.strip()]
with open(points_file) as f:
points_raw = [_.split() for _ in f.readlines()]
with open(path_file) as f:
path_raw = [_.split() for _ in f.readlines()]
# check
if any(len(_) != 2 for _ in kparam_raw):
raise ValueError("Invalid line length in {}".format(kparam_file))
if any(len(_) != 2 for _ in path_raw):
raise ValueError("Invalid line length in {}".format(path_file))
if any(len(_) != 4 for _ in points_raw):
raise ValueError("Invalid line length in {}".format(points_file))
# order must be preserved here
kparam_def = [(_[0], _[1].strip()) for _ in kparam_raw]
points_def = {}
for label, kPx, kPy, kPz in points_raw:
if label in points_def:
raise ValueError(
"Internal error! Point {} defined multiple times "
"for Bravais lattice {}".format(label, ext_bravais)
)
points_def[label] = (kPx, kPy, kPz)
path = [(_[0], _[1]) for _ in path_raw]
# check path is valid
for p1, p2 in path:
if p1 not in points_def:
raise ValueError(
"Point {} found in path (for {}) but undefined!".format(p1, ext_bravais)
)
if p2 not in points_def:
raise ValueError(
"Point {} found in path (for {}) but undefined!".format(p2, ext_bravais)
)
return (kparam_def, points_def, path)
|
py | 7dfd78f448248461cb0376a79f6a98eaedff264f | import inspect
from blocks.extensions import SimpleExtension
class SharedVariableModifier(SimpleExtension):
"""Adjusts shared variable parameter using some function.
Applies a function to compute the new value of a shared parameter each
iteration.
This class can be used to adapt over the training process parameters
like learning rate, momentum, etc.
Parameters
----------
parameter : :class:`~tensor.TensorSharedVariable`
Shared variable to be adjusted
function : callable
A function which outputs a numeric value to which the
given shared variable will be set and may take one or two
arguments.
In the first case, function that takes the total number of
iterations done (``int``) as an input.
In the second case, it is a function which takes number of
iterations done (``int``) and old value of the shared variable
(with the same dtype as `parameter`).
"""
def __init__(self, parameter, function, **kwargs):
kwargs.setdefault("after_every_batch", True)
super(SharedVariableModifier, self).__init__(**kwargs)
self.parameter = parameter
self.function = function
self.num_args = len(inspect.getargspec(function).args)
def do(self, which_callback, *args):
iterations_done = self.main_loop.log.status.iterations_done
if self.num_args == 1:
new_value = self.function(iterations_done)
else:
old_value = self.parameter.get_value()
new_value = self.function(iterations_done, old_value)
self.parameter.set_value(new_value)
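# Illustrative usage sketch (not part of the original module; assumes a Theano
# shared variable `learning_rate` created elsewhere):
#     SharedVariableModifier(learning_rate, lambda n: 0.1 / (1.0 + n))      # 1-arg form
#     SharedVariableModifier(learning_rate, lambda n, old: old * 0.99)      # 2-arg form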
|
py | 7dfd790b74158a068a5912a8eea69f98f6dca22e | # import tests to run within this module
from localground.apps.site.api.tests.admin_tests import *
from localground.apps.site.api.tests.association_tests import *
from localground.apps.site.api.tests.audio_tests import *
from localground.apps.site.api.tests.field_tests import *
from localground.apps.site.api.tests.dataset_tests import *
from localground.apps.site.api.tests.homepage_tests import *
from localground.apps.site.api.tests.icon_tests import *
from localground.apps.site.api.tests.layer_tests import *
from localground.apps.site.api.tests.map_tests import *
from localground.apps.site.api.tests.mapimage_tests import *
from localground.apps.site.api.tests.photo_tests import *
from localground.apps.site.api.tests.print_tests import *
from localground.apps.site.api.tests.project_tests import *
from localground.apps.site.api.tests.record_tests import *
from localground.apps.site.api.tests.renderer_tests import *
from localground.apps.site.api.tests.sharing_tests import *
from localground.apps.site.api.tests.sql_parse_tests import *
from localground.apps.site.api.tests.tags_tests import *
from localground.apps.site.api.tests.user_profile_tests import *
from localground.apps.site.api.tests.video_tests import *
|
py | 7dfd79e31265952105dfb4ccce0700e86ffbf69d | import unittest
import numpy as np
import openmdao.api as om
from dymos.utils.testing_utils import assert_check_partials
from dymos.examples.brachistochrone.brachistochrone_vector_states_ode import \
BrachistochroneVectorStatesODE
class TestBrachistochroneVectorStatesODE(unittest.TestCase):
@classmethod
def setUpClass(cls):
nn = 5
p = cls.p = om.Problem(model=om.Group())
ivc = p.model.add_subsystem('ivc', om.IndepVarComp(), promotes_outputs=['*'])
ivc.add_output('v', val=np.ones((nn,)), units='m/s')
ivc.add_output('g', val=np.zeros((nn,)), units='m/s**2')
        ivc.add_output('theta', val=np.zeros((nn,)), units='rad')
p.model.add_subsystem('eom', BrachistochroneVectorStatesODE(num_nodes=nn),
promotes_inputs=['*'], promotes_outputs=['*'])
p.setup(check=True, force_alloc_complex=True)
p['v'] = np.random.rand(nn)
p['g'] = np.random.rand(nn)
p['theta'] = np.random.rand(nn)
p.run_model()
def test_results(self):
pass
def test_partials(self):
np.set_printoptions(linewidth=1024, edgeitems=1000)
cpd = self.p.check_partials(method='cs')
assert_check_partials(cpd)
|
py | 7dfd7aeddd2c7192ea8e1b1fd210634fa356777f | # -*- coding: utf-8 -*-
"""
"""
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class ParseNoSectionError(Error):
"""Exception raised for errors in the input.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, expr, msg):
self.expr = expr
self.msg = msg
pass
|
py | 7dfd7c4ddb3eca106ad0739cb21770864df67d74 | #
# Morse code trainer
# PJ Evans <[email protected]>
# MIT Licence
#
import time
import board
import busio
import adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd
# Change these values (in seconds) to suit your style (or 'fist')
dash_timeout = 0.15
letter_threshold = 1
# Set up the LCD screen
lcd_columns = 16
lcd_rows = 2
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_RGB_I2C(i2c, lcd_columns, lcd_rows)
lcd.color = [100, 0, 0]
lcd.clear()
lcd.home()
lcd.blink = True
# Track the key presses and generated morse
morse_key_pressed = False
morse_key_start_time = 0
morse_key_duration = 0
current_letter = ""
message = ""
# The morse alphabet as a dict
morse = {
".-": "A", "-...": "B", "-.-.": "C", "-..": "D", ".": "E",
"..-.": "F", "--.": "G", "....": "H", "..": "I", ".---": "J",
"-.-": "K", ".-..": "L", "--": "M", "-.": "N", "---": "O",
".--.": "P", "--.-": "Q", ".-.": "R", "...": "S", "-": "T",
"..-": "U", "...-": "V", ".--": "W", "-..-": "X", "-.--": "Y",
"--..": "Z", ".----": "1", "..---": "2", "...--": "3", "....-": "4",
".....": "5", "-....": "6", "--...": "7", "---..": "8", "----.": "9",
"-----": "0"
}
print('Ready')
try:
while True:
# If the key has been pressed
if lcd.right_button:
# Note the start time
if not morse_key_pressed:
morse_key_start_time = time.time()
morse_key_pressed = True
elif morse_key_pressed:
# This triggers when the key is released
morse_key_duration = time.time() - morse_key_start_time
morse_key_pressed = False
# From the duration, was it a dot or a dash?
lcd.cursor_position (len(current_letter), 1)
if morse_key_duration > dash_timeout:
print('-', end='', flush=True)
lcd.message = '-'
current_letter += '-'
else:
print('.', end='', flush=True)
lcd.message = '.'
current_letter += '.'
elif len(current_letter) > 0:
# No activity but we're in the process of constructing a letter
            # After letter_threshold seconds of silence, match the letter
            time_since_last_release = time.time() - (morse_key_start_time + morse_key_duration)
            if time_since_last_release > letter_threshold:
lcd.cursor_position (len(message), 0)
if current_letter in morse:
print(" " + morse[current_letter])
message += morse[current_letter]
lcd.message = morse[current_letter]
else:
print(" ?")
current_letter = ""
lcd.cursor_position(0, 1)
lcd.message = " "
lcd.cursor_position(len(message), 0)
# Clear the message
if lcd.up_button:
message = ""
current_letter = ""
lcd.clear()
time.sleep(0.2) # Debounce
# Backspace
if lcd.left_button and len(message) > 0:
lcd.cursor_position(len(message) - 1 ,0)
lcd.message = ' '
message = message[:-1]
lcd.cursor_position(len(message), 0)
time.sleep(0.2) # Debounce
# Add a space
if lcd.down_button:
lcd.cursor_position (len(message), 0)
lcd.message = " "
message += " "
time.sleep(0.2) # Debounce
# Let the CPU breathe
time.sleep(0.01)
except KeyboardInterrupt:
print("Stopping...")
# Clean up
lcd.display = False
lcd.backlight = False
lcd.clear()
|
py | 7dfd7d54534106a080a1dd2faa06cb5d01a25411 | import os
from torchblocks.metrics import SequenceLabelingScore
from torchblocks.trainer import SequenceLabelingSpanTrainer
from torchblocks.callback import TrainLogger
from torchblocks.processor import SequenceLabelingSpanProcessor, InputExample
from torchblocks.utils import seed_everything, dict_to_text, build_argparse
from torchblocks.utils import prepare_device, get_checkpoints
from torchblocks.data import CNTokenizer
from torchblocks.models.nn import BertSpanForNer
from transformers import WEIGHTS_NAME, BertConfig
from torchblocks.metrics.utils import get_spans
MODEL_CLASSES = {
'bert': (BertConfig, BertSpanForNer, CNTokenizer)
}
class CnerProcessor(SequenceLabelingSpanProcessor):
def __init__(self, markup, tokenizer, data_dir, prefix=''):
super().__init__(tokenizer=tokenizer, data_dir=data_dir, prefix=prefix)
self.markup = markup
def get_labels(self):
"""See base class."""
return ["O", "CONT", "ORG", "LOC", 'EDU', 'NAME', 'PRO', 'RACE', 'TITLE']
def read_data(self, input_file):
"""Reads a json list file."""
lines = []
with open(input_file, 'r') as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
lines.append({"words": "".join(words), "labels": labels})
words = []
labels = []
else:
splits = line.split(" ")
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
lines.append({"words": "".join(words), "labels": labels})
return lines
def create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line['words']
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
elif 'E-' in x:
labels.append(x.replace('E-', 'I-'))
else:
labels.append(x)
            # the label sequence is supplied via label_ids (as spans)
spans = get_spans(labels, id2label=None, markup=self.markup)
examples.append(InputExample(guid=guid, texts=[text_a, None], label_ids=spans))
return examples
def main():
parser = build_argparse()
parser.add_argument('--markup', type=str, default='bios', choices=['bios', 'bio'])
args = parser.parse_args()
# output dir
if args.model_name is None:
args.model_name = args.model_path.split("/")[-1]
args.output_dir = args.output_dir + '{}'.format(args.model_name)
os.makedirs(args.output_dir, exist_ok=True)
prefix = "_".join([args.model_name, args.task_name])
logger = TrainLogger(log_dir=args.output_dir, prefix=prefix)
# device
logger.info("initializing device")
args.device, args.n_gpu = prepare_device(args.gpu, args.local_rank)
seed_everything(args.seed)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
# data processor
logger.info("initializing data processor")
tokenizer = tokenizer_class.from_pretrained(args.model_path, do_lower_case=args.do_lower_case)
processor = CnerProcessor(args.markup, tokenizer, args.data_dir, prefix=prefix)
label_list = processor.get_labels()
num_labels = len(label_list)
id2label = {i: label for i, label in enumerate(label_list)}
args.id2label = id2label
args.num_labels = num_labels
# model
logger.info("initializing model and config")
config = config_class.from_pretrained(args.model_path,
num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_path, config=config)
model.to(args.device)
# trainer
logger.info("initializing traniner")
trainer = SequenceLabelingSpanTrainer(logger=logger, args=args, collate_fn=processor.collate_fn,
batch_input_keys=processor.get_batch_keys(),
metrics=[SequenceLabelingScore(id2label, markup=args.markup, is_spans=True)])
# do train
if args.do_train:
train_dataset = processor.create_dataset(args.train_max_seq_length, 'train.char.bmes', 'train')
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.char.bmes', 'dev')
trainer.train(model, train_dataset=train_dataset, eval_dataset=eval_dataset)
# do eval
if args.do_eval and args.local_rank in [-1, 0]:
results = {}
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.char.bmes', 'dev')
checkpoints = [args.output_dir]
if args.eval_all_checkpoints or args.checkpoint_number > 0:
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint, config=config)
model.to(args.device)
trainer.evaluate(model, eval_dataset, save_preds=True, prefix=str(global_step))
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in trainer.records['result'].items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
dict_to_text(output_eval_file, results)
# do predict
if args.do_predict:
test_dataset = processor.create_dataset(args.eval_max_seq_length, 'test.char.bmes', 'test')
if args.checkpoint_number == 0:
raise ValueError("checkpoint number should > 0,but get %d", args.checkpoint_number)
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
trainer.predict(model, test_dataset=test_dataset, prefix=str(global_step))
if __name__ == "__main__":
main()
|
py | 7dfd7e62426b9b703c1d49c2bcbcfcd82fe299ef | import django_filters
from rest_framework import viewsets
from api_v1.serializers.wydawnictwo_ciagle import (
Wydawnictwo_Ciagle_AutorSerializer,
Wydawnictwo_Ciagle_Zewnetrzna_Baza_DanychSerializer,
Wydawnictwo_CiagleSerializer,
)
from api_v1.viewsets.common import UkryjStatusyKorektyMixin
from bpp.models import (
Wydawnictwo_Ciagle,
Wydawnictwo_Ciagle_Autor,
Wydawnictwo_Ciagle_Zewnetrzna_Baza_Danych,
)
class Wydawnictwo_Ciagle_AutorViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Wydawnictwo_Ciagle_Autor.objects.all()
serializer_class = Wydawnictwo_Ciagle_AutorSerializer
class Wydawnictwo_CiagleFilterSet(django_filters.rest_framework.FilterSet):
ostatnio_zmieniony = django_filters.DateTimeFromToRangeFilter("ostatnio_zmieniony")
rok = django_filters.RangeFilter("rok")
class Meta:
fields = ["ostatnio_zmieniony", "charakter_formalny", "rok"]
model = Wydawnictwo_Ciagle
class Wydawnictwo_CiagleViewSet(
UkryjStatusyKorektyMixin, viewsets.ReadOnlyModelViewSet
):
    # The list must be sorted by PK to avoid duplicates
queryset = (
Wydawnictwo_Ciagle.objects.exclude(nie_eksportuj_przez_api=True)
.order_by("pk")
.select_related("status_korekty")
.prefetch_related("autorzy_set", "zewnetrzna_baza_danych", "slowa_kluczowe")
)
serializer_class = Wydawnictwo_CiagleSerializer
filterset_class = Wydawnictwo_CiagleFilterSet
class Wydawnictwo_Ciagle_Zewnetrzna_Baza_DanychViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Wydawnictwo_Ciagle_Zewnetrzna_Baza_Danych.objects.all()
serializer_class = Wydawnictwo_Ciagle_Zewnetrzna_Baza_DanychSerializer
|
py | 7dfd7e883d347be7be743bdecf50e9a29c03854a | import sqlite3
import subprocess
merge_conn = sqlite3.connect('../../workflow/merger.db', isolation_level=None)
outfile = 'GO_input.tsv'
go_output_score_files = [
# ["C", "CC", "GO_score_CC.tsv"],
["P", "BP", "GO_score_BP.tsv"],
# ["F", "MF", "GO_score_MF.tsv"],
]
#with merge_conn:
# c = merge_conn.cursor()
# with open(outfile, 'w') as out:
# c.execute("SELECT interactor_a_node_name, interactor_b_node_name from EDGE")
# while True:
# allrows = c.fetchone()
# if allrows is None:
# break
# else:
# nodea = allrows[0].split(":")[1]
# nodeb = allrows[1].split(":")[1]
# out.write(f'{nodea}\t{nodeb}\n')
#for go in go_output_score_files:
# subprocess.call(["java", "-jar", "HVSM.jar", "-org", "human", "-db", go[0], "-gene", "-i", outfile, "-o", go[2]])
# Adding scores to SQL
for go in go_output_score_files:
GOdict = []
with open(go[2], "r") as score_file, open(outfile, 'r') as infile:
for nodes, score in zip (infile, score_file):
score = float(score.strip())
node = nodes.strip().split("\t")
if score > 0:
GOdict.append([node[0], node[1], score])
with merge_conn:
c = merge_conn.cursor()
#c.execute('PRAGMA journal_mode=OFF')
#c.execute('PRAGMA synchronous=OFF')
#c.execute('CREATE INDEX edge_node_a_b_index ON edge (interactor_a_node_name, interactor_b_node_name);')
        total = len(GOdict)
        num = 0
        for GOscore in GOdict:
            # bind the values as parameters and keep a space before WHERE so the SQL stays valid
            c.execute("UPDATE edge SET confidence_scores = edge.confidence_scores || ? "
                      "WHERE edge.interactor_a_node_name = ? AND edge.interactor_b_node_name = ?",
                      (f'|GO_semantics_{go[1]}:{GOscore[2]}|', f'Uniprot:{GOscore[0]}', f'Uniprot:{GOscore[1]}'))
            num += 1
            if num % 100 == 0:
                print(f'{num}/{total}')
#merge_conn.commit()
|
py | 7dfd7e95c185467ae3bc2f0fb0691fbef1db68b1 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from numpy import (
full,
nan,
int64,
float64,
zeros
)
from six import iteritems, with_metaclass
from catalyst.utils.memoize import lazyval
class AssetDispatchBarReader(with_metaclass(ABCMeta)):
"""
Parameters
----------
- trading_calendar : catalyst.utils.trading_calendar.TradingCalendar
- asset_finder : catalyst.assets.AssetFinder
- readers : dict
A dict mapping Asset type to the corresponding
[Minute|Session]BarReader
- last_available_dt : pd.Timestamp or None, optional
If not provided, infers it by using the min of the
last_available_dt values of the underlying readers.
"""
def __init__(
self,
trading_calendar,
asset_finder,
readers,
last_available_dt=None,
):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._readers = readers
self._last_available_dt = last_available_dt
for t, r in iteritems(self._readers):
assert trading_calendar == r.trading_calendar, \
"All readers must share target trading_calendar. " \
"Reader={0} for type={1} uses calendar={2} which does not " \
"match the desired shared calendar={3} ".format(
r, t, r.trading_calendar, trading_calendar)
@abstractmethod
def _dt_window_size(self, start_dt, end_dt):
pass
@property
def _asset_types(self):
return self._readers.keys()
def _make_raw_array_shape(self, start_dt, end_dt, num_sids):
return self._dt_window_size(start_dt, end_dt), num_sids
def _make_raw_array_out(self, field, shape):
if field == 'volume':
out = zeros(shape, dtype=float64)
elif field != 'sid':
out = full(shape, nan)
else:
out = zeros(shape, dtype=int64)
return out
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def last_available_dt(self):
if self._last_available_dt is not None:
return self._last_available_dt
else:
return min(r.last_available_dt for r in self._readers.values())
@lazyval
def first_trading_day(self):
return max(r.first_trading_day for r in self._readers.values())
def get_value(self, sid, dt, field):
asset = self._asset_finder.retrieve_asset(sid)
r = self._readers[type(asset)]
return r.get_value(asset, dt, field)
def get_last_traded_dt(self, asset, dt):
r = self._readers[type(asset)]
return r.get_last_traded_dt(asset, dt)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
asset_types = self._asset_types
sid_groups = {t: [] for t in asset_types}
out_pos = {t: [] for t in asset_types}
assets = self._asset_finder.retrieve_all(sids)
for i, asset in enumerate(assets):
t = type(asset)
sid_groups[t].append(asset)
out_pos[t].append(i)
batched_arrays = {
t: self._readers[t].load_raw_arrays(fields,
start_dt,
end_dt,
sid_groups[t])
for t in asset_types if sid_groups[t]}
results = []
shape = self._make_raw_array_shape(start_dt, end_dt, len(sids))
for i, field in enumerate(fields):
out = self._make_raw_array_out(field, shape)
for t, arrays in iteritems(batched_arrays):
out[:, out_pos[t]] = arrays[i]
results.append(out)
return results
class AssetDispatchMinuteBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.minutes_in_range(start_dt, end_dt))
class AssetDispatchSessionBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.sessions_in_range(start_dt, end_dt))
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self.first_trading_day,
self.last_available_dt)
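# A construction sketch (illustrative, not part of the original module); the
# asset classes and reader variables named below are assumptions:
#
#     readers = {
#         Equity: equity_minute_bar_reader,   # a minute bar reader for equities
#         Future: future_minute_bar_reader,   # a minute bar reader for futures
#     }
#     dispatch_reader = AssetDispatchMinuteBarReader(
#         trading_calendar, asset_finder, readers)
#     # get_value / load_raw_arrays then route each sid to the reader
#     # registered for its asset type.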
|
py | 7dfd81853c421b11653e013e0df1ca9fd66871fa | import unittest
import unittest.mock
from programy.oob.search import SearchOutOfBandProcessor
import xml.etree.ElementTree as ET
from programy.context import ClientContext
from programytest.aiml_tests.client import TestClient
class SearchOutOfBandProcessorTests(unittest.TestCase):
def setUp(self):
client = TestClient()
self._client_context = client.create_client_context("testid")
def test_processor_xml_parsing(self):
oob_processor = SearchOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
self.assertFalse(oob_processor.parse_oob_xml(None))
oob = unittest.mock.Mock()
oob.text = None
self.assertFalse(oob_processor.parse_oob_xml(oob))
oob = unittest.mock.Mock()
oob.text = "Kinghorn"
self.assertTrue(oob_processor.parse_oob_xml(oob))
def test_processor(self):
oob_processor = SearchOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
oob_content = ET.fromstring("<search>process</search>")
self.assertEqual("SEARCH", oob_processor.process_out_of_bounds(self._client_context, oob_content))
|
py | 7dfd82ee168020a6e9d6957fd918750c2ae3694b | import json
class Badge:
"""
Instantiates a Badge object given an ID, and given that
it exists in the JSON file.
Args:
item_id (int): The ID of the badge.
Attributes:
id (int): The ID of the badge.
name (str): The name of the badge.
description (str): The badge's description
price (int): The gil needed to purchase the badge.
level_needed (int): The level needed to purchase the badge.
price_tag (str): The price "tag" on display.
icon_url (str): The filename of the badge's icon.
is_exclusive (bool): If the badge is exclusive to IPM.
"""
def __init__(self, item_id, is_equipped=False):
with open('assets/obj_badgeshop.json') as f:
badge_shop = json.load(f)
with open('assets/obj_badges.json') as f:
badge_json = json.load(f)
self.id = int(item_id)
item_id = str(item_id)
self.is_equipped = is_equipped
self.name = badge_json[item_id]["name"]
self.is_for_sale = badge_json[item_id]["for_sale"]
if self.is_for_sale:
self.is_exclusive = badge_shop[item_id]["is_exclusive"]
self.price = badge_shop[item_id]["price"]
self.price_tag = badge_shop[item_id]["price_tag"]
self.level_needed = badge_shop[item_id]["level_needed"]
self.icon_url = badge_shop[item_id]["icon_url"]
self.description = badge_shop[item_id]["description"]
else:
self.icon_url = badge_json[item_id]["img-url"]
self.description = badge_json[item_id]["description"]
def __repr__(self):
return f"Badge({self.id})" |
py | 7dfd832ea08aabac8291157f1356dd6fe9476799 | from unittest import TestCase
import datetime
from subrip_ranger import timecodes
class PeriodTest(TestCase):
START = datetime.timedelta(hours=1, minutes=2, seconds=3)
END = datetime.timedelta(hours=1, minutes=2, seconds=45)
def setUp(self):
self.period = timecodes.Period(self.START, self.END)
def test_creation(self):
"""period has proper attributes"""
self.assertEqual(self.period.start, self.START)
self.assertEqual(self.period.end, self.END)
def test_range(self):
"""calculate difference properly"""
self.assertEqual(self.period.range, self.END - self.START)
class AdjusterTest(TestCase):
DEST_FIRST_APPEARANCE = '00:01:03,000'
DEST_LAST_DISAPPERANCE = '00:01:06,000'
TIMECODE_FIRST = datetime.timedelta(minutes=1, seconds=2)
TIMECODE_LAST = datetime.timedelta(minutes=1, seconds=5)
def setUp(self):
timecodes.Adjuster._borg_state = {}
self.adjuster = timecodes.Adjuster()
self.adjuster.update(self.TIMECODE_FIRST)
self.adjuster.update(self.TIMECODE_LAST)
self.dest_first_appearance = \
timecodes.Timecode.timecode_string_to_timedelta(
self.DEST_FIRST_APPEARANCE
)
self.dest_last_disapperance = \
timecodes.Timecode.timecode_string_to_timedelta(
self.DEST_LAST_DISAPPERANCE
)
def test_update(self):
"""updating range start and end properly"""
self.assertEqual(self.adjuster.orig.start, self.TIMECODE_FIRST)
self.assertEqual(self.adjuster.orig.end, self.TIMECODE_LAST)
def test_dest_setting(self):
"""calculate scale and offset properly upon setting destination"""
self.adjuster.set_params(
self.DEST_FIRST_APPEARANCE,
last_disappearance=self.DEST_LAST_DISAPPERANCE
)
self.assertEqual(self.adjuster.scale, 1.0)
self.assertEqual(self.adjuster.offset,
self.TIMECODE_FIRST - self.dest_first_appearance)
def test_lack_of_dest_last(self):
"""missing dest last results no scaling"""
self.adjuster.set_params(self.DEST_FIRST_APPEARANCE, scale=1.0)
expected = 1.0
self.assertEqual(self.adjuster.scale, expected)
def test_adjusting(self):
"""calculate adjustment properly"""
self.adjuster.set_params(self.DEST_FIRST_APPEARANCE, scale=1.0)
adjusted = self.adjuster.adjust(self.TIMECODE_FIRST)
self.assertEqual(self.dest_first_appearance, adjusted)
class TimecodeTest(TestCase):
TIMECODE_STRING = '01:23:45,678'
def setUp(self):
self.timecode_value = datetime.timedelta(
hours=1, minutes=23, seconds=45, milliseconds=678
)
def test_from_timecode_string(self):
"""alternative constructor from timecode_string"""
timecode = timecodes.Timecode.from_timecode_string(self.TIMECODE_STRING)
result = timecode.value
expected = self.timecode_value
self.assertEqual(result, expected, msg='different delta value')
def test_to_timecode_string(self):
"""format timedelta as timecode string"""
timecode = timecodes.Timecode(self.timecode_value)
result = str(timecode)
expected = self.TIMECODE_STRING
self.assertEqual(result, expected, msg='unexpected timecode format')
def test_to_timecode_string_no_fraction(self):
"""format timedelta as timecode string with no fraction seconds"""
timecode = timecodes.Timecode(
datetime.timedelta(hours=1, minutes=2, seconds=3)
)
result = str(timecode)
expected = '01:02:03,000'
self.assertEqual(result, expected, msg='unexpected timecode format')
class AdjustibleTimecodeTest(TestCase):
TIMECODE_FIRST = '00:03:00,000'
TIMECODE_LAST = '00:07:00,000'
DEST_START = '00:02:00,000'
DEST_END = '00:06:00,000'
def setUp(self):
timecodes.Adjuster._borg_state = {}
self.timecode_value = datetime.timedelta(
hours=1, minutes=23, seconds=45, milliseconds=678
)
def test_adjusting(self):
"""adjusting value with parameters"""
timecode_first = timecodes.AdjustibleTimecode.from_timecode_string(
self.TIMECODE_FIRST
)
timecode_last = timecodes.AdjustibleTimecode.from_timecode_string(
self.TIMECODE_LAST
)
adjuster = timecodes.Adjuster()
adjuster.set_params(self.DEST_START, scale=1.0)
timecode_first.adjust()
expected = timecodes.Timecode.timecode_string_to_timedelta(
self.DEST_START
)
self.assertEqual(timecode_first.value, expected)
class TimecodeLineTest(TestCase):
APPEARANCE = '00:42:55,990'
DISAPPEARANCE = '00:43:00,910'
TIMECODE_LINE_STRING = f'{APPEARANCE} --> {DISAPPEARANCE}'
DEST_START = '00:42:56,990'
DEST_END = '00:43:01,910'
def setUp(self):
timecodes.Adjuster._borg_state = {}
self.timecode_line = timecodes.TimecodeLine.from_timecode_line_string(
self.TIMECODE_LINE_STRING
)
def test_from_timecode_line_string(self):
"""attributes properly set"""
result = str(self.timecode_line.appearance)
expected = self.APPEARANCE
self.assertEqual(result, expected)
result = str(self.timecode_line.disappearance)
expected = self.DISAPPEARANCE
self.assertEqual(result, expected)
def test_adjusting(self):
"""adjusting to a desired range"""
dest = timecodes.Period(
timecodes.Timecode.from_timecode_string(self.DEST_START).value,
timecodes.Timecode.from_timecode_string(self.DEST_END).value
)
adjuster = timecodes.Adjuster()
adjuster.set_params(self.DEST_START, last_disappearance=self.DEST_END)
self.timecode_line.adjust()
result = str(self.timecode_line)
expected = '00:42:56,990 --> 00:43:01,910'
self.assertEqual(result, expected)
def test_to_timecode_line_string(self):
"""format timecode_line as timecode line string"""
result = str(self.timecode_line)
expected = self.TIMECODE_LINE_STRING
self.assertEqual(
result, expected, msg='unexpected timecode line format'
)
|
py | 7dfd83a6f8aca9fb7e95f9adca256a562d4b124d | import datetime
from dateutil.tz import tzutc
import pytest
from brightsky.export import DBExporter, SYNOPExporter
SOURCES = [
{
'observation_type': 'recent',
'lat': 10.1,
'lon': 20.2,
'height': 30.3,
'wmo_station_id': '10001',
'dwd_station_id': 'XYZ',
'station_name': 'Münster',
},
{
'observation_type': 'recent',
'lat': 40.4,
'lon': 50.5,
'height': 60.6,
'wmo_station_id': '10002',
'dwd_station_id': None,
'station_name': 'Aurich',
},
{
'observation_type': 'recent',
'lat': 60.6,
'lon': 70.7,
'height': 80.8,
'wmo_station_id': '10003',
'dwd_station_id': None,
'station_name': 'Göttingen',
},
]
RECORDS = [
{
'timestamp': datetime.datetime(2020, 8, 18, 18, tzinfo=tzutc()),
'temperature': 291.25,
'precipitation': 0.3,
},
{
'timestamp': datetime.datetime(2020, 8, 18, 19, tzinfo=tzutc()),
'temperature': 290.25,
'precipitation': 0.2,
},
{
'timestamp': datetime.datetime(2020, 8, 18, 20, tzinfo=tzutc()),
'temperature': 289.25,
'precipitation': 0.1,
},
]
FINGERPRINT = {
'url': 'https://example.com/source.zip',
'last_modified': datetime.datetime(2020, 8, 19, 12, 34, tzinfo=tzutc()),
'file_size': 12345
}
@pytest.fixture
def exporter():
exporter = DBExporter()
exporter.export(
[
{**SOURCES[0], **RECORDS[0]},
{**SOURCES[1], **RECORDS[1]},
],
fingerprint=FINGERPRINT)
return exporter
def _query_sources(db):
return db.fetch("SELECT * FROM sources ORDER BY id")
def _query_records(db, table='weather'):
return db.fetch(
f"""
SELECT * FROM {table}
JOIN sources ON {table}.source_id = sources.id
ORDER BY sources.id, timestamp
""")
def test_db_exporter_creates_new_sources(db, exporter):
db_sources = _query_sources(db)
assert len(db_sources) == 2
for source, row in zip(SOURCES[:2], db_sources):
for k, v in source.items():
assert row[k] == v
def test_db_exporter_reuses_existing_sources(db, exporter):
exporter.export([{**SOURCES[0], **RECORDS[2]}])
db_sources = _query_sources(db)
assert len(db_sources) == len(SOURCES[:2])
# Exports with only known sources should also not increase the sources_id
# sequence
exporter.export([{**SOURCES[2], **RECORDS[2]}])
db_sources = _query_sources(db)
assert db_sources[2]['id'] == db_sources[0]['id'] + 2
def test_db_exporter_creates_new_records(db, exporter):
db_records = _query_records(db)
for record, source, row in zip(RECORDS[:2], SOURCES[:2], db_records):
for k, v in source.items():
assert row[k] == v
for k, v in record.items():
assert row[k] == v
def test_db_exporter_updates_existing_records(db, exporter):
record = RECORDS[0].copy()
record['precipitation'] = 10.
record['cloud_cover'] = 50
exporter.export([{**SOURCES[0], **record}])
db_records = _query_records(db)
for k, v in record.items():
assert db_records[0][k] == v
def test_db_exporter_updates_parsed_files(db, exporter):
parsed_files = db.fetch("SELECT * FROM parsed_files")
assert len(parsed_files) == 1
for k, v in FINGERPRINT.items():
assert parsed_files[0][k] == v
def test_db_exporter_updates_source_first_last_record(db, exporter):
db_sources = _query_sources(db)
assert db_sources[0]['first_record'] == RECORDS[0]['timestamp']
assert db_sources[0]['last_record'] == RECORDS[0]['timestamp']
exporter.export([{**SOURCES[0], **RECORDS[2]}])
db_sources = _query_sources(db)
assert db_sources[0]['first_record'] == RECORDS[0]['timestamp']
assert db_sources[0]['last_record'] == RECORDS[2]['timestamp']
def test_synop_exporter(db):
exporter = SYNOPExporter()
assert len(_query_records(db, table='current_weather')) == 0
# Exporter needs to merge separate records for the same source and time
record = RECORDS[0].copy()
record['timestamp'] = datetime.datetime.utcnow().replace(
minute=0, second=0, microsecond=0, tzinfo=tzutc())
extra_record = {
'timestamp': record['timestamp'],
'pressure_msl': 101010,
}
previous_record = RECORDS[1].copy()
previous_record['timestamp'] = (
record['timestamp'] - datetime.timedelta(minutes=30))
exporter.export([
{**SOURCES[0], **record},
{**SOURCES[0], **extra_record},
{**SOURCES[0], **previous_record},
])
# Merges records for the same source and timestamp
synop_records = _query_records(db, table='synop')
assert len(synop_records) == 2
assert synop_records[-1]['timestamp'] == record['timestamp']
assert synop_records[-1]['temperature'] == record['temperature']
assert synop_records[-1]['pressure_msl'] == extra_record['pressure_msl']
# Updates current_weather
# XXX: This test may be flaky as the concurrent refresh may not have
# finished yet. Can we somehow wait until the lock is released?
current_weather_records = _query_records(db, table='current_weather')
assert len(current_weather_records) == 1
|
py | 7dfd83d5e8e315d109dd5ae387c2bf6d33bbd367 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# String literals representing core events.
BEFORE_CREATE = 'before_create'
BEFORE_READ = 'before_read'
BEFORE_UPDATE = 'before_update'
BEFORE_DELETE = 'before_delete'
AFTER_CREATE = 'after_create'
AFTER_READ = 'after_read'
AFTER_UPDATE = 'after_update'
AFTER_DELETE = 'after_delete'
ABORT_CREATE = 'abort_create'
ABORT_READ = 'abort_read'
ABORT_UPDATE = 'abort_update'
ABORT_DELETE = 'abort_delete'
ABORT = 'abort_'
BEFORE = 'before_'
|
py | 7dfd849d9707e559da4620ed6ea5eb121f0d4ccc | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.event import listens_for
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from indico.modules.events.surveys.fields import get_field_types
from indico.util.string import text_to_repr
from indico.util.struct.enum import IndicoEnum
def _get_next_position(context):
"""Get the next question position for the event."""
survey_id = context.current_parameters['survey_id']
parent_id = context.current_parameters['parent_id']
res = db.session.query(db.func.max(SurveyItem.position)).filter_by(survey_id=survey_id, parent_id=parent_id).one()
return (res[0] or 0) + 1
def _get_item_default_title(context):
return '' if context.current_parameters['type'] == SurveyItemType.section else None
class SurveyItemType(int, IndicoEnum):
question = 1
section = 2
text = 3
class SurveyItem(DescriptionMixin, db.Model):
__tablename__ = 'items'
__table_args__ = (db.CheckConstraint("type != {type} OR ("
"title IS NOT NULL AND "
"is_required IS NOT NULL AND "
"field_type IS NOT NULL AND "
"parent_id IS NOT NULL AND "
"display_as_section IS NULL)"
.format(type=SurveyItemType.question), 'valid_question'),
db.CheckConstraint("type != {type} OR ("
"title IS NOT NULL AND "
"is_required IS NULL AND "
"field_type IS NULL AND "
"field_data::text = '{{}}' AND "
"parent_id IS NULL AND "
"display_as_section IS NOT NULL)"
.format(type=SurveyItemType.section), 'valid_section'),
db.CheckConstraint("type != {type} OR ("
"title IS NULL AND "
"is_required IS NULL AND "
"field_type IS NULL AND "
"field_data::text = '{{}}' AND "
"parent_id IS NOT NULL AND "
"display_as_section IS NULL)"
.format(type=SurveyItemType.text), 'valid_text'),
{'schema': 'event_surveys'})
__mapper_args__ = {
'polymorphic_on': 'type',
'polymorphic_identity': None
}
possible_render_modes = {RenderMode.markdown}
default_render_mode = RenderMode.markdown
#: The ID of the item
id = db.Column(
db.Integer,
primary_key=True
)
#: The ID of the survey
survey_id = db.Column(
db.Integer,
db.ForeignKey('event_surveys.surveys.id'),
index=True,
nullable=False,
)
#: The ID of the parent section item (NULL for top-level items, i.e. sections)
parent_id = db.Column(
db.Integer,
db.ForeignKey('event_surveys.items.id'),
index=True,
nullable=True,
)
#: The position of the item in the survey form
position = db.Column(
db.Integer,
nullable=False,
default=_get_next_position
)
#: The type of the survey item
type = db.Column(
PyIntEnum(SurveyItemType),
nullable=False
)
#: The title of the item
title = db.Column(
db.String,
nullable=True,
default=_get_item_default_title
)
#: If a section should be rendered as a section
display_as_section = db.Column(
db.Boolean,
nullable=True
)
# The following columns are only used for SurveyQuestion objects, but by
    # specifying them here we can access them without an extra query when we
# query SurveyItem objects directly instead of going through a subclass.
# This is done e.g. when using the Survey.top_level_items relationship.
#: If the question must be answered (wtforms DataRequired)
is_required = db.Column(
db.Boolean,
nullable=True
)
#: The type of the field used for the question
field_type = db.Column(
db.String,
nullable=True
)
#: Field-specific data (such as choices for multi-select fields)
field_data = db.Column(
JSONB,
nullable=False,
default={}
)
# relationship backrefs:
# - parent (SurveySection.children)
# - survey (Survey.items)
def to_dict(self):
"""Return a json-serializable representation of this object.
Subclasses must add their own data to the dict.
"""
return {'type': self.type.name, 'title': self.title, 'description': self.description}
class SurveyQuestion(SurveyItem):
__mapper_args__ = {
'polymorphic_identity': SurveyItemType.question
}
# relationship backrefs:
# - answers (SurveyAnswer.question)
@property
def field(self):
try:
impl = get_field_types()[self.field_type]
except KeyError:
return None
return impl(self)
@property
def locator(self):
return dict(self.survey.locator, section_id=self.parent_id, question_id=self.id)
@property
def not_empty_answers(self):
return [a for a in self.answers if not a.is_empty]
def get_summary(self, **kwargs):
"""Return the summary of answers submitted for this question."""
if self.field:
return self.field.get_summary(**kwargs)
def __repr__(self):
return f'<SurveyQuestion({self.id}, {self.survey_id}, {self.field_type}, {self.title})>'
def to_dict(self):
data = super().to_dict()
data.update({'is_required': self.is_required, 'field_type': self.field_type,
'field_data': self.field.copy_field_data()})
return data
class SurveySection(SurveyItem):
__mapper_args__ = {
'polymorphic_identity': SurveyItemType.section
}
#: The child items of this section
children = db.relationship(
'SurveyItem',
order_by='SurveyItem.position',
cascade='all, delete-orphan',
backref=db.backref(
'parent',
remote_side=[SurveyItem.id]
)
)
@property
def locator(self):
return dict(self.survey.locator, section_id=self.id)
def __repr__(self):
return f'<SurveySection({self.id}, {self.survey_id}, {self.title})>'
def to_dict(self):
data = super().to_dict()
content = [child.to_dict() for child in self.children]
data.update({'content': content, 'display_as_section': self.display_as_section})
if not self.display_as_section:
del data['title']
del data['description']
return data
class SurveyText(SurveyItem):
__mapper_args__ = {
'polymorphic_identity': SurveyItemType.text
}
@property
def locator(self):
return dict(self.survey.locator, section_id=self.parent_id, text_id=self.id)
def __repr__(self):
desc = text_to_repr(self.description)
return f'<SurveyText({self.id}, {self.survey_id}): "{desc}")>'
def to_dict(self):
data = super().to_dict()
del data['title']
return data
@listens_for(SurveySection.children, 'append')
def _set_survey(target, value, *unused):
if value.survey is None and target.survey is not None:
value.survey = target.survey
assert value.survey in {target.survey, None}
|
py | 7dfd84fbcf02caaf40f78973e3ac7f5e4bbfb37f | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_ntp
short_description: Manage NTP servers on a BIG-IP
description:
- Manage NTP servers on a BIG-IP.
version_added: 2.2
options:
ntp_servers:
description:
- A list of NTP servers to set on the device. At least one of C(ntp_servers)
or C(timezone) is required.
type: list
state:
description:
- The state of the NTP servers on the system. When C(present), guarantees
that the NTP servers are set on the system. When C(absent), removes the
specified NTP servers from the device configuration.
type: str
choices:
- absent
- present
default: present
timezone:
description:
- The timezone to set for NTP lookups. At least one of C(ntp_servers) or
C(timezone) is required.
type: str
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set NTP server
bigip_device_ntp:
ntp_servers:
- 192.0.2.23
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set timezone
bigip_device_ntp:
timezone: America/Los_Angeles
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
ntp_servers:
description: The NTP servers that were set on the device
returned: changed
type: list
sample: ["192.0.2.23", "192.0.2.42"]
timezone:
description: The timezone that was set on the device
returned: changed
type: str
  sample: America/Los_Angeles
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import is_empty_list
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import is_empty_list
class Parameters(AnsibleF5Parameters):
api_map = {
'servers': 'ntp_servers',
}
api_attributes = [
'servers', 'timezone',
]
updatables = [
'ntp_servers', 'timezone',
]
returnables = [
'ntp_servers', 'timezone',
]
absentables = [
'ntp_servers',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def ntp_servers(self):
ntp_servers = self._values['ntp_servers']
if ntp_servers is None:
return None
if is_empty_list(ntp_servers):
return []
return ntp_servers
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def ntp_servers(self):
state = self.want.state
if self.want.ntp_servers is None:
return None
if state == 'absent':
if self.have.ntp_servers is None and self.want.ntp_servers:
return None
if set(self.want.ntp_servers) == set(self.have.ntp_servers):
return []
if set(self.want.ntp_servers) != set(self.have.ntp_servers):
return list(set(self.want.ntp_servers).difference(self.have.ntp_servers))
if not self.want.ntp_servers:
if self.have.ntp_servers is None:
return None
if self.have.ntp_servers is not None:
return self.want.ntp_servers
if self.have.ntp_servers is None:
return self.want.ntp_servers
if set(self.want.ntp_servers) != set(self.have.ntp_servers):
return self.want.ntp_servers
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _absent_changed_options(self):
diff = Difference(self.want, self.have)
absentables = Parameters.absentables
changed = dict()
for k in absentables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.update()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def should_absent(self):
result = self._absent_changed_options()
if result:
return True
return False
def absent(self):
self.have = self.read_current_from_device()
if not self.should_absent():
return False
if self.module.check_mode:
return True
self.absent_on_device()
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/ntp/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/ntp/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def absent_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/ntp/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
ntp_servers=dict(
type='list',
),
timezone=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_one_of = [
['ntp_servers', 'timezone']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
py | 7dfd853c82ca947b709d63b37f7476cc8face37b | import socket
sk = socket.socket()
sk.connect(('127.0.0.1', 8899))
while 1:
name = input(">>>>:")
sk.send(name.encode('utf-8')) # 字节
response = sk.recv(1024) # 字节
print(response.decode('utf-8'))
|
py | 7dfd85f8659b39f9c19cc96f758bf5cd2bda614f | from conans.client.generators.cmake_common import cmake_dependencies, cmake_dependency_vars, \
cmake_global_vars, cmake_macros, cmake_package_info, cmake_settings_info, cmake_user_info_vars, \
generate_targets_section
from conans.model import Generator
from conans.paths import BUILD_INFO_CMAKE
class DepsCppCmake(object):
def __init__(self, cpp_info):
def join_paths(paths):
# Paths are doubled quoted, and escaped (but spaces)
return "\n\t\t\t".join('"%s"'
% p.replace('\\', '/').replace('$', '\\$').replace('"', '\\"')
for p in paths)
def join_flags(separator, values):
# Flags have to be escaped
return separator.join(v.replace('\\', '\\\\').replace('$', '\\$').replace('"', '\\"')
for v in values)
def join_defines(values, prefix=""):
            # Defines have to be escaped, including spaces
return "\n\t\t\t".join('"%s%s"' % (prefix, v.replace('\\', '\\\\').replace('$', '\\$').
replace('"', '\\"'))
for v in values)
self.include_paths = join_paths(cpp_info.include_paths)
self.lib_paths = join_paths(cpp_info.lib_paths)
self.res_paths = join_paths(cpp_info.res_paths)
self.bin_paths = join_paths(cpp_info.bin_paths)
self.build_paths = join_paths(cpp_info.build_paths)
self.src_paths = join_paths(cpp_info.src_paths)
self.libs = join_flags(" ", cpp_info.libs)
self.defines = join_defines(cpp_info.defines, "-D")
self.compile_definitions = join_defines(cpp_info.defines)
self.cxxflags = join_flags(" ", cpp_info.cxxflags)
self.cflags = join_flags(" ", cpp_info.cflags)
self.sharedlinkflags = join_flags(" ", cpp_info.sharedlinkflags)
self.exelinkflags = join_flags(" ", cpp_info.exelinkflags)
# For modern CMake targets we need to prepare a list to not
        # lose the elements in the list by replacing " " with ";". Example "-framework Foundation"
# Issue: #1251
self.cxxflags_list = join_flags(";", cpp_info.cxxflags)
self.cflags_list = join_flags(";", cpp_info.cflags)
self.sharedlinkflags_list = join_flags(";", cpp_info.sharedlinkflags)
self.exelinkflags_list = join_flags(";", cpp_info.exelinkflags)
self.rootpath = join_paths([cpp_info.rootpath])
class CMakeGenerator(Generator):
@property
def filename(self):
return BUILD_INFO_CMAKE
@property
def content(self):
sections = ["include(CMakeParseArguments)"]
# Per requirement variables
for dep_name, dep_cpp_info in self.deps_build_info.dependencies:
deps = DepsCppCmake(dep_cpp_info)
dep_flags = cmake_dependency_vars(dep_name, deps=deps)
sections.append(dep_flags)
for config, cpp_info in dep_cpp_info.configs.items():
deps = DepsCppCmake(cpp_info)
dep_flags = cmake_dependency_vars(dep_name, deps=deps, build_type=config)
sections.append(dep_flags)
# GENERAL VARIABLES
sections.append("\n### Definition of global aggregated variables ###\n")
sections.append(cmake_package_info(name=self.conanfile.name,
version=self.conanfile.version))
sections.append(cmake_settings_info(self.conanfile.settings))
all_flags = cmake_dependencies(dependencies=self.deps_build_info.deps)
sections.append(all_flags)
deps = DepsCppCmake(self.deps_build_info)
all_flags = cmake_global_vars(deps=deps)
sections.append(all_flags)
for config, cpp_info in self.deps_build_info.configs.items():
deps = DepsCppCmake(cpp_info)
dep_flags = cmake_global_vars(deps=deps, build_type=config)
sections.append(dep_flags)
# TARGETS
sections.extend(generate_targets_section(self.deps_build_info.dependencies))
# MACROS
sections.append(cmake_macros)
# USER DECLARED VARS
sections.append("\n### Definition of user declared vars (user_info) ###\n")
sections.append(cmake_user_info_vars(self.conanfile.deps_user_info))
return "\n".join(sections)
|
py | 7dfd86e99c905ec4271707a953db90dedac850cf | from __future__ import print_function, unicode_literals
import json
import os
import sys
from wptserve import sslutils
import environment as env
import products
import testloader
import wptcommandline
import wptlogging
import wpttest
from mozlog import capture, handlers
from font import FontInstaller
from testrunner import ManagerGroup
from browsers.base import NullBrowser
here = os.path.split(__file__)[0]
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
* For performance, tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
return logger
def get_loader(test_paths, product, debug=None, run_info_extras=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product,
browser_version=kwargs.get("browser_version"),
browser_channel=kwargs.get("browser_channel"),
verify=kwargs.get("verify"),
debug=debug,
extras=run_info_extras,
enable_webrender=kwargs.get("enable_webrender"))
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"],
manifest_download=kwargs["manifest_download"]).load()
manifest_filters = []
if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"] or kwargs["default_exclude"]:
manifest_filters.append(testloader.TestFilter(include=kwargs["include"],
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests,
explicit=kwargs["default_exclude"]))
ssl_enabled = sslutils.get_cls(kwargs["ssl_type"]).ssl_enabled
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_enabled,
skip_timeout=kwargs["skip_timeout"])
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print(item)
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test_type, tests in test_loader.disabled_tests.iteritems():
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print(json.dumps(rv, indent=2))
def list_tests(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test in test_loader.test_ids:
print(test)
def get_pause_after_test(test_loader, **kwargs):
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["headless"]:
return False
tests = test_loader.tests
is_single_testharness = (sum(len(item) for item in tests.itervalues()) == 1 and
len(tests.get("testharness", [])) == 1)
if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and is_single_testharness:
return True
return False
return kwargs["pause_after_test"]
def run_tests(config, test_paths, product, **kwargs):
"""Set up the test environment, load the list of tests to be executed, and
invoke the remainder of the code to execute tests"""
with capture.CaptureIO(logger, not kwargs["no_capture_stdio"]):
env.do_delayed_imports(logger, test_paths)
product = products.load_product(config, product, load_cls=True)
env_extras = product.get_env_extras(**kwargs)
product.check_args(**kwargs)
if kwargs["install_fonts"]:
env_extras.append(FontInstaller(
font_dir=kwargs["font_dir"],
ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
))
run_info, test_loader = get_loader(test_paths,
product.name,
run_info_extras=product.run_info_extras(**kwargs),
**kwargs)
test_source_kwargs = {"processes": kwargs["processes"]}
if kwargs["run_by_dir"] is False:
test_source_cls = testloader.SingleTestSource
else:
# A value of None indicates infinite depth
test_source_cls = testloader.PathGroupedSource
test_source_kwargs["depth"] = kwargs["run_by_dir"]
logger.info("Using %i client processes" % kwargs["processes"])
skipped_tests = 0
test_total = 0
unexpected_total = 0
if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
logger.critical("Unable to find any tests at the path(s):")
for path in kwargs["test_list"]:
logger.critical(" %s" % path)
logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
return False
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
ssl_config = {"type": kwargs["ssl_type"],
"openssl": {"openssl_binary": kwargs["openssl_binary"]},
"pregenerated": {"host_key_path": kwargs["host_key_path"],
"host_cert_path": kwargs["host_cert_path"],
"ca_cert_path": kwargs["ca_cert_path"]}}
        testharness_timeout_multiplier = product.get_timeout_multiplier("testharness", run_info, **kwargs)
        with env.TestEnvironment(test_paths,
                                 testharness_timeout_multiplier,
kwargs["pause_after_test"],
kwargs["debug_info"],
product.env_options,
ssl_config,
env_extras) as test_environment:
try:
test_environment.ensure_started()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e.message)
raise
repeat = kwargs["repeat"]
repeat_count = 0
repeat_until_unexpected = kwargs["repeat_until_unexpected"]
while repeat_count < repeat or repeat_until_unexpected:
repeat_count += 1
if repeat_until_unexpected:
logger.info("Repetition %i" % (repeat_count))
elif repeat > 1:
logger.info("Repetition %i / %i" % (repeat_count, repeat))
test_count = 0
unexpected_count = 0
logger.suite_start(test_loader.test_ids,
name='web-platform-test',
run_info=run_info,
extra={"run_by_dir": kwargs["run_by_dir"]})
for test_type in kwargs["test_types"]:
logger.info("Running %s tests" % test_type)
# WebDriver tests may create and destroy multiple browser
# processes as part of their expected behavior. These
# processes are managed by a WebDriver server binary. This
# obviates the need for wptrunner to provide a browser, so
# the NullBrowser is used in place of the "target" browser
if test_type == "wdspec":
browser_cls = NullBrowser
else:
browser_cls = product.browser_cls
browser_kwargs = product.get_browser_kwargs(test_type,
run_info,
config=test_environment.config,
**kwargs)
executor_cls = product.executor_classes.get(test_type)
executor_kwargs = product.get_executor_kwargs(test_type,
test_environment.config,
test_environment.cache_manager,
run_info,
**kwargs)
if executor_cls is None:
logger.error("Unsupported test type %s for product %s" %
(test_type, product.name))
continue
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
skipped_tests += 1
if test_type == "testharness":
run_tests = {"testharness": []}
for test in test_loader.tests["testharness"]:
if ((test.testdriver and not executor_cls.supports_testdriver) or
(test.jsshell and not executor_cls.supports_jsshell)):
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
skipped_tests += 1
else:
run_tests["testharness"].append(test)
else:
run_tests = test_loader.tests
with ManagerGroup("web-platform-tests",
kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
kwargs["rerun"],
kwargs["pause_after_test"],
kwargs["pause_on_unexpected"],
kwargs["restart_on_unexpected"],
kwargs["debug_info"],
not kwargs["no_capture_stdio"]) as manager_group:
try:
manager_group.run(test_type, run_tests)
except KeyboardInterrupt:
logger.critical("Main thread got signal")
manager_group.stop()
raise
test_count += manager_group.test_count()
unexpected_count += manager_group.unexpected_count()
test_total += test_count
unexpected_total += unexpected_count
logger.info("Got %i unexpected results" % unexpected_count)
logger.suite_end()
if repeat_until_unexpected and unexpected_total > 0:
break
if repeat_count == 1 and len(test_loader.test_ids) == skipped_tests:
break
if test_total == 0:
if skipped_tests > 0:
logger.warning("All requested tests were skipped")
else:
if kwargs["default_exclude"]:
logger.info("No tests ran")
return True
else:
logger.critical("No tests ran")
return False
if unexpected_total and not kwargs["fail_on_unexpected"]:
logger.info("Tolerating %s unexpected results" % unexpected_total)
return True
return unexpected_total == 0
def check_stability(**kwargs):
import stability
if kwargs["stability"]:
logger.warning("--stability is deprecated; please use --verify instead!")
kwargs['verify_max_time'] = None
kwargs['verify_chaos_mode'] = False
kwargs['verify_repeat_loop'] = 0
kwargs['verify_repeat_restart'] = 10 if kwargs['repeat'] == 1 else kwargs['repeat']
kwargs['verify_output_results'] = True
return stability.check_stability(logger,
max_time=kwargs['verify_max_time'],
chaos_mode=kwargs['verify_chaos_mode'],
repeat_loop=kwargs['verify_repeat_loop'],
repeat_restart=kwargs['verify_repeat_restart'],
output_results=kwargs['verify_output_results'],
**kwargs)
def start(**kwargs):
assert logger is not None
logged_critical = wptlogging.LoggedAboveLevelHandler("CRITICAL")
handler = handlers.LogLevelFilter(logged_critical, "CRITICAL")
logger.add_handler(handler)
try:
if kwargs["list_test_groups"]:
list_test_groups(**kwargs)
elif kwargs["list_disabled"]:
list_disabled(**kwargs)
elif kwargs["list_tests"]:
list_tests(**kwargs)
elif kwargs["verify"] or kwargs["stability"]:
return check_stability(**kwargs) or logged_critical.has_log
else:
return not run_tests(**kwargs) or logged_critical.has_log
finally:
logger.remove_handler(handler)
def main():
"""Main entry point when calling from the command line"""
kwargs = wptcommandline.parse_args()
try:
if kwargs["prefs_root"] is None:
kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
setup_logging(kwargs, {"raw": sys.stdout})
return start(**kwargs)
except Exception:
if kwargs["pdb"]:
import pdb
import traceback
print(traceback.format_exc())
pdb.post_mortem()
else:
raise
|
py | 7dfd86fe639101e416828b049dc8a1b9b74f7553 | # download_dataset.py
# ---
# Downloads the raw dataset from the Kaggle API
# and moves the resulting zipfile into the output filepath.
import click
import logging
import os
from shutil import move
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from kaggle.api.kaggle_api_extended import KaggleApi
@click.command()
@click.argument('output_filepath', type=click.Path())
def main(output_filepath):
""" Runs data download scripts to get dataset from Kaggle
and place into the data/raw folder.
"""
# Get the logger.
logger = logging.getLogger(__name__)
# Check if output file exists.
    if os.path.isfile(output_filepath):
logger.warning("Dataset already exists at specified location")
return
# Load envvars.
load_envvars(logger)
# Authenticate
api = authenticate(logger)
# Download the kaggle dataset.
# https://technowhisp.com/kaggle-api-python-documentation/
logger.info('Downloading dataset...')
api.dataset_download_files('promptcloud/walmart-product-data-2019', quiet=False, unzip=False)
# kaggle datasets download -d promptcloud/walmart-product-data-2019
# Move to the next location.
move_dataset(logger, "walmart-product-data-2019.zip", output_filepath)
def move_dataset(logger, input_filepath, output_filepath):
"""Move dataset file to the output directory location.
:param logger: Logger.
:param input_filepath: Input filepath (relative to project root).
:param output_filepath: Output directory path.
"""
logger.info('Looking for dataset at "' + input_filepath + '" ...')
# Get the file.
source_path = input_filepath
# Check if source file exists.
    if not os.path.isfile(source_path):
logger.error("Cannot find dataset at specified path.")
return
logger.info("Found dataset.")
target_path = Path(output_filepath)
logger.info('Moving dataset into output directory "' + str(target_path) + '" ...')
# Move the dataset, overwriting if it exists.
move(str(source_path), str(target_path))
logger.info("Moved dataset.")
def load_envvars(logger):
""" Load the environment variables for Kaggle authentication.
"""
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
logger.info("Loading envvars...")
load_dotenv(find_dotenv())
logger.info("Done loading envvars.")
def authenticate(logger):
""" Authenticate to the KaggleAPI.
"""
# Authenticate.
logger.info("Authenticating to the Kaggle API...")
api = KaggleApi()
api.authenticate()
logger.info("Done authenticating to the Kaggle API.")
return api
# Executes when 'main' is set.
if __name__ == '__main__':
# Prepare the logging format and level.
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# Execute the main function.
main() |
py | 7dfd87452d65855bb1f4ddbcdca63e8f96669212 | # Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import inspect
import re
import typing as ty
from abc import ABC, abstractmethod
from dataclasses import dataclass, replace, is_dataclass
import numpy as np
import torcharrow._torcharrow
import typing_inspect
# -----------------------------------------------------------------------------
# Aux
# Pretty printing constants; reused everywhere
OPEN = "{"
CLOSE = "}"
NL = "\n"
# Handy Type abbreviations; reused everywhere
ScalarTypes = ty.Union[int, float, bool, str]
# -----------------------------------------------------------------------------
# Schema and Field
MetaData = ty.Dict[str, str]
@dataclass(frozen=True)
class Field:
name: str
dtype: "DType"
metadata: ty.Optional[MetaData] = None
def __str__(self):
meta = ""
if self.metadata is not None:
meta = (
f"meta = {OPEN}{', '.join(f'{k}: {v}' for k,v in self.metadata)}{CLOSE}"
)
return f"Field('{self.name}', {str(self.dtype)}{meta})"
# -----------------------------------------------------------------------------
# Immutable Types with structural equality...
@dataclass(frozen=True) # type: ignore
class DType(ABC):
typecode: ty.ClassVar[str] = "__TO_BE_DEFINED_IN_SUBCLASS__"
arraycode: ty.ClassVar[str] = "__TO_BE_DEFINED_IN_SUBCLASS__"
@property
@abstractmethod
def nullable(self):
return False
@property
def py_type(self):
return type(self.default_value())
def __str__(self):
if self.nullable:
return f"{self.name.title()}(nullable=True)"
else:
return self.name
@abstractmethod
def constructor(self, nullable):
pass
def with_null(self, nullable=True):
return self.constructor(nullable)
def default_value(self):
# must be overridden by all non primitive types!
return type(self).default
def __qualstr__(self):
return "torcharrow.dtypes"
# for now: no float16, and all date and time stuff, no categorical, (and Null is called Void)
@dataclass(frozen=True)
class Void(DType):
nullable: bool = True
typecode: ty.ClassVar[str] = "n"
arraycode: ty.ClassVar[str] = "b"
name: ty.ClassVar[str] = "void"
default: ty.ClassVar[ty.Optional[bool]] = None
def constructor(self, nullable):
return Void(nullable)
@dataclass(frozen=True) # type: ignore
class Numeric(DType):
pass
@dataclass(frozen=True)
class Boolean(DType):
nullable: bool = False
typecode: ty.ClassVar[str] = "b"
arraycode: ty.ClassVar[str] = "b"
name: ty.ClassVar[str] = "boolean"
default: ty.ClassVar[bool] = False
def constructor(self, nullable):
return Boolean(nullable)
@dataclass(frozen=True)
class Int8(Numeric):
nullable: bool = False
typecode: ty.ClassVar[str] = "c"
arraycode: ty.ClassVar[str] = "b"
name: ty.ClassVar[str] = "int8"
default: ty.ClassVar[int] = 0
def constructor(self, nullable):
return Int8(nullable)
@dataclass(frozen=True)
class Int16(Numeric):
nullable: bool = False
typecode: ty.ClassVar[str] = "s"
arraycode: ty.ClassVar[str] = "h"
name: ty.ClassVar[str] = "int16"
default: ty.ClassVar[int] = 0
def constructor(self, nullable):
return Int16(nullable)
@dataclass(frozen=True)
class Int32(Numeric):
nullable: bool = False
typecode: ty.ClassVar[str] = "i"
arraycode: ty.ClassVar[str] = "i"
name: ty.ClassVar[str] = "int32"
default: ty.ClassVar[int] = 0
def constructor(self, nullable):
return Int32(nullable)
@dataclass(frozen=True)
class Int64(Numeric):
nullable: bool = False
typecode: ty.ClassVar[str] = "l"
arraycode: ty.ClassVar[str] = "l"
name: ty.ClassVar[str] = "int64"
default: ty.ClassVar[int] = 0
def constructor(self, nullable):
return Int64(nullable)
# Not all Arrow types are supported. We don't have a backend to support unsigned
# integer types right now so they are removed to not confuse users. Feel free to
# add unsigned int types when we have a supporting backend.
@dataclass(frozen=True)
class Float32(Numeric):
nullable: bool = False
typecode: ty.ClassVar[str] = "f"
arraycode: ty.ClassVar[str] = "f"
name: ty.ClassVar[str] = "float32"
default: ty.ClassVar[float] = 0.0
def constructor(self, nullable):
return Float32(nullable)
@dataclass(frozen=True)
class Float64(Numeric):
nullable: bool = False
typecode: ty.ClassVar[str] = "g"
arraycode: ty.ClassVar[str] = "d"
name: ty.ClassVar[str] = "float64"
default: ty.ClassVar[float] = 0.0
def constructor(self, nullable):
return Float64(nullable)
@dataclass(frozen=True)
class String(DType):
nullable: bool = False
typecode: ty.ClassVar[str] = "u" # utf8 string (n byte)
arraycode: ty.ClassVar[str] = "w" # wchar_t (2 byte)
name: ty.ClassVar[str] = "string"
default: ty.ClassVar[str] = ""
def constructor(self, nullable):
return String(nullable)
@dataclass(frozen=True)
class Map(DType):
key_dtype: DType
item_dtype: DType
nullable: bool = False
keys_sorted: bool = False
name: ty.ClassVar[str] = "Map"
typecode: ty.ClassVar[str] = "+m"
arraycode: ty.ClassVar[str] = ""
@property
def py_type(self):
return ty.Dict[self.key_dtype.py_type, self.item_dtype.py_type]
def constructor(self, nullable):
return Map(self.key_dtype, self.item_dtype, nullable)
def __str__(self):
nullable = ", nullable=" + str(self.nullable) if self.nullable else ""
return f"Map({self.key_dtype}, {self.item_dtype}{nullable})"
def default_value(self):
return {}
@dataclass(frozen=True)
class List(DType):
item_dtype: DType
nullable: bool = False
fixed_size: int = -1
name: ty.ClassVar[str] = "List"
typecode: ty.ClassVar[str] = "+l"
arraycode: ty.ClassVar[str] = ""
@property
def py_type(self):
return ty.List[self.item_dtype.py_type]
def constructor(self, nullable):
return List(self.item_dtype, nullable)
def __str__(self):
nullable = ", nullable=" + str(self.nullable) if self.nullable else ""
fixed_size = (
", fixed_size=" + str(self.fixed_size) if self.fixed_size >= 0 else ""
)
return f"List({self.item_dtype}{nullable}{fixed_size})"
def default_value(self):
return []
@dataclass(frozen=True)
class Struct(DType):
fields: ty.List[Field]
nullable: bool = False
is_dataframe: bool = False
metadata: ty.Optional[MetaData] = None
name: ty.ClassVar[str] = "Struct"
typecode: ty.ClassVar[str] = "+s"
arraycode: ty.ClassVar[str] = ""
# For generating NamedTuple class name for cached _py_type (done in __post__init__)
_py_type_id: ty.ClassVar[int] = 0
    def get_index(self, name: str) -> ty.Optional[int]:
for idx, field in enumerate(self.fields):
if field.name == name:
return idx
return None
def __post_init__(self):
if self.nullable:
for f in self.fields:
if not f.dtype.nullable:
raise TypeError(
f"nullable structs require each field (like {f.name}) to be nullable as well."
)
# cache the type instance, __setattr__ hack is needed due to the frozen dataclass
# the _py_type is not listed above to avoid participation in equality check
def fix_name(name, idx):
            # Anonymous Row
if name == "":
return "f_" + str(idx)
# Remove invalid character for NamedTuple
# TODO: this might cause name duplicates, do disambiguation
name = re.sub("[^a-zA-Z0-9_]", "_", name)
if name == "" or name[0].isdigit() or name[0] == "_":
name = "f_" + name
return name
object.__setattr__(
self,
"_py_type",
ty.NamedTuple(
"_StructGenerated_NamedTuple_" + str(type(self)._py_type_id),
[
(fix_name(f.name, idx), f.dtype.py_type)
for (idx, f) in enumerate(self.fields)
],
),
)
type(self)._py_type_id += 1
@property
def py_type(self):
return self._py_type
def constructor(self, nullable):
return Struct(self.fields, nullable)
def get(self, name):
for f in self.fields:
if f.name == name:
return f.dtype
raise KeyError(f"{name} not among fields")
def __str__(self):
nullable = ", nullable=" + str(self.nullable) if self.nullable else ""
fields = f"[{', '.join(str(f) for f in self.fields)}]"
meta = ""
if self.metadata is not None:
meta = f", meta = {OPEN}{', '.join(f'{k}: {v}' for k,v in self.metadata)}{CLOSE}"
else:
return f"Struct({fields}{nullable}{meta})"
def default_value(self):
return tuple(f.dtype.default_value() for f in self.fields)
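# Illustrative sketch (not part of the original module): how Struct sanitizes
# field names for its generated NamedTuple py_type via fix_name in __post_init__.
#   s = Struct([Field("a", int64), Field("b c", string)])
#   s.py_type._fields    # -> ('a', 'b_c')   invalid characters become '_'
#   s.default_value()    # -> (0, '')        per-field defaults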
# only used internally for type inference -------------------------------------
@dataclass(frozen=True)
class Tuple(DType):
fields: ty.List[DType]
nullable: bool = False
is_dataframe: bool = False
metadata: ty.Optional[MetaData] = None
name: ty.ClassVar[str] = "Tuple"
typecode: ty.ClassVar[str] = "+t"
arraycode: ty.ClassVar[str] = ""
@property
def py_type(self):
return tuple
def constructor(self, nullable):
return Tuple(self.fields, nullable)
def default_value(self):
return tuple(f.dtype.default_value() for f in self.fields)
@dataclass(frozen=True)
class Any(DType):
nullable: bool = True
typecode: ty.ClassVar[str] = "?"
arraycode: ty.ClassVar[str] = "?"
name: ty.ClassVar[str] = "any"
default: ty.ClassVar[ty.Optional[bool]] = None
@property
def size(self):
# currently 1 byte per bit
raise ValueError("Shouldn't be called")
@property
def py_type(self):
raise ValueError("Shouldn't be called")
def constructor(self, nullable=True):
assert nullable
return Any()
# TorchArrow does not yet support these types ---------------------------------
Tag = str
# abstract
@dataclass(frozen=True) # type: ignore
class Union_(DType):
pass
@dataclass(frozen=True) # type: ignore
class DenseUnion(DType):
tags: ty.List[Tag]
name: ty.ClassVar[str] = "DenseUnion"
typecode: ty.ClassVar[str] = "+ud"
arraycode: ty.ClassVar[str] = ""
@dataclass(frozen=True) # type: ignore
class SparseUnion(DType):
tags: ty.List[Tag]
name: ty.ClassVar[str] = "SparseUnion"
typecode: ty.ClassVar[str] = "+us"
arraycode: ty.ClassVar[str] = ""
boolean = Boolean()
int8 = Int8()
int16 = Int16()
int32 = Int32()
int64 = Int64()
float32 = Float32()
float64 = Float64()
string = String()
# Type test -------------------------------------------------------------------
# can be deleted once TorchArrow is implemented over velox...
def is_void(t):
"""
Return True if value is an instance of a void type.
"""
# print('is_boolean', t.typecode)
return t.typecode == "n"
def is_boolean(t):
"""
Return True if value is an instance of a boolean type.
"""
# print('is_boolean', t.typecode)
return t.typecode == "b"
def is_boolean_or_numerical(t):
return is_boolean(t) or is_numerical(t)
def is_numerical(t):
return is_integer(t) or is_floating(t)
def is_integer(t):
"""
Return True if value is an instance of any integer type.
"""
return t.typecode in "csilCSIL"
def is_signed_integer(t):
"""
Return True if value is an instance of any signed integer type.
"""
return t.typecode in "csil"
def is_int8(t):
"""
Return True if value is an instance of an int8 type.
"""
return t.typecode == "c"
def is_int16(t):
"""
Return True if value is an instance of an int16 type.
"""
return t.typecode == "s"
def is_int32(t):
"""
Return True if value is an instance of an int32 type.
"""
return t.typecode == "i"
def is_int64(t):
"""
Return True if value is an instance of an int64 type.
"""
return t.typecode == "l"
def is_floating(t):
"""
Return True if value is an instance of a floating point numeric type.
"""
return t.typecode in "fg"
def is_float32(t):
"""
Return True if value is an instance of a float32 (single precision) type.
"""
return t.typecode == "f"
def is_string(t):
return t.typecode == "u"
def is_float64(t):
"""
Return True if value is an instance of a float32 (single precision) type.
"""
return t.typecode == "g"
def is_list(t):
return t.typecode.startswith("+l")
def is_map(t):
return t.typecode.startswith("+m")
def is_struct(t):
return t.typecode.startswith("+s")
def is_primitive(t):
return t.typecode[0] != "+"
def is_tuple(t):
return t.typecode.startswith("+t")
def contains_tuple(t: DType):
if is_tuple(t):
return True
if is_list(t):
return contains_tuple(t.item_dtype)
if is_map(t):
return contains_tuple(t.key_dtype) or contains_tuple(t.item_dtype)
if is_struct(t):
return any(contains_tuple(f.dtype) for f in t.fields)
return False
def is_any(t):
return t.typecode == "?"
# Infer types from values -----------------------------------------------------
PREFIX_LENGTH = 5
def prt(value, type):
# print("<", value, ":", type, ">")
return type
def infer_dtype_from_value(value):
if value is None:
return Void()
if isinstance(value, (bool, np.bool8)):
return prt(value, boolean)
if isinstance(value, (int, np.integer)):
return prt(value, int64)
if isinstance(value, (float, np.float32)):
return prt(value, float32)
if isinstance(value, np.float64):
return prt(value, float64)
if isinstance(value, (str, np.str_)):
return prt(value, string)
if isinstance(value, list):
dtype = infer_dtype_from_prefix(value[:PREFIX_LENGTH])
return prt(value, List(dtype))
if isinstance(value, dict):
key_dtype = infer_dtype_from_prefix(list(value.keys())[:PREFIX_LENGTH])
items_dtype = infer_dtype_from_prefix(list(value.values())[:PREFIX_LENGTH])
return prt(value, Map(key_dtype, items_dtype))
if isinstance(value, tuple):
dtypes = []
for t in value:
dtypes.append(infer_dtype_from_value(t))
return prt(value, Tuple(dtypes))
raise AssertionError(f"unexpected case {value} of type {type(value)}")
def infer_dtype_from_prefix(prefix):
if len(prefix) == 0:
return Any()
dtype = infer_dtype_from_value(prefix[0])
for p in prefix[1:]:
old_dtype = dtype
next_dtype = infer_dtype_from_value(p)
dtype = common_dtype(old_dtype, next_dtype)
if dtype is None:
raise ValueError(
f"Cannot infer type of {prefix}: {old_dtype} {old_dtype.typecode}, {next_dtype} {next_dtype.typecode} {dtype}"
)
return dtype
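# Hedged examples (illustrative only): type inference from Python values follows
# the rules above, sampling only the first PREFIX_LENGTH elements of containers.
#   infer_dtype_from_value(3)                  # -> int64
#   infer_dtype_from_value([1, 2, 3])          # -> List(int64)
#   infer_dtype_from_value({"a": 1.0})         # -> Map(string, float32)
#   infer_dtype_from_prefix([1, None, 2.5])    # -> Float32(nullable=True)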
def infer_dype_from_callable_hint(
func: ty.Callable,
) -> ty.Tuple[ty.Optional[DType], ty.Optional[ty.Type]]:
dtype = None
py_type = None
if (
inspect.isfunction(func)
or inspect.ismethod(func)
or inspect.isclass(func)
or inspect.ismodule(func)
):
# get_type_hints expects module, class, method, or function as input
signature = ty.get_type_hints(func)
else:
signature = ty.get_type_hints(func.__call__)
if "return" in signature and signature["return"] is not None:
py_type = signature["return"]
dtype = dtype_of_type(py_type)
return (dtype, py_type)
# lub of two types for inference ----------------------------------------------
_promotion_list = [
("b", "b", boolean),
("bc", "bc", int8),
("bcs", "bcs", int16),
("bcsi", "bcsi", int32),
("bcsil", "bcsil", int64),
("bcsilf", "bcsilf", float32),
("bcsilfg", "bcsilfg", float64),
]
def promote(l, r):
assert is_boolean_or_numerical(l) and is_boolean_or_numerical(r)
lt = l.typecode
rt = r.typecode
if lt == rt:
return l.with_null(l.nullable or r.nullable)
for lts, rts, dtype in _promotion_list:
if (lt in lts) and (rt in rts):
return dtype.with_null(l.nullable or r.nullable)
return None
def common_dtype(l, r):
if is_void(l):
return r.with_null()
if is_void(r):
return l.with_null()
if is_any(l):
return r
if is_any(r):
return l
if is_string(l) and is_string(r):
return String(l.nullable or r.nullable)
if is_boolean_or_numerical(l) and is_boolean_or_numerical(r):
return promote(l, r)
if is_tuple(l) and is_tuple(r) and len(l.fields) == len(r.fields):
res = []
for i, j in zip(l.fields, r.fields):
m = common_dtype(i, j)
if m is None:
return None
res.append(m)
return Tuple(res).with_null(l.nullable or r.nullable)
if is_map(l) and is_map(r):
k = common_dtype(l.key_dtype, r.key_dtype)
i = common_dtype(l.item_dtype, r.item_dtype)
return (
Map(k, i).with_null(l.nullable or r.nullable)
if k is not None and i is not None
else None
)
if is_list(l) and is_list(r):
k = common_dtype(l.item_dtype, r.item_dtype)
return List(k).with_null(l.nullable or r.nullable) if k is not None else None
if l.with_null() == r.with_null():
return l if l.nullable else r
return None
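# Hedged examples (illustrative only): common_dtype is the least upper bound of
# two dtypes under the promotion lattice in _promotion_list, with nullability
# propagated through with_null().
#   common_dtype(int8, int16)                      # -> int16
#   common_dtype(Int32(nullable=True), float32)    # -> Float32(nullable=True)
#   common_dtype(List(int64), List(float64))       # -> List(float64)
#   common_dtype(string, int64)                    # -> None (no common supertype)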
# # Derive result types from operators ------------------------------------------
# Currently not used since we use numpy 's promotion rules...
# # DESIGN BUG: TODO needs actually both sides for symmetric promotion rules ...
# _arithmetic_ops = ["add", "sub", "mul", "floordiv", "truediv", "mod", "pow"]
# _comparison_ops = ["eq", "ne", "lt", "gt", "le", "ge"]
# _logical_ops = ["and", "or"]
# def derive_dtype(left_dtype, op):
# if is_numerical(left_dtype) and op in _arithmetic_ops:
# if op == "truediv":
# return Float64(left_dtype.nullable)
# elif op == "floordiv":
# if is_integer(left_dtype):
# return Int64(left_dtype.nullable)
# else:
# return Float64(left_dtype.nullable)
# else:
# return left_dtype
# if is_boolean(left_dtype) and op in _logical_ops:
# return left_dtype
# if op in _comparison_ops:
# return Boolean(left_dtype.nullable)
# raise AssertionError(
# f"derive_dtype, unexpected type {left_dtype} for operation {op}"
# )
# def derive_operator(op):
# return _operator_map[op]
# def _or(a, b):
# return a or b
# def _and(a, b):
# return a and b
# _operator_map = {
# "add": operator.add,
# "sub": operator.sub,
# "mul": operator.mul,
# "eq": operator.eq,
# "ne": operator.ne,
# "or": _or, # logical instead of bitwise
# "and": _and, # logical instead of bitwise
# "floordiv": operator.floordiv,
# "truediv": operator.truediv,
# "mod": operator.mod,
# "pow": operator.pow,
# "lt": operator.lt,
# "gt": operator.gt,
# "le": operator.le,
# "ge": operator.ge,
# }
def get_agg_op(op: str, dtype: DType) -> ty.Tuple[ty.Callable, DType]:
if op not in _agg_ops:
raise ValueError(f"undefined aggregation operator ({op})")
if op in ["min", "max", "sum", "prod", "mode"]:
return (_agg_ops[op], dtype)
if op in ["mean", "median"]:
return (_agg_ops[op], Float64(dtype.nullable))
if op in ["count"]:
return (_agg_ops[op], Int64(dtype.nullable))
raise AssertionError("unexpected case")
_agg_ops = {
"min": lambda c: c.min(),
"max": lambda c: c.max(),
"all": lambda c: c.all(),
"any": lambda c: c.any(),
"sum": lambda c: c.sum(),
"prod": lambda c: c.prod(),
"mean": lambda c: c.mean(),
"median": lambda c: c.median(),
"mode": lambda c: c.mode(),
"count": lambda c: c.count(),
}
def np_typeof_dtype(t: DType): # -> np.dtype[]:
if is_boolean(t):
return np.bool8
if is_int8(t):
return np.int8
if is_int16(t):
return np.int16
if is_int32(t):
return np.int32
if is_int64(t):
return np.int64
if is_float32(t):
return np.float32
if is_float64(t):
return np.float64
if is_string(t):
# we translate strings not into np.str_ but into object
return object
raise AssertionError(
f"translation of dtype {type(t).__name__} to numpy type unsupported"
)
def typeof_np_ndarray(t: np.ndarray) -> ty.Union[DType, ty.Literal["object"]]:
return typeof_np_dtype(t.dtype)
def typeof_np_dtype(t: np.dtype) -> DType:
    # only support the following non-structured columns,...
if t == np.bool8:
return boolean
if t == np.int8:
return int8
if t == np.int16:
return int16
if t == np.int32:
return int32
if t == np.int64:
return int64
# any float array can have nan -- all nan(s) will be masked
# -> so result type is FloatXX(True)
if t == np.float32:
return Float32(nullable=True)
if t == np.float64:
return Float64(nullable=True)
# can't test nicely for strings so we use the kind test
if t.kind == "U": # unicode like
return string
# any object array can have non-strings: all non strings will be masked.
# -> so result type is String(True)
if t == object:
return String(nullable=True)
raise AssertionError(
f"translation of numpy type {type(t).__name__} to dtype unsupported"
)
def dtype_of_velox_type(vtype: torcharrow._torcharrow.VeloxType) -> DType:
if vtype.kind() == torcharrow._torcharrow.TypeKind.BOOLEAN:
return Boolean(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.TINYINT:
return Int8(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.SMALLINT:
return Int16(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.INTEGER:
return Int32(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.BIGINT:
return Int64(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.REAL:
return Float32(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.DOUBLE:
return Float64(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.VARCHAR:
return String(nullable=True)
if vtype.kind() == torcharrow._torcharrow.TypeKind.ARRAY:
return List(
item_dtype=dtype_of_velox_type(
ty.cast(torcharrow._torcharrow.VeloxArrayType, vtype).element_type()
),
nullable=True,
)
if vtype.kind() == torcharrow._torcharrow.TypeKind.MAP:
vtype = ty.cast(torcharrow._torcharrow.VeloxMapType, vtype)
return Map(
key_dtype=dtype_of_velox_type(vtype.key_type()),
item_dtype=dtype_of_velox_type(vtype.value_type()),
nullable=True,
)
if vtype.kind() == torcharrow._torcharrow.TypeKind.ROW:
vtype = ty.cast(torcharrow._torcharrow.VeloxRowType, vtype)
fields = [
Field(name=vtype.name_of(i), dtype=dtype_of_velox_type(vtype.child_at(i)))
for i in range(vtype.size())
]
return Struct(fields=fields, nullable=True)
raise AssertionError(
f"translation of Velox typekind {vtype.kind()} to dtype unsupported"
)
def cast_as(dtype):
if is_string(dtype):
return str
if is_integer(dtype):
return int
if is_boolean(dtype):
return bool
if is_floating(dtype):
return float
raise AssertionError(f"cast to {dtype} unsupported")
def get_underlying_dtype(dtype: DType) -> DType:
return replace(dtype, nullable=False)
def get_nullable_dtype(dtype: DType) -> DType:
return replace(dtype, nullable=True)
def dtype_of_type(typ: ty.Union[ty.Type, DType]) -> DType:
assert typ is not None
if isinstance(typ, DType):
return typ
if typing_inspect.is_tuple_type(typ):
return Tuple([dtype_of_type(a) for a in typing_inspect.get_args(typ)])
if inspect.isclass(typ) and issubclass(typ, tuple) and hasattr(typ, "_fields"):
fields = typ._fields
field_types = getattr(typ, "__annotations__", None)
if field_types is None or any(n not in field_types for n in fields):
raise TypeError(
f"Can't infer type from namedtuple without type hints: {typ}"
)
return Struct([Field(n, dtype_of_type(field_types[n])) for n in fields])
if is_dataclass(typ):
return Struct(
[Field(f.name, dtype_of_type(f.type)) for f in dataclasses.fields(typ)]
)
if ty.get_origin(typ) in (List, list):
args = ty.get_args(typ)
assert len(args) == 1
elem_type = dtype_of_type(args[0])
return List(elem_type)
if ty.get_origin(typ) in (ty.Dict, dict):
args = ty.get_args(typ)
assert len(args) == 2
key = dtype_of_type(args[0])
value = dtype_of_type(args[1])
return Map(key, value)
if typing_inspect.is_optional_type(typ):
args = ty.get_args(typ)
assert len(args) == 2
if issubclass(args[1], type(None)):
contained = args[0]
else:
contained = args[1]
return dtype_of_type(contained).with_null()
# same inference rules as for values above
if typ is float:
# PyTorch defaults to use Single-precision floating-point format (float32) for Python float type
return float32
if typ is int:
return int64
if typ is str:
return string
if typ is bool:
return boolean
raise TypeError(f"Can't infer dtype from {typ}")
def dtype_from_batch_pytype(typ: ty.Type) -> DType:
"""
Like dtype_of_type but representing type hint for the set of rows. Can be a Column or a python List of nested types
"""
from .icolumn import IColumn
    assert typ is not None
if inspect.isclass(typ) and issubclass(typ, IColumn):
# TODO: we need a type annotation for Columns with statically accessible dtype
raise TypeError("Cannot infer dtype from IColumn")
if ty.get_origin(typ) in (List, list):
args = ty.get_args(typ)
assert len(args) == 1
return dtype_of_type(args[0])
raise TypeError("The outer type annotation must be a list or a Column")
|
py | 7dfd879d9dfa5a2d7721c3ab527a886d89cecb98 | from collections import namedtuple
import corner
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from . import PARAMS
from .settings import set_matplotlib_style_settings
from ..agn_logger import logger
set_matplotlib_style_settings()
CORNER_KWARGS = dict(
smooth=0.9,
label_kwargs=dict(fontsize=30),
title_kwargs=dict(fontsize=16),
truth_color="tab:orange",
quantiles=[0.16, 0.84],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.0)),
plot_density=False,
plot_datapoints=False,
fill_contours=True,
max_n_ticks=3,
verbose=False,
use_math_text=False,
)
def _get_one_dimensional_median_and_error_bar(
posterior, key, fmt=".2f", quantiles=(0.16, 0.84)
):
"""Calculate the median and error bar for a given key
Parameters
----------
key: str
The parameter key for which to calculate the median and error bar
fmt: str, ('.2f')
A format string
quantiles: list, tuple
A length-2 tuple of the lower and upper-quantiles to calculate
the errors bars for.
Returns
-------
summary: namedtuple
        An object with attributes median, plus, minus and string
"""
summary = namedtuple("summary", ["median", "lower", "upper", "string"])
if len(quantiles) != 2:
raise ValueError("quantiles must be of length 2")
quants_to_compute = np.array([quantiles[0], 0.5, quantiles[1]])
quants = np.percentile(posterior[key], quants_to_compute * 100)
summary.median = quants[1]
summary.plus = quants[2] - summary.median
summary.minus = summary.median - quants[0]
fmt = "{{0:{0}}}".format(fmt).format
string_template = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
summary.string = string_template.format(
fmt(summary.median), fmt(summary.minus), fmt(summary.plus)
)
return summary
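# Hedged example (column name and numbers are assumptions): for a posterior
# DataFrame with a "chi_eff" column, the returned summary carries the median and
# the distances to the 16th/84th percentiles, plus a LaTeX-formatted string such
# as "$0.12_{-0.05}^{+0.07}$" used for the marginal titles below.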
def _add_ci_vals_to_marginalised_posteriors(
fig, params, posterior: pd.DataFrame
):
    # plot the quantiles
axes = fig.get_axes()
for i, par in enumerate(params):
ax = axes[i + i * len(params)]
if ax.title.get_text() == "":
ax.set_title(
_get_one_dimensional_median_and_error_bar(
posterior, par, quantiles=CORNER_KWARGS["quantiles"]
).string,
**CORNER_KWARGS["title_kwargs"],
)
def overlaid_corner(
samples_list,
sample_labels,
params,
samples_colors,
fname="",
title=None,
truths={},
ranges=[],
quants=True,
):
"""Plots multiple corners on top of each other
:param samples_list: list of all posteriors to be plotted ontop of each other
:type samples_list: List[pd.DataFrame]
:param sample_labels: posterior's labels to be put on legend
:type sample_labels: List[str]
:param params: posterior params names (used to access posteriors samples)
:type params: List[str]
:param samples_colors: Color for each posterior
:type samples_colors: List[Color]
:param fname: Plot's save path
:type fname: str
:param title: Plot's suptitle if not None
:type title: None/str
:param truths: posterior param true vals
:type truths: Dict[str:float]
:return: None
"""
logger.info(f"Plotting {fname}")
logger.info(f"Cols in samples: {samples_list[0].columns.values}")
# sort the sample columns
samples_list = [s[params] for s in samples_list]
base_s = samples_list[0]
# get plot_posterior_predictive_check range, latex labels, colors and truths
plot_range, axis_labels = [], []
for p in params:
p_data = PARAMS.get(
p,
dict(range=(min(base_s[p]), max(base_s[p])), latex_label=f"${p}$"),
)
plot_range.append(p_data["range"])
axis_labels.append(p_data["latex_label"])
    if isinstance(ranges, list):
        if len(ranges) != 0:
            plot_range = ranges
    elif ranges is None:
        plot_range = None
# get some constants
n = len(samples_list)
_, ndim = samples_list[0].shape
min_len = min([len(s) for s in samples_list])
c_kwargs = CORNER_KWARGS.copy()
c_kwargs.update(
range=plot_range,
labels=axis_labels,
truths=truths,
truth_color=CORNER_KWARGS["truth_color"]
)
hist_kwargs=dict(lw=3, histtype='step', alpha=0.5)
if not quants:
c_kwargs.pop("quantiles", None)
fig = corner.corner(
samples_list[0],
color=samples_colors[0],
**c_kwargs,
hist_kwargs=dict(fc=samples_colors[0], ec=samples_colors[0], **hist_kwargs)
)
for idx in range(1, n):
col = samples_colors[idx]
fig = corner.corner(
samples_list[idx],
fig=fig,
weights=_get_normalisation_weight(len(samples_list[idx]), min_len),
color=col,
**c_kwargs,
hist_kwargs=dict(fc=col, ec=col, **hist_kwargs)
)
if len(samples_list) == 1:
_add_ci_vals_to_marginalised_posteriors(fig, params, samples_list[0])
plt.legend(
handles=[
mlines.Line2D(
[], [], color=samples_colors[i], label=sample_labels[i]
)
for i in range(len(sample_labels))
],
fontsize=20,
frameon=False,
bbox_to_anchor=(1, ndim),
loc="upper right",
)
if title:
fig.suptitle(title, y=0.97)
fig.subplots_adjust(top=0.75)
if fname:
fig.savefig(fname)
plt.close(fig)
else:
return fig
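# Hedged usage sketch (parameter values are assumptions, not from the original module):
#   overlaid_corner(
#       samples_list=[posterior_a, posterior_b],
#       sample_labels=["run A", "run B"],
#       params=["cos_theta_1", "chi_eff"],
#       samples_colors=["tab:blue", "tab:green"],
#       fname="comparison_corner.png",
#       truths={},
#   )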
def _get_normalisation_weight(len_current_samples, len_of_longest_samples):
return np.ones(len_current_samples) * (
len_of_longest_samples / len_current_samples
)
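# Worked example (hedged): with 500 samples overlaid on a run of 2000 samples,
# _get_normalisation_weight(500, 2000) returns 500 weights of 4.0 each, so both
# histograms integrate to the same effective count in the overlaid corner plot.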
|
py | 7dfd87ca85c27df4e59825eb33a5e986f219e4ee | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
def move():
rospy.init_node('robot_move', anonymous=True)
vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
# rate = rospy.Rate(10)
vel_msg = Twist()
vel_msg.linear.x = 4
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = 4
while not rospy.is_shutdown():
vel_pub.publish(vel_msg)
# rate.sleep()
if __name__ == "__main__":
try:
move()
except rospy.ROSInterruptException:
pass
|
py | 7dfd8828d6ca95deb581a2d7bbc0a4bc76741c82 | import os
from modules.utils import *
from modules.downloader import *
from modules.show import *
from modules.csv_downloader import *
from modules.utils import bcolors as bc
def image_level(args, DEFAULT_OID_DIR):
if not args.Dataset:
dataset_dir = os.path.join(DEFAULT_OID_DIR, 'Dataset_nl')
csv_dir = os.path.join(DEFAULT_OID_DIR, 'csv_folder_nl')
else:
dataset_dir = os.path.join(DEFAULT_OID_DIR, args.Dataset)
csv_dir = os.path.join(DEFAULT_OID_DIR, 'csv_folder_nl')
name_file_class = 'class-descriptions.csv'
CLASSES_CSV = os.path.join(csv_dir, name_file_class)
if args.sub is None:
print(bc.FAIL + 'Missing subset argument.' + bc.ENDC)
exit(1)
if args.sub == 'h':
file_list = ['train-annotations-human-imagelabels.csv', \
'validation-annotations-human-imagelabels.csv', \
'test-annotations-human-imagelabels.csv']
if args.sub == 'm':
file_list = ['train-annotations-machine-imagelabels.csv', \
'validation-annotations-machine-imagelabels.csv', \
'test-annotations-machine-imagelabels.csv']
if args.sub == 'h' or args.sub == 'm':
logo(args.command)
if args.type_csv is None:
print(bc.FAIL + 'Missing type_csv argument.' + bc.ENDC)
exit(1)
if args.classes is None:
print(bc.FAIL + 'Missing classes argument.' + bc.ENDC)
exit(1)
if args.multiclasses is None:
args.multiclasses = 0
folder = ['train', 'validation', 'test']
if args.classes[0].endswith('.txt'):
with open(args.classes[0]) as f:
args.classes = f.readlines()
args.classes = [x.strip() for x in args.classes]
else:
args.classes = [arg.replace('_', ' ') for arg in args.classes]
if args.multiclasses == '0':
mkdirs(dataset_dir, csv_dir, args.classes, args.type_csv)
for classes in args.classes:
class_name = classes
error_csv(name_file_class, csv_dir, args.yes)
df_classes = pd.read_csv(CLASSES_CSV, header=None)
class_code = df_classes.loc[df_classes[1] == class_name].values[0][0]
if args.type_csv == 'train':
name_file = file_list[0]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[0], dataset_dir, class_name, class_code)
else:
download(args, df_val, folder[0], dataset_dir, class_name, class_code, threads = int(args.n_threads))
elif args.type_csv == 'validation':
name_file = file_list[1]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[1], dataset_dir, class_name, class_code)
else:
download(args, df_val, folder[1], dataset_dir, class_name, class_code, threads = int(args.n_threads))
elif args.type_csv == 'test':
name_file = file_list[2]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[2], dataset_dir, class_name, class_code)
else:
download(args, df_val, folder[2], dataset_dir, class_name, class_code, threads = int(args.n_threads))
elif args.type_csv == 'all':
for i in range(3):
name_file = file_list[i]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[i], dataset_dir, class_name, class_code)
else:
download(args, df_val, folder[i], dataset_dir, class_name, class_code, threads = int(args.n_threads))
else:
print(bc.FAIL + 'csv file not specified' + bc.ENDC)
exit(1)
elif args.multiclasses == '1':
class_list = args.classes
print(bc.INFO + "Downloading {} together.".format(class_list) + bc.ENDC)
multiclass_name = ['_'.join(class_list)]
mkdirs(dataset_dir, csv_dir, multiclass_name, args.type_csv)
error_csv(name_file_class, csv_dir, args.yes)
df_classes = pd.read_csv(CLASSES_CSV, header=None)
class_dict = {}
for class_name in class_list:
class_dict[class_name] = df_classes.loc[df_classes[1] == class_name].values[0][0]
for class_name in class_list:
if args.type_csv == 'train':
name_file = file_list[0]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[0], dataset_dir, class_name, class_dict[class_name], class_list)
else:
download(args, df_val, folder[0], dataset_dir, class_name, class_dict[class_name], class_list, int(args.n_threads))
elif args.type_csv == 'validation':
name_file = file_list[1]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[1], dataset_dir, class_name, class_dict[class_name], class_list)
else:
download(args, df_val, folder[1], dataset_dir, class_name, class_dict[class_name], class_list, int(args.n_threads))
elif args.type_csv == 'test':
name_file = file_list[2]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[2], dataset_dir, class_name, class_dict[class_name], class_list)
else:
download(args, df_val, folder[2], dataset_dir, class_name, class_dict[class_name], class_list, int(args.n_threads))
elif args.type_csv == 'all':
for i in range(3):
name_file = file_list[i]
df_val = TTV(csv_dir, name_file, args.yes)
if not args.n_threads:
download(args, df_val, folder[i], dataset_dir, class_name, class_dict[class_name], class_list)
else:
download(args, df_val, folder[i], dataset_dir, class_name, class_dict[class_name], class_list, int(args.n_threads))
|
py | 7dfd888ea94c32eac7f5b3175098886821b82970 | from bs4 import BeautifulSoup
import collections as cl
import json
import os
import pathlib
import re
from libs import directory, connection
root_path = directory.current_directory
def make_and_change_directory(dir_name):
directory.make_directory(dir_name)
directory.change_directory(dir_name)
def create_ignore_file():
if not os.path.exists("./AtCoder/.gitignore"):
with open("./AtCoder/.gitignore", "w") as f:
f.write("test/\n")
f.write("output/\n")
else:
return
def extract_contest_name(url):
pattern_alpha = r"^(http|https)://([\w-]+).contest.atcoder.(jp|jp/)?$"
pattern_beta = r"^(http|https)://atcoder.jp/contests/([\w-]+)?(/)?$"
match_alpha = re.search(pattern_alpha, url)
match_beta = re.search(pattern_beta, url)
if match_alpha is None and match_beta is None:
print("This URL is incorrect\n")
return None
if match_beta is not None:
return match_beta.group(2)
if match_alpha is not None:
return match_alpha.group(2)
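# Hedged examples (illustrative URLs): both the current and the legacy AtCoder
# URL schemes are accepted by the patterns above.
#   extract_contest_name("https://atcoder.jp/contests/abc123")     # -> "abc123"
#   extract_contest_name("https://abc123.contest.atcoder.jp/")     # -> "abc123"
#   extract_contest_name("https://example.com/abc123")             # -> None (prints a warning)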
def create_directory_of_question(contest_url):
tasks_url = get_tasks_page_url(contest_url)
tasks_html = connection.get_page_text(tasks_url)
questions = get_ques_name_and_url(tasks_html)
root = get_root_url(contest_url)
if not directory.check_exist("../../template"):
return
for question in questions:
directory.copy_directory("../../template", f"./{question[0]}")
directory.change_directory(question[0])
question_html = connection.get_page_text(f"{root}{question[1]}")
examples = extract_example(question_html)
create_example_files(examples)
directory.change_directory("../")
def get_tasks_page_url(contest_url):
top_page = connection.get_page_text(contest_url)
soup = BeautifulSoup(top_page, "html.parser")
list_items = soup.findAll("li")
list_item = [item for item in list_items if "Tasks" in item.getText()]
href = list_item[0].select("a")[0].get("href")
return contest_url + href[href.rfind("/"):]
def get_ques_name_and_url(html):
questions = []
soup = BeautifulSoup(html, "html.parser")
table = soup.select("table")[0]
for tr in table.findAll("tr"):
tds = tr.select("td")
if tds:
href = tds[0].select("a")[0].get("href")
questions.append([tds[0].text, href])
return questions
def get_root_url(contest_url):
root = contest_url
index = root.find("/", len("https://"))
if not index == -1:
root = root[:index]
return root
def extract_example(html):
examples = []
soup = BeautifulSoup(html, "html.parser")
divs = soup.findAll("div")
for div in divs:
h3 = div.select("h3")
pre = div.select("pre")
if len(h3) > 0 and len(pre) > 0 and "例" in h3[0].text:
file_name = h3[0].text
file_content = pre[0].text
if "入力例" in file_name:
file_name = file_name.replace("入力例", "Sample_Input")
if "出力例" in file_name:
file_name = file_name.replace("出力例", "Sample_Output")
examples.append([file_name, file_content])
return examples
def create_example_files(file_info):
make_and_change_directory("test")
for info in file_info:
file_name = f"{directory.current_directory}/{info[0].replace(' ', '_')}"
file_name += ".txt"
with open(file_name, "w") as f:
f.write(info[1])
directory.change_directory("../")
def rename_answer_files_each_directory():
for sub_dir in directory.list_content():
if os.path.isfile(f"{directory.current_directory}/{sub_dir}"):
continue
directory.change_directory(sub_dir)
question_name = sub_dir.lower()
for file in directory.list_content():
if not os.path.isfile(f"{directory.current_directory}/{file}"):
continue
extension = file[file.rfind("."):]
directory.rename_file(file, question_name + extension)
directory.change_directory("../")
def create_info_json():
file_name = f"{directory.current_directory}/info.json"
if not os.path.exists(file_name):
stamp = get_stamp_from_source_files()
with open(file_name, "w") as f:
data = cl.OrderedDict()
data["stamp"] = stamp
json.dump(data, f, indent=2)
def get_stamp_from_source_files():
p = pathlib.Path(directory.current_directory)
stamp = None
for file in p.glob("**/*[!.txt]"):
if file.is_file() and (stamp is None or stamp.stat().st_mtime < file.stat().st_mtime):
stamp = file
if stamp is not None:
return stamp.stat().st_mtime
else:
return 0
def reset():
directory.current_directory = root_path
def run():
make_and_change_directory("AtCoder")
create_ignore_file()
while True:
print('Please input url of contest (ex. https://atcoder.jp/contests/xxx)')
contest_url = input("Use 'exit' to exit\n")
if contest_url == "exit":
return None
contest_name = extract_contest_name(contest_url)
if contest_name is not None:
break
if contest_url[len(contest_url) - 1] == "/":
contest_url = contest_url[:len(contest_url) - 1]
make_and_change_directory(contest_name)
create_directory_of_question(contest_url)
rename_answer_files_each_directory()
create_info_json()
print(f"Successful in joining at {contest_name}!!\n")
return contest_url
if __name__ == '__main__':
run()
|
py | 7dfd89c897b7babe8037489351ffbe689c381d8e | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Jason Narad <[email protected]>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
from collections import defaultdict
from itertools import chain
from functools import total_ordering
from nltk.grammar import (
DependencyProduction,
DependencyGrammar,
ProbabilisticDependencyGrammar,
)
from nltk.parse.dependencygraph import DependencyGraph
from nltk.internals import raise_unorderable_types
#################################################################
# Dependency Span
#################################################################
@total_ordering
class DependencySpan:
"""
A contiguous span over some part of the input string representing
dependency (head -> modifier) relationships amongst words. An atomic
span corresponds to only one word so it isn't a 'span' in the conventional
sense, as its _start_index = _end_index = _head_index for concatenation
purposes. All other spans are assumed to have arcs between all nodes
within the start and end indexes of the span, and one head index corresponding
to the head word for the entire span. This is the same as the root node if
the dependency structure were depicted as a graph.
"""
def __init__(self, start_index, end_index, head_index, arcs, tags):
self._start_index = start_index
self._end_index = end_index
self._head_index = head_index
self._arcs = arcs
self._tags = tags
self._comparison_key = (start_index, end_index, head_index, tuple(arcs))
self._hash = hash(self._comparison_key)
def head_index(self):
"""
:return: An value indexing the head of the entire ``DependencySpan``.
:rtype: int
"""
return self._head_index
def __repr__(self):
"""
        :return: A concise string representation of the ``DependencySpan``.
:rtype: str.
"""
return "Span %d-%d; Head Index: %d" % (
self._start_index,
self._end_index,
self._head_index,
)
def __str__(self):
"""
:return: A verbose string representation of the ``DependencySpan``.
:rtype: str
"""
str = "Span %d-%d; Head Index: %d" % (
self._start_index,
self._end_index,
self._head_index,
)
for i in range(len(self._arcs)):
str += "\n%d <- %d, %s" % (i, self._arcs[i], self._tags[i])
return str
def __eq__(self, other):
return (
type(self) == type(other) and self._comparison_key == other._comparison_key
)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, DependencySpan):
raise_unorderable_types("<", self, other)
return self._comparison_key < other._comparison_key
def __hash__(self):
"""
:return: The hash value of this ``DependencySpan``.
"""
return self._hash
#################################################################
# Chart Cell
#################################################################
class ChartCell:
"""
A cell from the parse chart formed when performing the CYK algorithm.
Each cell keeps track of its x and y coordinates (though this will probably
be discarded), and a list of spans serving as the cell's entries.
"""
def __init__(self, x, y):
"""
:param x: This cell's x coordinate.
:type x: int.
:param y: This cell's y coordinate.
:type y: int.
"""
self._x = x
self._y = y
self._entries = set([])
def add(self, span):
"""
Appends the given span to the list of spans
representing the chart cell's entries.
:param span: The span to add.
:type span: DependencySpan
"""
self._entries.add(span)
def __str__(self):
"""
:return: A verbose string representation of this ``ChartCell``.
:rtype: str.
"""
return "CC[%d,%d]: %s" % (self._x, self._y, self._entries)
def __repr__(self):
"""
:return: A concise string representation of this ``ChartCell``.
:rtype: str.
"""
return "%s" % self
#################################################################
# Parsing with Dependency Grammars
#################################################################
class ProjectiveDependencyParser:
"""
A projective, rule-based, dependency parser. A ProjectiveDependencyParser
is created with a DependencyGrammar, a set of productions specifying
word-to-word dependency relations. The parse() method will then
return the set of all parses, in tree representation, for a given input
sequence of tokens. Each parse must meet the requirements of the both
the grammar and the projectivity constraint which specifies that the
branches of the dependency tree are not allowed to cross. Alternatively,
this can be understood as stating that each parent node and its children
in the parse tree form a continuous substring of the input sequence.
"""
def __init__(self, dependency_grammar):
"""
Create a new ProjectiveDependencyParser, from a word-to-word
dependency grammar ``DependencyGrammar``.
        :param dependency_grammar: A word-to-word relation dependency grammar.
:type dependency_grammar: DependencyGrammar
"""
self._grammar = dependency_grammar
def parse(self, tokens):
"""
Performs a projective dependency parse on the list of tokens using
a chart-based, span-concatenation algorithm similar to Eisner (1996).
:param tokens: The list of input tokens.
:type tokens: list(str)
:return: An iterator over parse trees.
:rtype: iter(Tree)
"""
self._tokens = list(tokens)
chart = []
for i in range(0, len(self._tokens) + 1):
chart.append([])
for j in range(0, len(self._tokens) + 1):
chart[i].append(ChartCell(i, j))
if i == j + 1:
chart[i][j].add(DependencySpan(i - 1, i, i - 1, [-1], ["null"]))
for i in range(1, len(self._tokens) + 1):
for j in range(i - 2, -1, -1):
for k in range(i - 1, j, -1):
for span1 in chart[k][j]._entries:
for span2 in chart[i][k]._entries:
for newspan in self.concatenate(span1, span2):
chart[i][j].add(newspan)
for parse in chart[len(self._tokens)][0]._entries:
conll_format = ""
# malt_format = ""
for i in range(len(tokens)):
# malt_format += '%s\t%s\t%d\t%s\n' % (tokens[i], 'null', parse._arcs[i] + 1, 'null')
# conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], 'null', 'null', 'null', parse._arcs[i] + 1, 'null', '-', '-')
# Modify to comply with the new Dependency Graph requirement (at least must have an root elements)
conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % (
i + 1,
tokens[i],
tokens[i],
"null",
"null",
"null",
parse._arcs[i] + 1,
"ROOT",
"-",
"-",
)
dg = DependencyGraph(conll_format)
# if self.meets_arity(dg):
yield dg.tree()
def concatenate(self, span1, span2):
"""
Concatenates the two spans in whichever way possible. This
includes rightward concatenation (from the leftmost word of the
leftmost span to the rightmost word of the rightmost span) and
leftward concatenation (vice-versa) between adjacent spans. Unlike
Eisner's presentation of span concatenation, these spans do not
share or pivot on a particular word/word-index.
:return: A list of new spans formed through concatenation.
:rtype: list(DependencySpan)
"""
spans = []
if span1._start_index == span2._start_index:
print("Error: Mismatched spans - replace this with thrown error")
if span1._start_index > span2._start_index:
temp_span = span1
span1 = span2
span2 = temp_span
# adjacent rightward covered concatenation
new_arcs = span1._arcs + span2._arcs
new_tags = span1._tags + span2._tags
if self._grammar.contains(
self._tokens[span1._head_index], self._tokens[span2._head_index]
):
# print('Performing rightward cover %d to %d' % (span1._head_index, span2._head_index))
new_arcs[span2._head_index - span1._start_index] = span1._head_index
spans.append(
DependencySpan(
span1._start_index,
span2._end_index,
span1._head_index,
new_arcs,
new_tags,
)
)
# adjacent leftward covered concatenation
new_arcs = span1._arcs + span2._arcs
if self._grammar.contains(
self._tokens[span2._head_index], self._tokens[span1._head_index]
):
# print('performing leftward cover %d to %d' % (span2._head_index, span1._head_index))
new_arcs[span1._head_index - span1._start_index] = span2._head_index
spans.append(
DependencySpan(
span1._start_index,
span2._end_index,
span2._head_index,
new_arcs,
new_tags,
)
)
return spans
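def _projective_parser_demo():
    # Hedged usage sketch (not part of the original module): build a tiny
    # word-to-word dependency grammar and parse a sentence with the rule-based
    # projective parser. DependencyGrammar is already imported at module top.
    grammar = DependencyGrammar.fromstring(
        """
        'scratch' -> 'cats' | 'walls'
        'walls' -> 'the'
        'cats' -> 'the'
        """
    )
    parser = ProjectiveDependencyParser(grammar)
    # Each parse is an nltk Tree rooted at the head word of the whole sentence.
    for tree in parser.parse(["the", "cats", "scratch", "the", "walls"]):
        print(tree)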
#################################################################
# Parsing with Probabilistic Dependency Grammars
#################################################################
class ProbabilisticProjectiveDependencyParser:
"""A probabilistic, projective dependency parser.
This parser returns the most probable projective parse derived from the
probabilistic dependency grammar derived from the train() method. The
probabilistic model is an implementation of Eisner's (1996) Model C, which
conditions on head-word, head-tag, child-word, and child-tag. The decoding
uses a bottom-up chart-based span concatenation algorithm that's identical
to the one utilized by the rule-based projective parser.
Usage example
-------------
>>> from nltk.parse.dependencygraph import conll_data2
>>> graphs = [
... DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry
... ]
>>> ppdp = ProbabilisticProjectiveDependencyParser()
>>> ppdp.train(graphs)
>>> sent = ['Cathy', 'zag', 'hen', 'wild', 'zwaaien', '.']
>>> list(ppdp.parse(sent))
[Tree('zag', ['Cathy', 'hen', Tree('zwaaien', ['wild', '.'])])]
"""
def __init__(self):
"""
Create a new probabilistic dependency parser. No additional
operations are necessary.
"""
def parse(self, tokens):
"""
Parses the list of tokens subject to the projectivity constraint
and the productions in the parser's grammar. This uses a method
similar to the span-concatenation algorithm defined in Eisner (1996).
It returns the most probable parse derived from the parser's
probabilistic dependency grammar.
"""
self._tokens = list(tokens)
chart = []
for i in range(0, len(self._tokens) + 1):
chart.append([])
for j in range(0, len(self._tokens) + 1):
chart[i].append(ChartCell(i, j))
if i == j + 1:
if tokens[i - 1] in self._grammar._tags:
for tag in self._grammar._tags[tokens[i - 1]]:
chart[i][j].add(
DependencySpan(i - 1, i, i - 1, [-1], [tag])
)
else:
print(
"No tag found for input token '%s', parse is impossible."
% tokens[i - 1]
)
return []
for i in range(1, len(self._tokens) + 1):
for j in range(i - 2, -1, -1):
for k in range(i - 1, j, -1):
for span1 in chart[k][j]._entries:
for span2 in chart[i][k]._entries:
for newspan in self.concatenate(span1, span2):
chart[i][j].add(newspan)
trees = []
max_parse = None
max_score = 0
for parse in chart[len(self._tokens)][0]._entries:
conll_format = ""
malt_format = ""
for i in range(len(tokens)):
malt_format += "%s\t%s\t%d\t%s\n" % (
tokens[i],
"null",
parse._arcs[i] + 1,
"null",
)
# conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], parse._tags[i], parse._tags[i], 'null', parse._arcs[i] + 1, 'null', '-', '-')
# Modify to comply with recent change in dependency graph such that there must be a ROOT element.
conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % (
i + 1,
tokens[i],
tokens[i],
parse._tags[i],
parse._tags[i],
"null",
parse._arcs[i] + 1,
"ROOT",
"-",
"-",
)
dg = DependencyGraph(conll_format)
score = self.compute_prob(dg)
trees.append((score, dg.tree()))
trees.sort()
return (tree for (score, tree) in trees)
def concatenate(self, span1, span2):
"""
Concatenates the two spans in whichever way possible. This
includes rightward concatenation (from the leftmost word of the
leftmost span to the rightmost word of the rightmost span) and
leftward concatenation (vice-versa) between adjacent spans. Unlike
Eisner's presentation of span concatenation, these spans do not
share or pivot on a particular word/word-index.
:return: A list of new spans formed through concatenation.
:rtype: list(DependencySpan)
"""
spans = []
if span1._start_index == span2._start_index:
print("Error: Mismatched spans - replace this with thrown error")
if span1._start_index > span2._start_index:
temp_span = span1
span1 = span2
span2 = temp_span
# adjacent rightward covered concatenation
new_arcs = span1._arcs + span2._arcs
new_tags = span1._tags + span2._tags
if self._grammar.contains(
self._tokens[span1._head_index], self._tokens[span2._head_index]
):
new_arcs[span2._head_index - span1._start_index] = span1._head_index
spans.append(
DependencySpan(
span1._start_index,
span2._end_index,
span1._head_index,
new_arcs,
new_tags,
)
)
# adjacent leftward covered concatenation
new_arcs = span1._arcs + span2._arcs
new_tags = span1._tags + span2._tags
if self._grammar.contains(
self._tokens[span2._head_index], self._tokens[span1._head_index]
):
new_arcs[span1._head_index - span1._start_index] = span2._head_index
spans.append(
DependencySpan(
span1._start_index,
span2._end_index,
span2._head_index,
new_arcs,
new_tags,
)
)
return spans
def train(self, graphs):
"""
Trains a ProbabilisticDependencyGrammar based on the list of input
DependencyGraphs. This model is an implementation of Eisner's (1996)
Model C, which derives its statistics from head-word, head-tag,
child-word, and child-tag relationships.
:param graphs: A list of dependency graphs to train from.
:type: list(DependencyGraph)
"""
productions = []
events = defaultdict(int)
tags = {}
for dg in graphs:
for node_index in range(1, len(dg.nodes)):
# children = dg.nodes[node_index]['deps']
children = list(chain.from_iterable(dg.nodes[node_index]["deps"].values()))
nr_left_children = dg.left_children(node_index)
nr_right_children = dg.right_children(node_index)
nr_children = nr_left_children + nr_right_children
for child_index in range(
0 - (nr_left_children + 1), nr_right_children + 2
):
head_word = dg.nodes[node_index]["word"]
head_tag = dg.nodes[node_index]["tag"]
if head_word in tags:
tags[head_word].add(head_tag)
else:
tags[head_word] = set([head_tag])
child = "STOP"
child_tag = "STOP"
prev_word = "START"
prev_tag = "START"
if child_index < 0:
array_index = child_index + nr_left_children
if array_index >= 0:
child = dg.nodes[children[array_index]]["word"]
child_tag = dg.nodes[children[array_index]]["tag"]
if child_index != -1:
prev_word = dg.nodes[children[array_index + 1]]["word"]
prev_tag = dg.nodes[children[array_index + 1]]["tag"]
if child != "STOP":
productions.append(DependencyProduction(head_word, [child]))
head_event = "(head (%s %s) (mods (%s, %s, %s) left))" % (
child,
child_tag,
prev_tag,
head_word,
head_tag,
)
mod_event = "(mods (%s, %s, %s) left))" % (
prev_tag,
head_word,
head_tag,
)
events[head_event] += 1
events[mod_event] += 1
elif child_index > 0:
array_index = child_index + nr_left_children - 1
if array_index < nr_children:
child = dg.nodes[children[array_index]]["word"]
child_tag = dg.nodes[children[array_index]]["tag"]
if child_index != 1:
prev_word = dg.nodes[children[array_index - 1]]["word"]
prev_tag = dg.nodes[children[array_index - 1]]["tag"]
if child != "STOP":
productions.append(DependencyProduction(head_word, [child]))
head_event = "(head (%s %s) (mods (%s, %s, %s) right))" % (
child,
child_tag,
prev_tag,
head_word,
head_tag,
)
mod_event = "(mods (%s, %s, %s) right))" % (
prev_tag,
head_word,
head_tag,
)
events[head_event] += 1
events[mod_event] += 1
self._grammar = ProbabilisticDependencyGrammar(productions, events, tags)
def compute_prob(self, dg):
"""
Computes the probability of a dependency graph based
on the parser's probability model (defined by the parser's
statistical dependency grammar).
:param dg: A dependency graph to score.
:type dg: DependencyGraph
:return: The probability of the dependency graph.
        :rtype: float
"""
prob = 1.0
for node_index in range(1, len(dg.nodes)):
# children = dg.nodes[node_index]['deps']
children = list(chain.from_iterable(dg.nodes[node_index]["deps"].values()))
nr_left_children = dg.left_children(node_index)
nr_right_children = dg.right_children(node_index)
nr_children = nr_left_children + nr_right_children
for child_index in range(0 - (nr_left_children + 1), nr_right_children + 2):
head_word = dg.nodes[node_index]["word"]
head_tag = dg.nodes[node_index]["tag"]
child = "STOP"
child_tag = "STOP"
prev_word = "START"
prev_tag = "START"
if child_index < 0:
array_index = child_index + nr_left_children
if array_index >= 0:
child = dg.nodes[children[array_index]]["word"]
child_tag = dg.nodes[children[array_index]]["tag"]
if child_index != -1:
prev_word = dg.nodes[children[array_index + 1]]["word"]
prev_tag = dg.nodes[children[array_index + 1]]["tag"]
head_event = "(head (%s %s) (mods (%s, %s, %s) left))" % (
child,
child_tag,
prev_tag,
head_word,
head_tag,
)
mod_event = "(mods (%s, %s, %s) left))" % (
prev_tag,
head_word,
head_tag,
)
h_count = self._grammar._events[head_event]
m_count = self._grammar._events[mod_event]
# If the grammar is not covered
if m_count != 0:
prob *= h_count / m_count
else:
prob = 0.00000001 # Very small number
elif child_index > 0:
array_index = child_index + nr_left_children - 1
if array_index < nr_children:
child = dg.nodes[children[array_index]]["word"]
child_tag = dg.nodes[children[array_index]]["tag"]
if child_index != 1:
prev_word = dg.nodes[children[array_index - 1]]["word"]
prev_tag = dg.nodes[children[array_index - 1]]["tag"]
head_event = "(head (%s %s) (mods (%s, %s, %s) right))" % (
child,
child_tag,
prev_tag,
head_word,
head_tag,
)
mod_event = "(mods (%s, %s, %s) right))" % (
prev_tag,
head_word,
head_tag,
)
h_count = self._grammar._events[head_event]
m_count = self._grammar._events[mod_event]
if m_count != 0:
prob *= h_count / m_count
else:
prob = 0.00000001 # Very small number
return prob
#################################################################
# Demos
#################################################################
def demo():
projective_rule_parse_demo()
# arity_parse_demo()
projective_prob_parse_demo()
def projective_rule_parse_demo():
"""
A demonstration showing the creation and use of a
``DependencyGrammar`` to perform a projective dependency
parse.
"""
grammar = DependencyGrammar.fromstring(
"""
'scratch' -> 'cats' | 'walls'
'walls' -> 'the'
'cats' -> 'the'
"""
)
print(grammar)
pdp = ProjectiveDependencyParser(grammar)
trees = pdp.parse(["the", "cats", "scratch", "the", "walls"])
for tree in trees:
print(tree)
def arity_parse_demo():
"""
A demonstration showing the creation of a ``DependencyGrammar``
in which a specific number of modifiers is listed for a given
head. This can further constrain the number of possible parses
created by a ``ProjectiveDependencyParser``.
"""
print()
print("A grammar with no arity constraints. Each DependencyProduction")
print("specifies a relationship between one head word and only one")
print("modifier word.")
grammar = DependencyGrammar.fromstring(
"""
'fell' -> 'price' | 'stock'
'price' -> 'of' | 'the'
'of' -> 'stock'
'stock' -> 'the'
"""
)
print(grammar)
print()
print("For the sentence 'The price of the stock fell', this grammar")
print("will produce the following three parses:")
pdp = ProjectiveDependencyParser(grammar)
trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"])
for tree in trees:
print(tree)
print()
print("By contrast, the following grammar contains a ")
print("DependencyProduction that specifies a relationship")
print("between a single head word, 'price', and two modifier")
print("words, 'of' and 'the'.")
grammar = DependencyGrammar.fromstring(
"""
'fell' -> 'price' | 'stock'
'price' -> 'of' 'the'
'of' -> 'stock'
'stock' -> 'the'
"""
)
print(grammar)
print()
print(
"This constrains the number of possible parses to just one:"
) # unimplemented, soon to replace
pdp = ProjectiveDependencyParser(grammar)
trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"])
for tree in trees:
print(tree)
def projective_prob_parse_demo():
"""
A demo showing the training and use of a projective
dependency parser.
"""
from nltk.parse.dependencygraph import conll_data2
graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
ppdp = ProbabilisticProjectiveDependencyParser()
print("Training Probabilistic Projective Dependency Parser...")
ppdp.train(graphs)
sent = ["Cathy", "zag", "hen", "wild", "zwaaien", "."]
print("Parsing '", " ".join(sent), "'...")
print("Parse:")
for tree in ppdp.parse(sent):
print(tree)
if __name__ == "__main__":
demo()
|
py | 7dfd8bb7e2e2b6bf63352b778a57d7ad76a8058a | from fastapi import APIRouter
import json
import pandas as pd
router = APIRouter()
strain = pd.read_csv('data/strains.csv')
""" Return the data as JSON """
@router.get('/types')
async def types():
sativa = []
for i in range(strain.shape[0]):
if 'Sativa' in strain.type.iloc[i]:
sativa.append(strain.name.iloc[i])
# Converting into Json
sativa_json = json.dumps(sativa)
indica = []
for i in range(strain.shape[0]):
if 'Indica' in strain.type.iloc[i]:
indica.append(strain.name.iloc[i])
# Converting into Json
indica_json = json.dumps(indica)
hybrid = []
for i in range(strain.shape[0]):
if 'Hybrid' in strain.type.iloc[i]:
hybrid.append(strain.name.iloc[i])
# Converting into Json
hybrid_json = json.dumps(hybrid)
return 'Sativa', sativa_json, 'Indica', indica_json, 'Hybrid', hybrid_json |
py | 7dfd8c2a58bd74a52014c978885a862742c0948b | # coding: utf-8
"""
Antenny API
This is an api that allows you to interact with the Antenny platform. It allows you to manage your clients and subscriptions. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
from antenny.exceptions import ApiValueError
JSON_SCHEMA_VALIDATION_KEYWORDS = {
'multipleOf', 'maximum', 'exclusiveMaximum',
'minimum', 'exclusiveMinimum', 'maxLength',
'minLength', 'pattern', 'maxItems', 'minItems'
}
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:param disabled_client_side_validations (string): Comma-separated list of
JSON schema validation keywords to disable JSON schema structural validation
rules. The following keywords may be specified: multipleOf, maximum,
exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern,
maxItems, minItems.
By default, the validation is performed for data generated locally by the client
and data received from the server, independent of any validation performed by
the server side. If the input data does not satisfy the JSON schema validation
rules specified in the OpenAPI document, an exception is raised.
If disabled_client_side_validations is set, structural validation is
disabled. This can be useful to troubleshoot data validation problem, such as
when the OpenAPI document validation rules do not match the actual API data
received by the server.
:param server_index: Index to servers configuration.
:param server_variables: Mapping with string values to replace variables in
templated server configuration. The validation of enums is performed for
variables with defined enum values before.
:param server_operation_index: Mapping from operation ID to an index to server
configuration.
:param server_operation_variables: Mapping from operation ID to a mapping with
string values to replace variables in templated server configuration.
The validation of enums is performed for variables with defined enum values before.
:Example:
API Key Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
cookieAuth: # name for the security scheme
type: apiKey
in: cookie
name: JSESSIONID # cookie name
You can programmatically set the cookie:
conf = antenny.Configuration(
api_key={'cookieAuth': 'abc123'}
api_key_prefix={'cookieAuth': 'JSESSIONID'}
)
The following cookie will be added to the HTTP request:
Cookie: JSESSIONID abc123
"""
_default = None
def __init__(self, host=None,
api_key=None, api_key_prefix=None,
username=None, password=None,
discard_unknown_keys=False,
disabled_client_side_validations="",
server_index=None, server_variables=None,
server_operation_index=None, server_operation_variables=None,
):
"""Constructor
"""
self._base_path = "https://api.antenny.io" if host is None else host
"""Default Base url
"""
self.server_index = 0 if server_index is None and host is None else server_index
self.server_operation_index = server_operation_index or {}
"""Default server index
"""
self.server_variables = server_variables or {}
self.server_operation_variables = server_operation_variables or {}
"""Default server variables
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.disabled_client_side_validations = disabled_client_side_validations
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("antenny")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = None
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Enable client side validation
self.client_side_validation = True
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == 'disabled_client_side_validations':
s = set(filter(None, value.split(',')))
for v in s:
if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
raise ApiValueError(
"Invalid keyword: '{0}''".format(v))
self._disabled_client_side_validations = s
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:param alias: The alternative identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if 'ApiKeyAuth' in self.api_key:
auth['ApiKeyAuth'] = {
'type': 'api_key',
'in': 'header',
'key': 'X-API-Key',
'value': self.get_api_key_with_prefix(
'ApiKeyAuth',
),
}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 1.0.0\n"\
"SDK Package Version: 1.3.0".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "https://api.antenny.io",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None, servers=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:param servers: an array of host settings or None
:return: URL based on host settings
"""
if index is None:
return self._base_path
variables = {} if variables is None else variables
servers = self.get_host_settings() if servers is None else servers
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server.get('variables', {}).items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
@property
def host(self):
"""Return generated host."""
return self.get_host_from_settings(self.server_index, variables=self.server_variables)
@host.setter
def host(self, value):
"""Fix base path."""
self._base_path = value
self.server_index = None
|
py | 7dfd8c8213d460939d2cdf0a28f19f6aceb8a528 |
import pickle
import redis
from .base import CacheBackend, ABSENT
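# Cache backend that stores pickled values in Redis, one key per cache entry, with a
# per-key TTL (set via SETEX). get() returns the ABSENT sentinel imported from .base
# when a key is missing or has expired.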
class RedisBackend(CacheBackend):
def __init__(self, host='localhost', port=6379, db=0, password=None):
self._host = host
self._port = port
self._db = db
self._client = redis.Redis(host=host, port=port, db=db, password=password)
def get(self, key):
svalue = self._client.get(key)
if svalue is None:
return ABSENT
return self._load_value(svalue)
def set(self, key, value, lifetime):
self._client.setex(key, lifetime, self._dump_value(value))
def delete(self, key):
self._client.delete(key)
def _dump_value(self, value):
return pickle.dumps(value)
def _load_value(self, svalue):
return pickle.loads(svalue)
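# Example usage (a minimal sketch; the host/port values and keys are illustrative,
# not part of this module):
#
#   cache = RedisBackend(host='localhost', port=6379, db=0)
#   cache.set('user:42', {'name': 'Ada'}, lifetime=300)   # keep for 5 minutes
#   value = cache.get('user:42')                          # ABSENT if missing/expired
#   cache.delete('user:42')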
|
py | 7dfd8cca1a73569a582457230189dc5eba48b29e | from tkinter import *
from tkinter import ttk
root = Tk()
# Initialize our country "databases":
# - the list of country codes (a subset anyway)
# - a parallel list of country names, in the same order as the country codes
# - a hash table mapping country code to population
countrycodes = ('ar', 'au', 'be', 'br', 'ca', 'cn', 'dk', 'fi', 'fr', 'gr', 'in', 'it', 'jp', 'mx', 'nl', 'no', 'es', 'se', 'ch')
countrynames = ('Argentina', 'Australia', 'Belgium', 'Brazil', 'Canada', 'China', 'Denmark', \
'Finland', 'France', 'Greece', 'India', 'Italy', 'Japan', 'Mexico', 'Netherlands', 'Norway', 'Spain', \
'Sweden', 'Switzerland')
cnames = StringVar(value=countrynames)
populations = {'ar':41000000, 'au':21179211, 'be':10584534, 'br':185971537, \
'ca':33148682, 'cn':1323128240, 'dk':5457415, 'fi':5302000, 'fr':64102140, 'gr':11147000, \
'in':1131043000, 'it':59206382, 'jp':127718000, 'mx':106535000, 'nl':16402414, \
'no':4738085, 'es':45116894, 'se':9174082, 'ch':7508700}
# Names of the gifts we can send
gifts = { 'card':'Greeting card', 'flowers':'Flowers', 'nastygram':'Nastygram'}
# State variables
gift = StringVar()
sentmsg = StringVar()
statusmsg = StringVar()
# Called when the selection in the listbox changes; figure out
# which country is currently selected, and then lookup its country
# code, and from that, its population. Update the status message
# with the new population. As well, clear the message about the
# gift being sent, so it doesn't stick around after we start doing
# other things.
def showPopulation(*args):
idxs = lbox.curselection()
if len(idxs)==1:
idx = int(idxs[0])
code = countrycodes[idx]
name = countrynames[idx]
popn = populations[code]
statusmsg.set("The population of %s (%s) is %d" % (name, code, popn))
sentmsg.set('')
# Called when the user double clicks an item in the listbox, presses
# the "Send Gift" button, or presses the Return key. In case the selected
# item is scrolled out of view, make sure it is visible.
#
# Figure out which country is selected, which gift is selected with the
# radiobuttons, "send the gift", and provide feedback that it was sent.
def sendGift(*args):
idxs = lbox.curselection()
if len(idxs)==1:
idx = int(idxs[0])
lbox.see(idx)
name = countrynames[idx]
# Gift sending left as an exercise to the reader
sentmsg.set("Sent %s to leader of %s" % (gifts[gift.get()], name))
# Create and grid the outer content frame
c = ttk.Frame(root, padding=(5, 5, 12, 0))
c.grid(column=0, row=0, sticky=(N,W,E,S))
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0,weight=1)
# Create the different widgets; note the variables that many
# of them are bound to, as well as the button callback.
# Note we're using the StringVar() 'cnames', constructed from 'countrynames'
#SHOW <b>lbox</b> = Listbox(c, <b>listvariable=cnames</b>, height=5)
#HIDE
lbox = Listbox(c, listvariable=cnames, height=5)
#/HIDE
lbl = ttk.Label(c, text="Send to country's leader:")
#SHOW g1 = ttk.Radiobutton(c, text=gifts['card'], <b>variable=gift</b>, value='card')
#SHOW g2 = ttk.Radiobutton(c, text=gifts['flowers'], <b>variable=gift</b>, value='flowers')
#SHOW g3 = ttk.Radiobutton(c, text=gifts['nastygram'], <b>variable=gift</b>, value='nastygram')
#SHOW send = ttk.Button(c, text='Send Gift', <b>command=sendGift</b>, default='active')
#SHOW sentlbl = ttk.Label(c, <b>textvariable=sentmsg</b>, anchor='center')
#SHOW status = ttk.Label(c, <b>textvariable=statusmsg</b>, anchor=W)
#HIDE
g1 = ttk.Radiobutton(c, text=gifts['card'], variable=gift, value='card')
g2 = ttk.Radiobutton(c, text=gifts['flowers'], variable=gift, value='flowers')
g3 = ttk.Radiobutton(c, text=gifts['nastygram'], variable=gift, value='nastygram')
send = ttk.Button(c, text='Send Gift', command=sendGift, default='active')
sentlbl = ttk.Label(c, textvariable=sentmsg, anchor='center')
status = ttk.Label(c, textvariable=statusmsg, anchor=W)
#/HIDE
# Grid all the widgets
lbox.grid(column=0, row=0, rowspan=6, sticky=(N,S,E,W))
lbl.grid(column=1, row=0, padx=10, pady=5)
g1.grid(column=1, row=1, sticky=W, padx=20)
g2.grid(column=1, row=2, sticky=W, padx=20)
g3.grid(column=1, row=3, sticky=W, padx=20)
send.grid(column=2, row=4, sticky=E)
sentlbl.grid(column=1, row=5, columnspan=2, sticky=N, pady=5, padx=5)
status.grid(column=0, row=6, columnspan=2, sticky=(W,E))
c.grid_columnconfigure(0, weight=1)
c.grid_rowconfigure(5, weight=1)
# Set event bindings for when the selection in the listbox changes,
# when the user double clicks the list, and when they hit the Return key
lbox.bind('<<ListboxSelect>>', showPopulation)
lbox.bind('<Double-1>', sendGift)
root.bind('<Return>', sendGift)
# Colorize alternating lines of the listbox
for i in range(0,len(countrynames),2):
lbox.itemconfigure(i, background='#f0f0ff')
# Set the starting state of the interface, including selecting the
# default gift to send, and clearing the messages. Select the first
# country in the list; because the <<ListboxSelect>> event is only
# generated when the user makes a change, we explicitly call showPopulation.
gift.set('card')
sentmsg.set('')
statusmsg.set('')
lbox.selection_set(0)
showPopulation()
root.mainloop()
#HIDE
#update idletasks; update
#$::lbox selection clear 0
#$::lbox selection set 4
#$::lbox yview scroll 2 units
#wm geometry . [expr [winfo width .]+15]x[expr [winfo height .]+20]
#showPopulation
#set gift nastygram
#sendGift
#/HIDE
|
py | 7dfd8ea6e84a17cf088fe38ea5c2c4acad33c1aa | #!/usr/bin/python
##Basic socket interface to the R&S signal generator used for CW test signal input
import socket, time
class SCPI:
PORT = 5025
BAUDRATE = 9600
## Connect to the R&S signal generator
def __init__(self,
host=None, port=PORT, # set up socket connection
device=None, baudrate=BAUDRATE, # set up serial port not used
timeout=1,
display_info = False):
if device:
            raise RuntimeError('Only one connection can be initiated at a time.\nSelect socket connection.\n')
# Ethernet socket connection
self.connection = None
if host:
self.connection = 'socket'
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((host, port))
self.s.settimeout(1)
else:
raise RuntimeError('No connections specified.\n')
        # Query instrument identification
if display_info:
self.write("*IDN?")
print("DEVICE: " + self.read())
def display_info(self):
self.write("*IDN?")
return "DEVICE: " + self.read()
def testConnect(self):
try:
self.write('*IDN?')
return self.read()
except:
return False
# send query / command via relevant port comm
def write(self, command):
        self.s.send(command + '\n')
time.sleep(1)
def read(self):
return self.s.recv(128)
# activates RF output
def outputOn(self):
self.write("OUTPut ON")
# deactivates the RF output
    def outputOff(self):
self.write("OUTPut OFF")
# reset
def reset(self):
self.write("*RST")
self.write(" *CLS")
time.sleep(5)
        # Sleep for 5 seconds to give the instrument time to complete the reset
# close the comms port to the R&S signal generator
def __close__(self):
self.s.close()
def __exit__(self, type, value, traceback):
self.outputOff()
self.s.close()
return isinstance(value, TypeError)
def __enter__(self):
return self
# set requested frequency
def setFrequency(self, freq):
self.write(" FREQuency %.2f"%(freq,)) # Hz
def setSweep(self, start_freq, step_size, stop_freq,
SG_level, dwell_time):
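        # Units, as implied by the SCPI suffixes below: start_freq, step_size and
        # stop_freq are in kHz, dwell_time in ms and SG_level in dBm.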
self.write("SYST:DISP:UPD OFF")
self.write("FREQ:STAR %.2f kHz"%start_freq)
self.write("FREQ:STOP %.2f kHz"%stop_freq)
self.write("SWE:SPAC LIN")
self.write("SWE:STEP:LIN %f kHz"%step_size)
self.write("SWE:DWEL %.4f ms"%dwell_time)
self.write("SWE:MODE AUTO")
self.write("POW %.1f"%SG_level)
self.write("FREQ:MODE SWE")
# read signal generator frequency
def getFrequency(self):
self.write('FREQuency?')
return_freq=self.read()
try:
return_freq=float(return_freq)
except Exception as e:
print(e)
return return_freq # Hz
# set requested power level
def setPower(self, pwr):
        self.write('POWer %s' % str(pwr))  # dBm
# read sig gen power level
def getPower(self):
self.write('POWer?')
return float(self.read()) # dBm
if __name__ == '__main__':
# SMB100A R&S Signal Generator IP address
siggen_ip='192.168.14.61'
siggen_port=5025
## Using SCPI class for comms to signal generator for CW input signal
sigme=SCPI(siggen_ip)
sigme.setSweep(100, 1000, 1e6, -25, 100)
sigme.outputOn()
try:
sigme.__close__()
print('Closing all ports...')
except:
pass # socket already closed
#fin
|
py | 7dfd906543e933ab0cbca49096e6a790880dc712 | from pypdnsrest.dnsrecords import DNSCNameRecord
from pypdnsrest.dnsrecords import InvalidDNSRecordException
from tests.records.test_records import TestRecords
class TestCnameRecord(TestRecords):
def test_record(self):
rec = DNSCNameRecord(self.zone)
self.assertTrue(rec.set_data(u"test.test."))
def test_record_empty(self):
rec = DNSCNameRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data("")
def test_record_empty2(self):
rec = DNSCNameRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data(None)
def test_invalid(self):
rec = DNSCNameRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data(int(1)) |
py | 7dfd91af786dfeb31a00bde5eca56b19f6edc51c | # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import os
import sys
# gce_backend/
APP_DIR = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
def setup_test_env():
"""Sets up App Engine test environment."""
# For application modules.
sys.path.insert(0, APP_DIR)
from test_support import test_env
test_env.setup_test_env()
|
py | 7dfd91d5f4898c2fb584339170c5a3b81fd8a944 | from setuptools import setup, find_packages
import os
version = '1.2.1'
requires = [
'setuptools',
'schematics',
'jsonschema'
]
test_requires = requires + [
'flake8',
'coverage',
'Sphinx',
'mock'
]
docs_requires = requires + [
'sphinxcontrib-httpdomain',
'sphinx_rtd_theme',
'sphinx-jsonschema'
]
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
setup(name='openprocurement.schemas.dgf',
version=version,
description="",
long_description=README,
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
],
keywords="web services",
author='Quintagroup, Ltd.',
author_email='[email protected]',
url='https://github.com/openprocurement/openprocurement.schemas.dgf',
license='Apache License 2.0',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['openprocurement', 'openprocurement.schemas'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
extras_require={'test': test_requires, 'docs': docs_requires},
test_suite="openprocurement.schemas.dgf.tests.main.suite")
|
py | 7dfd93fa06ddca3a132a2efe1fbfe55b876d1011 | from config import ElasticsearchConfig
import opensearchpy
import hashlib
import json
from storage import ElasticsearchStorage
class TestElasticsearchStorage:
def setup(self):
config = ElasticsearchConfig.create_from_env()
self._es_client = opensearchpy.OpenSearch(config.host)
self._es_store = ElasticsearchStorage(self._es_client)
self._index = "testindex"
def teardown(self):
self._es_client.indices.delete(index=self._index, ignore=[400, 404])
self._es_store = None
self._es_client = None
def test_store_changes(self):
d = {"foo": "bar"}
docs = [d]
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_id)
d = {"foobar": "barfoo"}
docs.append(d)
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_id)
# make sure the value is visible in the index
self._es_client.indices.refresh(index=self._index)
all_docs = self._es_client.search(index=self._index)
assert len(all_docs["hits"]["hits"]) == 2
# Insert again the same records, should not store anything new
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_checksum)
assert len(all_docs["hits"]["hits"]) == 2
def test_store_changes_with_id_fn(self):
docs = [{
"foo": "bar",
}]
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_checksum)
docs.append({"foo": "bar", "timestamp": "2021-03-01T00:00:00.000Z"})
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_checksum)
# make sure the value is visible in the index
self._es_client.indices.refresh(index=self._index)
all_docs = self._es_client.search(index=self._index)
assert len(all_docs["hits"]["hits"]) == 1
def test_store_changes_with_transform_document_fn(self):
def add_qux(x: dict) -> dict:
x["qux"] = "foobar"
return x
docs = [{
"foo": "bar",
}]
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_checksum,
transform_document_fn=add_qux
)
# make sure the value is visible in the index
self._es_client.indices.refresh(index=self._index)
all_docs = self._es_client.search(index=self._index)
assert len(all_docs["hits"]["hits"]) == 1
assert "qux" in all_docs["hits"]["hits"][0]["_source"]
assert all_docs["hits"]["hits"][0]["_source"]["qux"] == "foobar"
# still won't add a new document if we try without transform document fn
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_checksum
)
all_docs = self._es_client.search(index=self._index)
assert len(all_docs["hits"]["hits"]) == 1
def test_store_changes_filtered(self):
filter_by = {"term": {"cluster_id": "da932361-df0a-4bfa-8b4f-599bb2db5135"}}
docs = [{
"cluster_id": "da932361-df0a-4bfa-8b4f-599bb2db5135",
"foo": "bar",
}]
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_checksum,
filter_by=filter_by
)
docs.append({
"foo": "bar",
"cluster_id": "da932361-df0a-4bfa-8b4f-599bb2db5135",
"timestamp": "2021-03-01T00:00:00.000Z"
})
self._es_store.store_changes(
index=self._index,
documents=docs,
id_fn=get_doc_checksum,
filter_by=filter_by
)
# make sure the value is visible in the index
self._es_client.indices.refresh(index=self._index)
all_docs = self._es_client.search(index=self._index)
assert len(all_docs["hits"]["hits"]) == 1
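# Helper id functions for the tests above: get_doc_id hashes the raw document, while
# get_doc_checksum drops the "timestamp" field first, so re-ingesting the same payload
# with only a new timestamp maps to the same document id and is not stored twice.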
def get_doc_id(doc: dict) -> str:
return hashlib.sha256(json.dumps(doc).encode('utf-8')).hexdigest()
def get_doc_checksum(doc: dict) -> str:
    # Work on a copy so the caller's document keeps its "timestamp" field.
    payload = dict(doc)
    if "timestamp" in payload:
        del payload["timestamp"]
return hashlib.sha256(json.dumps(payload, sort_keys=True).encode('utf-8')).hexdigest()
|
py | 7dfd9457ecd0f4f602a384115f2f3226fb36a13e | from enum import Enum
class RealmFlags(Enum):
NORMAL = 0
LOCKED = 1 # not shown in realm list
OFFLINE = 2
NEW_PLAYERS = 32
RECOMMENDED = 64
|
py | 7dfd94b961c485d1669e6077e24c23e85d4a01e0 | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
from collections import defaultdict
from typing import Dict, List
import numpy as np
from mo.graph.graph import Graph, Node
from mo.middle.passes.conv import get_tensor_in_port, get_value_in_port
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.const import Const
def resolve_shared_inputs(node: Node, port_ids_to_duplicate: List[int]):
"""
Duplicates shared constants that are consumed by more than one node.
    If a constant is consumed by several ports of the same node, it is not duplicated for that node.
"""
graph = node.graph
for port_id in port_ids_to_duplicate:
dst_port_map = defaultdict(list)
for dst in node.in_port(port_id).get_source().get_connection().get_destinations():
dst_port_map[dst.node].append(dst.idx)
del dst_port_map[node]
value = node.in_port(port_id).data.get_value()
        if value is None:
            log.debug('Cannot duplicate: no data for in_port {} of node {}'.format(port_id, node.name))
            continue
        for dst_node, idxs in dst_port_map.items():
            const = Const(graph, {'value': np.array(value)}).create_node()
            for idx in idxs:
                dst_node.in_port(idx).disconnect()
                const.out_port(0).connect(dst_node.in_port(idx))
const.infer(const)
class MulFakeQuantizeFuse(MiddleReplacementPattern):
""" Fuses Mul --> FakeQuantize sequence if possible
"""
enabled = False
def run_after(self):
return []
def run_before(self):
return []
def pattern(self):
return dict(
nodes=[
('preop', dict(op='Mul')),
('preoped', dict()),
('quantize', dict(op='FakeQuantize', keep_in_IR=True)),
],
edges=[
('preop', 'preoped'),
('preoped', 'quantize', {'in': 0}),
]
)
def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
quantize = match['quantize']
preop = match['preop']
tensor_port, value_port = get_tensor_in_port(preop), get_value_in_port(preop)
if value_port is None or value_port.data.get_value() is None:
log.debug('MulQuantizeFuse: cannot fuse because Mul op has dynamic inputs')
return
mul_val = value_port.data.get_value()
        # The 1st and 2nd port inputs of the FakeQuantize are modified in place, so the data nodes feeding
        # those ports must not have any other consumers (at most the same FakeQuantize consuming them on both
        # ports 1 and 2). Duplicate the FakeQuantize in_port 1 and 2 data if needed.
resolve_shared_inputs(node=quantize, port_ids_to_duplicate=[1, 2])
# TODO: need some special processing for values that exactly equal to threshold
# Need to flip output_low and output_high for those elements that have multiplier < 0
if np.all(mul_val < 0):
mi_o_node = quantize.in_port(3).get_source()
ma_o_node = quantize.in_port(4).get_source()
quantize.in_port(3).disconnect()
quantize.in_port(4).disconnect()
mi_o_node.connect(quantize.in_port(4))
ma_o_node.connect(quantize.in_port(3))
elif np.any(mul_val < 0):
# Flipping values should be done on exclusive inputs of FakeQuantize node, so we duplicate them if needed
resolve_shared_inputs(node=quantize, port_ids_to_duplicate=[3, 4])
# Successful flipping will be done on broadcasted arrays
mi_o_val = quantize.in_port(3).data.get_value()
ma_o_val = quantize.in_port(4).data.get_value()
mul_val, mi_o_val, ma_o_val = [np.array(a) for a in np.broadcast_arrays(mul_val, mi_o_val, ma_o_val)]
neg_idx = np.where(mul_val < 0)
mi_o_val[neg_idx], ma_o_val[neg_idx] = ma_o_val[neg_idx], mi_o_val[neg_idx]
# TODO: revert broadcasting where unnecessary
quantize.in_port(3).data.set_value(mi_o_val)
quantize.in_port(4).data.set_value(ma_o_val)
quantize.in_port(1).data.set_value(quantize.in_port(1).data.get_value() / mul_val)
if quantize.in_node(1).id != quantize.in_node(2).id:
quantize.in_port(2).data.set_value(quantize.in_port(2).data.get_value() / mul_val)
# Reconnect Mul as it no longer needed for current FakeQuantize
in_mul_connection = quantize.in_port(0).get_source().node.in_port(0).get_connection()
quantize.in_port(0).disconnect()
in_mul_connection.add_destination(quantize.in_port(0))
|
py | 7dfd95e62aa024fde2d90c9335df5e883c7d7d44 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1CephFSVolumeSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'monitors': 'list[str]',
'path': 'str',
'read_only': 'bool',
'secret_file': 'str',
'secret_ref': 'V1LocalObjectReference',
'user': 'str'
}
attribute_map = {
'monitors': 'monitors',
'path': 'path',
'read_only': 'readOnly',
'secret_file': 'secretFile',
'secret_ref': 'secretRef',
'user': 'user'
}
def __init__(self, monitors=None, path=None, read_only=None, secret_file=None, secret_ref=None, user=None, local_vars_configuration=None): # noqa: E501
"""V1CephFSVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._monitors = None
self._path = None
self._read_only = None
self._secret_file = None
self._secret_ref = None
self._user = None
self.discriminator = None
self.monitors = monitors
if path is not None:
self.path = path
if read_only is not None:
self.read_only = read_only
if secret_file is not None:
self.secret_file = secret_file
if secret_ref is not None:
self.secret_ref = secret_ref
if user is not None:
self.user = user
@property
def monitors(self):
"""Gets the monitors of this V1CephFSVolumeSource. # noqa: E501
Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:return: The monitors of this V1CephFSVolumeSource. # noqa: E501
:rtype: list[str]
"""
return self._monitors
@monitors.setter
def monitors(self, monitors):
"""Sets the monitors of this V1CephFSVolumeSource.
Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:param monitors: The monitors of this V1CephFSVolumeSource. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and monitors is None: # noqa: E501
raise ValueError("Invalid value for `monitors`, must not be `None`") # noqa: E501
self._monitors = monitors
@property
def path(self):
"""Gets the path of this V1CephFSVolumeSource. # noqa: E501
Optional: Used as the mounted root, rather than the full Ceph tree, default is / # noqa: E501
:return: The path of this V1CephFSVolumeSource. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1CephFSVolumeSource.
Optional: Used as the mounted root, rather than the full Ceph tree, default is / # noqa: E501
:param path: The path of this V1CephFSVolumeSource. # noqa: E501
:type: str
"""
self._path = path
@property
def read_only(self):
"""Gets the read_only of this V1CephFSVolumeSource. # noqa: E501
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:return: The read_only of this V1CephFSVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1CephFSVolumeSource.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:param read_only: The read_only of this V1CephFSVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_file(self):
"""Gets the secret_file of this V1CephFSVolumeSource. # noqa: E501
Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:return: The secret_file of this V1CephFSVolumeSource. # noqa: E501
:rtype: str
"""
return self._secret_file
@secret_file.setter
def secret_file(self, secret_file):
"""Sets the secret_file of this V1CephFSVolumeSource.
Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:param secret_file: The secret_file of this V1CephFSVolumeSource. # noqa: E501
:type: str
"""
self._secret_file = secret_file
@property
def secret_ref(self):
"""Gets the secret_ref of this V1CephFSVolumeSource. # noqa: E501
:return: The secret_ref of this V1CephFSVolumeSource. # noqa: E501
:rtype: V1LocalObjectReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1CephFSVolumeSource.
:param secret_ref: The secret_ref of this V1CephFSVolumeSource. # noqa: E501
:type: V1LocalObjectReference
"""
self._secret_ref = secret_ref
@property
def user(self):
"""Gets the user of this V1CephFSVolumeSource. # noqa: E501
Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:return: The user of this V1CephFSVolumeSource. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this V1CephFSVolumeSource.
Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it # noqa: E501
:param user: The user of this V1CephFSVolumeSource. # noqa: E501
:type: str
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CephFSVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CephFSVolumeSource):
return True
return self.to_dict() != other.to_dict()
|
py | 7dfd962f97ecd68b5c0533ed57031fdf44f5ac6d | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains utility functions for general purposes file/folder/image reading/writing."""
import errno
import os
import random
import imageio
from os.path import dirname
import numpy as np
import scipy.misc
import tensorflow as tf
from PIL import Image
from typing import Union
###########
# Folders #
###########
def touch_folder(file_path):
# type: (Union[str,unicode]) -> None
"""Create a folder along with its parent folders recursively if they do not exist."""
# Taken from https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist .
if not file_path.endswith('/'):
file_path = file_path + "/"
dn = dirname(file_path)
if dn != '':
try:
os.makedirs(dn)
except OSError as e:
if e.errno != errno.EEXIST:
raise
#########
# Files #
#########
def get_files_in_dir(directory, do_sort=False, do_random_ordering=False,
allowed_extensions={'.jpg', '.png', '.jpeg'}):
"""Returns all files in the directory and subdirectories with certain extensions.
:param directory: The parent directory of the images, or a file containing paths to images.
:param do_sort: returns a sorted list.
:param do_random_ordering: returns a deliberately shuffled list.
:param allowed_extensions: (optional) a set of allowed extensions. If not set, it allows all extensions.
:return: A sorted list of paths to images in the directory as well as all of its subdirectories.
"""
assert not (do_random_ordering and do_sort), '`do_sort` and `do_random_ordering` cannot both be true'
if os.path.isdir(directory):
if not directory.endswith('/'):
directory = directory + "/"
content_dirs = []
for path, subdirs, files in os.walk(directory):
for name in files:
full_file_path = os.path.join(path, name)
_, ext = os.path.splitext(full_file_path)
ext = ext.lower()
if allowed_extensions and ext in allowed_extensions:
content_dirs.append(full_file_path)
if len(content_dirs) == 0:
print('There is no requested file in directory %s.' % directory)
elif os.path.isfile(directory):
content_dirs = []
with open(directory, 'r') as f:
for line in f.readlines():
line = line.strip()
if len(line) > 0:
content_dirs.append(line)
if len(content_dirs) == 0:
print('File %s is empty.' % directory)
else:
content_dirs = []
print('There is no file or directory named %s.' % directory)
if do_sort:
content_dirs.sort()
elif do_random_ordering:
random.shuffle(content_dirs)
return content_dirs
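# Example call (illustrative path): get_files_in_dir('/data/photos/', do_sort=True)
# returns a sorted list of .jpg/.png/.jpeg paths found recursively; passing a text
# file instead returns the non-empty lines it contains.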
##########
# Images #
##########
def imread(path, shape=None, bw=False, rgba=False, dtype=np.float32):
# type: (str, tuple, bool, bool, np.dtype) -> np.ndarray
"""Reads an image.
:param path: path to the image
:param shape: (Height, width)
:param bw: Whether the image is black and white.
:param rgba: Whether the image is in rgba format.
:param dtype: dtype of the returned array.
:return: np array with shape (height, width, num_color(1, 3, or 4))
"""
assert not (bw and rgba)
if bw:
convert_format = 'L'
elif rgba:
convert_format = 'RGBA'
else:
convert_format = 'RGB'
if shape is None:
return np.asarray(Image.open(path).convert(convert_format), dtype)
else:
return np.asarray(Image.open(path).convert(convert_format).resize((shape[1], shape[0])), dtype)
def imsave(path, img):
# type: (str, (Union[np.ndarray,list])) -> None
"""
Automatically clip the image represented in a numpy array to 0~255 and save the image.
:param path: Path to save the image.
    :param img: Image represented in numpy array with a legal format for imageio.imwrite
:return: None
"""
if isinstance(img, list):
img = np.array(img)
if img.shape[-1] > 3 and len(img.shape) >= 3:
# Convert the image into one channel by summing all channels together
img = np.sum(img, axis=-1, keepdims=True)
img = np.clip(img, 0, 255).astype(np.uint8)
if len(img.shape) == 3 and img.shape[-1] == 1:
img = np.squeeze(img, -1)
imageio.imwrite(path, img)
def save_float_image(filename, img):
"""Saves a numpy image to `filename` assuming the image has values from 0~1.0"""
img = img * 255.0
img = img.astype(np.int32)
return imsave(filename, img)
##############
# Tensorflow #
##############
# Adapted from https://github.com/davidsandberg/facenet/blob/master/src/facenet.py
def load_model(model, input_map=None):
"""Loads a tensorflow model and restore the variables to the default session."""
# Check if the model is a model directory (containing a metagraph and a checkpoint file)
# or if it is a protobuf file with a frozen graph
model_exp = os.path.expanduser(model)
if (os.path.isfile(model_exp)):
print('Model filename: %s' % model_exp)
with tf.gfile.FastGFile(model_exp, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, input_map=input_map, name='')
else:
print('Model directory: %s' % model_exp)
meta_file, ckpt_file = get_model_filenames(model_exp)
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
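# Minimal usage sketch (TF1-style; the path is illustrative): inside
# `with tf.Session() as sess:` the session is the default session, so
# `load_model('/path/to/model_dir_or_frozen.pb')` restores into it.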
def get_model_filenames(model_dir):
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
meta_file = ckpt_file + '.meta'
return meta_file, ckpt_file
else:
raise ValueError('No checkpoint file found in the model directory (%s)' % model_dir)
|
py | 7dfd981995c3ca9b2a732199b08862c2744798e1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class ModifyCasterLayoutRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'ModifyCasterLayout','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BlendLists(self):
return self.get_query_params().get('BlendList')
def set_BlendLists(self, BlendLists):
for depth1 in range(len(BlendLists)):
if BlendLists[depth1] is not None:
self.add_query_param('BlendList.' + str(depth1 + 1) , BlendLists[depth1])
def get_LayoutId(self):
return self.get_query_params().get('LayoutId')
def set_LayoutId(self,LayoutId):
self.add_query_param('LayoutId',LayoutId)
def get_CasterId(self):
return self.get_query_params().get('CasterId')
def set_CasterId(self,CasterId):
self.add_query_param('CasterId',CasterId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_AudioLayers(self):
return self.get_query_params().get('AudioLayer')
def set_AudioLayers(self, AudioLayers):
for depth1 in range(len(AudioLayers)):
if AudioLayers[depth1].get('VolumeRate') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.VolumeRate', AudioLayers[depth1].get('VolumeRate'))
if AudioLayers[depth1].get('ValidChannel') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.ValidChannel', AudioLayers[depth1].get('ValidChannel'))
if AudioLayers[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.FixedDelayDuration', AudioLayers[depth1].get('FixedDelayDuration'))
def get_VideoLayers(self):
return self.get_query_params().get('VideoLayer')
def set_VideoLayers(self, VideoLayers):
for depth1 in range(len(VideoLayers)):
if VideoLayers[depth1].get('FillMode') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FillMode', VideoLayers[depth1].get('FillMode'))
if VideoLayers[depth1].get('HeightNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.HeightNormalized', VideoLayers[depth1].get('HeightNormalized'))
if VideoLayers[depth1].get('WidthNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.WidthNormalized', VideoLayers[depth1].get('WidthNormalized'))
if VideoLayers[depth1].get('PositionRefer') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionRefer', VideoLayers[depth1].get('PositionRefer'))
if VideoLayers[depth1].get('PositionNormalized') is not None:
for depth2 in range(len(VideoLayers[depth1].get('PositionNormalized'))):
if VideoLayers[depth1].get('PositionNormalized')[depth2] is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionNormalized.' + str(depth2 + 1) , VideoLayers[depth1].get('PositionNormalized')[depth2])
if VideoLayers[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FixedDelayDuration', VideoLayers[depth1].get('FixedDelayDuration'))
def get_MixLists(self):
return self.get_query_params().get('MixList')
def set_MixLists(self, MixLists):
for depth1 in range(len(MixLists)):
if MixLists[depth1] is not None:
self.add_query_param('MixList.' + str(depth1 + 1) , MixLists[depth1]) |
py | 7dfd98a89a3221e38948a071d3c566d07db6aaf0 | """
========================================================================
MemMsg_test
========================================================================
Author : Shunning Jiang
Date : Mar 10, 2018
"""
from pymtl3 import *
from ..MemMsg import MemMsgType, mk_mem_msg, mk_mem_req_msg, mk_mem_resp_msg
#-------------------------------------------------------------------------
# test_req_fields
#-------------------------------------------------------------------------
def test_req_fields():
# Create msg
ReqType = mk_mem_req_msg(8,16,40)
msg = ReqType( MemMsgType.READ, 7, 0x1000, 3, 0 )
# Verify msg
assert msg.type_ == 0
assert msg.opaque == 7
assert msg.addr == 0x1000
assert msg.len == 3
# Create msg
msg = ReqType( MemMsgType.WRITE, 9, 0x2000, 0, 0xdeadbeef )
# Verify msg
assert msg.type_ == 1
assert msg.opaque == 9
assert msg.addr == 0x2000
assert msg.len == 0
assert msg.data == 0xdeadbeef
#-------------------------------------------------------------------------
# test_req_str
#-------------------------------------------------------------------------
def test_req_str():
ReqType = mk_mem_req_msg(8,16,40)
# Create msg
msg = ReqType( MemMsgType.READ, 7, 0x1000, 3, 0 )
# Verify string
assert str(msg) == "rd:07:1000:3: "
ReqType = mk_mem_req_msg(4,16,40)
# Create msg
msg = ReqType( MemMsgType.WRITE, 9, 0x2000, 4, 0xdeadbeef )
# Verify string
assert str(msg) == "wr:9:2000:4:00deadbeef"
#-------------------------------------------------------------------------
# test_resp_fields
#-------------------------------------------------------------------------
def test_resp_fields():
RespType = mk_mem_resp_msg(8,40)
# Create msg
msg = RespType( MemMsgType.READ, 7, 2, 3, 0xf000adbeef )
# Verify msg
assert msg.type_ == 0
assert msg.opaque == 7
assert msg.test == 2
assert msg.len == 3
assert msg.data == 0xf000adbeef
# Create msg
msg = RespType( MemMsgType.WRITE, 9, 1, 0, 0 )
# Verify msg
assert msg.type_ == 1
assert msg.opaque == 9
assert msg.test == 1
assert msg.len == 0
assert msg.data == 0
#-------------------------------------------------------------------------
# test_resp_str
#-------------------------------------------------------------------------
def test_resp_str():
RespType = mk_mem_resp_msg(8,40)
# Create msg
msg = RespType( MemMsgType.READ, 7, 2, 3, 0x0000adbeef )
# Verify string
assert str(msg) == "rd:07:2:3:0000adbeef"
RespType = mk_mem_resp_msg(4,40)
# Create msg
msg = RespType( MemMsgType.WRITE, 9, 1, 0, 0 )
# Verify string
assert str(msg) == "wr:9:1:0: "
|
py | 7dfd9918315f2472079d315d82c7f06976050b6b | from __clrclasses__.System.Runtime.Remoting.Lifetime import ClientSponsor
from __clrclasses__.System.Runtime.Remoting.Lifetime import ILease
from __clrclasses__.System.Runtime.Remoting.Lifetime import ISponsor
from __clrclasses__.System.Runtime.Remoting.Lifetime import LeaseState
from __clrclasses__.System.Runtime.Remoting.Lifetime import LifetimeServices
|
py | 7dfd9a083d3e0fb9946eb383970c80a66ebf5d11 | """Support for Insteon Thermostats via ISY994 Platform."""
from typing import Callable, List, Optional
from pyisy.constants import (
CMD_CLIMATE_FAN_SETTING,
CMD_CLIMATE_MODE,
PROP_HEAT_COOL_STATE,
PROP_HUMIDITY,
PROP_SETPOINT_COOL,
PROP_SETPOINT_HEAT,
PROP_UOM,
PROTO_INSTEON,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN as CLIMATE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
_LOGGER,
DOMAIN as ISY994_DOMAIN,
HA_FAN_TO_ISY,
HA_HVAC_TO_ISY,
ISY994_NODES,
ISY_HVAC_MODES,
UOM_FAN_MODES,
UOM_HVAC_ACTIONS,
UOM_HVAC_MODE_GENERIC,
UOM_HVAC_MODE_INSTEON,
UOM_ISY_CELSIUS,
UOM_ISY_FAHRENHEIT,
UOM_ISYV4_NONE,
UOM_TO_STATES,
)
from .entity import ISYNodeEntity
from .helpers import convert_isy_value_to_hass, migrate_old_unique_ids
from .services import async_setup_device_services
ISY_SUPPORTED_FEATURES = (
SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE
)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 thermostat platform."""
entities = []
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
for node in hass_isy_data[ISY994_NODES][CLIMATE]:
entities.append(ISYThermostatEntity(node))
await migrate_old_unique_ids(hass, CLIMATE, entities)
async_add_entities(entities)
async_setup_device_services(hass)
class ISYThermostatEntity(ISYNodeEntity, ClimateEntity):
"""Representation of an ISY994 thermostat entity."""
def __init__(self, node) -> None:
"""Initialize the ISY Thermostat entity."""
super().__init__(node)
self._node = node
self._uom = self._node.uom
if isinstance(self._uom, list):
self._uom = self._node.uom[0]
self._hvac_action = None
self._hvac_mode = None
self._fan_mode = None
self._temp_unit = None
self._current_humidity = 0
self._target_temp_low = 0
self._target_temp_high = 0
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return ISY_SUPPORTED_FEATURES
@property
def precision(self) -> str:
"""Return the precision of the system."""
return PRECISION_TENTHS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
uom = self._node.aux_properties.get(PROP_UOM)
if not uom:
return self.hass.config.units.temperature_unit
if uom.value == UOM_ISY_CELSIUS:
return TEMP_CELSIUS
if uom.value == UOM_ISY_FAHRENHEIT:
return TEMP_FAHRENHEIT
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
humidity = self._node.aux_properties.get(PROP_HUMIDITY)
if not humidity:
return None
return int(humidity.value)
@property
def hvac_mode(self) -> Optional[str]:
"""Return hvac operation ie. heat, cool mode."""
hvac_mode = self._node.aux_properties.get(CMD_CLIMATE_MODE)
if not hvac_mode:
return None
        # Which state values are used depends on the mode property's UOM:
uom = hvac_mode.uom
# Handle special case for ISYv4 Firmware:
if uom in (UOM_ISYV4_NONE, ""):
uom = (
UOM_HVAC_MODE_INSTEON
if self._node.protocol == PROTO_INSTEON
else UOM_HVAC_MODE_GENERIC
)
return UOM_TO_STATES[uom].get(hvac_mode.value)
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return ISY_HVAC_MODES
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported."""
hvac_action = self._node.aux_properties.get(PROP_HEAT_COOL_STATE)
if not hvac_action:
return None
return UOM_TO_STATES[UOM_HVAC_ACTIONS].get(hvac_action.value)
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return convert_isy_value_to_hass(
self._node.status, self._uom, self._node.prec, 1
)
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return 1.0
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_COOL:
return self.target_temperature_high
if self.hvac_mode == HVAC_MODE_HEAT:
return self.target_temperature_low
return None
@property
def target_temperature_high(self) -> Optional[float]:
"""Return the highbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_COOL)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def target_temperature_low(self) -> Optional[float]:
"""Return the lowbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_HEAT)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return [FAN_AUTO, FAN_ON]
@property
def fan_mode(self) -> str:
"""Return the current fan mode ie. auto, on."""
fan_mode = self._node.aux_properties.get(CMD_CLIMATE_FAN_SETTING)
if not fan_mode:
return None
return UOM_TO_STATES[UOM_FAN_MODES].get(fan_mode.value)
def set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.hvac_mode == HVAC_MODE_COOL:
target_temp_high = target_temp
if self.hvac_mode == HVAC_MODE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
self._node.set_climate_setpoint_heat(int(target_temp_low))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_low = target_temp_low
if target_temp_high is not None:
self._node.set_climate_setpoint_cool(int(target_temp_high))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_high = target_temp_high
self.schedule_update_ha_state()
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
_LOGGER.debug("Requested fan mode %s", fan_mode)
self._node.set_fan_mode(HA_FAN_TO_ISY.get(fan_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._fan_mode = fan_mode
self.schedule_update_ha_state()
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
_LOGGER.debug("Requested operation mode %s", hvac_mode)
self._node.set_climate_mode(HA_HVAC_TO_ISY.get(hvac_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._hvac_mode = hvac_mode
self.schedule_update_ha_state()
|
py | 7dfd9a7bdde4805f749549d2b8b17acc3e221cee | import bpy
import numpy as np
import random
import time
import os
import sys
import re
import json
import ast
def srgb_to_linearrgb(c):
if c < 0: return 0
elif c < 0.04045: return c/12.92
else: return ((c+0.055)/1.055)**2.4
def hex_to_rgb(h,alpha=1):
r = (h & 0xff0000) >> 16
g = (h & 0x00ff00) >> 8
b = (h & 0x0000ff)
return tuple([srgb_to_linearrgb(c/0xff) for c in (r,g,b)] + [alpha])
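# Hedged illustration of the color helpers above; the hex values are arbitrary and
# not colors used by this script.
def _example_hex_colors():
    """Return linear-RGBA tuples converted from hex integers via hex_to_rgb()."""
    opaque_orange = hex_to_rgb(0xff8800)                 # alpha defaults to 1
    translucent_black = hex_to_rgb(0x000000, alpha=0.5)
    return opaque_orange, translucent_black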
def create_scene(bpyscene):
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
bpy.ops.object.light_add(type='AREA', radius=3, align='WORLD', location=(-3, -3, 1.5))
bpy.ops.transform.rotate(value=-1.09453, orient_axis='Y', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.transform.rotate(value=0.802275, orient_axis='Z', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, False, True), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.context.object.data.energy = 240
bpy.ops.object.light_add(type='AREA', radius=3, align='WORLD', location=(7, -0.4, 1.5))
bpy.ops.transform.rotate(value=-1.09453, orient_axis='Y', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.transform.rotate(value=-3.306, orient_axis='Z', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, False, True), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.context.object.data.energy = 200
bpy.ops.object.light_add(type='AREA', radius=3, align='WORLD', location=(1.55, 2.99, 2.3))
bpy.ops.transform.rotate(value=-0.9, orient_axis='X', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.context.object.data.energy = 200
bpy.ops.object.camera_add(enter_editmode=False, align='VIEW', location=(0, 0, 0), rotation=(0.111701, 1.00995e-10, 0.0628319))
bpy.ops.object.rotation_clear(clear_delta=False)
bpy.ops.transform.translate(value=(1.245, -5.68, 11.49))
bpy.ops.transform.rotate(value=0.449807, orient_axis='X', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(True, False, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpyscene.camera = bpy.context.object
bpy.ops.mesh.primitive_plane_add(enter_editmode=False, align='WORLD', location=(0, 0, -0.5))
bpy.ops.transform.resize(value=(15, 10, 1), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.extrude_region_move(MESH_OT_extrude_region={"use_normal_flip":False, "mirror":False}, TRANSFORM_OT_translate={"value":(0, 0, -0.492551), "orient_type":'NORMAL', "orient_matrix":((0, -1, 0), (1, 0, -0), (0, 0, 1)), "orient_matrix_type":'NORMAL', "constraint_axis":(False, False, True)})
bpy.ops.object.editmode_toggle()
bpy.ops.rigidbody.object_add(type='PASSIVE')
def sort_words(words_freq, add_exclude, num_words):
top_list = []
if num_words > len(words_freq):
num_words = len(words_freq)
for w in sorted(words_freq, key=words_freq.get, reverse=True)[:num_words]:
if w not in add_exclude:
top_list.append([w, words_freq[w]])
top_list.sort(key = lambda x: x[1])
max_val = top_list[-1][1]
min_val = top_list[0][1]
for i in range(len(top_list)):
top_list[i][1] = round(((top_list[i][1] - min_val) / (max_val - min_val)), 4)
return top_list
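# Hedged illustration of sort_words(): it keeps the num_words most frequent words,
# drops any excluded words after the top-N cut, and rescales counts to 0..1.
# The frequency dict below is made up for demonstration.
def _example_sort_words():
    words_freq = {'love': 12, 'night': 7, 'dance': 3, 'the': 40}
    # 'the' survives the top-3 cut but is then excluded, leaving two words:
    return sort_words(words_freq, add_exclude=['the'], num_words=3)
    # -> [['night', 0.0], ['love', 1.0]]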
def conv_rgb(color_pal):
col_pal_rgb = []
for c in color_pal:
col_pal_rgb.append(hex_to_rgb(int(c, 16)))
return col_pal_rgb
def create_material(color_pal, random_color):
material = bpy.data.materials.new(name="Random Colour")
material.use_nodes = True
bsdf = material.node_tree.nodes.get('Principled BSDF')
material_output = material.node_tree.nodes.get('Material Output')
material.node_tree.links.new(material_output.inputs[0], bsdf.outputs[0])
if not random_color:
col_pal_rgb = conv_rgb(color_pal)
col_ramp = material.node_tree.nodes.new('ShaderNodeValToRGB')
col_ramp.color_ramp.interpolation = 'CONSTANT'
material.node_tree.links.new(bsdf.inputs[0], col_ramp.outputs[0])
col_ramp.color_ramp.elements.remove(col_ramp.color_ramp.elements[0])
spacing = 1/len(col_pal_rgb)
pos = 0
for i in range(len(col_pal_rgb)):
col_ramp.color_ramp.elements.new(pos)
col_ramp.color_ramp.elements[i].color = col_pal_rgb[i]
pos += spacing
else:
hue_sat = material.node_tree.nodes.new('ShaderNodeHueSaturation')
hue_sat.inputs[4].default_value = (0.8, .11, 0.134, 1)
hue_sat.inputs[1].default_value = 0.9
material.node_tree.links.new(bsdf.inputs[0], hue_sat.outputs[0])
math = material.node_tree.nodes.new('ShaderNodeMath')
material.node_tree.links.new(hue_sat.inputs[0], math.outputs[0])
col_ramp = material.node_tree.nodes.new('ShaderNodeMath')
col_ramp.operation = 'MULTIPLY'
col_ramp.inputs[1].default_value = random.uniform(0.3, 10)
material.node_tree.links.new(math.inputs[0], col_ramp.outputs[0])
obj_info = material.node_tree.nodes.new('ShaderNodeObjectInfo')
material.node_tree.links.new(col_ramp.inputs[0], obj_info.outputs[4])
return material
def generate_objects(top_list, material, current_height, vertical_offset, mi_max, li_max, s_f, extrusion):
for word in top_list:
bpy.ops.object.text_add(enter_editmode=False, align='WORLD', location=(0,0, current_height))
bpy.context.active_object.name = word[0]
bpy.context.active_object.data.body = word[0]
bpy.context.object.data.extrude = extrusion
bpy.context.object.data.bevel_depth = 0.015
bpy.ops.object.convert(target='MESH')
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
if word[1] > 0.5:
bpy.ops.transform.translate(value=(random.uniform(-mi_max, mi_max), random.uniform(-mi_max, mi_max), 0), orient_type='GLOBAL')
else:
bpy.ops.transform.translate(value=(random.uniform(-li_max, li_max), random.uniform(-li_max, li_max), 0), orient_type='GLOBAL')
bpy.ops.transform.resize(value=((0.5 + word[1])*s_f, (0.5 + word[1])*s_f, 1), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)))
bpy.ops.transform.rotate(value=random.uniform(-.1, .1), orient_axis='Z', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, False, True), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
bpy.ops.rigidbody.object_add()
bpy.ops.object.shade_smooth()
bpy.context.object.data.use_auto_smooth = True
bpy.context.active_object.active_material = material
current_height += vertical_offset
def setup_render(bpyscene, URI):
dir_path = os.path.dirname(os.path.realpath(__file__))
override = {'scene': bpy.context.scene,
'point_cache': bpy.context.scene.rigidbody_world.point_cache}
#bpyscene.rigidbody_world.substeps_per_frame = 104
bpyscene.rigidbody_world.point_cache.frame_end = 75
bpyscene.frame_set(75)
bpy.context.scene.frame_current = 70
bpy.ops.ptcache.bake(override, bake=True)
bpyscene.render.engine = 'BLENDER_EEVEE'
bpyscene.cycles.device = "GPU"
bpyscene.render.image_settings.file_format = 'PNG'
bpyscene.render.filepath = dir_path + '/' + URI[-22:] + '.png'
bpy.ops.render.render(write_still = 1)
bpy.ops.wm.save_as_mainfile(filepath=dir_path + '/' + URI[-22:] + '.blend')
bpy.ops.ptcache.free_bake_all(override)
def main_blender(URI, settingsDict):
try:
num_words = int(settingsDict['num_words'])
mi_max = float(settingsDict['mi_max'])
li_max = float(settingsDict['li_max'])
current_height = float(settingsDict['current_height'])
vertical_offset = float(settingsDict['vertical_offset'])
extrusion = float(settingsDict['extrusion'])
s_f = float(settingsDict['s_f'])
color_pal = ast.literal_eval(settingsDict['color_pal'])
random_color = ast.literal_eval(settingsDict['random_color'])
add_exclude = ast.literal_eval(settingsDict['add_exclude'])
except KeyError:
print("Invalid config file")
exit()
bpyscene = bpy.context.scene
with open(URI[-22:] + '.txt', 'r') as f:
words_freq = json.loads(f.read())
create_scene(bpyscene)
top_list = sort_words(words_freq, add_exclude, num_words)
material = create_material(color_pal, random_color)
generate_objects(top_list, material, current_height, vertical_offset, mi_max, li_max, s_f, extrusion)
setup_render(bpyscene, URI)
def look_for_file(URI):
try:
open(URI[-22:] + '.txt')
print("Found frequency file for song...")
return
except IOError:
print("Generate a frequency file first")
exit()
def look_for_config():
try:
with open('config.txt') as f:
cont = f.read()
except IOError:
print("Could not find config file, make a new one")
exit()
print("\nSettings:")
settingsDict = {}
for c in cont.split("\n"):
try:
setting = c.split(" = ")
settingsDict[setting[0]] = setting[1].strip()
print(setting[0] + ':' + setting[1])
except IndexError:
pass
print("\n")
return settingsDict
def validate_uri(URI_maybe):
URI = re.findall(r"spotify:playlist:[A-Za-z0-9]{22}", URI_maybe)
if URI:
return URI[0]
else:
print("\nNot a valid URI")
exit()
def main():
argv = sys.argv
URI_maybe = argv[argv.index("--") + 1]
URI = validate_uri(URI_maybe)
print("\n")
print(URI)
look_for_file(URI)
settingsDict = look_for_config()
main_blender(URI, settingsDict)
if __name__ == '__main__':
    main()
|
py | 7dfd9bc0d1fca8bb9e7b2868cd4fe98d3f321c46 | import os
import math
import time
import argparse
import numpy as np
from tqdm import tqdm
from numpy.testing._private.utils import print_assert_equal
import torch
from torch import optim
from torch.utils.data import dataset
from numpy.core.fromnumeric import shape
from torchsummary import summary
import utils.loss
import utils.utils
import utils.datasets
import model.detector
if __name__ == '__main__':
    # Specify the training configuration file
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='',
help='Specify training profile *.data')
opt = parser.parse_args()
cfg = utils.utils.load_datafile(opt.data)
print("训练配置:")
print(cfg)
    # Load the datasets
train_dataset = utils.datasets.TensorDataset(cfg["train"], cfg["width"], cfg["height"], imgaug = True)
val_dataset = utils.datasets.TensorDataset(cfg["val"], cfg["width"], cfg["height"], imgaug = False)
batch_size = int(cfg["batch_size"] / cfg["subdivisions"])
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
    # Training set
train_dataloader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=utils.datasets.collate_fn,
num_workers=nw,
pin_memory=True,
drop_last=True,
persistent_workers=True
)
    # Validation set
val_dataloader = torch.utils.data.DataLoader(val_dataset,
batch_size=batch_size,
shuffle=False,
collate_fn=utils.datasets.collate_fn,
num_workers=nw,
pin_memory=True,
drop_last=False,
persistent_workers=True
)
    # Select the backend device (CUDA or CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Decide whether to load a pre-trained model
load_param = False
premodel_path = cfg["pre_weights"]
if premodel_path != None and os.path.exists(premodel_path):
load_param = True
    # Initialize the model structure
model = model.detector.Detector(cfg["classes"], cfg["anchor_num"], load_param).to(device)
summary(model, input_size=(3, cfg["height"], cfg["width"]))
    # Load the pre-trained model parameters
if load_param == True:
model.load_state_dict(torch.load(premodel_path, map_location=device), strict = False)
print("Load finefune model param: %s" % premodel_path)
else:
print("Initialize weights: model/backbone/backbone.pth")
    # Build the SGD optimizer
optimizer = optim.SGD(params=model.parameters(),
lr=cfg["learning_rate"],
momentum=0.949,
weight_decay=0.0005,
)
    # Learning-rate decay schedule
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
milestones=cfg["steps"],
gamma=0.1)
print('Starting training for %g epochs...' % cfg["epochs"])
batch_num = 0
for epoch in range(cfg["epochs"]):
model.train()
pbar = tqdm(train_dataloader)
for imgs, targets in pbar:
            # Data preprocessing
imgs = imgs.to(device).float() / 255.0
targets = targets.to(device)
            # Model forward pass
preds = model(imgs)
            # Loss computation
iou_loss, obj_loss, cls_loss, total_loss = utils.loss.compute_loss(preds, targets, cfg, device)
            # Backpropagation to compute gradients
total_loss.backward()
            # Learning-rate warmup
for g in optimizer.param_groups:
warmup_num = 5 * len(train_dataloader)
if batch_num <= warmup_num:
scale = math.pow(batch_num/warmup_num, 4)
g['lr'] = cfg["learning_rate"] * scale
lr = g["lr"]
            # Update the model parameters
if batch_num % cfg["subdivisions"] == 0:
optimizer.step()
optimizer.zero_grad()
            # Print progress information
info = "Epoch:%d LR:%f CIou:%f Obj:%f Cls:%f Total:%f" % (
epoch, lr, iou_loss, obj_loss, cls_loss, total_loss)
pbar.set_description(info)
batch_num += 1
        # Save the model
if epoch % 10 == 0 and epoch > 0:
model.eval()
            # Evaluate the model
print("computer mAP...")
_, _, AP, _ = utils.utils.evaluation(val_dataloader, cfg, model, device)
print("computer PR...")
precision, recall, _, f1 = utils.utils.evaluation(val_dataloader, cfg, model, device, 0.3)
print("Precision:%f Recall:%f AP:%f F1:%f"%(precision, recall, AP, f1))
torch.save(model.state_dict(), "weights/%s-%d-epoch-%fap-model.pth" %
(cfg["model_name"], epoch, AP))
        # Adjust the learning rate
scheduler.step()
|
py | 7dfd9c3dec86b77ab20a2c9fa8964fc8c0ed9932 | #!/usr/bin/env python
import re
def rules(lang):
with open('rules.%s' % lang) as rules_file:
for line in rules_file.readlines():
pattern, search, replace = line.split()
yield lambda noun: re.search(pattern, noun) and re.sub(search, replace, noun)
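# Hedged sketch of the rules file format assumed by rules(): each line of rules.<lang>
# holds three whitespace-separated fields -- a match pattern, a search pattern and its
# replacement. A plausible rules.en consistent with the tests below might contain:
#
#   [sxz]$             $    es
#   [^aeioudgkprt]h$   $    es
#   [^aeiou]y$         y$   ies
#   $                  $    s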
def plural(noun, lang="en"):
for rule in rules(lang):
result = rule(noun)
if result:
return result
if __name__ == '__main__':
tests = [
('bird', 'birds'),
('fax', 'faxes'),
('rash', 'rashes'),
('cheetah', 'cheetahs'),
('candy', 'candies'),
('day', 'days'),
]
for singular, pluralized in tests:
result = plural(singular)
if result == pluralized:
print("yes, the plural of %s is %s" % (singular, result))
else:
print("no, the plural of %s should be %s, not %s" % (singular, pluralized, result))
|
py | 7dfd9c7d0b47078cb0e313c8dae7a15f8593eb1d | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.notifications import make_email, send_email
from indico.modules.events.editing.schemas import EditingConfirmationAction
from indico.web.flask.templating import get_template_module
def _get_commenting_users(revision, check_internal_access=False):
return {
c.user
for c in revision.comments
if not check_internal_access or revision.editable.can_use_internal_comments(c.user)
}
def notify_comment(comment):
"""Notify about a new comments on a revision."""
revision = comment.revision
editor = revision.editable.editor
author = comment.user
if comment.internal:
# internal comments notify the editor and anyone who commented + can see internal comments
recipients = _get_commenting_users(revision, check_internal_access=True) | {editor}
elif author == editor:
# editor comments notify the submitter and anyone else who commented
recipients = _get_commenting_users(revision) | {revision.submitter}
elif revision.editable.can_perform_submitter_actions(author):
# submitter comments notify the editor and anyone else who commented
recipients = _get_commenting_users(revision) | {editor}
else:
# comments from someone else (managers) notify everyone
recipients = _get_commenting_users(revision) | {editor, revision.submitter}
recipients.discard(None) # in case there's no editor assigned
recipients.discard(author) # never bother people about their own comments
for recipient in recipients:
tpl = get_template_module('events/editing/emails/comment_notification.txt',
author_name=author.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=recipient.first_name)
send_email(make_email(recipient.email, template=tpl))
def notify_editor_judgment(revision, editor):
"""Notify the submitter about a judgment made by an editor."""
submitter = revision.submitter
tpl = get_template_module('events/editing/emails/editor_judgment_notification.txt',
editor_name=editor.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=submitter.first_name)
send_email(make_email(submitter.email, template=tpl))
def notify_submitter_upload(revision):
"""Notify the editor about the submitter uploading a new revision."""
submitter = revision.submitter
editor = revision.editable.editor
if not editor:
return
tpl = get_template_module('events/editing/emails/submitter_upload_notification.txt',
submitter_name=submitter.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=editor.first_name)
send_email(make_email(editor.email, template=tpl))
def notify_submitter_confirmation(revision, submitter, action):
"""Notify the editor(s) about submitter accepting/rejecting revision changes."""
editable = revision.editable
current_editor = editable.editor
prev_revision_editor = editable.revisions[-2].editor
recipients = {current_editor, prev_revision_editor}
recipients.discard(None)
if action == EditingConfirmationAction.accept:
template_path = 'events/editing/emails/submitter_confirmation_notification.txt'
else:
template_path = 'events/editing/emails/submitter_rejection_notification.txt'
for recipient in recipients:
tpl = get_template_module(template_path,
submitter_name=submitter.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=recipient.first_name)
send_email(make_email(recipient.email, template=tpl))
|
py | 7dfd9c851d5217dd38367c34c5a6b5bdbc1678fd | import calendar
import time
import locale
import six
import re
from dateutil.tz import tzlocal
from dateutil.parser import parse as dateutil_parse
from datetime import datetime, timedelta
from parsedatetime.parsedatetime import Calendar
locale.setlocale(locale.LC_ALL, '')
fuzzy_date_parse = Calendar().parse
fuzzy_datetime_parse = Calendar().parseDT
REMINDER_REGEX = r'^(\d+)([wdhm]?)(?:\s+(popup|email|sms))?$'
DURATION_REGEX = re.compile(
r'^((?P<days>[\.\d]+?)(?:d|day|days))?[ :]*'
r'((?P<hours>[\.\d]+?)(?:h|hour|hours))?[ :]*'
r'((?P<minutes>[\.\d]+?)(?:m|min|mins|minute|minutes))?[ :]*'
r'((?P<seconds>[\.\d]+?)(?:s|sec|secs|second|seconds))?$'
)
def parse_reminder(rem):
match = re.match(REMINDER_REGEX, rem)
if not match:
# Allow argparse to generate a message when parsing options
return None
n = int(match.group(1))
t = match.group(2)
m = match.group(3)
if t == 'w':
n = n * 7 * 24 * 60
elif t == 'd':
n = n * 24 * 60
elif t == 'h':
n = n * 60
if not m:
m = 'popup'
return n, m
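# Hedged examples of parse_reminder(); the reminder strings are illustrative only.
def _example_parse_reminders():
    assert parse_reminder('10m') == (10, 'popup')         # bare minutes, default method
    assert parse_reminder('1d email') == (1440, 'email')  # 1 day = 1440 minutes
    assert parse_reminder('2w sms') == (20160, 'sms')     # 2 weeks = 20160 minutes
    assert parse_reminder('soon') is None                 # rejected by REMINDER_REGEX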
def set_locale(new_locale):
try:
locale.setlocale(locale.LC_ALL, new_locale)
except locale.Error as exc:
raise ValueError(
'Error: ' + str(exc) +
'!\n Check supported locales of your system.\n')
def _u(text):
encoding = locale.getlocale()[1] or \
locale.getpreferredencoding(False) or 'UTF-8'
if issubclass(type(text), six.text_type):
return text
if not issubclass(type(text), six.string_types):
if six.PY3:
if isinstance(text, bytes):
return six.text_type(text, encoding, 'replace')
else:
return six.text_type(text)
elif hasattr(text, '__unicode__'):
return six.text_type(text)
else:
return six.text_type(bytes(text), encoding, 'replace')
else:
return text.decode(encoding, 'replace')
def get_times_from_duration(when, duration=0, allday=False):
try:
start = get_time_from_str(when)
except Exception:
raise ValueError('Date and time is invalid: %s\n' % (when))
if allday:
try:
stop = start + timedelta(days=float(duration))
except Exception:
raise ValueError(
'Duration time (days) is invalid: %s\n' % (duration))
start = start.date().isoformat()
stop = stop.date().isoformat()
else:
try:
stop = start + get_timedelta_from_str(duration)
except Exception:
raise ValueError(
'Duration time is invalid: %s\n' % (duration))
start = start.isoformat()
stop = stop.isoformat()
return start, stop
def get_time_from_str(when):
"""Convert a string to a time: first uses the dateutil parser, falls back
on fuzzy matching with parsedatetime
"""
zero_oclock_today = datetime.now(tzlocal()).replace(
hour=0, minute=0, second=0, microsecond=0)
try:
event_time = dateutil_parse(when, default=zero_oclock_today)
except ValueError:
struct, result = fuzzy_date_parse(when)
if not result:
raise ValueError('Date and time is invalid: %s' % (when))
event_time = datetime.fromtimestamp(time.mktime(struct), tzlocal())
return event_time
def get_timedelta_from_str(delta):
"""
    Parse a time string into a timedelta object.
Formats:
- number -> duration in minutes
- "1:10" -> hour and minutes
- "1d 1h 1m" -> days, hours, minutes
Based on https://stackoverflow.com/a/51916936/12880
"""
parsed_delta = None
try:
parsed_delta = timedelta(minutes=float(delta))
except ValueError:
pass
if parsed_delta is None:
parts = DURATION_REGEX.match(delta)
if parts is not None:
try:
time_params = {name: float(param)
for name, param
in parts.groupdict().items() if param}
parsed_delta = timedelta(**time_params)
except ValueError:
pass
if parsed_delta is None:
dt, result = fuzzy_datetime_parse(delta, sourceTime=datetime.min)
if result:
parsed_delta = dt - datetime.min
if parsed_delta is None:
raise ValueError('Duration is invalid: %s' % (delta))
return parsed_delta
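# Hedged examples of get_timedelta_from_str(); the duration strings are illustrative only.
def _example_parse_durations():
    assert get_timedelta_from_str('90') == timedelta(minutes=90)              # bare number of minutes
    assert get_timedelta_from_str('1d 2h') == timedelta(days=1, hours=2)      # DURATION_REGEX path
    assert get_timedelta_from_str('1h30m') == timedelta(hours=1, minutes=30)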
def days_since_epoch(dt):
__DAYS_IN_SECONDS__ = 24 * 60 * 60
return calendar.timegm(dt.timetuple()) / __DAYS_IN_SECONDS__
def agenda_time_fmt(dt, military):
hour_min_fmt = '%H:%M' if military else '%I:%M'
ampm = '' if military else dt.strftime('%p').lower()
return dt.strftime(hour_min_fmt).lstrip('0') + ampm
|
py | 7dfd9da9bb4b0ad2f7d71ff0f6590b29484107fe | """
Name: create_mask_valid.py
Desc: Create valid masks for each image.
"""
import os
import sys
import numpy as np
from PIL import Image
# Import remaining packages
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from load_settings import settings
basepath = settings.MODEL_PATH
TASK_NAME = 'mask_valid'
def main():
depthz_path = os.path.join(basepath, 'depth_zbuffer')
for depth_img_file in os.listdir(depthz_path):
depth_img = np.array(Image.open(os.path.join(depthz_path, depth_img_file)))
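        # Assumption: in this depth_zbuffer encoding the 16-bit sentinel 65535 marks
        # pixels without a valid depth reading, so the mask below is 255 for valid
        # pixels and 0 elsewhere.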
mask_valid = 255 * (1 - 1 * (depth_img==65535))
mask_valid = np.array(mask_valid, dtype=np.uint8)
mask_valid_img = Image.fromarray(mask_valid)
save_path = os.path.join(basepath, TASK_NAME, depth_img_file[:-17] + 'mask_valid.png')
mask_valid_img.save(save_path)
print(save_path)
if __name__ == "__main__":
main()
|
py | 7dfd9dccecda7db32ab57ba5a2c0e5baf1f50a6a | """Utility classes and functions for Figma API endpoints.
"""
import requests
class Files:
"""https://www.figma.com/developers/api#files-endpoints
"""
API_ENDPOINT_URL = "https://api.figma.com/v1"
def __init__(self, token, file_key):
self.token = token
self.file_key = file_key
def __str__(self):
return f"Files {{ Token: {self.token}, File: {self.file_key} }}"
def get_file(self) -> dict:
try:
response = requests.get(
f"{self.API_ENDPOINT_URL}/files/{self.file_key}",
headers={"X-FIGMA-TOKEN": self.token}
)
except ValueError:
raise RuntimeError(
"Invalid Input. Please check your input and try again.")
except requests.ConnectionError:
raise RuntimeError(
"Tkinter Designer requires internet access to work.")
else:
return response.json()
def get_image(self, item_id) -> str:
response = requests.get(
f"{self.API_ENDPOINT_URL}/images/{self.file_key}?ids={item_id}",
headers={"X-FIGMA-TOKEN": self.token}
)
return response.json()["images"][item_id]
|
py | 7dfd9e9c7ae94b4862416055abd059557360bc6d | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Trainer.
To run locally:
.. code-block:: bash
$ bazel build -c opt //lingvo:trainer
$ bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=sync --logdir=/tmp/lenet5 --run_locally=cpu
To use GPU, add `--config=cuda` to build command and set `--run_locally=gpu`.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import re
import threading
import time
import numpy as np
import six
from six.moves import zip
import tensorflow as tf
from lingvo import base_runner
from tensorflow.core.protobuf import config_pb2
from lingvo import base_trial
from lingvo import model_registry
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import cluster_factory
from lingvo.core import inference_graph_exporter
from lingvo.core import metrics
from lingvo.core import py_utils
tf.flags.DEFINE_string(
'model', '', 'Name of the model class to train. Must be one of those'
' defined in models.py.')
tf.flags.DEFINE_string(
'model_task_name', '', 'For multitask models: '
'select task to train/evaluate/decode. '
'Empty means to sample a task (training only).')
tf.flags.DEFINE_string('logdir', '', 'Log directory.')
tf.flags.DEFINE_bool(
'interactive', False,
'If True, enter interactive IPython for the controller job.')
tf.flags.DEFINE_string(
'run_locally', None,
'If True, ignores flags below and runs controller and trainer '
'in the single process.')
tf.flags.DEFINE_string('tf_master', '', 'TF runtime.')
tf.flags.DEFINE_string(
'cluster_spec', '', 'A tf.train.ClusterSpec to override the master. '
'The dict is specified as: job=host1:port1,host2:port2,'
'host3:port3@job2=host3:port4,...')
tf.flags.DEFINE_string(
'mode', 'async', 'How this trainer binary is used. '
'async: used in an async training setup; '
'sync: used in a sync training setup; '
'shell: an interactive shell for development; '
'inspect_evaler: print evaler dataset names; '
'inspect_decoder: print decoder dataset names; '
'write_inference_graph: write inference graphs to logdir.')
tf.flags.DEFINE_string('job', '', 'trainer/controller/eval, etc.')
tf.flags.DEFINE_integer('task', 0, 'Task id within the job.')
tf.flags.DEFINE_string('controller_job', '/job:controller', 'Job name.')
tf.flags.DEFINE_integer('controller_gpus', 0, 'Number of controller GPUs.')
tf.flags.DEFINE_string('worker_job', '/job:trainer', 'Job name.')
tf.flags.DEFINE_integer('worker_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('worker_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_integer('worker_tpus', 0, 'Number of tpus to use per replica.')
tf.flags.DEFINE_integer('worker_num_tpu_hosts', 0, 'Number of tpu hosts.')
tf.flags.DEFINE_integer('worker_split_size', 1,
'Number of devices for one split.')
tf.flags.DEFINE_string('ps_job', '/job:ps', 'Job name')
tf.flags.DEFINE_integer('ps_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('ps_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('input_job', '/job:input', 'Job name')
tf.flags.DEFINE_integer('input_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_string('evaler_job', '/job:evaler', 'Job name')
tf.flags.DEFINE_integer('evaler_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('evaler_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('decoder_job', '/job:decoder', 'Job name')
tf.flags.DEFINE_integer('decoder_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('decoder_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_bool(
'evaler_in_same_address_as_controller', False,
'Whether or not evaler is in the same address space as '
' controller. This flag is meant for unittest only.')
FLAGS = tf.flags.FLAGS
# useful for debugging.
def _StartShell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython # pylint: disable=g-import-not-at-top
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def _ModelAnalysis(model):
"""Returns a text showing variable sizes and their total size."""
class Analyzer(object):
def __init__(self):
self._seen_var = {}
self.total = 0
def __call__(self, v):
assert isinstance(v, tf.Variable)
# pylint: disable=protected-access
if not v.shape.is_fully_defined():
# Only Cudnn RNN params lack static shapes.
if hasattr(v, 'approx_size'):
size = v.approx_size
else:
return '%-20s %10s %s' % (v.shape, 'n/a', v._shared_name)
else:
size = v.shape.num_elements()
if v._shared_name not in self._seen_var:
self._seen_var[v._shared_name] = size
self.total += size
return '%-20s %10d %s' % (v.shape, size, v._shared_name)
analyzer = Analyzer()
output = '\n'
output += model.vars.Transform(analyzer).DebugString()
output += '\n'
output += '=' * 100
output += '\ntotal #params: %10d\n' % (analyzer.total)
return output, analyzer.total
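# Hedged illustration of the text returned by _ModelAnalysis(); the shapes, sizes and
# variable names below are made up:
#
#   (300, 1024)             307200 lenet5/fc1/w
#   (1024,)                   1024 lenet5/fc1/b
#   ====...====
#   total #params:          308224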
class Controller(base_runner.BaseRunner):
"""Controller for a training cluster."""
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
assert not self._model_task_name, 'Controller needs all tasks!'
self._save_path = os.path.join(self._train_dir, 'ckpt')
tf.gfile.MakeDirs(self._train_dir)
self._control_dir = os.path.join(self._logdir, 'control')
tf.gfile.MakeDirs(self._control_dir)
self._summary_writer = self._CreateSummaryWriter(self._control_dir)
self._time_steps = [] # A short history of (timestamp, global_step)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self._saver = self._GetSaver()
self._summary_op = tf.summary.merge_all()
self._vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self._uninitialized = tf.report_uninitialized_variables(self._vars)
self._initialize_all = tf.global_variables_initializer()
self.initialize_tables = tf.tables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
self.close_queue_ops = tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
self._ExportMetrics(params=self.params)
self._model_analysis, self._total_num_params = _ModelAnalysis(self._model)
tf.logging.info(self._model_analysis)
self._WriteToLog(self._model_analysis, self._control_dir,
'model_analysis.txt')
self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
tf.train.write_graph(self._graph.as_graph_def(), self._control_dir,
'train.pbtxt')
def Start(self):
self._RunLoop('controller', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop('controller/enqueue_op/%s' % op.name, self._LoopEnqueue, op)
def _Loop(self):
self._summary_writer.add_graph(self._graph)
with tf.container(self._container_id), self._GetSession() as sess:
gsteps = self._model.global_step
examples = self._model.total_examples
if FLAGS.interactive:
# Into interactive debugging mode.
_StartShell(locals())
return
# This initializes local tables
sess.run(self.initialize_tables)
# TODO(zhifengc): Moves these options into params.
tp = self.params.train
save_interval_seconds = tp.save_interval_seconds
summary_interval_steps = tp.summary_interval_steps
next_checkpoint_seconds = 0
next_summary_step = 1
while True:
now = time.time()
next_iteration_seconds = now + 10 # 10 seconds
# Init/restore variable if needed.
self._RestoreIfNeeded(sess)
global_step, total_examples = sess.run([gsteps, examples])
step_rate, example_rate = self._RecordStepRate(global_step,
total_examples)
if self._trial.ShouldStop() or self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
self._saver.save(sess, self._save_path, gsteps)
# Close all the queues so the enqueue threads can also finish.
for close_op in self.close_queue_ops:
sess.run(close_op)
sess.close()
return
# Checkpoint.
if now >= next_checkpoint_seconds:
tf.logging.info('Save checkpoint')
path = self._saver.save(sess, self._save_path, gsteps)
tf.logging.info('Save checkpoint done: %s', path)
next_checkpoint_seconds = now + save_interval_seconds
# Summary.
if self._summary_op is not None and global_step >= next_summary_step:
tf.logging.info('Write summary @%s', global_step)
summary_str = sess.run(self._summary_op)
if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
tf.logging.info('Skipping summary: %s', summary_str)
else:
self._summary_writer.add_summary(summary_str, global_step)
self._SummarizeValue(global_step, 'total_num_params',
self._total_num_params)
next_summary_step = global_step + summary_interval_steps
tf.logging.info('Write summary done: step %d', global_step)
self._SetStatusMessage(
'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' %
(global_step, step_rate, example_rate))
self._ExportMetrics(
global_step=global_step,
step_rate=step_rate,
example_rate=example_rate)
now = time.time()
if now < next_iteration_seconds:
time.sleep(next_iteration_seconds - now)
def _RestoreIfNeeded(self, sess):
uninitialized_var_names = list(sess.run(self._uninitialized))
if not uninitialized_var_names:
return
tf.logging.info('Uninitialized var list: %s ', uninitialized_var_names)
path = tf.train.latest_checkpoint(self._train_dir)
if path:
tf.logging.info('Load from checkpoint %s.', path)
self._saver.restore(sess, path)
tf.logging.info('Load checkpoint done.')
return
if (not any(task.params.train.init_from_checkpoint_rules
for task in self._model.tasks) and
not self._params.train.init_from_checkpoint_rules):
tf.logging.info('Initialize ALL variables: %s', uninitialized_var_names)
sess.run([self._initialize_all])
tf.logging.info('Initialize variables done.')
return
# There was a race in local run. Another thread will get unblocked once
# _initialize_all is called. OverrideVarsFromCheckpoints
# might not happen at the right time.
for task in self._model.tasks:
tp = task.params.train
if tp.init_from_checkpoint_rules:
tf.logging.info('OverrideVarsFromCheckpoints %s',
tp.init_from_checkpoint_rules)
py_utils.OverrideVarsFromCheckpoints(sess, self._vars,
tp.init_from_checkpoint_rules)
if self._params.train.init_from_checkpoint_rules:
tp = self._params.train
tf.logging.info('OverrideVarsFromCheckpoints %s',
tp.init_from_checkpoint_rules)
py_utils.OverrideVarsFromCheckpoints(sess, self._vars,
tp.init_from_checkpoint_rules)
uninitialized_var_names = list(sess.run(self._uninitialized))
if not uninitialized_var_names:
return
# uninitialized_var_names is a list of strings without ":0" suffix.
assert all(isinstance(s, str) for s in uninitialized_var_names)
# Need to retrieve vars, removing ":0" suffix from names.
uninitialized_vars = [
v for v in self._vars if v.name[:-2] in uninitialized_var_names
]
tf.logging.info('Initialize variables: %s',
[v.name for v in uninitialized_vars])
sess.run(tf.variables_initializer(uninitialized_vars))
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _RecordStepRate(self, current_steps, total_examples):
"""Computes the overall step rate and adds a summary."""
self._time_steps.append((time.time(), current_steps, total_examples))
# Keeps a relative long history to compute a smooth steps/second.
# Removes duplicate stats for step = 0 to get rid of the warm-up period.
while (self._time_steps[-1][1] - self._time_steps[0][1] > 10000 or
(len(self._time_steps) > 1 and self._time_steps[-1][1] == 0 and
self._time_steps[0][1] == 0)):
del self._time_steps[0]
(t0, s0, e0), (t1, s1, e1) = self._time_steps[0], self._time_steps[-1]
rate = 0.0
example_rate = 0.0
if t1 > t0 + 1:
elapsed_secs = t1 - t0
rate = (s1 - s0) / elapsed_secs
example_rate = (e1 - e0) / elapsed_secs
tf.logging.info('Steps/second: %f, Examples/second: %f', rate, example_rate)
self._SummarizeValue(current_steps,
'%s/sec' % self._model.global_step.op.name, rate)
self._SummarizeValue(current_steps, 'examples/sec', example_rate)
return rate, example_rate
class Trainer(base_runner.BaseRunner):
"""Trainer on non-TPU."""
def __init__(self, *args, **kwargs):
super(Trainer, self).__init__(*args, **kwargs)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self.initialize_tables = tf.tables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
self.close_queue_ops = tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
try:
self._task_probs_summary_writers = []
for task in self._model.task_schedule.tasks:
path = os.path.join(os.path.join(self._train_dir, task))
tf.gfile.MakeDirs(path)
self._task_probs_summary_writers.append(self._CreateSummaryWriter(path))
except AttributeError:
tf.logging.info('AttributeError. Expected for single task models.')
self._task_probs_summary_writers = []
# Saves the graph def.
if self.params.cluster.task > 0:
self._summary_writer = None
else:
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
tf.train.write_graph(self._graph.as_graph_def(), self._train_dir,
'train.pbtxt')
worker_id = self.params.cluster.task
self._start_up_delay_steps = (((worker_id + 1) * worker_id / 2) *
self.params.train.start_up_delay_steps)
def _SummarizeValue(self, steps, tag, value, writer):
if writer:
writer.add_summary(metrics.CreateScalarSummary(tag, value), steps)
def Start(self):
self._RunLoop('trainer', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop('trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, op)
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
return super(Trainer, self)._LoopEnqueue(op)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
global_step = None
@py_utils.Retry(retry_value=(tf.errors.FailedPreconditionError,))
def _WaitTillInit():
"""Wait until the model is ready."""
try:
global_step = sess.run(self._model.global_step)
except tf.errors.FailedPreconditionError as e:
tf.logging.info('Probably the expected race on global_step: %s', e)
raise
msg = 'step:%6d' % global_step
self._SetStatusMessage(msg)
if global_step < self._start_up_delay_steps:
msg = 'global step (%d) has not reached start up delay steps (%d)' % (
global_step, self._start_up_delay_steps)
tf.logging.info('%s', msg)
raise tf.errors.FailedPreconditionError(
node_def=None, op=None, message=msg)
return global_step
global_step = _WaitTillInit()
status_interval_steps = 100
next_status_step = 1
eval_metrics = None
while True:
if (self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics) or
self._ShouldStop(sess, global_step)):
tf.logging.info('Training finished.')
          # Close all the queues so the enqueue threads can also finish.
for close_op in self.close_queue_ops:
sess.run(close_op)
if self._early_stop:
time.sleep(300) # controller hangs if it doesn't finish first
return
# If a task is explicitly specified, only train that task.
if self._model_task_name:
model_task = self._model.GetTask(self._model_task_name)
else:
# Note: This is a slightly stale global_step value from the previous
# sess.run() call.
# For multi-task models, `self._model.task_schedule.cur_probs` will
# be updated.
model_task = self._model.SampleTask(global_step)
if self._task_probs_summary_writers:
for index, prob in enumerate(self._model.task_schedule.cur_probs):
self._SummarizeValue(global_step, 'task_probability', prob,
self._task_probs_summary_writers[index])
try:
for index, task in enumerate(self._model.tasks):
self._SummarizeValue(global_step, 'task_weight',
sess.run(task.vars.task_weight),
self._task_probs_summary_writers[index])
except AttributeError:
pass
_, global_step, eval_metrics, fetched_verbose_tensors = sess.run([
model_task.train_op,
self._model.global_step,
model_task.eval_metrics,
model_task.trainer_verbose_tensors,
])
msg = 'step:%6d' % (global_step)
for key, (val, _) in sorted(six.iteritems(eval_metrics)):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val, self._summary_writer)
model_task.ProcessFetchedTrainerVerboseTensors(global_step,
fetched_verbose_tensors)
if global_step >= next_status_step:
self._SetStatusMessage(msg)
next_status_step = global_step + status_interval_steps
else:
tf.logging.info(msg)
self._model.ModelPostUpdate(sess, global_step, eval_metrics)
class TrainerTpu(base_runner.BaseRunner):
"""Trainer on TPU."""
def __init__(self, *args, **kwargs):
super(TrainerTpu, self).__init__(*args, **kwargs)
# Multiple TPU trainer tasks not tested/implemented.
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
def ComputationShape(split_size):
"""Decides the computation shape based on the split_size."""
computation_shape = None
if split_size == 1:
computation_shape = [1, 1, 1]
elif split_size == 2:
computation_shape = [1, 1, 2]
elif split_size == 4:
computation_shape = [1, 2, 2]
elif split_size == 8:
computation_shape = [2, 2, 2]
elif split_size == 16:
computation_shape = [4, 2, 2]
else:
assert False, ('Model parallelism with %d devices is currently not'
' supported.' % split_size)
assert computation_shape is not None
return computation_shape
self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
self.params.train.max_steps)
tf.logging.info(
'Creating TrainerTpu using data parallelism %s '
'and %s steps_per_loop', data_parallelism, self._steps_per_loop)
@py_utils.RetryOnTransientTfError()
def _WaitTillInit():
"""Wait until the model is ready."""
try:
with self._GetSession() as sess:
topology = sess.run(
tf.contrib.tpu.initialize_system(embedding_config=None, job=None))
device_assignment = tf.contrib.tpu.device_assignment(
topology,
computation_shape=ComputationShape(num_devices_per_split),
num_replicas=data_parallelism)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
_WaitTillInit()
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.job_spec.name):
self._eval_metrics = metrics.TpuEvalMetrics()
def TpuTrainStep(*args):
self._model = self.params.cls(self.params)
self._model.ConstructFPropBPropGraph()
per_step_eval_metrics = self._eval_metrics.SetMetrics(
self._model.GetTask().eval_metrics, args)
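          # `args` carries the running metric totals from the previous
          # iteration of tf.contrib.tpu.repeat; add this step's metrics so
          # FinalizeMetrics can average over steps_per_loop at the end.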
summed_metrics = []
assert len(per_step_eval_metrics) == len(args)
for x, y in zip(per_step_eval_metrics, args):
summed_metrics.append(x + y)
return summed_metrics + [self._model.GetTask().train_op]
def TpuTrain():
loop_result = tf.contrib.tpu.repeat(
self._steps_per_loop,
TpuTrainStep,
inputs=self._eval_metrics.initial_values,
name='train_loop')
# Final metrics are the avg across self._steps_per_loop steps.
return self._eval_metrics.FinalizeMetrics(loop_result)
batch_parallel_res = tf.contrib.tpu.batch_parallel(
TpuTrain,
num_shards=data_parallelism,
device_assignment=py_utils.GetTpuDeviceAssignment())
# Get metric result from a single replica; they are all same here.
self._tpu_train_ops = [t[0] for t in batch_parallel_res]
self.initialize_tables = tf.tables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
# Saves the graph def.
tf.train.write_graph(self._graph.as_graph_def(), self._train_dir,
'train.pbtxt')
def Start(self):
# Run training.
self._RunLoop('trainer', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop('trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, op)
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
return super(TrainerTpu, self)._LoopEnqueue(op)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
with tf.container(self._container_id), self._GetSession() as sess:
sess.run(self.initialize_tables)
sess.run(
tf.contrib.tpu.initialize_system(embedding_config=None, job=None))
if FLAGS.run_locally == 'tpu':
sess.run(tf.global_variables_initializer())
global_step, = sess.run([self._model.global_step])
eval_metrics = None
while True:
if (self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics) or
self._ShouldStop(sess, global_step)):
tf.logging.info('Training finished.')
return
values = sess.run(self._tpu_train_ops)
eval_metrics = self._eval_metrics.PackMetricsValues(values)
# Note: global_step is incremented by self._steps_per_loop by the
# previous sess.run call.
global_step, = sess.run([self._model.global_step])
msg = 'step:%6d' % (global_step)
for key, (val, _) in sorted(six.iteritems(eval_metrics)):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val)
self._SetStatusMessage(msg)
class Evaler(base_runner.BaseRunner):
"""Evaler."""
def __init__(self, eval_type, *args, **kwargs):
super(Evaler, self).__init__(*args, **kwargs)
self._eval_type = 'eval_' + eval_type
self.params.is_eval = True
self._eval_dir = os.path.join(self._logdir, self._eval_type)
if self._model_task_name:
self._eval_dir += '_' + str(self._model_task_name)
tf.gfile.MakeDirs(self._eval_dir)
self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
# Always create the same graph to make sure node names are always
# exactly the same.
self._model.ConstructFPropGraph()
self._model_task = self._model.GetTask(self._model_task_name)
self._saver = self._GetSaver()
self.initialize_tables = tf.tables_initializer()
# No queues are allowed for eval models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not self.enqueue_ops
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.train.write_graph(self._graph.as_graph_def(), self._eval_dir,
'%s.pbtxt' % self._eval_type)
def Start(self):
self._RunLoop(self._eval_type, self._Loop)
def _Loop(self):
"""The main loop."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
path = None
while True:
path = self._FindNewCheckpoint(path, sess)
if not path or self._EvalOnce(path, sess):
break
self.EvalLatestCheckpoint(path)
self._trial.ReportDone()
tf.logging.info('Evaluation finished.')
def EvalLatestCheckpoint(self, last_path=None):
"""Runs eval once on the latest checkpoint."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
path = tf.train.latest_checkpoint(self._train_dir)
if not path:
tf.logging.info('No checkpoint available.')
return
elif path == last_path:
tf.logging.info('Latest checkpoint was already evaluated.')
return
self._EvalOnce(path, sess)
def _EvalOnce(self, path, sess):
"""Runs evaluation for a batch of samples.
Args:
path: checkpoint path.
sess: the tf Session.
Returns:
should_stop.
"""
if not FLAGS.evaler_in_same_address_as_controller:
self._LoadCheckpointForEval(sess, path)
global_step = sess.run(self._model.global_step)
metrics_dict = {
name: metrics.AverageMetric() for name in self._model_task.eval_metrics
}
num_samples_metric = metrics_dict['num_samples_in_batch']
while (num_samples_metric.total_value <
self._model_task.params.eval.samples_per_summary):
# NOTE: We intentionally do not let FProp generate summaries by default,
# because evaler calls FProp multiple times for each checkpoint. Multiple
# summaries at the same step is often confusing. Instead, models should
# update eval_metrics and generate aggregate summaries.
ans = sess.run(self._model_task.eval_metrics)
for name, (value, weight) in six.iteritems(ans):
metrics_dict[name].Update(value, weight)
tf.logging.info('Total examples done: %d/%d',
num_samples_metric.total_value,
self._model_task.params.eval.samples_per_summary)
# Replace average values with total values for certain metrics.
if 'num_predictions' in metrics_dict:
metrics_dict['num_predictions'].total_weight = 1.0
if 'num_words' in metrics_dict:
metrics_dict['num_words'].total_weight = 1.0
    # Once enough samples have been evaluated, generate a summary.
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._eval_dir),
global_step, {k: v.Summary(k) for k, v in six.iteritems(metrics_dict)},
text_filename=os.path.join(self._eval_dir,
'score-{:08d}.txt'.format(global_step)))
is_final = global_step >= self.params.train.max_steps
should_stop = self._trial.ReportEvalMeasure(global_step, metrics_dict, path)
return should_stop or is_final
def GetDecoderDir(logdir, decoder_type, model_task_name):
if model_task_name:
decoder_dir = '%s_%s' % (decoder_type, model_task_name)
else:
decoder_dir = decoder_type
return os.path.join(logdir, decoder_dir)
def _GetCheckpointIdForDecodeOut(checkpoint_path, global_step):
"""Retrieve the checkpoint id for the decoder out file.
Finds the checkpoint id in the checkpoint file name and compares to global
step. If they diverge, uses the retrieved id and prints a warning.
Args:
checkpoint_path: path to checkpoint file.
global_step: int specifying the global step of the model.
Returns:
Checkpoint id as int.
"""
ckpt_id_from_file = int(re.sub(r'.*ckpt-', '', checkpoint_path))
tf.logging.info('Loaded checkpoint is at global step: %d', global_step)
tf.logging.info('Checkpoint path: %s', checkpoint_path)
tf.logging.info('Checkpoint id according to checkpoint path: %d',
ckpt_id_from_file)
if global_step != ckpt_id_from_file:
tf.logging.warning(
'Checkpoint id %d != global step %d. '
'Will use checkpoint id from checkpoint file for '
'writing decoder output.', ckpt_id_from_file, global_step)
return ckpt_id_from_file
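# Illustrative example (not part of the original code): for a path such as
# '/logdir/train/ckpt-00012345' the regex strips everything up to 'ckpt-' and
# the function returns 12345; if the in-graph global_step has since advanced
# (e.g. to 12400), the warning above fires and 12345 is still used, so the
# decoder_out filename stays consistent with the checkpoint filename.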
class Decoder(base_runner.BaseRunner):
"""Decoder."""
def __init__(self, decoder_type, *args, **kwargs):
super(Decoder, self).__init__(*args, **kwargs)
self._decoder_type = 'decoder_' + decoder_type
self.params.is_eval = True
self._decoder_dir = GetDecoderDir(self._logdir, self._decoder_type,
self._model_task_name)
tf.gfile.MakeDirs(self._decoder_dir)
self._summary_writer = self._CreateSummaryWriter(self._decoder_dir)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
self._model_task = self._model.GetTask(self._model_task_name)
# Note, different graphs are being constructed for different model
# tasks, which may result in different node names being chosen.
        # Obviously, variable names have to stay the same between train and
# decode.
input_batch = (
self._model_task.input_generator.GetPreprocessedInputBatch())
self._dec_output = self._model_task.Decode(input_batch)
self._saver = self._GetSaver()
self._summary_op = tf.summary.merge_all()
self.initialize_tables = tf.tables_initializer()
# No queues are allowed for decoder models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not self.enqueue_ops
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._decoder_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.train.write_graph(self._graph.as_graph_def(), self._decoder_dir,
'%s.pbtxt' % self._decoder_type)
def Start(self):
self._RunLoop(self._decoder_type, self._Loop)
def _Loop(self):
with tf.container(
self._container_id), self._GetSession(inline=False) as sess:
# This initializes local tables
sess.run(self.initialize_tables)
path = None
while True:
path = self._FindNewCheckpoint(path, sess)
if not path or self.DecodeCheckpoint(sess, path):
break
tf.logging.info('Decoding finished.')
@classmethod
def GetDecodeOutPath(cls, decoder_dir, checkpoint_id):
"""Gets the path to decode out file."""
out_dir = cls._GetTtlDir(decoder_dir, duration='7d')
return os.path.join(out_dir, 'decoder_out_%09d' % checkpoint_id)
def DecodeCheckpoint(self, sess, checkpoint_path):
"""Decodes `samples_per_summary` examples using `checkpoint_path`."""
p = self._model_task.params
samples_per_summary = p.eval.decoder_samples_per_summary
if not samples_per_summary:
samples_per_summary = p.eval.samples_per_summary
self._LoadCheckpointForEval(sess, checkpoint_path)
global_step = sess.run(self._model.global_step)
dec_metrics = self._model_task.CreateDecoderMetrics()
buffered_decode_out = []
num_examples_metric = dec_metrics['num_samples_in_batch']
start_time = time.time()
while num_examples_metric.total_value < samples_per_summary:
tf.logging.info('Fetching dec_output.')
run_options = config_pb2.RunOptions(
report_tensor_allocations_upon_oom=False)
if self._summary_op is None:
# No summaries were collected.
dec_out = sess.run(self._dec_output, options=run_options)
else:
dec_out, summary = sess.run([self._dec_output, self._summary_op],
options=run_options)
self._summary_writer.add_summary(summary, global_step)
tf.logging.info('Done fetching.')
decode_out = self._model_task.PostProcessDecodeOut(dec_out, dec_metrics)
if decode_out:
buffered_decode_out.extend(decode_out)
tf.logging.info('Total examples done: %d/%d',
num_examples_metric.total_value, samples_per_summary)
summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}
elapsed_secs = time.time() - start_time
example_rate = num_examples_metric.total_value / elapsed_secs
summaries['examples/sec'] = metrics.CreateScalarSummary(
'examples/sec', example_rate)
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._decoder_dir),
global_step,
summaries,
text_filename=os.path.join(self._decoder_dir,
'score-{:08d}.txt'.format(global_step)))
self._ExportMetrics(
decode_checkpoint=global_step,
dec_metrics=dec_metrics,
example_rate=example_rate)
if buffered_decode_out:
# global_step and the checkpoint id from the checkpoint file might be
# different. For consistency of checkpoint filename and decoder_out
# file, use the checkpoint id as derived from the checkpoint filename.
checkpoint_id = _GetCheckpointIdForDecodeOut(checkpoint_path, global_step)
decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)
self._WriteKeyValuePairs(decode_out_path, buffered_decode_out)
return global_step >= self.params.train.max_steps
class RunnerManager(object):
"""Helper class for managing runners."""
  # This is a hack so these classes can be overridden with internal
# non-public implementations.
inference_graph_exporter = inference_graph_exporter
model_registry = model_registry
Controller = Controller
Trainer = Trainer
TrainerTpu = TrainerTpu
Evaler = Evaler
Decoder = Decoder
def __init__(self, model):
self._model_name = model
def MaybeLaunchTensorFlow(self):
"""Starts TF machinary in this process."""
if FLAGS.run_locally:
return
tf.logging.info('Launching tensorflow.')
target = FLAGS.tf_master
if not target.startswith('localhost'):
# E.g., trainer_client is configured w/ FLAGS.tf_master pointing to
# another job. In that case, start a local server.
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = {}
for job_spec in job_specs:
# ps_host=worker1:1231,worker2:1234
job_machines = job_spec.split('=')
if len(job_machines) != 2:
          raise ValueError('Invalid job specification: %s' % job_spec)
cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
self._tf_server = tf.train.Server(
tf.train.ClusterSpec(cluster_spec_dict),
job_name=FLAGS.job,
task_index=FLAGS.task)
target = self._tf_server.target
if not FLAGS.tf_master:
FLAGS.tf_master = target
with tf.Session(target).as_default():
value = (tf.constant(1.) + tf.constant(1.)).eval()
assert value == 2.0, 'Something is really wrong.'
tf.logging.info('Launched tensorflow.')
def GetParamsForDataset(self, job_name, dataset_name):
"""Returns params for job `job_name` on the dataset `dataset_name`."""
try:
cfg = self.model_registry.GetParams(self._model_name, dataset_name)
except AttributeError as e:
dataset_name_retry = dataset_name.title()
tf.logging.warning('Exception configuring dataset %s, retrying as %s: %s',
dataset_name, dataset_name_retry, e)
cfg = self.model_registry.GetParams(self._model_name, dataset_name_retry)
self.UpdateClusterParamsFromFlags(cfg, job_name)
return cfg
def MaybeConfigRunDistributed(self):
"""If given a `FLAGS.cluster_spec`, update flags for running distributed."""
if not FLAGS.cluster_spec:
return
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = {}
for job_spec in job_specs:
# ps_host=worker1:1231,worker2:1234
job_machines = job_spec.split('=')
if len(job_machines) != 2:
        raise ValueError('Invalid job specification: %s' % job_spec)
cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
if FLAGS.job == 'trainer_client':
FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]
for job in cluster_spec_dict.keys():
if job.startswith('decoder_'):
assert len(job_specs) == 1, 'Decoder jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.decoder_job = '/job:%s' % job
FLAGS.decoder_replicas = 1
if job.startswith('evaler_'):
assert len(job_specs) == 1, 'Evaler jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.evaler_job = '/job:%s' % job
FLAGS.evaler_replicas = 1
if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',
'worker'):
FLAGS.worker_job = '/job:worker'
FLAGS.worker_replicas = len(cluster_spec_dict['worker'])
FLAGS.ps_job = '/job:worker'
FLAGS.ps_replicas = FLAGS.worker_replicas
if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):
FLAGS.worker_job = '/job:trainer'
FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])
FLAGS.ps_job = '/job:ps'
FLAGS.ps_replicas = len(cluster_spec_dict['ps'])
def UpdateClusterParamsFromFlags(self, cfg, job_name):
"""Update `cfg` with a training cluster configuration from flags."""
cfg.cluster.mode = FLAGS.mode
cfg.cluster.job = job_name
cfg.cluster.task = FLAGS.task
cfg.cluster.controller.name = FLAGS.controller_job
cfg.cluster.controller.gpus_per_replica = FLAGS.controller_gpus
cfg.cluster.worker.name = FLAGS.worker_job
cfg.cluster.worker.replicas = FLAGS.worker_replicas
cfg.cluster.worker.gpus_per_replica = FLAGS.worker_gpus
cfg.cluster.worker.tpus_per_replica = FLAGS.worker_tpus
cfg.cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts
cfg.cluster.worker.devices_per_split = FLAGS.worker_split_size
cfg.cluster.ps.name = FLAGS.ps_job
cfg.cluster.ps.replicas = FLAGS.ps_replicas
cfg.cluster.ps.gpus_per_replica = FLAGS.ps_gpus
cfg.cluster.input.name = FLAGS.input_job
cfg.cluster.input.replicas = FLAGS.input_replicas
cfg.cluster.evaler.name = FLAGS.evaler_job
cfg.cluster.evaler.replicas = FLAGS.evaler_replicas
cfg.cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus
cfg.cluster.decoder.name = FLAGS.decoder_job
cfg.cluster.decoder.replicas = FLAGS.decoder_replicas
cfg.cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus
def _CreateRunner(self, job, model_task_name, logdir, tf_master, trial):
"""Create a runner."""
evaler_job_name_prefix = 'evaler_'
decoder_job_name_prefix = 'decoder_'
tf.logging.info('Job %s start', job)
common_args = (model_task_name, logdir, tf_master, trial)
if job == 'controller':
cfg = self.GetParamsForDataset('controller', 'Train')
return self.Controller(cfg, *common_args)
elif job == 'trainer':
cfg = self.GetParamsForDataset('trainer', 'Train')
return self.Trainer(cfg, *common_args)
elif job == 'trainer_client':
cfg = self.GetParamsForDataset('trainer_client', 'Train')
if py_utils.use_tpu():
return self.TrainerTpu(cfg, *common_args)
else:
return self.Trainer(cfg, *common_args)
elif job.startswith(evaler_job_name_prefix):
dataset_name = job[len(evaler_job_name_prefix):]
cfg = self.GetParamsForDataset('evaler', dataset_name)
return self.Evaler(dataset_name.lower(), cfg, *common_args)
elif job.startswith(decoder_job_name_prefix):
dataset_name = job[len(decoder_job_name_prefix):]
cfg = self.GetParamsForDataset('decoder', dataset_name)
return self.Decoder(dataset_name.lower(), cfg, *common_args)
elif job in ('ps', 'worker', 'input'):
self._tf_server.join()
else:
raise ValueError('job %s is not supported' % job)
def CreateRunners(self, jobs, logdir, trial=base_trial.NoOpTrial()):
"""Creates a list of runners based on `FLAGS.mode`.
Args:
jobs: a list of runner jobs.
logdir: the directory used for logging, usually on CNS.
trial: optional `Trial` object, used for reporting measures and early
stopping.
Returns:
A list of `.BaseRunner`, one per job in `jobs`.
"""
runners = []
for j in jobs:
tf_master = FLAGS.tf_master
# Ensure that decoder or evaler threads do not clobber variables being
# updated by trainer by forcing them to use independent sessions.
if ('trainer' in jobs and
(j.startswith('decoder') or j.startswith('evaler'))):
tf_master = ''
runner = self._CreateRunner(j, FLAGS.model_task_name, logdir, tf_master,
trial)
runners.append(runner)
return runners
def StartRunners(self, runners):
"""Runs `runners` in parallel threads.
Returns when all of them finish.
Args:
runners: a list of `.BaseRunner`.
Returns:
None.
"""
threads = []
tf.logging.info('Starting runners')
for runner in runners:
t = threading.Thread(target=runner.Start)
t.daemon = True
t.start()
threads.append(t)
tf.logging.info('Total num runner.enqueue_ops: %d',
len(runner.enqueue_ops))
for enqueue_op in runner.enqueue_ops:
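        # Bind `runner` and `op` through a factory function so each thread
        # captures its own op (avoids the late-binding closure pitfall).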
def StartEnqueue(runner, op):
tf.logging.info('Starting enqueue op %s', op.name)
return lambda: runner.StartEnqueueOp(op)
tq = threading.Thread(target=StartEnqueue(runner, enqueue_op))
tq.start()
threads.append(tq)
tf.logging.info('Waiting for runners to finish...')
for t in threads:
while True:
t.join(1)
        if not t.is_alive():
break
tf.logging.info('All runners done.')
def RunTrial(self, job, logdir, trial):
"""A wrapper function for running a trial."""
if job == 'all':
# For async mode: Run controller, trainer, evaler jobs in one process,
# multiple threads.
self.StartRunners(
self.CreateRunners(['controller', 'trainer'], logdir, trial))
evaler = self._CreateRunner('evaler_dev', FLAGS.model_task_name, logdir,
FLAGS.tf_master, trial)
evaler.EvalLatestCheckpoint()
elif job == 'all_sync':
# For sync mode: Run controller, trainer_client, evaler jobs in one
# process, multiple threads.
self.StartRunners(
self.CreateRunners(['controller', 'trainer_client'], logdir, trial))
evaler = self._CreateRunner('evaler_dev', FLAGS.model_task_name, logdir,
FLAGS.tf_master, trial)
evaler.EvalLatestCheckpoint()
else:
# Run each job in separate process/task
# TODO(rpang): add support for running evaler_test and decoder.
self.StartRunners(self.CreateRunners([job], logdir, trial))
def MaybeConfigRunLocally(self):
"""Update flags if configured to run locally."""
if not FLAGS.run_locally:
# Do nothing
return
FLAGS.tf_master = tf.train.Server.create_local_server().target
if not FLAGS.mode:
FLAGS.mode = 'sync'
if not FLAGS.job:
if FLAGS.run_locally == 'tpu':
FLAGS.job = 'trainer_client'
else:
FLAGS.job = 'controller,trainer_client'
FLAGS.task = 0
FLAGS.controller_job = '/job:local'
FLAGS.worker_job = '/job:local'
FLAGS.worker_replicas = 1
if FLAGS.run_locally == 'gpu':
if not FLAGS.worker_gpus:
FLAGS.worker_gpus = 1
else:
FLAGS.worker_gpus = 0
if FLAGS.run_locally == 'tpu':
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
else:
FLAGS.worker_tpus = 0
if not FLAGS.worker_split_size:
FLAGS.worker_split_size = 1
FLAGS.ps_job = '/job:local'
FLAGS.ps_replicas = 1
FLAGS.ps_gpus = 0
FLAGS.input_job = '/job:local'
FLAGS.input_replicas = 0
FLAGS.evaler_job = '/job:local'
FLAGS.evaler_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.evaler_gpus = 1
else:
FLAGS.evaler_gpus = 0
FLAGS.decoder_job = '/job:local'
FLAGS.decoder_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.decoder_gpus = 1
else:
FLAGS.decoder_gpus = 0
def InspectModel(self):
"""Prints out model analysis for the model."""
p = self.GetParamsForDataset('controller', 'Train')
p.cluster.mode = 'sync'
c = cluster_factory.Cluster(p.cluster)
with tf.Graph().as_default(), c, tf.device(c.GetPlacer()):
analysis, _ = _ModelAnalysis(p.cls(p))
print(analysis)
def InspectDatasets(self):
"""Prints out datasets configured for the model."""
cls = self.model_registry.GetClass(self._model_name)
datasets = []
for name, _ in inspect.getmembers(cls, inspect.ismethod):
if name not in ['GetDatasetParams', 'Model', 'Task'
] and not name.startswith('_'):
datasets += [name]
print(','.join([_.lower() for _ in datasets]))
def InspectDecoder(self):
"""Prints out datasets configured for the decoder."""
cls = self.model_registry.GetClass(self._model_name)
has_decoder = False
if issubclass(cls, base_model_params.SingleTaskModelParams):
has_decoder = cls.Task(
).cls.CreateDecoderMetrics != base_model.BaseTask.CreateDecoderMetrics
else:
for _, task_param in cls.Model().task_params.IterParams():
has_decoder |= (
task_param.cls.CreateDecoderMetrics !=
base_model.BaseTask.CreateDecoderMetrics)
if has_decoder:
# We assume that the proper decoder is implemented.
self.InspectDatasets()
else:
print('')
def WriteInferenceGraph(self):
"""Generates the inference graphs for a given model."""
inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')
tf.gfile.MakeDirs(inference_graph_dir)
tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)
cfg = self.model_registry.GetParams(self._model_name, 'Test')
if (issubclass(cfg.cls, base_model.MultiTaskModel) and
not FLAGS.model_task_name):
tf.logging.info('Cannot write inference graphs for multi-task model '
'when model_task_name is not specified.')
return
try:
filename_prefix = 'inference'
if FLAGS.model_task_name:
filename_prefix = '%s_inference' % FLAGS.model_task_name
filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
# Standard inference graph.
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=FLAGS.model_task_name,
export_path=filename_prefix + '.pbtxt')
# TPU inference graph.
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=FLAGS.model_task_name,
device_options=self.inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=False,
var_options='ON_DEVICE',
gen_init_op=True,
dtype_override=None),
export_path=filename_prefix + '_tpu.pbtxt')
except NotImplementedError as e:
tf.logging.error('Cannot write inference graph: %s', e)
def Start(self):
"""Start the process."""
tf.logging.set_verbosity(tf.logging.INFO)
assert self.model_registry.GetClass(
self._model_name), ('Model %s is not found.' % FLAGS.model)
if FLAGS.mode == 'inspect_model':
self.InspectModel()
return
if FLAGS.mode == 'inspect_evaler':
self.InspectDatasets()
return
if FLAGS.mode == 'inspect_decoder':
self.InspectDecoder()
return
if FLAGS.mode == 'write_inference_graph':
self.WriteInferenceGraph()
return
assert FLAGS.mode in ['sync', 'async']
if FLAGS.mode == 'shell':
_StartShell(locals())
return
self.MaybeConfigRunLocally()
self.MaybeConfigRunDistributed()
self.MaybeLaunchTensorFlow()
self.StartRunners(self.CreateRunners(FLAGS.job.split(','), FLAGS.logdir))
def main(unused_argv):
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-variable
from lingvo import model_imports
RunnerManager(FLAGS.model).Start()
if __name__ == '__main__':
tf.app.run(main)
|
py | 7dfd9f57afc892bf3dbeffc25f7d3e5f442aa925 | # -*- coding: utf-8 -*-
"""
Unlike configuration.py, this file is meant for static, project-wide settings,
such as memoization and caching file directories.
"""
__title__ = 'nscrapy'
__author__ = 'Munyakabera Jean Claude'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016, Munyakabera Jean Claude'
import logging
import os
from cookielib import CookieJar as cj
PARENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
POPULAR_URLS = os.path.join(
PARENT_DIRECTORY, 'resources/misc/popular_sources.txt')
USERAGENTS = os.path.join(PARENT_DIRECTORY, 'resources/misc/useragents.txt')
STOPWORDS_DIR = os.path.join(PARENT_DIRECTORY, 'resources/text')
# NLP stopwords are != regular stopwords for now...
NLP_STOPWORDS_EN = os.path.join(
PARENT_DIRECTORY, 'resources/misc/stopwords-nlp-en.txt')
|
py | 7dfd9f70d74185bfb0c5d70ee6ccd6ce89e7a711 | def entrelin(var):
print('-' * 20)
print(f'{str(var):^20}')
print('-' * 20)
entrelin('NEW PROGRAM')
def soma(a, b):
s = a + b
    print(f'The sum of the numbers {a} and {b} is {s}')
soma(1, 2)
soma(10, 213)
soma(a=4, b=5)
soma(b=9, a=7)
# soma(a=9, 3) -> SyntaxError (positional argument follows keyword argument)
def contador(* num):
    print(f'''The set: [{', '.join(str(x) for x in num)}] has {len(num)} elements''')
contador(1, 2, 3)
contador('r', 'y', 2, 1, 'ggg', '')
def dobro(lst):
for c1 in range(0, len(lst)):
lst[c1] *= 2
valores = [1, 2]
dobro(valores)
print(f'''The doubled values are {', '.join(str(x) for x in valores)}''')
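# Added illustration: lists are passed by reference, so dobro() mutates the
# caller's list in place. Passing a copy (a slice) keeps the original intact:
originals = [3, 4]
dobro(originals[:])  # the slice creates a copy; only the copy gets doubled
print(f'originals is unchanged: {originals}')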
|
py | 7dfda0463ecaf841c0b572d136bdb16fb59cd99a | """
Tests for scalar Timedelta arithmetic ops
"""
from datetime import (
datetime,
timedelta,
)
import operator
import numpy as np
import pytest
from pandas.errors import OutOfBoundsTimedelta
import pandas as pd
from pandas import (
NaT,
Timedelta,
Timestamp,
offsets,
)
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
            # In the radd case (datetime + Timedelta), datetime.__add__ handles
            # the op and returns a plain datetime, so the Timestamp type check
            # only applies to operator.add, where Timedelta.__add__ returns a
            # Timestamp; the value is checked for both cases below.
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
def test_td_add_timestamp_overflow(self):
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
Timestamp("1700-01-01") + Timedelta(13 * 19999, unit="D")
with pytest.raises(OutOfBoundsTimedelta, match=msg):
Timestamp("1700-01-01") + timedelta(days=13 * 19999)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
msg = "unsupported operand type"
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError, match=msg):
td + other
with pytest.raises(TypeError, match=msg):
other + td
with pytest.raises(TypeError, match=msg):
td - other
with pytest.raises(TypeError, match=msg):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
msg = r"unsupported operand type\(s\) for \-: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = (
r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'|"
"Concatenation operation is not implemented for NumPy arrays"
)
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types "
r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
typs = "|".join(["numpy.timedelta64", "NaTType", "Timedelta"])
msg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'",
r"ufunc '?multiply'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError, match=msg):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
assert result == td.value / (86400 * 10 ** 9)
result = td / np.timedelta64(1, "s")
assert result == td.value / 10 ** 9
result = td / np.timedelta64(1, "ns")
assert result == td.value
# floordiv
td = Timedelta("1 days 2 hours 3 ns")
result = td // np.timedelta64(1, "D")
assert result == 1
result = td // np.timedelta64(1, "s")
assert result == 93600
result = td // np.timedelta64(1, "ns")
assert result == td.value
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize(
"nan",
[
np.nan,
np.float64("NaN"),
float("nan"),
],
)
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
def test_td_rdiv_na_scalar(self):
# GH#31869 None gets cast to NaT
td = Timedelta(10, unit="d")
result = NaT / td
assert np.isnan(result)
result = None / td
assert np.isnan(result)
result = np.timedelta64("NaT") / td
assert np.isnan(result)
msg = r"unsupported operand type\(s\) for /: 'numpy.datetime64' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.datetime64("NaT") / td
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.nan / td
def test_td_rdiv_ndarray(self):
td = Timedelta(10, unit="d")
arr = np.array([td], dtype=object)
result = arr / td
expected = np.array([1], dtype=np.float64)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([None])
result = arr / td
expected = np.array([np.nan])
tm.assert_numpy_array_equal(result, expected)
arr = np.array([np.nan], dtype=object)
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
arr / td
arr = np.array([np.nan], dtype=np.float64)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
arr / td
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
msg = "|".join(
[
r"Invalid dtype datetime64\[D\] for __floordiv__",
"'dtype' is an invalid keyword argument for this function",
r"ufunc '?floor_divide'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
assert td.__rfloordiv__(dt64) is NotImplemented
msg = (
r"unsupported operand type\(s\) for //: 'numpy.datetime64' and 'Timedelta'"
)
with pytest.raises(TypeError, match=msg):
dt64 // td
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
assert td.__rfloordiv__(np.float64(2.0)) is NotImplemented
assert td.__rfloordiv__(np.uint8(9)) is NotImplemented
assert td.__rfloordiv__(np.int32(2.0)) is NotImplemented
msg = r"unsupported operand type\(s\) for //: '.*' and 'Timedelta"
with pytest.raises(TypeError, match=msg):
np.float64(2.0) // td
with pytest.raises(TypeError, match=msg):
np.uint8(9) // td
with pytest.raises(TypeError, match=msg):
# deprecated GH#19761, enforced GH#29797
np.int32(2.0) // td
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_intarray(self):
# deprecated GH#19761, enforced GH#29797
ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10 ** 9
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
ints // Timedelta(1, unit="s")
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
# Deprecated GH#19761, enforced GH#29797
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError, match=msg):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand"
with pytest.raises(TypeError, match=msg):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError, match=msg):
15 % td
with pytest.raises(TypeError, match=msg):
16.0 % td
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
msg = r"unsupported operand type\(s\) for //: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
def test_rdivmod_offset(self):
result = divmod(offsets.Hour(54), Timedelta(hours=-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_rdivmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
divmod(Timestamp("2018-01-22"), td)
with pytest.raises(TypeError, match=msg):
divmod(15, td)
with pytest.raises(TypeError, match=msg):
divmod(16.0, td)
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
divmod(np.array([22, 24]), td)
# ----------------------------------------------------------------
@pytest.mark.parametrize(
"op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub]
)
@pytest.mark.parametrize(
"arr",
[
np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]),
np.array([Timestamp.now(), Timedelta("1D")]),
],
)
def test_td_op_timedelta_timedeltalike_array(self, op, arr):
msg = "unsupported operand type|cannot use operands with types"
with pytest.raises(TypeError, match=msg):
op(arr, Timedelta("1D"))
class TestTimedeltaComparison:
def test_compare_tick(self, tick_classes):
cls = tick_classes
off = cls(4)
td = off.delta
assert isinstance(td, Timedelta)
assert td == off
assert not td != off
assert td <= off
assert td >= off
assert not td < off
assert not td > off
assert not td == 2 * off
assert td != 2 * off
assert td <= 2 * off
assert td < 2 * off
assert not td >= 2 * off
assert not td > 2 * off
def test_comparison_object_array(self):
# analogous to GH#15183
td = Timedelta("2 days")
other = Timedelta("3 hours")
arr = np.array([other, td], dtype=object)
res = arr == td
expected = np.array([False, True], dtype=bool)
assert (res == expected).all()
# 2D case
arr = np.array([[other, td], [td, other]], dtype=object)
res = arr != td
expected = np.array([[True, False], [False, True]], dtype=bool)
assert res.shape == expected.shape
assert (res == expected).all()
def test_compare_timedelta_ndarray(self):
# GH#11835
periods = [Timedelta("0 days 01:00:00"), Timedelta("0 days 01:00:00")]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_compare_td64_ndarray(self):
        # GH#33441
arr = np.arange(5).astype("timedelta64[ns]")
td = Timedelta(arr[1])
expected = np.array([False, True, False, False, False], dtype=bool)
result = td == arr
tm.assert_numpy_array_equal(result, expected)
result = arr == td
tm.assert_numpy_array_equal(result, expected)
result = td != arr
tm.assert_numpy_array_equal(result, ~expected)
result = arr != td
tm.assert_numpy_array_equal(result, ~expected)
@pytest.mark.skip(reason="GH#20829 is reverted until after 0.24.0")
def test_compare_custom_object(self):
"""
        Make sure unsupported operations on Timedelta return NotImplemented
        and yield to the other operand (GH#20829).
"""
class CustomClass:
def __init__(self, cmp_result=None):
self.cmp_result = cmp_result
def generic_result(self):
if self.cmp_result is None:
return NotImplemented
else:
return self.cmp_result
def __eq__(self, other):
return self.generic_result()
def __gt__(self, other):
return self.generic_result()
t = Timedelta("1s")
assert not (t == "string")
assert not (t == 1)
assert not (t == CustomClass())
assert not (t == CustomClass(cmp_result=False))
assert t < CustomClass(cmp_result=True)
assert not (t < CustomClass(cmp_result=False))
assert t == CustomClass(cmp_result=True)
@pytest.mark.parametrize("val", ["string", 1])
def test_compare_unknown_type(self, val):
# GH#20829
t = Timedelta("1s")
msg = "not supported between instances of 'Timedelta' and '(int|str)'"
with pytest.raises(TypeError, match=msg):
t >= val
with pytest.raises(TypeError, match=msg):
t > val
with pytest.raises(TypeError, match=msg):
t <= val
with pytest.raises(TypeError, match=msg):
t < val
def test_ops_notimplemented():
class Other:
pass
other = Other()
td = Timedelta("1 day")
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_ops_error_str():
# GH#13624
td = Timedelta("1 day")
for left, right in [(td, "a"), ("a", td)]:
msg = "|".join(
[
"unsupported operand type",
r'can only concatenate str \(not "Timedelta"\) to str',
"must be str, not Timedelta",
]
)
with pytest.raises(TypeError, match=msg):
left + right
msg = "not supported between instances of"
with pytest.raises(TypeError, match=msg):
left > right
assert not left == right
assert left != right
|
py | 7dfda1afd899491fb6c074be3bf208571591ed80 | import json
import shutil
from sepal_ui import sepalwidgets as sw
from sepal_ui.scripts import utils as su
import ipyvuetify as v
from component.message import cm
from component import parameter as cp
from component import widget as cw
from component import scripts as cs
from .gwb_tile import GwbTile
class RssTile(GwbTile):
def __init__(self, model):
# create the widgets
connectivity = v.Select(
label=cm.acc.connectivity,
items=cp.connectivity,
v_model=cp.connectivity[0]["value"],
)
# bind to the io
model.bind(connectivity, "connectivity")
super().__init__(model=model, inputs=[connectivity])
@su.loading_button()
def _on_click(self, widget, event, data):
# check inputs
if not all(
[
self.alert.check_input(self.model.connectivity, cm.acc.no_connex),
self.alert.check_input(self.model.bin_map, cm.bin.no_bin),
]
):
return
super()._on_click(widget, event, data)
return
|
py | 7dfda28feb22acb6eab2bd5696a03d7f7117f088 | """add a flag for review active
Revision ID: 4d45dd3d8ce5
Revises: 49d09b3d2801
Create Date: 2014-02-26 16:29:54.507710
"""
# revision identifiers, used by Alembic.
revision = '4d45dd3d8ce5'
down_revision = '49d09b3d2801'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'violations',
sa.Column('review_is_active', sa.Integer, nullable=False, server_default='1')
)
connection = op.get_bind()
connection.execute('''UPDATE violations
SET review_is_active = 0
WHERE review_id IN (SELECT id FROM reviews WHERE is_active = 0)''')
op.create_index(
'idx_key_domain_review_active',
'violations',
['key_id', 'domain_id', 'review_is_active'])
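# For reference, an illustrative sketch of the SQL that upgrade() amounts to
# (exact statements depend on the dialect; this is not part of the migration):
#
#   ALTER TABLE violations
#     ADD COLUMN review_is_active INTEGER NOT NULL DEFAULT 1;
#   UPDATE violations SET review_is_active = 0
#     WHERE review_id IN (SELECT id FROM reviews WHERE is_active = 0);
#   CREATE INDEX idx_key_domain_review_active
#     ON violations (key_id, domain_id, review_is_active);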
def downgrade():
op.drop_index('idx_key_domain_review_active', 'violations')
op.drop_column('violations', 'review_is_active')
|
py | 7dfda2c659c5084c431a5ad101493cdeefb9cfc4 | from ipaddress import IPv6Network, ip_network
from typing import Dict, List, Union
from diamond_miner.queries import (
CountFlowsPerPrefix,
CountLinksPerPrefix,
CountProbesPerPrefix,
CountResultsPerPrefix,
FlowsQuery,
LinksQuery,
ProbesQuery,
ResultsQuery,
)
from diamond_miner.utilities import common_parameters
Counts = Dict[IPv6Network, int]
def subsets_for(
query: Union[FlowsQuery, LinksQuery, ProbesQuery, ResultsQuery],
url: str,
measurement_id: str,
*,
max_items_per_subset: int = 8_000_000,
) -> List[IPv6Network]:
"""
>>> from diamond_miner.test import url
>>> from diamond_miner.queries import GetLinksFromView, GetLinks, GetProbes, GetResults
>>> subsets_for(GetLinksFromView(), url, 'test_nsdi_example', max_items_per_subset=1)
[IPv6Network('::ffff:c800:0/104')]
>>> subsets_for(GetLinks(), url, 'test_nsdi_example', max_items_per_subset=1)
[IPv6Network('::ffff:c800:0/104')]
>>> subsets_for(GetProbes(round_eq=1), url, 'test_nsdi_example', max_items_per_subset=1)
[IPv6Network('::ffff:c800:0/104')]
>>> subsets_for(GetResults(), url, 'test_nsdi_example', max_items_per_subset=1)
[IPv6Network('::ffff:c800:0/104')]
"""
if isinstance(query, FlowsQuery):
count_query = CountFlowsPerPrefix(**common_parameters(query, FlowsQuery))
elif isinstance(query, LinksQuery):
count_query = CountLinksPerPrefix(**common_parameters(query, LinksQuery)) # type: ignore
elif isinstance(query, ProbesQuery):
count_query = CountProbesPerPrefix(**common_parameters(query, ProbesQuery)) # type: ignore
elif isinstance(query, ResultsQuery):
count_query = CountResultsPerPrefix(**common_parameters(query, ResultsQuery)) # type: ignore
else:
raise NotImplementedError
counts = {
addr_to_network(
row["prefix"], count_query.prefix_len_v4, count_query.prefix_len_v6
): row["count"]
for row in count_query.execute_iter(url, measurement_id)
}
return split(counts, max_items_per_subset)
def split(counts: Counts, max_items_per_subset: int) -> List[IPv6Network]:
"""
Return the IP networks such that there are no more than `max_items_per_subset`
per network.
:param counts: Number of items per prefix in the database table.
:param max_items_per_subset: Maximum number of items per network.
>>> counts = {ip_network("::ffff:8.8.4.0/120"): 10, ip_network("::ffff:8.8.8.0/120"): 5}
>>> split(counts, 15)
[IPv6Network('::/0')]
>>> split(counts, 10)
[IPv6Network('::ffff:808:0/117'), IPv6Network('::ffff:808:800/117')]
>>> split(counts, 1) # Impossible case, should return the minimal feasible networks.
[IPv6Network('::ffff:808:400/120'), IPv6Network('::ffff:808:800/120')]
>>> split({}, 10)
[]
"""
candidates = [(ip_network("::/0"), n_items(counts, ip_network("::/0")))]
subsets = []
while candidates:
candidate, n_replies = candidates.pop()
if max_items_per_subset >= n_replies > 0:
subsets.append(candidate)
elif n_replies > 0:
a, b = tuple(candidate.subnets(prefixlen_diff=1))
n_items_a = n_items(counts, a)
n_items_b = n_items(counts, b)
if n_items_a + n_items_b == 0:
subsets.append(candidate)
else:
candidates.append((a, n_items_a))
candidates.append((b, n_items_b))
return sorted(subsets)
def addr_to_network(addr: str, prefix_len_v4: int, prefix_len_v6: int) -> IPv6Network:
"""
>>> addr_to_network("::ffff:8.8.8.0", 24, 64)
IPv6Network('::ffff:808:800/120')
>>> addr_to_network("2001:4860:4860:1234::", 24, 64)
IPv6Network('2001:4860:4860:1234::/64')
"""
assert ":" in addr, "`addr` must be an (IPv4-mapped) IPv6 address."
if addr.startswith("::ffff:"):
return IPv6Network(f"{addr}/{96+prefix_len_v4}")
return IPv6Network(f"{addr}/{prefix_len_v6}")
def n_items(counts: Counts, subset: IPv6Network) -> int:
"""
>>> counts = {IPv6Network("1000::/16"): 2, IPv6Network("8000::/16"): 10}
>>> n_items(counts, IPv6Network("0000::/1"))
2
>>> n_items(counts, IPv6Network("8000::/1"))
10
>>> n_items(counts, IPv6Network("::/0"))
12
"""
total = 0
for network, count_ in counts.items():
if network.subnet_of(subset):
total += count_
return total
|
py | 7dfda2f2840e8a042a202a311b0d31c730c8b3b1 | import os
import sys
import django
import weakref
from django.apps import apps
from django.db import connections
from django.db.backends.base import creation
from django.db.models import Model
from django.db.utils import ConnectionHandler, NotSupportedError
from functools import partial
from itertools import chain
try:
from unittest.mock import Mock, MagicMock, patch, PropertyMock
except ImportError:
from mock import Mock, MagicMock, patch, PropertyMock
from types import MethodType
from .constants import DjangoModelDeletionCollector, DjangoDbRouter
from .query import MockSet
# noinspection PyUnresolvedReferences
patch_object = patch.object
def monkey_patch_test_db(disabled_features=None):
""" Replace the real database connection with a mock one.
This is useful for running Django tests without the cost of setting up a
test database.
Any database queries will raise a clear error, and the test database
creation and tear down are skipped.
Tests that require the real database should be decorated with
@skipIfDBFeature('is_mocked')
:param disabled_features: a list of strings that should be marked as
*False* on the connection features list. All others will default
to True.
"""
# noinspection PyUnusedLocal
def create_mock_test_db(self, *args, **kwargs):
mock_django_connection(disabled_features)
# noinspection PyUnusedLocal
def destroy_mock_test_db(self, *args, **kwargs):
pass
creation.BaseDatabaseCreation.create_test_db = create_mock_test_db
creation.BaseDatabaseCreation.destroy_test_db = destroy_mock_test_db
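# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Once monkey_patch_test_db() has been applied, a test that genuinely needs a
# real database opts back out, exactly as the docstring above suggests. The
# test class below is a hypothetical placeholder.
#
#   from django.test import TestCase, skipIfDBFeature
#
#   @skipIfDBFeature('is_mocked')
#   class RealDatabaseTests(TestCase):
#       def test_needs_real_db(self):
#           ...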
def mock_django_setup(settings_module, disabled_features=None):
""" Must be called *AT IMPORT TIME* to pretend that Django is set up.
This is useful for running tests without using the Django test runner.
This must be called before any Django models are imported, or they will
complain. Call this from a module in the calling project at import time,
then be sure to import that module at the start of all mock test modules.
Another option is to call it from the test package's init file, so it runs
before all the test modules are imported.
:param settings_module: the module name of the Django settings file,
like 'myapp.settings'
:param disabled_features: a list of strings that should be marked as
*False* on the connection features list. All others will default
to True.
"""
if apps.ready:
# We're running in a real Django unit test, don't do anything.
return
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
django.setup()
mock_django_connection(disabled_features)
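# --- Illustrative sketch (added for exposition; not part of the original module) ---
# As the docstring above notes, mock_django_setup() must run before any Django
# models are imported; the test package's __init__.py is one conventional spot.
# The settings module name is a hypothetical placeholder, and this assumes the
# module is importable as django_mock_queries.mocks.
#
#   # tests/__init__.py
#   from django_mock_queries.mocks import mock_django_setup
#   mock_django_setup('myproject.test_settings')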
def mock_django_connection(disabled_features=None):
""" Overwrite the Django database configuration with a mocked version.
This is a helper function that does the actual monkey patching.
"""
db = connections.databases['default']
db['PASSWORD'] = '****'
db['USER'] = '**Database disabled for unit tests**'
ConnectionHandler.__getitem__ = MagicMock(name='mock_connection')
# noinspection PyUnresolvedReferences
mock_connection = ConnectionHandler.__getitem__.return_value
if disabled_features:
for feature in disabled_features:
setattr(mock_connection.features, feature, False)
mock_ops = mock_connection.ops
# noinspection PyUnusedLocal
def compiler(queryset, connection, using, **kwargs):
result = MagicMock(name='mock_connection.ops.compiler()')
# noinspection PyProtectedMember
result.execute_sql.side_effect = NotSupportedError(
"Mock database tried to execute SQL for {} model.".format(
queryset.model._meta.object_name))
result.has_results.side_effect = result.execute_sql.side_effect
return result
mock_ops.compiler.return_value.side_effect = compiler
mock_ops.integer_field_range.return_value = (-sys.maxsize - 1, sys.maxsize)
mock_ops.max_name_length.return_value = sys.maxsize
Model.refresh_from_db = Mock() # Make this into a noop.
class MockMap(object):
def __init__(self, original):
""" Wrap a mock mapping around the original one-to-many relation. """
self.map = {}
self.original = original
def __set__(self, instance, value):
""" Set a related object for an instance. """
self.map[id(instance)] = (weakref.ref(instance), value)
def __getattr__(self, name):
""" Delegate all other calls to the original. """
return getattr(self.original, name)
class MockOneToManyMap(MockMap):
def __get__(self, instance, owner):
""" Look in the map to see if there is a related set.
If not, create a new set.
"""
if instance is None:
# Call was to the class, not an object.
return self
instance_id = id(instance)
entry = self.map.get(instance_id)
old_instance = related_objects = None
if entry is not None:
old_instance_weak, related_objects = entry
old_instance = old_instance_weak()
if entry is None or old_instance is None:
related = getattr(self.original, 'related', self.original)
related_objects = MockSet(model=related.field.model)
self.__set__(instance, related_objects)
return related_objects
class MockOneToOneMap(MockMap):
def __get__(self, instance, owner):
""" Look in the map to see if there is a related object.
If not (the default) raise the expected exception.
"""
if instance is None:
# Call was to the class, not an object.
return self
entry = self.map.get(id(instance))
old_instance = related_object = None
if entry is not None:
old_instance_weak, related_object = entry
old_instance = old_instance_weak()
if entry is None or old_instance is None:
raise self.original.RelatedObjectDoesNotExist(
"Mock %s has no %s." % (
owner.__name__,
self.original.related.get_accessor_name()
)
)
return related_object
def find_all_models(models):
""" Yield all models and their parents. """
for model in models:
yield model
# noinspection PyProtectedMember
for parent in model._meta.parents.keys():
for parent_model in find_all_models((parent,)):
yield parent_model
def _patch_save(model, name):
return patch_object(
model,
'save',
new_callable=partial(Mock, name=name + '.save')
)
def _patch_objects(model, name):
return patch_object(
model, 'objects',
new_callable=partial(MockSet, mock_name=name + '.objects', model=model)
)
def _patch_relation(model, name, related_object):
relation = getattr(model, name)
if related_object.one_to_one:
new_callable = partial(MockOneToOneMap, relation)
else:
new_callable = partial(MockOneToManyMap, relation)
return patch_object(model, name, new_callable=new_callable)
# noinspection PyProtectedMember
def mocked_relations(*models):
""" Mock all related field managers to make pure unit tests possible.
The resulting patcher can be used just like one from the mock module:
As a test method decorator, a test class decorator, a context manager,
or by just calling start() and stop().
    @mocked_relations(Dataset)
def test_dataset(self):
dataset = Dataset()
check = dataset.content_checks.create() # returns a ContentCheck object
"""
patchers = []
for model in find_all_models(models):
if isinstance(model.save, Mock):
# already mocked, so skip it
continue
model_name = model._meta.object_name
patchers.append(_patch_save(model, model_name))
if hasattr(model, 'objects'):
patchers.append(_patch_objects(model, model_name))
for related_object in chain(model._meta.related_objects,
model._meta.many_to_many):
name = related_object.name
if name not in model.__dict__ and related_object.one_to_many:
name += '_set'
if name in model.__dict__:
# Only mock direct relations, not inherited ones.
if getattr(model, name, None):
patchers.append(_patch_relation(
model, name, related_object
))
return PatcherChain(patchers, pass_mocks=False)
class PatcherChain(object):
""" Chain a list of mock patchers into one.
The resulting patcher can be used just like one from the mock module:
As a test method decorator, a test class decorator, a context manager,
or by just calling start() and stop().
"""
def __init__(self, patchers, pass_mocks=True):
""" Initialize a patcher.
:param patchers: a list of patchers that should all be applied
:param pass_mocks: True if any mock objects created by the patchers
should be passed to any decorated test methods.
"""
self.patchers = patchers
self.pass_mocks = pass_mocks
def __call__(self, func):
if isinstance(func, type):
decorated = self.decorate_class(func)
else:
decorated = self.decorate_callable(func)
# keep the previous class/function name
decorated.__name__ = func.__name__
return decorated
def decorate_class(self, cls):
for attr in dir(cls):
# noinspection PyUnresolvedReferences
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(cls, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(cls, attr, self(attr_value))
return cls
def decorate_callable(self, target):
""" Called as a decorator. """
# noinspection PyUnusedLocal
def absorb_mocks(test_case, *args):
return target(test_case)
should_absorb = not (self.pass_mocks or isinstance(target, type))
result = absorb_mocks if should_absorb else target
for patcher in self.patchers:
result = patcher(result)
return result
def __enter__(self):
""" Starting a context manager.
All the patched objects are passed as a list to the with statement.
"""
return [patcher.__enter__() for patcher in self.patchers]
def __exit__(self, exc_type, exc_val, exc_tb):
""" Ending a context manager. """
for patcher in self.patchers:
patcher.__exit__(exc_type, exc_val, exc_tb)
def start(self):
return [patcher.start() for patcher in self.patchers]
def stop(self):
for patcher in reversed(self.patchers):
patcher.stop()
class Mocker(object):
"""
A decorator that patches multiple class methods with a magic mock instance that does nothing.
"""
shared_mocks = {}
shared_patchers = {}
shared_original = {}
def __init__(self, cls, *methods, **kwargs):
self.cls = cls
self.methods = methods
self.inst_mocks = {}
self.inst_patchers = {}
self.inst_original = {}
self.outer = kwargs.get('outer', True)
def __enter__(self):
self._patch_object_methods(self.cls, *self.methods)
return self
def __call__(self, func):
def decorated(*args, **kwargs):
with self:
return func(*((args[0], self) + args[1:]), **kwargs)
# keep the previous method name
decorated.__name__ = func.__name__
return decorated
def __exit__(self, exc_type, exc_val, exc_tb):
for key, patcher in self.inst_patchers.items():
patcher.stop()
if self.outer:
for key, patcher in self.shared_patchers.items():
patcher.stop()
def _key(self, method, obj=None):
return '{}.{}'.format(obj or self.cls, method)
def _method_obj(self, name, obj, *sources):
d = {}
[d.update(s) for s in sources]
return d[self._key(name, obj=obj)]
def method(self, name, obj=None):
return self._method_obj(name, obj, self.shared_mocks, self.inst_mocks)
def original_method(self, name, obj=None):
return self._method_obj(name, obj, self.shared_original, self.inst_original)
def _get_source_method(self, obj, method):
source_obj = obj
parts = method.split('.')
source_method = parts[-1]
parts = parts[:-1]
while parts:
source_obj = getattr(source_obj, parts[0], None) or getattr(source_obj.model, '_' + parts[0])
parts.pop(0)
return source_obj, source_method
def _patch_method(self, method_name, source_obj, source_method):
target_name = '_'.join(method_name.split('.'))
target_obj = getattr(self, target_name, None)
if target_obj is None:
mock_args = dict(new=MagicMock())
elif type(target_obj) == MethodType:
mock_args = dict(new=MagicMock(autospec=True, side_effect=target_obj))
else:
mock_args = dict(new=PropertyMock(return_value=target_obj))
return patch_object(source_obj, source_method, **mock_args)
def _patch_object_methods(self, obj, *methods, **kwargs):
if kwargs.get('shared', False):
original, patchers, mocks = self.shared_original, self.shared_patchers, self.shared_mocks
else:
original, patchers, mocks = self.inst_original, self.inst_patchers, self.inst_mocks
for method in methods:
key = self._key(method, obj=obj)
source_obj, source_method = self._get_source_method(obj, method)
original[key] = original.get(key, None) or getattr(source_obj, source_method)
patcher = self._patch_method(method, source_obj, source_method)
patchers[key] = patcher
mocks[key] = patcher.start()
class ModelMocker(Mocker):
"""
A decorator that patches django base model's db read/write methods and wires them to a MockSet.
"""
default_methods = ['objects', '_do_update']
if django.VERSION[0] == 3:
default_methods += ['_base_manager._insert', ]
else:
default_methods += ['_meta.base_manager._insert', ]
default_methods = tuple(default_methods)
def __init__(self, cls, *methods, **kwargs):
super(ModelMocker, self).__init__(cls, *(self.default_methods + methods), **kwargs)
self.objects = MockSet(model=self.cls)
self.objects.on('added', self._on_added)
self.state = {}
def __enter__(self):
result = super(ModelMocker, self).__enter__()
self._patch_object_methods(DjangoModelDeletionCollector, 'collect', 'delete', shared=True)
return result
def _obj_pk(self, obj):
return getattr(obj, self.cls._meta.pk.attname, None)
def _on_added(self, obj):
pk = max([self._obj_pk(x) or 0 for x in self.objects] + [0]) + 1
setattr(obj, self.cls._meta.pk.attname, pk)
def _meta_base_manager__insert(self, objects, *_, **__):
obj = objects[0]
self.objects.add(obj)
return self._obj_pk(obj)
def _base_manager__insert(self, objects, *_, **__):
obj = objects[0]
self.objects.add(obj)
return [self._obj_pk(obj)]
def _do_update(self, *args, **_):
_, _, pk_val, values, _, _ = args
objects = self.objects.filter(pk=pk_val)
if objects.exists():
attrs = {field.name: value for field, _, value in values if value is not None}
self.objects.update(**attrs)
return True
else:
return False
def collect(self, objects, *args, **kwargs):
model = getattr(objects, 'model', None) or objects[0]
if not (model is self.cls or isinstance(model, self.cls)):
using = getattr(objects, 'db', None) or DjangoDbRouter.db_for_write(model._meta.model, instance=model)
self.state['collector'] = DjangoModelDeletionCollector(using=using)
collect = self.original_method('collect', obj=DjangoModelDeletionCollector)
collect(self.state['collector'], objects, *args, **kwargs)
self.state['model'] = model
def delete(self, *args, **kwargs):
model = self.state.pop('model')
if not (model is self.cls or isinstance(model, self.cls)):
delete = self.original_method('delete', obj=DjangoModelDeletionCollector)
return delete(self.state.pop('collector'), *args, **kwargs)
else:
return self.objects.filter(pk=getattr(model, self.cls._meta.pk.attname)).delete()
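# --- Illustrative sketch (added for exposition; not part of the original module) ---
# ModelMocker routes a model's insert/update/delete paths to the in-memory
# MockSet above, so save() and the objects manager work without a database.
# The Car model and its field are hypothetical placeholders.
#
#   with ModelMocker(Car):
#       car = Car(speed=4)
#       car.save()                       # handled by the patched _insert, pk assigned
#       assert Car.objects.get(pk=car.pk) == car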
|
py | 7dfda319795ffe3380401b62c29a9ef51caf4b67 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A simple, lightweight, WSGI-compatible web framework.
'''
__author__ = 'Michael Liao'
import types, os, re, cgi, sys, time, datetime, functools, mimetypes, threading, urllib, traceback
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# thread local object for storing request and response:
ctx = threading.local()
# Dict object:
class Dict(dict):
'''
Simple dict but support access as x.y style.
>>> d1 = Dict()
>>> d1['x'] = 100
>>> d1.x
100
>>> d1.y = 200
>>> d1['y']
200
>>> d2 = Dict(a=1, b=2, c='3')
>>> d2.c
'3'
>>> d2['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> d2.empty
Traceback (most recent call last):
...
AttributeError: 'Dict' object has no attribute 'empty'
>>> d3 = Dict(('a', 'b', 'c'), (1, 2, 3))
>>> d3.a
1
>>> d3.b
2
>>> d3.c
3
'''
def __init__(self, names=(), values=(), **kw):
super(Dict, self).__init__(**kw)
for k, v in zip(names, values):
self[k] = v
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
_TIMEDELTA_ZERO = datetime.timedelta(0)
# timezone as UTC+8:00, UTC-10:00
_RE_TZ = re.compile('^([\+\-])([0-9]{1,2})\:([0-9]{1,2})$')
class UTC(datetime.tzinfo):
'''
A UTC tzinfo object.
>>> tz0 = UTC('+00:00')
>>> tz0.tzname(None)
'UTC+00:00'
>>> tz8 = UTC('+8:00')
>>> tz8.tzname(None)
'UTC+8:00'
>>> tz7 = UTC('+7:30')
>>> tz7.tzname(None)
'UTC+7:30'
>>> tz5 = UTC('-05:30')
>>> tz5.tzname(None)
'UTC-05:30'
>>> from datetime import datetime
>>> u = datetime.utcnow().replace(tzinfo=tz0)
>>> l1 = u.astimezone(tz8)
>>> l2 = u.replace(tzinfo=tz8)
>>> d1 = u - l1
>>> d2 = u - l2
>>> d1.seconds
0
>>> d2.seconds
28800
'''
def __init__(self, utc):
utc = str(utc.strip().upper())
mt = _RE_TZ.match(utc)
if mt:
minus = mt.group(1)=='-'
h = int(mt.group(2))
m = int(mt.group(3))
if minus:
h, m = (-h), (-m)
self._utcoffset = datetime.timedelta(hours=h, minutes=m)
self._tzname = 'UTC%s' % utc
else:
raise ValueError('bad utc time zone')
def utcoffset(self, dt):
return self._utcoffset
def dst(self, dt):
return _TIMEDELTA_ZERO
def tzname(self, dt):
return self._tzname
def __str__(self):
return 'UTC tzinfo object (%s)' % self._tzname
__repr__ = __str__
# all known response statues:
_RESPONSE_STATUSES = {
# Informational
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
# Successful
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used',
# Redirection
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
# Client Error
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: "I'm a teapot",
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
# Server Error
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended',
}
_RE_RESPONSE_STATUS = re.compile(r'^\d\d\d(\ [\w\ ]+)?$')
_RESPONSE_HEADERS = (
'Accept-Ranges',
'Age',
'Allow',
'Cache-Control',
'Connection',
'Content-Encoding',
'Content-Language',
'Content-Length',
'Content-Location',
'Content-MD5',
'Content-Disposition',
'Content-Range',
'Content-Type',
'Date',
'ETag',
'Expires',
'Last-Modified',
'Link',
'Location',
'P3P',
'Pragma',
'Proxy-Authenticate',
'Refresh',
'Retry-After',
'Server',
'Set-Cookie',
'Strict-Transport-Security',
'Trailer',
'Transfer-Encoding',
'Vary',
'Via',
'Warning',
'WWW-Authenticate',
'X-Frame-Options',
'X-XSS-Protection',
'X-Content-Type-Options',
'X-Forwarded-Proto',
'X-Powered-By',
'X-UA-Compatible',
)
_RESPONSE_HEADER_DICT = dict(zip(map(lambda x: x.upper(), _RESPONSE_HEADERS), _RESPONSE_HEADERS))
_HEADER_X_POWERED_BY = ('X-Powered-By', 'transwarp/1.0')
class HttpError(Exception):
'''
HttpError that defines http error code.
>>> e = HttpError(404)
>>> e.status
'404 Not Found'
'''
def __init__(self, code):
'''
Init an HttpError with response code.
'''
super(HttpError, self).__init__()
self.status = '%d %s' % (code, _RESPONSE_STATUSES[code])
def header(self, name, value):
if not hasattr(self, '_headers'):
self._headers = [_HEADER_X_POWERED_BY]
self._headers.append((name, value))
@property
def headers(self):
if hasattr(self, '_headers'):
return self._headers
return []
def __str__(self):
return self.status
__repr__ = __str__
class RedirectError(HttpError):
'''
RedirectError that defines http redirect code.
>>> e = RedirectError(302, 'http://www.apple.com/')
>>> e.status
'302 Found'
>>> e.location
'http://www.apple.com/'
'''
def __init__(self, code, location):
'''
Init an HttpError with response code.
'''
super(RedirectError, self).__init__(code)
self.location = location
def __str__(self):
return '[ %s ], %s' % (self.status, self.location)
__repr__ = __str__
def badrequest():
'''
Send a bad request response.
>>> raise badrequest()
Traceback (most recent call last):
...
HttpError: 400 Bad Request
'''
return HttpError(400)
def unauthorized():
'''
Send an unauthorized response.
>>> raise unauthorized()
Traceback (most recent call last):
...
HttpError: 401 Unauthorized
'''
return HttpError(401)
def forbidden():
'''
Send a forbidden response.
>>> raise forbidden()
Traceback (most recent call last):
...
HttpError: 403 Forbidden
'''
return HttpError(403)
def notfound():
'''
Send a not found response.
>>> raise notfound()
Traceback (most recent call last):
...
HttpError: 404 Not Found
'''
return HttpError(404)
def conflict():
'''
Send a conflict response.
>>> raise conflict()
Traceback (most recent call last):
...
HttpError: 409 Conflict
'''
return HttpError(409)
def internalerror():
'''
Send an internal error response.
>>> raise internalerror()
Traceback (most recent call last):
...
HttpError: 500 Internal Server Error
'''
return HttpError(500)
def redirect(location):
'''
Do permanent redirect.
>>> raise redirect('http://www.itranswarp.com/')
Traceback (most recent call last):
...
RedirectError: 301 Moved Permanently, http://www.itranswarp.com/
'''
return RedirectError(301, location)
def found(location):
'''
Do temporary redirect.
>>> raise found('http://www.itranswarp.com/')
Traceback (most recent call last):
...
RedirectError: 302 Found, http://www.itranswarp.com/
'''
return RedirectError(302, location)
def seeother(location):
'''
Do temporary redirect.
>>> raise seeother('http://www.itranswarp.com/')
Traceback (most recent call last):
...
RedirectError: 303 See Other, http://www.itranswarp.com/
>>> e = seeother('http://www.itranswarp.com/seeother?r=123')
>>> e.location
'http://www.itranswarp.com/seeother?r=123'
'''
return RedirectError(303, location)
def _to_str(s):
'''
Convert to str.
>>> _to_str('s123') == 's123'
True
>>> _to_str(u'\u4e2d\u6587') == '\xe4\xb8\xad\xe6\x96\x87'
True
>>> _to_str(-123) == '-123'
True
'''
if isinstance(s, str):
return s
if isinstance(s, unicode):
return s.encode('utf-8')
return str(s)
def _to_unicode(s, encoding='utf-8'):
'''
Convert to unicode.
>>> _to_unicode('\xe4\xb8\xad\xe6\x96\x87') == u'\u4e2d\u6587'
True
'''
    return s.decode(encoding)
def _quote(s, encoding='utf-8'):
'''
Url quote as str.
>>> _quote('http://example/test?a=1+')
'http%3A//example/test%3Fa%3D1%2B'
>>> _quote(u'hello world!')
'hello%20world%21'
'''
if isinstance(s, unicode):
s = s.encode(encoding)
return urllib.quote(s)
def _unquote(s, encoding='utf-8'):
'''
Url unquote as unicode.
>>> _unquote('http%3A//example/test%3Fa%3D1+')
u'http://example/test?a=1+'
'''
return urllib.unquote(s).decode(encoding)
def get(path):
'''
A @get decorator.
@get('/:id')
def index(id):
pass
>>> @get('/test/:id')
... def test():
... return 'ok'
...
>>> test.__web_route__
'/test/:id'
>>> test.__web_method__
'GET'
>>> test()
'ok'
'''
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'GET'
return func
return _decorator
def post(path):
'''
A @post decorator.
>>> @post('/post/:id')
... def testpost():
... return '200'
...
>>> testpost.__web_route__
'/post/:id'
>>> testpost.__web_method__
'POST'
>>> testpost()
'200'
'''
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'POST'
return func
return _decorator
_re_route = re.compile(r'(\:[a-zA-Z_]\w*)')
def _build_regex(path):
r'''
Convert route path to regex.
>>> _build_regex('/path/to/:file')
'^\\/path\\/to\\/(?P<file>[^\\/]+)$'
>>> _build_regex('/:user/:comments/list')
'^\\/(?P<user>[^\\/]+)\\/(?P<comments>[^\\/]+)\\/list$'
>>> _build_regex(':id-:pid/:w')
'^(?P<id>[^\\/]+)\\-(?P<pid>[^\\/]+)\\/(?P<w>[^\\/]+)$'
'''
re_list = ['^']
var_list = []
is_var = False
for v in _re_route.split(path):#?
if is_var:
var_name = v[1:]
var_list.append(var_name)
re_list.append(r'(?P<%s>[^\/]+)' % var_name)
else:
s = ''
for ch in v:
if ch>='0' and ch<='9':
s = s + ch
elif ch>='A' and ch<='Z':
s = s + ch
elif ch>='a' and ch<='z':
s = s + ch
else:
s = s + '\\' + ch
re_list.append(s)
is_var = not is_var
re_list.append('$')
return ''.join(re_list)
class Route(object):
'''
A Route object is a callable object.
'''
def __init__(self, func):
self.path = func.__web_route__
self.method = func.__web_method__
self.is_static = _re_route.search(self.path) is None
if not self.is_static:
self.route = re.compile(_build_regex(self.path))
self.func = func
def match(self, url):
m = self.route.match(url)
if m:
return m.groups()
return None
def __call__(self, *args):
return self.func(*args)
def __str__(self):
if self.is_static:
return '[ static ][ %s ] %s' % (self.method, self.path)
return '[ dynamic ][ %s ] %s' % (self.method, self.path)
__repr__ = __str__
def _static_file_generator(fpath):
BLOCK_SIZE = 8192
with open(fpath, 'rb') as f:
block = f.read(BLOCK_SIZE)
while block:
yield block
block = f.read(BLOCK_SIZE)
class StaticFileRoute(object):
def __init__(self):
self.method = 'GET'
self.is_static = False
self.route = re.compile('^/static/(.+)$')
def match(self, url):
if url.startswith('/static/'):
return (url[1:], )
return None
def __call__(self, *args):
fpath = os.path.join(ctx.application.document_root, args[0])
if not os.path.isfile(fpath):
raise notfound()
fext = os.path.splitext(fpath)[1]
ctx.response.content_type = mimetypes.types_map.get(fext.lower(), 'application/octet-stream')
return _static_file_generator(fpath)
def favicon_handler():
return static_file_handler('/favicon.ico')
class MultipartFile(object):
'''
Multipart file storage get from request input.
f = ctx.request['file']
f.filename # 'test.png'
f.file # file-like object
'''
def __init__(self, storage):
self.filename = _to_unicode(storage.filename)
self.file = storage.file
class Request(object):
'''
Request object for obtaining all http request information.
'''
def __init__(self, environ):
self._environ = environ
def _parse_input(self):
def _convert(item):
if isinstance(item, list):
return [_to_unicode(i.value) for i in item]
if item.filename:
return MultipartFile(item)
return _to_unicode(item.value)
fs = cgi.FieldStorage(fp=self._environ['wsgi.input'], environ=self._environ, keep_blank_values=True)
inputs = dict()
for key in fs:
inputs[key] = _convert(fs[key])
return inputs
def _get_raw_input(self):
'''
Get raw input as dict containing values as unicode, list or MultipartFile.
'''
if not hasattr(self, '_raw_input'):
self._raw_input = self._parse_input()
return self._raw_input
def __getitem__(self, key):
'''
        Get input parameter value. If the specified key has multiple values, the first one is returned.
        If the specified key does not exist, a KeyError is raised.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r['a']
u'1'
>>> r['c']
u'ABC'
>>> r['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> b = '----WebKitFormBoundaryQQ3J8kPsjFpTmqNz'
>>> pl = ['--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Scofield', '--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Lincoln', '--%s' % b, 'Content-Disposition: form-data; name=\\"file\\"; filename=\\"test.txt\\"', 'Content-Type: text/plain\\n', 'just a test', '--%s' % b, 'Content-Disposition: form-data; name=\\"id\\"\\n', '4008009001', '--%s--' % b, '']
>>> payload = '\\n'.join(pl)
>>> r = Request({'REQUEST_METHOD':'POST', 'CONTENT_LENGTH':str(len(payload)), 'CONTENT_TYPE':'multipart/form-data; boundary=%s' % b, 'wsgi.input':StringIO(payload)})
>>> r.get('name')
u'Scofield'
>>> r.gets('name')
[u'Scofield', u'Lincoln']
>>> f = r.get('file')
>>> f.filename
u'test.txt'
>>> f.file.read()
'just a test'
'''
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[0]
return r
def get(self, key, default=None):
'''
The same as request[key], but return default value if key is not found.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.get('a')
u'1'
>>> r.get('empty')
>>> r.get('empty', 'DEFAULT')
'DEFAULT'
'''
r = self._get_raw_input().get(key, default)
if isinstance(r, list):
return r[0]
return r
def gets(self, key):
'''
Get multiple values for specified key.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.gets('a')
[u'1']
>>> r.gets('c')
[u'ABC', u'XYZ']
>>> r.gets('empty')
Traceback (most recent call last):
...
KeyError: 'empty'
'''
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[:]
return [r]
def input(self, **kw):
'''
        Get input as dict from request, filling in the provided default values for keys that do not exist.
i = ctx.request.input(role='guest')
i.role ==> 'guest'
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> i = r.input(x=2008)
>>> i.a
u'1'
>>> i.b
u'M M'
>>> i.c
u'ABC'
>>> i.x
2008
>>> i.get('d', u'100')
u'100'
>>> i.x
2008
'''
copy = Dict(**kw)
raw = self._get_raw_input()
for k, v in raw.iteritems():
copy[k] = v[0] if isinstance(v, list) else v
return copy
def get_body(self):
'''
Get raw data from HTTP POST and return as str.
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('<xml><raw/>')})
>>> r.get_body()
'<xml><raw/>'
'''
fp = self._environ['wsgi.input']
return fp.read()
@property
def remote_addr(self):
'''
        Get remote addr. Return '0.0.0.0' if remote_addr cannot be determined.
>>> r = Request({'REMOTE_ADDR': '192.168.0.100'})
>>> r.remote_addr
'192.168.0.100'
'''
return self._environ.get('REMOTE_ADDR', '0.0.0.0')
@property
def document_root(self):
'''
Get raw document_root as str. Return '' if no document_root.
>>> r = Request({'DOCUMENT_ROOT': '/srv/path/to/doc'})
>>> r.document_root
'/srv/path/to/doc'
'''
return self._environ.get('DOCUMENT_ROOT', '')
@property
def query_string(self):
'''
Get raw query string as str. Return '' if no query string.
>>> r = Request({'QUERY_STRING': 'a=1&c=2'})
>>> r.query_string
'a=1&c=2'
>>> r = Request({})
>>> r.query_string
''
'''
return self._environ.get('QUERY_STRING', '')
@property
def environ(self):
'''
Get raw environ as dict, both key, value are str.
>>> r = Request({'REQUEST_METHOD': 'GET', 'wsgi.url_scheme':'http'})
>>> r.environ.get('REQUEST_METHOD')
'GET'
>>> r.environ.get('wsgi.url_scheme')
'http'
>>> r.environ.get('SERVER_NAME')
>>> r.environ.get('SERVER_NAME', 'unamed')
'unamed'
'''
return self._environ
@property
def request_method(self):
'''
Get request method. The valid returned values are 'GET', 'POST', 'HEAD'.
>>> r = Request({'REQUEST_METHOD': 'GET'})
>>> r.request_method
'GET'
>>> r = Request({'REQUEST_METHOD': 'POST'})
>>> r.request_method
'POST'
'''
return self._environ['REQUEST_METHOD']
@property
def path_info(self):
'''
Get request path as str.
>>> r = Request({'PATH_INFO': '/test/a%20b.html'})
>>> r.path_info
'/test/a b.html'
'''
return urllib.unquote(self._environ.get('PATH_INFO', ''))
@property
def host(self):
'''
        Get request host as str. Default to '' if the host cannot be determined.
>>> r = Request({'HTTP_HOST': 'localhost:8080'})
>>> r.host
'localhost:8080'
'''
return self._environ.get('HTTP_HOST', '')
def _get_headers(self):
if not hasattr(self, '_headers'):
hdrs = {}
for k, v in self._environ.iteritems():
if k.startswith('HTTP_'):
# convert 'HTTP_ACCEPT_ENCODING' to 'ACCEPT-ENCODING'
hdrs[k[5:].replace('_', '-').upper()] = v.decode('utf-8')
self._headers = hdrs
return self._headers
@property
def headers(self):
'''
Get all HTTP headers with key as str and value as unicode. The header names are 'XXX-XXX' uppercase.
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> H = r.headers
>>> H['ACCEPT']
u'text/html'
>>> H['USER-AGENT']
u'Mozilla/5.0'
>>> L = H.items()
>>> L.sort()
>>> L
[('ACCEPT', u'text/html'), ('USER-AGENT', u'Mozilla/5.0')]
'''
return dict(**self._get_headers())
def header(self, header, default=None):
'''
Get header from request as unicode, return None if not exist, or default if specified.
The header name is case-insensitive such as 'USER-AGENT' or u'content-Type'.
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> r.header('User-Agent')
u'Mozilla/5.0'
>>> r.header('USER-AGENT')
u'Mozilla/5.0'
>>> r.header('Accept')
u'text/html'
>>> r.header('Test')
>>> r.header('Test', u'DEFAULT')
u'DEFAULT'
'''
return self._get_headers().get(header.upper(), default)
def _get_cookies(self):
if not hasattr(self, '_cookies'):
cookies = {}
cookie_str = self._environ.get('HTTP_COOKIE')
if cookie_str:
for c in cookie_str.split(';'):
pos = c.find('=')
if pos>0:
cookies[c[:pos].strip()] = _unquote(c[pos+1:])
self._cookies = cookies
return self._cookies
@property
def cookies(self):
'''
Return all cookies as dict. The cookie name is str and values is unicode.
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookies['A']
u'123'
>>> r.cookies['url']
u'http://www.example.com/'
'''
return Dict(**self._get_cookies())
def cookie(self, name, default=None):
'''
        Return specified cookie value as unicode. Default to None if the cookie does not exist.
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookie('A')
u'123'
>>> r.cookie('url')
u'http://www.example.com/'
>>> r.cookie('test')
>>> r.cookie('test', u'DEFAULT')
u'DEFAULT'
'''
return self._get_cookies().get(name, default)
UTC_0 = UTC('+00:00')
class Response(object):
def __init__(self):
self._status = '200 OK'
self._headers = {'CONTENT-TYPE': 'text/html; charset=utf-8'}
@property
def headers(self):
'''
Return response headers as [(key1, value1), (key2, value2)...] including cookies.
>>> r = Response()
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('X-Powered-By', 'transwarp/1.0')]
>>> r.set_cookie('s1', 'ok', 3600)
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('Set-Cookie', 's1=ok; Max-Age=3600; Path=/; HttpOnly'), ('X-Powered-By', 'transwarp/1.0')]
'''
L = [(_RESPONSE_HEADER_DICT.get(k, k), v) for k, v in self._headers.iteritems()]
if hasattr(self, '_cookies'):
for v in self._cookies.itervalues():
L.append(('Set-Cookie', v))
L.append(_HEADER_X_POWERED_BY)
return L
def header(self, name):
'''
Get header by name, case-insensitive.
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.header('CONTENT-type')
'text/html; charset=utf-8'
>>> r.header('X-Powered-By')
'''
key = name.upper()
        if key not in _RESPONSE_HEADER_DICT:
key = name
return self._headers.get(key)
def unset_header(self, name):
'''
Unset header by name and value.
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.unset_header('CONTENT-type')
>>> r.header('content-type')
'''
key = name.upper()
        if key not in _RESPONSE_HEADER_DICT:
key = name
if key in self._headers:
del self._headers[key]
def set_header(self, name, value):
'''
Set header by name and value.
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.set_header('CONTENT-type', 'image/png')
>>> r.header('content-TYPE')
'image/png'
'''
key = name.upper()
        if key not in _RESPONSE_HEADER_DICT:
key = name
self._headers[key] = _to_str(value)
@property
def content_type(self):
'''
Get content type from response. This is a shortcut for header('Content-Type').
>>> r = Response()
>>> r.content_type
'text/html; charset=utf-8'
>>> r.content_type = 'application/json'
>>> r.content_type
'application/json'
'''
return self.header('CONTENT-TYPE')
@content_type.setter
def content_type(self, value):
'''
Set content type for response. This is a shortcut for set_header('Content-Type', value).
'''
if value:
self.set_header('CONTENT-TYPE', value)
else:
self.unset_header('CONTENT-TYPE')
@property
def content_length(self):
'''
Get content length. Return None if not set.
>>> r = Response()
>>> r.content_length
>>> r.content_length = 100
>>> r.content_length
'100'
'''
return self.header('CONTENT-LENGTH')
@content_length.setter
def content_length(self, value):
'''
Set content length, the value can be int or str.
>>> r = Response()
>>> r.content_length = '1024'
>>> r.content_length
'1024'
>>> r.content_length = 1024 * 8
>>> r.content_length
'8192'
'''
self.set_header('CONTENT-LENGTH', str(value))
def delete_cookie(self, name):
'''
Delete a cookie immediately.
Args:
name: the cookie name.
'''
self.set_cookie(name, '__deleted__', expires=0)
def set_cookie(self, name, value, max_age=None, expires=None, path='/', domain=None, secure=False, http_only=True):
'''
Set a cookie.
        Args:
            name: the cookie name.
            value: the cookie value.
            max_age: optional, seconds of the cookie's max age.
            expires: optional, unix timestamp, datetime or date object that indicates the
                absolute expiration time of the cookie. Note that if expires is specified,
                max_age is ignored.
            path: the cookie path, default to '/'.
            domain: the cookie domain, default to None.
            secure: whether the cookie is secure, default to False.
            http_only: whether the cookie is HTTP only, default to True for better safety
                (client-side scripts cannot access cookies that carry the HttpOnly flag).
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.set_cookie('company', r'Example="Limited"', expires=1342274794.123, path='/sub/')
>>> r._cookies
{'company': 'company=Example%3D%22Limited%22; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/sub/; HttpOnly'}
>>> dt = datetime.datetime(2012, 7, 14, 22, 6, 34, tzinfo=UTC('+8:00'))
>>> r.set_cookie('company', 'Expires', expires=dt)
>>> r._cookies
{'company': 'company=Expires; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/; HttpOnly'}
'''
if not hasattr(self, '_cookies'):
self._cookies = {}
L = ['%s=%s' % (_quote(name), _quote(value))]
if expires is not None:
if isinstance(expires, (float, int, long)):
L.append('Expires=%s' % datetime.datetime.fromtimestamp(expires, UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
if isinstance(expires, (datetime.date, datetime.datetime)):
L.append('Expires=%s' % expires.astimezone(UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
elif isinstance(max_age, (int, long)):
L.append('Max-Age=%d' % max_age)
L.append('Path=%s' % path)
if domain:
L.append('Domain=%s' % domain)
if secure:
L.append('Secure')
if http_only:
L.append('HttpOnly')
self._cookies[name] = '; '.join(L)
def unset_cookie(self, name):
'''
Unset a cookie.
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.unset_cookie('company')
>>> r._cookies
{}
'''
if hasattr(self, '_cookies'):
if name in self._cookies:
del self._cookies[name]
@property
def status_code(self):
'''
Get response status code as int.
>>> r = Response()
>>> r.status_code
200
>>> r.status = 404
>>> r.status_code
404
>>> r.status = '500 Internal Error'
>>> r.status_code
500
'''
return int(self._status[:3])
@property
def status(self):
'''
Get response status. Default to '200 OK'.
>>> r = Response()
>>> r.status
'200 OK'
>>> r.status = 404
>>> r.status
'404 Not Found'
>>> r.status = '500 Oh My God'
>>> r.status
'500 Oh My God'
'''
return self._status
@status.setter
def status(self, value):
'''
Set response status as int or str.
>>> r = Response()
>>> r.status = 404
>>> r.status
'404 Not Found'
>>> r.status = '500 ERR'
>>> r.status
'500 ERR'
>>> r.status = u'403 Denied'
>>> r.status
'403 Denied'
>>> r.status = 99
Traceback (most recent call last):
...
ValueError: Bad response code: 99
>>> r.status = 'ok'
Traceback (most recent call last):
...
ValueError: Bad response code: ok
>>> r.status = [1, 2, 3]
Traceback (most recent call last):
...
TypeError: Bad type of response code.
'''
if isinstance(value, (int, long)):
if value>=100 and value<=999:
st = _RESPONSE_STATUSES.get(value, '')
if st:
self._status = '%d %s' % (value, st)
else:
self._status = str(value)
else:
raise ValueError('Bad response code: %d' % value)
elif isinstance(value, basestring):
if isinstance(value, unicode):
value = value.encode('utf-8')
if _RE_RESPONSE_STATUS.match(value):
self._status = value
else:
raise ValueError('Bad response code: %s' % value)
else:
raise TypeError('Bad type of response code.')
class Template(object):
def __init__(self, template_name, **kw):
'''
Init a template object with template name, model as dict, and additional kw that will append to model.
>>> t = Template('hello.html', title='Hello', copyright='@2012')
>>> t.model['title']
'Hello'
>>> t.model['copyright']
'@2012'
>>> t = Template('test.html', abc=u'ABC', xyz=u'XYZ')
>>> t.model['abc']
u'ABC'
'''
self.template_name = template_name
self.model = dict(**kw)
class TemplateEngine(object):
'''
Base template engine.
'''
def __call__(self, path, model):
return '<!-- override this method to render template -->'
class Jinja2TemplateEngine(TemplateEngine):
'''
Render using jinja2 template engine.
>>> templ_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'test')
>>> engine = Jinja2TemplateEngine(templ_path)
>>> engine.add_filter('datetime', lambda dt: dt.strftime('%Y-%m-%d %H:%M:%S'))
>>> engine('jinja2-test.html', dict(name='Michael', posted_at=datetime.datetime(2014, 6, 1, 10, 11, 12)))
'<p>Hello, Michael.</p><span>2014-06-01 10:11:12</span>'
'''
def __init__(self, templ_dir, **kw):
from jinja2 import Environment, FileSystemLoader
if not 'autoescape' in kw:
kw['autoescape'] = True
self._env = Environment(loader=FileSystemLoader(templ_dir), **kw)
def add_filter(self, name, fn_filter):
self._env.filters[name] = fn_filter
def __call__(self, path, model):
return self._env.get_template(path).render(**model).encode('utf-8')
def _default_error_handler(e, start_response, is_debug):
if isinstance(e, HttpError):
#colorlog.info('HttpError: %s' % e.status)
headers = e.headers[:]
headers.append(('Content-Type', 'text/html'))
start_response(e.status, headers)
return ('<html><body><h1>%s</h1></body></html>' % e.status)
#colorlog.exception('Exception:')
start_response('500 Internal Server Error', [('Content-Type', 'text/html'), _HEADER_X_POWERED_BY])
if is_debug:
return _debug()
return ('<html><body><h1>500 Internal Server Error</h1><h3>%s</h3></body></html>' % str(e))
def view(path):
'''
A view decorator that render a view by dict.
>>> @view('test/view.html')
... def hello():
... return dict(name='Bob')
>>> t = hello()
>>> isinstance(t, Template)
True
>>> t.template_name
'test/view.html'
>>> @view('test/view.html')
... def hello2():
... return ['a list']
>>> t = hello2()
Traceback (most recent call last):
...
ValueError: Expect return a dict when using @view() decorator.
'''
def _decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kw):
r = func(*args, **kw)
if isinstance(r, dict):
return Template(path, **r)
raise ValueError('Expect return a dict when using @view() decorator.')
return _wrapper
return _decorator
_RE_INTERCEPTROR_STARTS_WITH = re.compile(r'^([^\*\?]+)\*?$')
_RE_INTERCEPTROR_ENDS_WITH = re.compile(r'^\*([^\*\?]+)$')
def _build_pattern_fn(pattern):
m = _RE_INTERCEPTROR_STARTS_WITH.match(pattern)
if m:
return lambda p: p.startswith(m.group(1))
m = _RE_INTERCEPTROR_ENDS_WITH.match(pattern)
if m:
return lambda p: p.endswith(m.group(1))
raise ValueError('Invalid pattern definition in interceptor.')
def interceptor(pattern='/'):
'''
An @interceptor decorator.
@interceptor('/admin/')
    def check_admin(next):
        return next()
'''
def _decorator(func):
func.__interceptor__ = _build_pattern_fn(pattern)
return func
return _decorator
def _build_interceptor_fn(func, next):
@functools.wraps(func)
def _wrapper():
if func.__interceptor__(ctx.request.path_info):
return func(next)
else:
return next()
return _wrapper
def _build_interceptor_chain(last_fn, *interceptors):
'''
Build interceptor chain.
>>> def target():
... print 'target'
... return 123
>>> @interceptor('/')
... def f1(next):
... print 'before f1()'
... return next()
>>> @interceptor('/test/')
... def f2(next):
... print 'before f2()'
... try:
... return next()
... finally:
... print 'after f2()'
>>> @interceptor('/')
... def f3(next):
... print 'before f3()'
... try:
... return next()
... finally:
... print 'after f3()'
>>> chain = _build_interceptor_chain(target, f1, f2, f3)
>>> ctx.request = Dict(path_info='/test/abc')
>>> chain()
before f1()
before f2()
before f3()
target
after f3()
after f2()
123
>>> ctx.request = Dict(path_info='/api/')
>>> chain()
before f1()
before f3()
target
after f3()
123
'''
L = list(interceptors)
L.reverse()
fn = last_fn
for f in L:
fn = _build_interceptor_fn(f, fn)
return fn
def _load_module(module_name):
'''
Load module from name as str.
>>> m = _load_module('xml')
>>> m.__name__
'xml'
>>> m = _load_module('xml.sax')
>>> m.__name__
'xml.sax'
>>> m = _load_module('xml.sax.handler')
>>> m.__name__
'xml.sax.handler'
'''
last_dot = module_name.rfind('.')
if last_dot==(-1):
return __import__(module_name, globals(), locals())
from_module = module_name[:last_dot]
import_module = module_name[last_dot+1:]
m = __import__(from_module, globals(), locals(), [import_module])
return getattr(m, import_module)
class WSGIApplication(object):
def __init__(self, document_root=None, **kw):
'''
Init a WSGIApplication.
Args:
document_root: document root path.
'''
self._running = False
self._document_root = document_root
self._interceptors = []
self._template_engine = None
self._get_static = {}
self._post_static = {}
self._get_dynamic = []
self._post_dynamic = []
def _check_not_running(self):
if self._running:
raise RuntimeError('Cannot modify WSGIApplication when running.')
@property
def template_engine(self):
return self._template_engine
@template_engine.setter
def template_engine(self, engine):
self._check_not_running()
self._template_engine = engine
def add_module(self, mod):
self._check_not_running()
m = mod if type(mod)==types.ModuleType else _load_module(mod)
# #colorlog.info('Add module: %s' % m.__name__)
for name in dir(m):
fn = getattr(m, name)
if callable(fn) and hasattr(fn, '__web_route__') and hasattr(fn, '__web_method__'):
self.add_url(fn)
def add_url(self, func):
self._check_not_running()
route = Route(func)
if route.is_static:
if route.method=='GET':
self._get_static[route.path] = route
if route.method=='POST':
self._post_static[route.path] = route
else:
if route.method=='GET':
self._get_dynamic.append(route)
if route.method=='POST':
self._post_dynamic.append(route)
#colorlog.info('[200]%s' % str(route))
def add_interceptor(self, func):
self._check_not_running()
self._interceptors.append(func)
#colorlog.info('Add interceptor: %s' % str(func))
def run(self, config):
from wsgiref.simple_server import make_server
if not config:
host = "127.0.0.1"
port = 5237
debug = True
else:
host = config["host"]
port = int(config["port"])
debug = False
print 'application (%s) will start at %s:%s...' % (self._document_root, host, port)
        server = make_server(host, port, self.get_wsgi_application(debug=debug))
server.serve_forever()
def get_wsgi_application(self, debug=False):
self._check_not_running()
if debug:
self._get_dynamic.append(StaticFileRoute())
self._running = True
_application = Dict(document_root=self._document_root)
def fn_route():
request_method = ctx.request.request_method
path_info = ctx.request.path_info
if request_method=='GET':
fn = self._get_static.get(path_info, None)
if fn:
return fn()
for fn in self._get_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise notfound()
if request_method=='POST':
fn = self._post_static.get(path_info, None)
if fn:
return fn()
for fn in self._post_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise notfound()
raise badrequest()
fn_exec = _build_interceptor_chain(fn_route, *self._interceptors)
def wsgi(env, start_response):
ctx.application = _application
ctx.request = Request(env)
response = ctx.response = Response()
try:
r = fn_exec()
if isinstance(r, Template):
r = self._template_engine(r.template_name, r.model)
if isinstance(r, unicode):
r = r.encode('utf-8')
if r is None:
r = []
start_response(response.status, response.headers)
return r
except RedirectError, e:
response.set_header('Location', e.location)
start_response(e.status, response.headers)
return []
except HttpError, e:
start_response(e.status, response.headers)
return ['<html><body><h1>', e.status, '</h1></body></html>']
except Exception, e:
#colorlog.info(e)
if not debug:
start_response('500 Internal Server Error', [])
return ['<html><body><h1>500 Internal Server Error</h1></body></html>']
exc_type, exc_value, exc_traceback = sys.exc_info()
fp = StringIO()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=fp)
stacks = fp.getvalue()
fp.close()
start_response('500 Internal Server Error', [])
return [
r'''<html><body><h1>500 Internal Server Error</h1><div style="font-family:Monaco, Menlo, Consolas, 'Courier New', monospace;"><pre>''',
stacks.replace('<', '<').replace('>', '>'),
'</pre></div></body></html>']
finally:
del ctx.application
del ctx.request
del ctx.response
return wsgi
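# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A minimal application wired together from the pieces defined above. The route,
# helper name and document root are hypothetical placeholders; in Python 2 this
# is directly runnable against this module.
def _example_application():
    @get('/hello')
    def hello():
        return '<h1>Hello, web!</h1>'
    app = WSGIApplication(document_root=os.path.dirname(os.path.abspath(__file__)))
    app.add_url(hello)
    # Returns a standard WSGI callable that a server such as wsgiref can host.
    return app.get_wsgi_application(debug=True)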
if __name__=='__main__':
sys.path.append('.')
import doctest
doctest.testmod()
|
py | 7dfda37ed9fe7023323d90423724239c06aefa1c | """Run script for all linters."""
import subprocess
import sys
import typing
def main() -> None:
"""Run all linters.
Linter output will be sent to stdout.
    This function will exit the script with return code 0 on success, and a
    non-zero value on failure.
"""
is_success: bool = True
for linter_input in [
['flake8', '--max-complexity', '8', '.'],
['mypy', '--strict', '.'],
['pydocstyle', '.'],
['autopep8', '-r', '-d', '-a', '-a', '--exit-code', '.']
]:
this_success = run_single_linter(linter_input)
is_success = is_success and this_success
if is_success:
print("all linters pass")
sys.exit(0)
else:
print("linter failure")
sys.exit(-1)
def run_single_linter(args: typing.List[str]) -> bool:
"""Return true if the linter passes, and false if it fails."""
p = subprocess.run(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, text=True)
if p.returncode != 0:
print("{} failure:".format(args[0]))
print(p.stdout)
return False
else:
print("{} success".format(args[0]))
return True
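# --- Illustrative sketch (added for exposition; not part of the original script) ---
# Adding another tool follows the same pattern as the entries in main(): append
# its command line to the list there, or call the helper directly. The command
# below is a hypothetical example.
#
#   run_single_linter(['isort', '--check-only', '.'])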
if __name__ == "__main__":
"""Run the main function as a script."""
main()
|
py | 7dfda64b59407277038c88b758e4b3645df4ee76 | import logging
from kubernetes import client
from kubeflow.fairing.builders.builder import BuilderInterface
from kubeflow.fairing.constants import constants
from kubeflow.fairing.cloud import gcp
logger = logging.getLogger(__name__)
class BaseBuilder(BuilderInterface): #pylint:disable=too-many-instance-attributes
"""A builder using the local Docker client"""
def __init__(self,
registry=None,
image_name=None,
base_image=constants.DEFAULT_BASE_IMAGE,
push=True,
preprocessor=None,
dockerfile_path=None):
self.registry = registry
self.image_name = image_name
self.push = push
if self.registry is None:
# TODO(r2d4): Add more heuristics here...
# If no push and no registry provided, use any registry name
if not self.push:
self.registry = 'local/fairing-image'
else:
self.registry = 'gcr.io/{}'.format(gcp.guess_project_name())
self.base_image = base_image
self.dockerfile_path = dockerfile_path
self.preprocessor = preprocessor
self.image_tag = None
self.docker_client = None
def generate_pod_spec(self, image=None):
if not image:
image = self.image_tag
return client.V1PodSpec(
containers=[client.V1Container(
name='model',
image=image,
command=self.preprocessor.get_command(),
security_context=client.V1SecurityContext(
run_as_user=0,
),
env=[client.V1EnvVar(
name='FAIRING_RUNTIME',
value='1',
)],
# Set the directory where the python files are built.
# TODO(jlewi): Would it be better to set PYTHONPATH?
working_dir=self.preprocessor.path_prefix,
)],
)
def full_image_name(self, tag):
"""Retrun the full image name
:param tag: the new tag for the image
"""
return '{}/{}:{}'.format(self.registry, self.image_name, tag)
def build(self):
"""Runs the build"""
raise NotImplementedError()
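# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Concrete builders are expected to subclass BaseBuilder and implement build(),
# recording the produced image in self.image_tag so generate_pod_spec() can use
# it. The subclass name and tag below are hypothetical placeholders.
class _ExampleNoopBuilder(BaseBuilder):
    def build(self):
        # A real builder would construct and (optionally) push the image here.
        self.image_tag = self.full_image_name('example-tag')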
|
py | 7dfda6a7409772f694a7913dbfb223727c0f4154 | from eth.exceptions import PyEVMError
from eth_utils import ValidationError
class StateMachineNotFound(PyEVMError):
"""
Raised when no ``StateMachine`` is available for the provided block slot number.
"""
pass
class BlockClassError(PyEVMError):
"""
Raised when the given ``block`` doesn't match the block class version
"""
pass
class ProposerIndexError(PyEVMError):
"""
Raised when the given ``validator_index`` doesn't match the ``validator_index``
of proposer of the given ``slot``
"""
pass
class NoCommitteeAssignment(PyEVMError):
"""
Raised when no potential crosslink committee assignment.
"""
pass
class InvalidEpochError(ValidationError):
"""
Raised when a function receives a query for an epoch that is not semantically valid.
Example: asking the ``BeaconState`` about an epoch that is not derivable given the current data.
"""
pass
class BLSValidationError(ValidationError):
"""
Raised when a verification of public keys, messages, and signature fails.
"""
pass
class SignatureError(BLSValidationError):
"""
Signature is ill-formed
"""
pass
class PublicKeyError(BLSValidationError):
"""
Public Key is ill-formed
"""
pass
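# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Because SignatureError and PublicKeyError subclass BLSValidationError, callers
# can handle both with a single except clause. The verify() call and logger are
# hypothetical placeholders.
#
#   try:
#       verify(public_key, message, signature)
#   except BLSValidationError as exc:
#       logger.warning("BLS check failed: %s", exc)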
|
py | 7dfda810ae3643656e0bdcdc9dfd493b35f7c97d | # pylint: disable=redefined-outer-name,protected-access
import ast
import importlib.util
from pathlib import Path
from shutil import copyfile
from subprocess import CalledProcessError
from unittest.mock import ANY, patch, sentinel
from uuid import uuid4
from zipfile import ZipFile
import pytest
from docker.errors import APIError, ContainerError, ImageLoadError
from requests.exceptions import ConnectionError as RequestsConnectionError
from rpdk.core.exceptions import DownstreamError
from rpdk.core.project import Project
from rpdk.python.__init__ import __version__
from rpdk.python.codegen import (
SUPPORT_LIB_NAME,
SUPPORT_LIB_PKG,
Python36LanguagePlugin as PythonLanguagePlugin,
validate_no,
)
TYPE_NAME = "foo::bar::baz"
@pytest.fixture
def plugin():
return PythonLanguagePlugin()
@pytest.fixture
def project(tmp_path):
project = Project(root=tmp_path)
patch_plugins = patch.dict(
"rpdk.core.plugin_registry.PLUGIN_REGISTRY",
{PythonLanguagePlugin.NAME: lambda: PythonLanguagePlugin},
clear=True,
)
patch_wizard = patch(
"rpdk.python.codegen.input_with_validation", autospec=True, side_effect=[False]
)
with patch_plugins, patch_wizard:
project.init(TYPE_NAME, PythonLanguagePlugin.NAME)
return project
def get_files_in_project(project):
return {
str(child.relative_to(project.root)): child for child in project.root.rglob("*")
}
@pytest.mark.parametrize(
"value,result",
[
("y", True),
("Y", True),
("yes", True),
("Yes", True),
("YES", True),
("asdf", True),
("no", False),
("No", False),
("No", False),
("n", False),
("N", False),
],
)
def test_validate_no(value, result):
assert validate_no(value) is result
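# Illustrative sketch and an assumption, not the real rpdk.python.codegen source:
# an implementation consistent with the parametrized cases above would treat any
# answer that does not start with "n"/"N" as a "yes". Shown only to make the
# expected behaviour of validate_no concrete.
def _validate_no_reference(value):
    return not value.lower().startswith("n")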
def test__remove_build_artifacts_file_found(tmp_path):
deps_path = tmp_path / "build"
deps_path.mkdir()
PythonLanguagePlugin._remove_build_artifacts(deps_path)
def test__remove_build_artifacts_file_not_found(tmp_path):
deps_path = tmp_path / "build"
with patch("rpdk.python.codegen.LOG", autospec=True) as mock_log:
PythonLanguagePlugin._remove_build_artifacts(deps_path)
mock_log.debug.assert_called_once()
def test_initialize(project):
assert project.settings == {"use_docker": False, "protocolVersion": "2.0.0"}
files = get_files_in_project(project)
assert set(files) == {
".gitignore",
".rpdk-config",
"README.md",
"foo-bar-baz.json",
"requirements.txt",
"example_inputs/inputs_1_invalid.json",
"example_inputs/inputs_1_update.json",
"example_inputs/inputs_1_create.json",
"example_inputs",
"src",
"src/foo_bar_baz",
"src/foo_bar_baz/__init__.py",
"src/foo_bar_baz/handlers.py",
"template.yml",
}
assert "__pycache__" in files[".gitignore"].read_text()
assert SUPPORT_LIB_NAME in files["requirements.txt"].read_text()
readme = files["README.md"].read_text()
assert project.type_name in readme
assert SUPPORT_LIB_PKG in readme
assert "handlers.py" in readme
assert "models.py" in readme
assert project.entrypoint in files["template.yml"].read_text()
# this is a rough check the generated Python code is valid as far as syntax
ast.parse(files["src/foo_bar_baz/__init__.py"].read_text())
ast.parse(files["src/foo_bar_baz/handlers.py"].read_text())
def test_generate(project):
project.load_schema()
before = get_files_in_project(project)
project.generate()
after = get_files_in_project(project)
files = after.keys() - before.keys() - {"resource-role.yaml"}
print("Project files: ", get_files_in_project(project))
assert files == {"src/foo_bar_baz/models.py"}
models_path = after["src/foo_bar_baz/models.py"]
# this is a rough check the generated Python code is valid as far as syntax
ast.parse(models_path.read_text())
# this however loads the module
spec = importlib.util.spec_from_file_location("foo_bar_baz.models", models_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
assert hasattr(module.ResourceModel, "_serialize")
assert hasattr(module.ResourceModel, "_deserialize")
assert hasattr(module.TypeConfigurationModel, "_serialize")
assert hasattr(module.TypeConfigurationModel, "_deserialize")
type_configuration_schema_file = project.root / "foo-bar-baz-configuration.json"
assert not type_configuration_schema_file.is_file()
def test_generate_with_type_configuration(tmp_path):
type_name = "schema::with::typeconfiguration"
project = Project(root=tmp_path)
patch_plugins = patch.dict(
"rpdk.core.plugin_registry.PLUGIN_REGISTRY",
{PythonLanguagePlugin.NAME: lambda: PythonLanguagePlugin},
clear=True,
)
patch_wizard = patch(
"rpdk.python.codegen.input_with_validation", autospec=True, side_effect=[False]
)
with patch_plugins, patch_wizard:
project.init(type_name, PythonLanguagePlugin.NAME)
copyfile(
str(Path.cwd() / "tests/data/schema-with-typeconfiguration.json"),
str(project.root / "schema-with-typeconfiguration.json"),
)
project.type_info = ("schema", "with", "typeconfiguration")
project.load_schema()
project.load_configuration_schema()
project.generate()
# assert TypeConfigurationModel is added to generated directory
models_path = project.root / "src" / "schema_with_typeconfiguration" / "models.py"
# this however loads the module
spec = importlib.util.spec_from_file_location("foo_bar_baz.models", models_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
assert hasattr(module.ResourceModel, "_serialize")
assert hasattr(module.ResourceModel, "_deserialize")
assert hasattr(module.TypeConfigurationModel, "_serialize")
assert hasattr(module.TypeConfigurationModel, "_deserialize")
type_configuration_schema_file = (
project.root / "schema-with-typeconfiguration-configuration.json"
)
assert type_configuration_schema_file.is_file()
def test_package_pip(project):
project.load_schema()
project.generate()
# not real requirements, would make version bumps a pain to test
(project.root / "requirements.txt").write_text("")
(project.root / f"{SUPPORT_LIB_NAME}-2.1.1.tar.gz").touch()
# want to exclude *.pyc files from zip, but code isn't run, so never get made
(project.root / "src" / "foo_bar_baz" / "coverage.pyc").touch()
zip_path = project.root / "foo-bar-baz.zip"
with zip_path.open("wb") as f, ZipFile(f, mode="w") as zip_file:
project._plugin.package(project, zip_file)
with zip_path.open("rb") as f, ZipFile(f, mode="r") as zip_file:
assert sorted(zip_file.namelist()) == [
"ResourceProvider.zip",
"src/foo_bar_baz/__init__.py",
"src/foo_bar_baz/handlers.py",
"src/foo_bar_baz/models.py",
]
def test__pip_build_executable_not_found(tmp_path):
executable_name = str(uuid4())
patch_cmd = patch.object(
PythonLanguagePlugin, "_make_pip_command", return_value=[executable_name]
)
with patch_cmd as mock_cmd:
with pytest.raises(DownstreamError) as excinfo:
PythonLanguagePlugin._pip_build(tmp_path)
mock_cmd.assert_called_once_with(tmp_path)
assert isinstance(excinfo.value.__cause__, FileNotFoundError)
def test__pip_build_called_process_error(tmp_path):
patch_cmd = patch.object(
PythonLanguagePlugin, "_make_pip_command", return_value=["false"]
)
with patch_cmd as mock_cmd:
with pytest.raises(DownstreamError) as excinfo:
PythonLanguagePlugin._pip_build(tmp_path)
mock_cmd.assert_called_once_with(tmp_path)
assert isinstance(excinfo.value.__cause__, CalledProcessError)
def test__build_pip(plugin):
plugin._use_docker = False
patch_pip = patch.object(plugin, "_pip_build", autospec=True)
patch_docker = patch.object(plugin, "_docker_build", autospec=True)
with patch_docker as mock_docker, patch_pip as mock_pip:
plugin._build(sentinel.base_path)
mock_docker.assert_not_called()
mock_pip.assert_called_once_with(sentinel.base_path)
def test__build_docker(plugin):
plugin._use_docker = True
patch_pip = patch.object(plugin, "_pip_build", autospec=True)
patch_docker = patch.object(plugin, "_docker_build", autospec=True)
with patch_docker as mock_docker, patch_pip as mock_pip:
plugin._build(sentinel.base_path)
mock_pip.assert_not_called()
mock_docker.assert_called_once_with(sentinel.base_path)
def test__docker_build_good_path(plugin, tmp_path):
patch_from_env = patch("rpdk.python.codegen.docker.from_env", autospec=True)
with patch_from_env as mock_from_env:
mock_run = mock_from_env.return_value.containers.run
mock_run.return_value = [b"output\n\n"]
plugin._docker_build(tmp_path)
mock_from_env.assert_called_once_with()
mock_run.assert_called_once_with(
image=ANY,
command=ANY,
auto_remove=True,
volumes={str(tmp_path): {"bind": "/project", "mode": "rw"}},
stream=True,
user=ANY,
)
def test_get_plugin_information(project):
plugin_information = project._plugin.get_plugin_information(project)
assert plugin_information["plugin-tool-version"] == __version__
assert plugin_information["plugin-name"] == "python"
@pytest.mark.parametrize(
"exception",
[
lambda: ContainerError("abcde", 255, "/bin/false", "image", ""),
ImageLoadError,
lambda: APIError("500"),
lambda: RequestsConnectionError(
"Connection aborted.", ConnectionRefusedError(61, "Connection refused")
),
],
)
def test__docker_build_bad_path(plugin, tmp_path, exception):
patch_from_env = patch("rpdk.python.codegen.docker.from_env", autospec=True)
with patch_from_env as mock_from_env:
mock_run = mock_from_env.return_value.containers.run
mock_run.side_effect = exception()
with pytest.raises(DownstreamError):
plugin._docker_build(tmp_path)
mock_from_env.assert_called_once_with()
mock_run.assert_called_once_with(
image=ANY,
command=ANY,
auto_remove=True,
volumes={str(tmp_path): {"bind": "/project", "mode": "rw"}},
stream=True,
user=ANY,
)
|
py | 7dfda83aaf35a20cc12e855fd07f84417ede9262 | """
hubspot companies api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import prettify, get_log
from typing import List, Dict, Optional, Union
COMPANIES_API_VERSION = "2"
class CompaniesClient(BaseClient):
"""
hubspot3 Companies client
:see: https://developers.hubspot.com/docs/methods/companies/companies-overview
"""
def __init__(self, *args, **kwargs):
super(CompaniesClient, self).__init__(*args, **kwargs)
self.log = get_log("hubspot3.companies")
def _get_path(self, subpath: str) -> str:
"""get the full api url for the given subpath on this client"""
return "companies/v{}/{}".format(
self.options.get("version") or COMPANIES_API_VERSION, subpath
)
def create(self, data: Dict = None, **options) -> Dict:
"""create a new company"""
data = data or {}
return self._call("companies/", data=data, method="POST", **options)
def update(self, company_id: str, data: Dict = None, **options) -> Dict:
"""update the given company with data"""
data = data or {}
return self._call(
"companies/{}".format(company_id), data=data, method="PUT", **options
)
def delete(self, company_id: str, **options) -> Dict:
"""delete a company"""
return self._call("companies/{}".format(company_id), method="DELETE", **options)
def delete_all(self, **options):
"""
Delete all the companies. Please use it carefully.
"""
for company in self.get_all(**options):
self.delete(company["id"])
def get(self, company_id: str, **options) -> Dict:
"""get a single company by it's ID"""
return self._call("companies/{}".format(company_id), method="GET", **options)
def search_domain(
self, domain: str, limit: int = 1, extra_properties: Dict = None, **options
) -> Dict:
"""searches for companies by domain name. limit is max'd at 100"""
# default properties to fetch
properties = [
"domain",
"createdate",
"name",
"hs_lastmodifieddate",
"hubspot_owner_id",
]
# append extras if they exist
if extra_properties:
if isinstance(extra_properties, list):
properties += extra_properties
if isinstance(extra_properties, str):
properties.append(extra_properties)
return self._call(
"domains/{}/companies".format(domain),
method="POST",
data={"limit": limit, "requestOptions": {"properties": properties}},
**options
)
def get_all(
self,
prettify_output: bool = True,
extra_properties: Union[str, List] = None,
**options
) -> Optional[List]:
"""get all companies, including extra properties if they are passed in"""
finished = False
output = []
offset = 0
query_limit = 250 # Max value according to docs
# default properties to fetch
properties = [
"name",
"description",
"address",
"address2",
"city",
"state",
"story",
"hubspot_owner_id",
]
# append extras if they exist
if extra_properties:
if isinstance(extra_properties, list):
properties += extra_properties
if isinstance(extra_properties, str):
properties.append(extra_properties)
while not finished:
batch = self._call(
"companies/paged",
method="GET",
doseq=True,
params={
"limit": query_limit,
"offset": offset,
"propertiesWithHistory": properties,
"includeMergeAudits": "true",
},
**options
)
output.extend(
[
prettify(company, id_key="companyId")
if prettify_output
else company
for company in batch["companies"]
if not company["isDeleted"]
]
)
finished = not batch["has-more"]
offset = batch["offset"]
return output
def _get_recent(self, recency_type: str, **options) -> Optional[List]:
"""
        Returns either a list of recently modified companies or a list of recently
        created companies, depending on the recency_type passed in. Both API endpoints
        take identical parameters and return identical formats; they differ only in
        their URLs (companies/recent/created or companies/recent/modified).
:see: https://developers.hubspot.com/docs/methods/companies/get_companies_modified
:see: https://developers.hubspot.com/docs/methods/companies/get_companies_created
"""
finished = False
output = []
offset = 0
query_limit = 250 # Max value according to docs
while not finished:
batch = self._call(
"companies/recent/{}".format(recency_type),
method="GET",
doseq=True,
params={"count": query_limit, "offset": offset},
**options
)
output.extend(
[
prettify(company, id_key="companyId")
for company in batch["results"]
if not company["isDeleted"]
]
)
finished = not batch["hasMore"]
offset = batch["offset"]
return output
def get_recently_modified(self, **options) -> Optional[List]:
return self._get_recent("modified", **options)
def get_recently_created(self, **options) -> Optional[List]:
return self._get_recent("created", **options)
def get_contacts_at_a_company(self, company_id: str, **options) -> Optional[List]:
"""
Returns all of the contacts who have an associatedcompanyid contact property of
`company_id`.
:see: https://developers.hubspot.com/docs/methods/companies/get_company_contacts
"""
return self._call(
"companies/{}/contacts".format(company_id), method="GET", **options
)
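# Illustrative usage sketch, not part of hubspot3: how the client above is
# typically driven. The api_key keyword and the "industry" extra property are
# assumptions/placeholders; get_all() pages through companies/paged until the
# API reports has-more = false.
if __name__ == "__main__":
    client = CompaniesClient(api_key="demo-key")  # placeholder credential
    companies = client.get_all(extra_properties=["industry"])
    print("fetched {} companies".format(len(companies)))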
|
py | 7dfda8cef5923a2a0d78158e8c874838389cfd46 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExternalMaster(object):
"""
An external master name server used as the source of zone data.
"""
def __init__(self, **kwargs):
"""
Initializes a new ExternalMaster object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param address:
The value to assign to the address property of this ExternalMaster.
:type address: str
:param port:
The value to assign to the port property of this ExternalMaster.
:type port: int
:param tsig_key_id:
The value to assign to the tsig_key_id property of this ExternalMaster.
:type tsig_key_id: str
"""
self.swagger_types = {
'address': 'str',
'port': 'int',
'tsig_key_id': 'str'
}
self.attribute_map = {
'address': 'address',
'port': 'port',
'tsig_key_id': 'tsigKeyId'
}
self._address = None
self._port = None
self._tsig_key_id = None
@property
def address(self):
"""
**[Required]** Gets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:return: The address of this ExternalMaster.
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""
Sets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:param address: The address of this ExternalMaster.
:type: str
"""
self._address = address
@property
def port(self):
"""
Gets the port of this ExternalMaster.
        The server's port. The port value must be 53; otherwise, omit the
        port value.
:return: The port of this ExternalMaster.
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this ExternalMaster.
        The server's port. The port value must be 53; otherwise, omit the
        port value.
:param port: The port of this ExternalMaster.
:type: int
"""
self._port = port
@property
def tsig_key_id(self):
"""
Gets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:return: The tsig_key_id of this ExternalMaster.
:rtype: str
"""
return self._tsig_key_id
@tsig_key_id.setter
def tsig_key_id(self, tsig_key_id):
"""
Sets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:param tsig_key_id: The tsig_key_id of this ExternalMaster.
:type: str
"""
self._tsig_key_id = tsig_key_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
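# Illustrative sketch, not part of the generated SDK: the
# @init_model_state_from_kwargs decorator lets the model be populated directly
# from the keyword arguments documented in __init__. The address and OCID below
# are placeholder values.
if __name__ == "__main__":
    master = ExternalMaster(
        address="203.0.113.10",
        port=53,
        tsig_key_id="ocid1.tsigkey.oc1..exampleuniqueID",
    )
    print(master)  # __repr__ delegates to formatted_flat_dict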
|
py | 7dfda90518f2a43dd065e13260c89decc846baab | from flask import Flask, session
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_httpauth import HTTPBasicAuth
from app.methods import formatAsUSD
from config import *
app = Flask(__name__)
app.config['SECRET_KEY'] = SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
app.jinja_env.globals.update(usd=formatAsUSD) # This 'formatAsUSD' function is in the methods file.
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
from app import routes |
py | 7dfda9674fe77c70e92a56fe8bec266ba8b9889b | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
LocalExecutor
.. seealso::
For more information on how the LocalExecutor works, take a look at the guide:
:ref:`executor:LocalExecutor`
"""
import logging
import os
import signal
import psutil
import subprocess
from abc import abstractmethod
from multiprocessing import Manager, Process
from multiprocessing.managers import SyncManager
from queue import Empty, Queue # pylint: disable=unused-import # noqa: F401
from typing import Any, List, Optional, Tuple, Union # pylint: disable=unused-import # noqa: F401
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import NOT_STARTED_MESSAGE, PARALLELISM, BaseExecutor, CommandType
from airflow.executors.scheduling_action import SchedulingAction
from airflow.models import DagRun
from airflow.models.taskinstance import ( # pylint: disable=unused-import # noqa: F401
TaskInstanceKey,
TaskInstanceStateType, TaskInstance,
)
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
# This is a work to be executed by a worker.
# It can be Key and Command - but it can also be None, None which is actually a
# "Poison Pill" - worker seeing Poison Pill should take the pill and ... die instantly.
ExecutorWorkType = Tuple[Optional[TaskInstanceKey], Optional[CommandType]]
class LocalWorkerBase(Process, LoggingMixin):
"""
LocalWorkerBase implementation to run airflow commands. Executes the given
command and puts the result into a result queue when done, terminating execution.
:param result_queue: the queue to store result state
"""
def __init__(self, result_queue: 'Queue[TaskInstanceStateType]'):
super().__init__(target=self.do_work)
self.daemon: bool = True
self.result_queue: 'Queue[TaskInstanceStateType]' = result_queue
def run(self):
# We know we've just started a new process, so lets disconnect from the metadata db now
settings.engine.pool.dispose()
settings.engine.dispose()
return super().run()
def execute_work(self, key: TaskInstanceKey, command: CommandType) -> None:
"""
Executes command received and stores result state in queue.
:param key: the key to identify the task instance
:param command: the command to execute
"""
if key is None:
return
self.log.info("%s running %s", self.__class__.__name__, command)
if settings.EXECUTE_TASKS_NEW_PYTHON_INTERPRETER:
state = self._execute_work_in_subprocess(command)
else:
state = self._execute_work_in_fork(command)
self.result_queue.put((key, state))
def _execute_work_in_subprocess(self, command: CommandType) -> str:
try:
subprocess.check_call(command, close_fds=True)
return State.SUCCESS
except subprocess.CalledProcessError as e:
self.log.error("Failed to execute task %s.", str(e))
return State.FAILED
def _execute_work_in_fork(self, command: CommandType) -> str:
pid = os.fork()
if pid:
# In parent, wait for the child
pid, ret = os.waitpid(pid, 0)
return State.SUCCESS if ret == 0 else State.FAILED
from airflow.sentry import Sentry
ret = 1
try:
import signal
from airflow.cli.cli_parser import get_parser
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGUSR2, signal.SIG_DFL)
parser = get_parser()
# [1:] - remove "airflow" from the start of the command
args = parser.parse_args(command[1:])
args.shut_down_logging = False
setproctitle(f"airflow task supervisor: {command}")
args.func(args)
ret = 0
return State.SUCCESS
except Exception as e: # pylint: disable=broad-except
self.log.error("Failed to execute task %s.", str(e))
finally:
Sentry.flush()
logging.shutdown()
os._exit(ret) # pylint: disable=protected-access
raise RuntimeError('unreachable -- keep mypy happy')
@abstractmethod
def do_work(self):
"""Called in the subprocess and should then execute tasks"""
raise NotImplementedError()
class LocalWorker(LocalWorkerBase):
"""
Local worker that executes the task.
:param result_queue: queue where results of the tasks are put.
:param key: key identifying task instance
:param command: Command to execute
"""
def __init__(
self, result_queue: 'Queue[TaskInstanceStateType]', key: TaskInstanceKey, command: CommandType
):
super().__init__(result_queue)
self.key: TaskInstanceKey = key
self.command: CommandType = command
def do_work(self) -> None:
self.execute_work(key=self.key, command=self.command)
class QueuedLocalWorker(LocalWorkerBase):
"""
LocalWorker implementation that is waiting for tasks from a queue and will
continue executing commands as they become available in the queue.
It will terminate execution once the poison token is found.
:param task_queue: queue from which worker reads tasks
:param result_queue: queue where worker puts results after finishing tasks
"""
def __init__(self, task_queue: 'Queue[ExecutorWorkType]', result_queue: 'Queue[TaskInstanceStateType]'):
super().__init__(result_queue=result_queue)
self.task_queue = task_queue
def do_work(self) -> None:
while True:
key, command = self.task_queue.get()
try:
if key is None or command is None:
# Received poison pill, no more tasks to run
break
self.execute_work(key=key, command=command)
finally:
self.task_queue.task_done()
class LocalExecutor(BaseExecutor):
"""
LocalExecutor executes tasks locally in parallel.
It uses the multiprocessing Python library and queues to parallelize the execution
of tasks.
:param parallelism: how many parallel processes are run in the executor
"""
def __init__(self, parallelism: int = PARALLELISM):
super().__init__(parallelism=parallelism)
self.manager: Optional[SyncManager] = None
self.result_queue: Optional['Queue[TaskInstanceStateType]'] = None
self.workers: List[QueuedLocalWorker] = []
self.workers_used: int = 0
self.workers_active: int = 0
self.impl: Optional[
Union['LocalExecutor.UnlimitedParallelism', 'LocalExecutor.LimitedParallelism']
] = None
class UnlimitedParallelism:
"""
Implements LocalExecutor with unlimited parallelism, starting one process
per each command to execute.
:param executor: the executor instance to implement.
"""
def __init__(self, executor: 'LocalExecutor'):
self.executor: 'LocalExecutor' = executor
def start(self) -> None:
"""Starts the executor."""
self.executor.workers_used = 0
self.executor.workers_active = 0
# pylint: disable=unused-argument # pragma: no cover
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
) -> None:
"""
Executes task asynchronously.
:param key: the key to identify the task instance
:param command: the command to execute
:param queue: Name of the queue
:param executor_config: configuration for the executor
"""
if not self.executor.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
local_worker = LocalWorker(self.executor.result_queue, key=key, command=command)
self.executor.workers_used += 1
self.executor.workers_active += 1
local_worker.start()
# pylint: enable=unused-argument # pragma: no cover
def sync(self) -> None:
"""Sync will get called periodically by the heartbeat method."""
if not self.executor.result_queue:
raise AirflowException("Executor should be started first")
while not self.executor.result_queue.empty():
results = self.executor.result_queue.get()
self.executor.change_state(*results)
self.executor.send_message(results[0])
self.executor.workers_active -= 1
def end(self) -> None:
"""
            This method is called when the caller is done submitting jobs and
            wants to wait synchronously for all previously submitted jobs to
            finish.
"""
while self.executor.workers_active > 0:
self.executor.sync()
class LimitedParallelism:
"""
Implements LocalExecutor with limited parallelism using a task queue to
coordinate work distribution.
:param executor: the executor instance to implement.
"""
def __init__(self, executor: 'LocalExecutor'):
self.executor: 'LocalExecutor' = executor
self.queue: Optional['Queue[ExecutorWorkType]'] = None
def start(self) -> None:
"""Starts limited parallelism implementation."""
if not self.executor.manager:
raise AirflowException(NOT_STARTED_MESSAGE)
self.queue = self.executor.manager.Queue()
if not self.executor.result_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.executor.workers = [
QueuedLocalWorker(self.queue, self.executor.result_queue)
for _ in range(self.executor.parallelism)
]
self.executor.workers_used = len(self.executor.workers)
for worker in self.executor.workers:
worker.start()
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None, # pylint: disable=unused-argument
executor_config: Optional[Any] = None, # pylint: disable=unused-argument
) -> None:
"""
Executes task asynchronously.
:param key: the key to identify the task instance
:param command: the command to execute
:param queue: name of the queue
:param executor_config: configuration for the executor
"""
if not self.queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.queue.put((key, command))
def sync(self):
"""Sync will get called periodically by the heartbeat method."""
while True:
try:
results = self.executor.result_queue.get_nowait()
try:
self.executor.change_state(*results)
self.executor.send_message(results[0])
finally:
self.executor.result_queue.task_done()
except Empty:
break
def end(self):
"""Ends the executor. Sends the poison pill to all workers."""
for _ in self.executor.workers:
self.queue.put((None, None))
# Wait for commands to finish
self.queue.join()
self.executor.sync()
def start(self) -> None:
"""Starts the executor"""
self.manager = Manager()
self.result_queue = self.manager.Queue()
self.workers = []
self.workers_used = 0
self.workers_active = 0
self.impl = (
LocalExecutor.UnlimitedParallelism(self)
if self.parallelism == 0
else LocalExecutor.LimitedParallelism(self)
)
self.impl.start()
def _stop_related_process(self, ti: TaskInstance) -> bool:
self.kill_children(ti.pid, signal.SIGTERM)
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
) -> None:
"""Execute asynchronously."""
if not self.impl:
raise AirflowException(NOT_STARTED_MESSAGE)
self.validate_command(command)
self.impl.execute_async(key=key, command=command, queue=queue, executor_config=executor_config)
def kill_children(self, ppid=None, sig=signal.SIGTERM) -> None:
"""
Kill children of specific process.
:param ppid: parent process id
:type ppid: int
:param sig: signal type
:type sig: int
"""
try:
process = psutil.Process(ppid)
except psutil.NoSuchProcess:
self.log.error("No such process: %s", str(ppid))
return
children = process.children(recursive=False)
for pid in children:
os.kill(pid.pid, sig)
def recover_state(self):
"""
Recover the state of dags after restarting scheduler.
"""
self.log.info("Start to recover LocalExecutor.")
def sync(self) -> None:
"""Sync will get called periodically by the heartbeat method."""
if not self.impl:
raise AirflowException(NOT_STARTED_MESSAGE)
self.impl.sync()
def end(self) -> None:
"""
Ends the executor.
:return:
"""
if not self.impl:
raise AirflowException(NOT_STARTED_MESSAGE)
if not self.manager:
raise AirflowException(NOT_STARTED_MESSAGE)
self.impl.end()
self.manager.shutdown()
def terminate(self):
"""Terminate the executor is not doing anything."""
|
py | 7dfda96a5ec8198bdb3783e1077fb02cd6543663 | from common.internal.FieldType import FieldType
from common.internal.fieldTypes.SingleArgumentType import SingleArgumentType
class SetType(SingleArgumentType):
"""
Field type of all sets.
"""
_typeID = 19
def __init__(self, groundType: FieldType):
super(SetType, self).__init__(self.typeID(), groundType)
def readSingleField(self, inStream):
i = inStream.v64()
rval = set()
        while i != 0:
            rval.add(self.groundType.readSingleField(inStream))
            i -= 1
        return rval
def __str__(self):
return "set<" + self.groundType.__str__() + ">"
def __eq__(self, obj):
if isinstance(obj, SetType):
return self.groundType.__eq__(obj.groundType)
return False
|
py | 7dfdaa6aa50bae934530fbb88ef025ca35aeede8 | import re
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
PLAYLIST_URL = "http://x{0}x.api.channel.livestream.com/3.0/playlist.m3u8"
_url_re = re.compile(r"http(s)?://(cdn|original)\.livestream\.com/(embed/)?(?P<channel>[^&?/]+)")
class OldLivestream(Plugin):
@classmethod
    def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
match = _url_re.match(self.url)
channel = match.group("channel")
channel = channel.replace("_", "-")
playlist_url = PLAYLIST_URL.format(channel)
return HLSStream.parse_variant_playlist(self.session, playlist_url, check_streams=True)
__plugin__ = OldLivestream
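# Illustrative sketch, not part of the plugin: how the URL pattern and playlist
# template above combine. The channel name is a placeholder.
if __name__ == "__main__":
    match = _url_re.match("http://original.livestream.com/some_channel")
    if match:
        channel = match.group("channel").replace("_", "-")
        # -> http://xsome-channelx.api.channel.livestream.com/3.0/playlist.m3u8
        print(PLAYLIST_URL.format(channel))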
|
py | 7dfdaaaa3099e2c22d8037dedeff795e45b14f44 | a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
print(c)
print(len(c))
print(c.count(5))  # How many times the value 5 appears in 'c'
print(c.index(8))  # At which position the value 8 is located
pessoa = ('Gustavo', 39, 'M', 99.88)
print(pessoa)
del (pessoa)  # Deletes the tuple 'pessoa'
print(pessoa)  # NameError: 'pessoa' no longer exists after the del above
|