hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequencelengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequencelengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequencelengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
793fae4d2142a2c8d2de90280e24b42043c11edb | 19,433 | py | Python | nova/virt/hyperv/volumeops.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | null | null | null | nova/virt/hyperv/volumeops.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | null | null | null | nova/virt/hyperv/volumeops.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 1 | 2020-07-24T01:18:44.000Z | 2020-07-24T01:18:44.000Z | # Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import collections
import os
import re
import time
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import excutils
from six.moves import range
from nova import block_device
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import utils
from nova.virt import driver
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class VolumeOps(object):
"""Management class for Volume-related tasks
"""
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_iscsi_initiator_utils()
self._initiator = None
self._default_root_device = 'vda'
self.volume_drivers = {'smbfs': SMBFSVolumeDriver(),
'iscsi': ISCSIVolumeDriver()}
def _get_volume_driver(self, driver_type=None, connection_info=None):
if connection_info:
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def attach_volumes(self, block_device_info, instance_name, ebs_root):
mapping = driver.block_device_info_get_mapping(block_device_info)
if ebs_root:
self.attach_volume(mapping[0]['connection_info'],
instance_name, True)
mapping = mapping[1:]
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
def disconnect_volumes(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
block_devices = self._group_block_devices_by_type(
mapping)
for driver_type, block_device_mapping in block_devices.items():
volume_driver = self._get_volume_driver(driver_type)
volume_driver.disconnect_volumes(block_device_mapping)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.attach_volume(connection_info, instance_name, ebs_root)
def detach_volume(self, connection_info, instance_name):
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.detach_volume(connection_info, instance_name)
def ebs_root_in_block_devices(self, block_device_info):
if block_device_info:
root_device = block_device_info.get('root_device_name')
if not root_device:
root_device = self._default_root_device
return block_device.volume_in_mapping(root_device,
block_device_info)
def fix_instance_volume_disk_paths(self, instance_name, block_device_info):
# Mapping containing the current disk paths for each volume.
actual_disk_mapping = self.get_disk_path_mapping(block_device_info)
if not actual_disk_mapping:
return
# Mapping containing virtual disk resource path and the physical
# disk path for each volume serial number. The physical path
# associated with this resource may not be the right one,
# as physical disk paths can get swapped after host reboots.
vm_disk_mapping = self._vmutils.get_vm_physical_disk_mapping(
instance_name)
for serial, vm_disk in vm_disk_mapping.items():
actual_disk_path = actual_disk_mapping[serial]
if vm_disk['mounted_disk_path'] != actual_disk_path:
self._vmutils.set_disk_host_res(vm_disk['resource_path'],
actual_disk_path)
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = self._volutils.get_iscsi_initiator()
if not self._initiator:
LOG.warning(_LW('Could not determine iscsi initiator name'),
instance=instance)
return {
'ip': CONF.my_block_storage_ip,
'host': CONF.host,
'initiator': self._initiator,
}
def initialize_volumes_connection(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
connection_info = vol['connection_info']
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.initialize_volume_connection(connection_info)
def get_disk_path_mapping(self, block_device_info):
block_mapping = driver.block_device_info_get_mapping(block_device_info)
disk_path_mapping = {}
for vol in block_mapping:
connection_info = vol['connection_info']
disk_serial = connection_info['serial']
disk_path = self.get_mounted_disk_path_from_volume(connection_info)
disk_path_mapping[disk_serial] = disk_path
return disk_path_mapping
def _group_block_devices_by_type(self, block_device_mapping):
block_devices = collections.defaultdict(list)
for volume in block_device_mapping:
connection_info = volume['connection_info']
volume_type = connection_info.get('driver_volume_type')
block_devices[volume_type].append(volume)
return block_devices
def get_mounted_disk_path_from_volume(self, connection_info):
volume_driver = self._get_volume_driver(
connection_info=connection_info)
return volume_driver.get_mounted_disk_path_from_volume(
connection_info)
class ISCSIVolumeDriver(object):
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_iscsi_initiator_utils()
def login_storage_target(self, connection_info):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
auth_method = data.get('auth_method')
auth_username = data.get('auth_username')
auth_password = data.get('auth_password')
if auth_method and auth_method.upper() != 'CHAP':
LOG.error(_LE("Cannot log in target %(target_iqn)s. Unsupported "
"iSCSI authentication method: %(auth_method)s."),
{'target_iqn': target_iqn,
'auth_method': auth_method})
raise exception.UnsupportedBDMVolumeAuthMethod(
auth_method=auth_method)
# Check if we already logged in
if self._volutils.get_device_number_for_target(target_iqn, target_lun):
LOG.debug("Already logged in on storage target. No need to "
"login. Portal: %(target_portal)s, "
"IQN: %(target_iqn)s, LUN: %(target_lun)s",
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
else:
LOG.debug("Logging in on storage target. Portal: "
"%(target_portal)s, IQN: %(target_iqn)s, "
"LUN: %(target_lun)s",
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
self._volutils.login_storage_target(target_lun, target_iqn,
target_portal, auth_username,
auth_password)
# Wait for the target to be mounted
self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def disconnect_volumes(self, block_device_mapping):
iscsi_targets = collections.defaultdict(int)
for vol in block_device_mapping:
target_iqn = vol['connection_info']['data']['target_iqn']
iscsi_targets[target_iqn] += 1
for target_iqn, disconnected_luns in iscsi_targets.items():
self.logout_storage_target(target_iqn, disconnected_luns)
def logout_storage_target(self, target_iqn, disconnected_luns_count=1):
total_available_luns = self._volutils.get_target_lun_count(
target_iqn)
if total_available_luns == disconnected_luns_count:
LOG.debug("Logging off storage target %s", target_iqn)
self._volutils.logout_storage_target(target_iqn)
else:
LOG.debug("Skipping disconnecting target %s as there "
"are LUNs still being used.", target_iqn)
def get_mounted_disk_path_from_volume(self, connection_info):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
# Getting the mounted disk
return self._get_mounted_disk_from_lun(target_iqn, target_lun,
wait_for_device=True)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""Attach a volume to the SCSI controller or to the IDE controller if
ebs_root is True
"""
target_iqn = None
LOG.debug("Attach_volume: %(connection_info)s to %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
try:
self.login_storage_target(connection_info)
serial = connection_info['serial']
# Getting the mounted disk
mounted_disk_path = self.get_mounted_disk_path_from_volume(
connection_info)
if ebs_root:
# Find the IDE controller for the vm.
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
# Attaching to the first slot
slot = 0
else:
# Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._vmutils.get_free_controller_slot(ctrller_path)
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
slot,
mounted_disk_path,
serial=serial)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to attach volume to instance %s'),
instance_name)
target_iqn = connection_info['data']['target_iqn']
if target_iqn:
self.logout_storage_target(target_iqn)
def detach_volume(self, connection_info, instance_name):
"""Detach a volume to the SCSI controller."""
LOG.debug("Detach_volume: %(connection_info)s "
"from %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
target_iqn = connection_info['data']['target_iqn']
mounted_disk_path = self.get_mounted_disk_path_from_volume(
connection_info)
LOG.debug("Detaching physical disk from instance: %s",
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
self.logout_storage_target(target_iqn)
def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
wait_for_device=False):
# The WMI query in get_device_number_for_target can incorrectly
# return no data when the system is under load. This issue can
# be avoided by adding a retry.
for i in range(CONF.hyperv.mounted_disk_query_retry_count):
device_number = self._volutils.get_device_number_for_target(
target_iqn, target_lun)
if device_number in (None, -1):
attempt = i + 1
LOG.debug('Attempt %d to get device_number '
'from get_device_number_for_target failed. '
'Retrying...', attempt)
time.sleep(CONF.hyperv.mounted_disk_query_retry_interval)
else:
break
if device_number in (None, -1):
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s') % target_iqn)
LOG.debug('Device number: %(device_number)s, '
'target lun: %(target_lun)s',
{'device_number': device_number, 'target_lun': target_lun})
# Finding Mounted disk drive
for i in range(0, CONF.hyperv.volume_attach_retry_count):
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
device_number)
if mounted_disk_path or not wait_for_device:
break
time.sleep(CONF.hyperv.volume_attach_retry_interval)
if not mounted_disk_path:
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s. Please ensure that '
'the host\'s SAN policy is set to '
'"OfflineAll" or "OfflineShared"') %
target_iqn)
return mounted_disk_path
def get_target_from_disk_path(self, physical_drive_path):
return self._volutils.get_target_from_disk_path(physical_drive_path)
def get_target_lun_count(self, target_iqn):
return self._volutils.get_target_lun_count(target_iqn)
def initialize_volume_connection(self, connection_info):
self.login_storage_target(connection_info)
def export_path_synchronized(f):
def wrapper(inst, connection_info, *args, **kwargs):
export_path = inst._get_export_path(connection_info)
@utils.synchronized(export_path)
def inner():
return f(inst, connection_info, *args, **kwargs)
return inner()
return wrapper
class SMBFSVolumeDriver(object):
def __init__(self):
self._smbutils = utilsfactory.get_smbutils()
self._vmutils = utilsfactory.get_vmutils()
self._username_regex = re.compile(r'user(?:name)?=([^, ]+)')
self._password_regex = re.compile(r'pass(?:word)?=([^, ]+)')
def get_mounted_disk_path_from_volume(self, connection_info):
return self._get_disk_path(connection_info)
@export_path_synchronized
def attach_volume(self, connection_info, instance_name, ebs_root=False):
self.ensure_share_mounted(connection_info)
disk_path = self._get_disk_path(connection_info)
try:
if ebs_root:
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
slot = 0
else:
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._vmutils.get_free_controller_slot(ctrller_path)
self._vmutils.attach_drive(instance_name,
disk_path,
ctrller_path,
slot)
except os_win_exc.HyperVException as exn:
LOG.exception(_LE('Attach volume failed to %(instance_name)s: '
'%(exn)s'), {'instance_name': instance_name,
'exn': exn})
raise exception.VolumeAttachFailed(
volume_id=connection_info['data']['volume_id'],
reason=exn.message)
def detach_volume(self, connection_info, instance_name):
LOG.debug("Detaching volume: %(connection_info)s "
"from %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
disk_path = self._get_disk_path(connection_info)
export_path = self._get_export_path(connection_info)
self._vmutils.detach_vm_disk(instance_name, disk_path,
is_physical=False)
self._unmount_smb_share(export_path)
def disconnect_volumes(self, block_device_mapping):
export_paths = set()
for vol in block_device_mapping:
connection_info = vol['connection_info']
export_path = self._get_export_path(connection_info)
export_paths.add(export_path)
for export_path in export_paths:
self._unmount_smb_share(export_path)
def _get_export_path(self, connection_info):
return connection_info['data']['export'].replace('/', '\\')
def _get_disk_path(self, connection_info):
export = self._get_export_path(connection_info)
disk_name = connection_info['data']['name']
disk_path = os.path.join(export, disk_name)
return disk_path
def ensure_share_mounted(self, connection_info):
export_path = self._get_export_path(connection_info)
if not self._smbutils.check_smb_mapping(export_path):
opts_str = connection_info['data'].get('options', '')
username, password = self._parse_credentials(opts_str)
self._smbutils.mount_smb_share(export_path,
username=username,
password=password)
def _parse_credentials(self, opts_str):
match = self._username_regex.findall(opts_str)
username = match[0] if match and match[0] != 'guest' else None
match = self._password_regex.findall(opts_str)
password = match[0] if match else None
return username, password
def initialize_volume_connection(self, connection_info):
self.ensure_share_mounted(connection_info)
def _unmount_smb_share(self, export_path):
# We synchronize share unmount and volume attach operations based on
# the share path in order to avoid the situation when a SMB share is
# unmounted while a volume exported by it is about to be attached to
# an instance.
@utils.synchronized(export_path)
def unmount_synchronized():
self._smbutils.unmount_smb_share(export_path)
unmount_synchronized()
| 42.993363 | 79 | 0.629599 |
793fae59e2beb84a5a89d950e46ba393f7a680c4 | 2,678 | py | Python | tests/extmod/vfs_lfs_error.py | codefreax/micropython | 7679e3be96f7e0996faa89678beaf423d7c25999 | [
"MIT"
] | 181 | 2018-01-31T08:22:21.000Z | 2021-12-25T12:46:18.000Z | tests/extmod/vfs_lfs_error.py | cjaikaeo/micropython | ce40abcf21926b35da6c7255215c5062ac2be521 | [
"MIT"
] | 19 | 2018-01-31T10:07:19.000Z | 2021-03-11T07:32:28.000Z | tests/extmod/vfs_lfs_error.py | cjaikaeo/micropython | ce40abcf21926b35da6c7255215c5062ac2be521 | [
"MIT"
] | 87 | 2018-03-22T00:41:48.000Z | 2022-02-05T10:09:59.000Z | # Test for VfsLittle using a RAM device, testing error handling
try:
import uos
uos.VfsLfs1
uos.VfsLfs2
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class RAMBlockDevice:
ERASE_BLOCK_SIZE = 1024
def __init__(self, blocks):
self.data = bytearray(blocks * self.ERASE_BLOCK_SIZE)
def readblocks(self, block, buf, off):
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
buf[i] = self.data[addr + i]
def writeblocks(self, block, buf, off):
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
self.data[addr + i] = buf[i]
def ioctl(self, op, arg):
if op == 4: # block count
return len(self.data) // self.ERASE_BLOCK_SIZE
if op == 5: # block size
return self.ERASE_BLOCK_SIZE
if op == 6: # erase block
return 0
def test(bdev, vfs_class):
print('test', vfs_class)
# mkfs with too-small block device
try:
vfs_class.mkfs(RAMBlockDevice(1))
except OSError:
print('mkfs OSError')
# mount with invalid filesystem
try:
vfs_class(bdev)
except OSError:
print('mount OSError')
# set up for following tests
vfs_class.mkfs(bdev)
vfs = vfs_class(bdev)
with vfs.open('testfile', 'w') as f:
f.write('test')
vfs.mkdir('testdir')
# ilistdir
try:
vfs.ilistdir('noexist')
except OSError:
print('ilistdir OSError')
# remove
try:
vfs.remove('noexist')
except OSError:
print('remove OSError')
# rmdir
try:
vfs.rmdir('noexist')
except OSError:
print('rmdir OSError')
# rename
try:
vfs.rename('noexist', 'somethingelse')
except OSError:
print('rename OSError')
# mkdir
try:
vfs.mkdir('testdir')
except OSError:
print('mkdir OSError')
# chdir to nonexistent
try:
vfs.chdir('noexist')
except OSError:
print('chdir OSError')
print(vfs.getcwd()) # check still at root
# chdir to file
try:
vfs.chdir('testfile')
except OSError:
print('chdir OSError')
print(vfs.getcwd()) # check still at root
# stat
try:
vfs.stat('noexist')
except OSError:
print('stat OSError')
# error during seek
with vfs.open('testfile', 'r') as f:
f.seek(1 << 30) # SEEK_SET
try:
f.seek(1 << 30, 1) # SEEK_CUR
except OSError:
print('seek OSError')
bdev = RAMBlockDevice(30)
test(bdev, uos.VfsLfs1)
test(bdev, uos.VfsLfs2)
| 22.694915 | 63 | 0.578417 |
793fb0ac845fdbd1b3455d43d1920b71030c768b | 2,100 | py | Python | tests/compat/test_responses_load_cache.py | meggiman/requests-cache | 3d5b77e78013d9c08146b94cbbaa5c9a321b9f75 | [
"BSD-2-Clause"
] | 846 | 2015-01-05T19:19:35.000Z | 2022-03-31T07:11:25.000Z | tests/compat/test_responses_load_cache.py | meggiman/requests-cache | 3d5b77e78013d9c08146b94cbbaa5c9a321b9f75 | [
"BSD-2-Clause"
] | 495 | 2015-01-16T09:28:34.000Z | 2022-03-28T21:59:13.000Z | tests/compat/test_responses_load_cache.py | meggiman/requests-cache | 3d5b77e78013d9c08146b94cbbaa5c9a321b9f75 | [
"BSD-2-Clause"
] | 134 | 2015-01-16T14:01:47.000Z | 2022-03-28T18:18:17.000Z | """Example of using requests-cache with the responses library"""
from contextlib import contextmanager
from os.path import dirname, join
from unittest.mock import patch
import pytest
import requests
from requests.exceptions import ConnectionError
from responses import RequestsMock, Response
from requests_cache import CachedSession
TEST_DB = join(dirname(__file__), 'httpbin_sample.test-db')
TEST_URLS = [
'https://httpbin.org/get',
'https://httpbin.org/html',
'https://httpbin.org/json',
]
PASSTHRU_URL = 'https://httpbin.org/gzip'
UNMOCKED_URL = 'https://httpbin.org/ip'
@contextmanager
def get_responses():
"""Contextmanager that provides a RequestsMock object mocked URLs and responses
based on cache data
"""
with RequestsMock() as mocker:
cache = CachedSession(TEST_DB).cache
for response in cache.values():
mocker.add(
Response(
response.request.method,
response.request.url,
body=response.content,
headers=response.headers,
status=response.status_code,
)
)
mocker.add_passthru(PASSTHRU_URL)
yield mocker
# responses patches HTTPAdapter.send(), so we need to patch one level lower to verify request mocking
@patch.object(
requests.adapters.HTTPAdapter, 'get_connection', side_effect=ValueError('Real request made!')
)
def test_mock_session(mock_http_adapter):
"""Test that the mock_session fixture is working as expected"""
with get_responses():
# An error will be raised if a real request is made
with pytest.raises(ValueError):
requests.get(PASSTHRU_URL)
# All mocked URLs will return a response based on requests-cache data
for url in TEST_URLS:
response = requests.get(url)
assert getattr(response, 'from_cache', False) is False
# responses will raise an error for an unmocked URL, as usual
with pytest.raises(ConnectionError):
requests.get(UNMOCKED_URL)
| 33.333333 | 101 | 0.670476 |
793fb0b4f9631dbc4e2768b0b908a411a998978a | 1,350 | py | Python | src/dependency/parser/javascript.py | allow-cookies/demon | 0a62fdbdfbcb9ab5224be747ddf45c968207b51d | [
"MIT"
] | null | null | null | src/dependency/parser/javascript.py | allow-cookies/demon | 0a62fdbdfbcb9ab5224be747ddf45c968207b51d | [
"MIT"
] | 1 | 2021-03-31T08:12:05.000Z | 2021-03-31T08:12:05.000Z | src/dependency/parser/javascript.py | allow-cookies/demon | 0a62fdbdfbcb9ab5224be747ddf45c968207b51d | [
"MIT"
] | null | null | null | import json
import re
from typing import Generator
from dependency.dto import DependencyDTO
from dependency.parser.base import BaseParser
class PackageLockJsonParser(BaseParser):
_KEY_DEPENDENCIES = "dependencies"
_KEY_VERSION = "version"
_AT = "@"
_EMPTY = ""
@classmethod
def parse(
cls, source_file: str, contents: bytes
) -> Generator[DependencyDTO, None, None]:
json_contents = json.loads(contents)
dependencies: dict[str, dict] = json_contents.get(cls._KEY_DEPENDENCIES, {})
return (
DependencyDTO(
name=name.replace(cls._AT, cls._EMPTY),
version=dependency[cls._KEY_VERSION],
source_file=source_file,
)
for name, dependency in dependencies.items()
)
class YarnLockParser(BaseParser):
_REGEX = re.compile(r'^([\w\d\-]+)@.*\n\s+version "([\d.]+)"$', re.MULTILINE)
_ENCODING = "utf-8"
@classmethod
def parse(
cls, source_file: str, contents: bytes
) -> Generator[DependencyDTO, None, None]:
return (
DependencyDTO(
name=dependency[1],
version=dependency[2],
source_file=source_file,
)
for dependency in re.finditer(cls._REGEX, str(contents, cls._ENCODING))
)
| 28.723404 | 84 | 0.6 |
793fb108fde3f384f5eecf9d23dc20baaf268169 | 1,997 | py | Python | scripts/add_chats.py | zeshuaro/off-campus | 5ca0c9c5ea1ef6fd9225414712262f00a41693da | [
"MIT"
] | null | null | null | scripts/add_chats.py | zeshuaro/off-campus | 5ca0c9c5ea1ef6fd9225414712262f00a41693da | [
"MIT"
] | null | null | null | scripts/add_chats.py | zeshuaro/off-campus | 5ca0c9c5ea1ef6fd9225414712262f00a41693da | [
"MIT"
] | null | null | null | import argparse
import datetime as dt
import firebase_admin
from firebase_admin import credentials, firestore
import scrapers
cred = credentials.Certificate("keyfile.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
def main(course_subject, limit, **kwargs):
unis = {
("University of New South Wales", "T3"): lambda: scrapers.get_unsw_courses(
course_subject, limit
),
("University of Sydney", "S2"): lambda: scrapers.get_usyd_courses(
course_subject, limit
),
}
for key in unis:
uni, semester = key
print(f"Adding chats for {uni}")
courses = unis[key]()
add_course_chats(uni, courses, semester)
def add_course_chats(university, courses, semester):
print("Creating course chats")
total = len(courses)
datetime = dt.datetime.utcnow()
for i, (course_code, course_name) in enumerate(courses):
if (i + 1) % 50 == 0:
print(f"Created {i + 1}/{total} chats")
doc_ref = db.collection("chats").document()
doc_ref.set(
{
"title": f"{course_code} {course_name} {datetime.year} {semester}",
"type": "course",
"university": university,
"lastMessage": "Chat created",
"lastMessageUser": "OffCampus",
"updatedAt": datetime,
"numMembers": 0,
"year": datetime.year,
"semester": semester,
}
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-c",
"--course_subject",
help="Limit to only scrape for a specific course subject",
)
parser.add_argument(
"-l",
"--limit",
type=int,
help="Limit the number of courses to be scraped for each university",
)
parser.set_defaults(method=main)
args = parser.parse_args()
args.method(**vars(args))
| 27.356164 | 83 | 0.580871 |
793fb16b8224bfd56c2829ad89dac10a3c1f22ac | 1,590 | py | Python | pgserv/users/views.py | PeanutPencil/Pavlov-Gacha-Django | 378557e76ec3f9485c20eb4db9fcaa950ad12b85 | [
"MIT"
] | null | null | null | pgserv/users/views.py | PeanutPencil/Pavlov-Gacha-Django | 378557e76ec3f9485c20eb4db9fcaa950ad12b85 | [
"MIT"
] | null | null | null | pgserv/users/views.py | PeanutPencil/Pavlov-Gacha-Django | 378557e76ec3f9485c20eb4db9fcaa950ad12b85 | [
"MIT"
] | 1 | 2020-10-22T01:22:00.000Z | 2020-10-22T01:22:00.000Z |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from users.models import User
from users.serializers import UserSerializer
from django.http import Http404
class UserList(APIView):
def get(self, request):
users = User.objects.all()
serializer = UserSerializer(users, many=True)
return Response(serializer.data)
def post(self, request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserDetail(APIView):
"""
Retrieve, update or delete a user instance.
"""
def get_object(self, pk):
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
def get(self, request, pk):
user = self.get_object(pk)
serializer = UserSerializer(user)
return Response(serializer.data)
def put(self, request, pk):
user = self.get_object(pk)
serializer = UserSerializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
card = self.get_object(pk)
card.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| 30.576923 | 78 | 0.668553 |
793fb2125dc99bf686413401095d19f2ac82bedb | 6,222 | py | Python | ResNetFCN_App/resnetfcn_app.py | EnriqueNueve/ResNetFCN | c01b3675ec77089ba9e3a2a8a0fa4b7d1baf95fc | [
"MIT"
] | null | null | null | ResNetFCN_App/resnetfcn_app.py | EnriqueNueve/ResNetFCN | c01b3675ec77089ba9e3a2a8a0fa4b7d1baf95fc | [
"MIT"
] | null | null | null | ResNetFCN_App/resnetfcn_app.py | EnriqueNueve/ResNetFCN | c01b3675ec77089ba9e3a2a8a0fa4b7d1baf95fc | [
"MIT"
] | null | null | null | # python3 resnetfcn_app.py test_pic.jpg
##########################
import os
import numpy as np
import tensorflow as tf
from PIL import Image , ImageOps
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
##########################
# Passed img path
#img_path = "test_pic.jpg"
img_path = os.environ["IMG_PATH"]
print("Passed file path: {}".format(img_path))
##########################
class ImageMarker():
"""
Class to perform image segmantation on raw images.
Parameters
----------
model_path : string
Tells the class where the tf.lite model is stored.
width : int
Tells the class the width of the input the tf.lite
model was trained on (default: 320).
height: int
Tells the class the height of the input the tf.lite
model was trained on (default: 224).
"""
def __init__(self, model_path: str, width: int = 320, height: int = 224):
# Set model path
if not isinstance(model_path, str):
raise TypeError("model_path must be a string")
else:
self.set_model(model_path)
# Set width
if not isinstance(width, int):
raise TypeError("width must be a int")
else:
self.width = width
# Set height
if not isinstance(height, int):
raise TypeError("height must be a int")
else:
self.height = height
# VOC 2012 dataset fields
self.N_CLASSES = 21
self.VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
self.VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def set_model(self, model_path: str):
"""Set tf.lite interpreter class."""
# Set tf.lite model to interpreter
self.interpreter = tf.lite.Interpreter(model_path=model_path)
self.interpreter.allocate_tensors()
# Get input and output tensors.
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
def shade_img(self, yh_mask: np.array):
# Get pred classes
yh_unique = np.unique(yh_mask)
classes_found = []
for c in yh_unique.tolist():
classes_found.append(self.VOC_CLASSES[c])
# Get colors
colors = []
colors = [self.VOC_COLORMAP[i] for i in yh_unique]
# Make color mask
color_mask = np.reshape(yh_mask,(self.width,self.height,1))
color_mask = np.concatenate((color_mask, color_mask, color_mask), axis=-1)
for i, c in enumerate(yh_unique.tolist()):
# Change r-val
color_mask[:,:,0] = np.where(color_mask[:,:,0] == c, self.VOC_COLORMAP[c][0], color_mask[:,:,0])
# Change g-val
color_mask[:,:,1] = np.where(color_mask[:,:,1] == c, self.VOC_COLORMAP[c][1], color_mask[:,:,1])
# Change b-val
color_mask[:,:,2] = np.where(color_mask[:,:,2] == c, self.VOC_COLORMAP[c][2], color_mask[:,:,2])
return color_mask, classes_found, colors
def __call__(self, data: np.array, color_img: bool = False) -> np.array:
""" Infer mask for passed image data."""
# Set image data
sample_img = data/255.0
sample_img = sample_img.reshape(1,self.width,self.height,3)
sample_img_tensor = tf.convert_to_tensor(sample_img, dtype='float32')
self.interpreter.set_tensor(self.input_details[0]['index'], sample_img_tensor)
self.interpreter.invoke()
# Get model output for set data
yh = self.interpreter.get_tensor(self.output_details[0]['index'])
yh = yh*255.0
yh = yh.astype('int')
yh_mask = np.argmax(yh[0,:,:,:],axis=-1)
if color_img == True:
color_mask, pred_classes, colors = self.shade_img(yh_mask)
return yh_mask, color_mask, pred_classes, colors
else:
return mask, None, None, None
##########################
model_path = "resnetfcn_lite.tflite"
model = ImageMarker(model_path)
##########################
test_pic = Image.open(img_path)
test_pic = test_pic.resize((320,224))
test_pic = np.array(test_pic)
test_pic = np.transpose(test_pic, (1,0,2))
yh_mask, color_mask, classes_found, colors = model(test_pic, color_img=True)
##########################
def rgb_to_hex(c):
hex_col = '#%02x%02x%02x' % (c[0],c[1],c[2])
return hex_col
def plotImgMask(pic_path, color_mask, classes_found, colors):
test_pic = Image.open(pic_path)
test_pic = test_pic.resize((320,224))
test_pic = np.array(test_pic)
fig=plt.figure(figsize=(16, 16))
columns = 2
rows = 1
for i in range(1, columns*rows +1):
fig.add_subplot(rows, columns, i)
if i == 2:
table_columns = ["Predicted Classes", "Color"]
table_rows = classes_found
cell_text = [[clas," "] for clas in classes_found]
# Add a table at the bottom of the axes
table_colors = []
for k, c in enumerate(colors):
table_colors.append(["w", rgb_to_hex(c)])
table_plot = plt.table(cellText=cell_text,cellColours=table_colors,
colLabels=table_columns,loc='center',fontsize='medium')
table_plot.set_fontsize(34)
table_plot.scale(1, 4)
plt.axis('tight')
plt.axis('off')
elif i == 1:
plt.imshow(test_pic)
plt.imshow(np.transpose(color_mask, (1,0,2)), alpha=0.3)
plt.title('Image with Mask')
fig.savefig('output/pred_img_mask.jpg')
plotImgMask(img_path, color_mask, classes_found, colors)
| 35.554286 | 108 | 0.575378 |
793fb27d31e68f2109a5d1bdc4e0750ea7aeff09 | 1,475 | py | Python | utils/analyzer.py | smiyawaki0820/aio2-tfidf-baseline | db27b12df8555108973206383f9a4c5a11d946ed | [
"MIT"
] | 4 | 2021-12-06T02:50:51.000Z | 2022-02-19T07:23:47.000Z | utils/analyzer.py | smiyawaki0820/aio2-tfidf-baseline | db27b12df8555108973206383f9a4c5a11d946ed | [
"MIT"
] | null | null | null | utils/analyzer.py | smiyawaki0820/aio2-tfidf-baseline | db27b12df8555108973206383f9a4c5a11d946ed | [
"MIT"
] | 1 | 2022-02-19T07:24:08.000Z | 2022-02-19T07:24:08.000Z | import unicodedata
import ipadic
from fugashi import GenericTagger
from typing import Any, Dict, List, Optional
class JapaneseTextAnalyzer:
def __init__(
self,
do_unicode_normalize: bool = True,
pos_list: Optional[List[str]] = None,
stop_words: Optional[List[str]] = None,
) -> None:
if do_unicode_normalize and stop_words is not None:
stop_words = [unicodedata.normalize("NFKC", word) for word in stop_words]
self._do_unicode_normalize = do_unicode_normalize
self._pos_list = pos_list
self._stop_words = stop_words
self.tagger = GenericTagger(ipadic.MECAB_ARGS)
def __call__(self, text: str) -> str:
if self._do_unicode_normalize:
text = unicodedata.normalize("NFKC", text)
tokens = []
# tokenize the text
for token in self.tagger(text):
if self._pos_list is not None and token.feature[0] not in self._pos_list:
continue
if self._stop_words is not None and token.surface in self._stop_words:
continue
tokens.append(token.surface)
return tokens
def __getstate__(self)-> Dict[str, Any]:
state = self.__dict__.copy()
del state["tagger"] # unpicklable object
return state
def __setstate__(self, state: Dict[str, Any]) -> None:
self.__dict__.update(state)
self.tagger = GenericTagger(ipadic.MECAB_ARGS)
| 30.102041 | 85 | 0.638644 |
793fb3454038469dce1555e6da52c515b73ba500 | 894 | py | Python | heat/db/sqlalchemy/migrate_repo/versions/034_raw_template_files.py | jasondunsmore/heat | 6bd7352dc4838b8ef782f2345a4dfdf57ba3e356 | [
"Apache-2.0"
] | null | null | null | heat/db/sqlalchemy/migrate_repo/versions/034_raw_template_files.py | jasondunsmore/heat | 6bd7352dc4838b8ef782f2345a4dfdf57ba3e356 | [
"Apache-2.0"
] | null | null | null | heat/db/sqlalchemy/migrate_repo/versions/034_raw_template_files.py | jasondunsmore/heat | 6bd7352dc4838b8ef782f2345a4dfdf57ba3e356 | [
"Apache-2.0"
] | 1 | 2021-03-21T11:37:03.000Z | 2021-03-21T11:37:03.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from heat.db.sqlalchemy import types
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
files = sqlalchemy.Column('files', types.Json, default={})
files.create(raw_template)
| 34.384615 | 78 | 0.728188 |
793fb374196221dca6a8821330584c0f9bca579e | 6,317 | py | Python | setup.py | lijiayong/cwltool | b9fca03db868fb3ef433b8e4c601bc656623ad8f | [
"Apache-2.0"
] | null | null | null | setup.py | lijiayong/cwltool | b9fca03db868fb3ef433b8e4c601bc656623ad8f | [
"Apache-2.0"
] | 15 | 2021-08-09T15:24:53.000Z | 2022-03-30T20:17:42.000Z | setup.py | lijiayong/cwltool | b9fca03db868fb3ef433b8e4c601bc656623ad8f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Setup for the reference implementation of the CWL standards."""
import os
import sys
import warnings
import setuptools.command.egg_info as egg_info_cmd
from setuptools import setup
if os.name == "nt":
warnings.warn(
"The CWL reference runner (cwltool) no longer supports running "
"CWL workflows natively on MS Windows as its previous MS Windows "
"support was incomplete and untested. Instead, please see "
"https://pypi.org/project/cwltool/#ms-windows-users "
"for instructions on running cwltool via "
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
"for your CWL document processing needs."
)
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, "README.rst")
try:
import gittaggers
Tagger = gittaggers.EggInfoFromGit
except ImportError:
Tagger = egg_info_cmd.egg_info
NEEDS_PYTEST = {"pytest", "test", "ptr"}.intersection(sys.argv)
PYTEST_RUNNER = ["pytest-runner", "pytest-cov"] if NEEDS_PYTEST else []
USE_MYPYC = False
# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
if len(sys.argv) > 1 and sys.argv[1] == "--use-mypyc":
sys.argv.pop(1)
USE_MYPYC = True
if os.getenv("CWLTOOL_USE_MYPYC", None) == "1":
USE_MYPYC = True
if USE_MYPYC:
mypyc_targets = [
"cwltool/argparser.py",
"cwltool/builder.py",
"cwltool/checker.py",
"cwltool/command_line_tool.py",
# "cwltool/context.py", # monkeypatching
"cwltool/cwlrdf.py",
"cwltool/docker_id.py",
"cwltool/docker.py",
"cwltool/udocker.py",
"cwltool/errors.py",
"cwltool/executors.py",
"cwltool/expression.py",
"cwltool/factory.py",
"cwltool/flatten.py",
# "cwltool/__init__.py",
"cwltool/job.py",
"cwltool/load_tool.py",
# "cwltool/loghandler.py", # so we can monkeypatch the logger from tests
# "cwltool/__main__.py",
"cwltool/main.py",
"cwltool/mutation.py",
"cwltool/pack.py",
# "cwltool/pathmapper.py", # class PathMapper needs to be subclassable
"cwltool/process.py",
"cwltool/procgenerator.py",
# "cwltool/provenance.py", # WritableBag is having issues
"cwltool/resolver.py",
# "cwltool/sandboxjs.py", # probably not speed critical, tests need to mock components
"cwltool/secrets.py",
"cwltool/singularity.py",
"cwltool/software_requirements.py",
"cwltool/stdfsaccess.py",
"cwltool/subgraph.py",
"cwltool/update.py",
"cwltool/utils.py",
"cwltool/validate_js.py",
"cwltool/workflow.py",
]
from mypyc.build import mypycify
opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
ext_modules = mypycify(mypyc_targets, opt_level=opt_level)
else:
ext_modules = []
setup(
name="cwltool",
version="3.1",
description="Common workflow language reference implementation",
long_description=open(README).read(),
long_description_content_type="text/x-rst",
author="Common workflow language working group",
author_email="[email protected]",
url="https://github.com/common-workflow-language/cwltool",
download_url="https://github.com/common-workflow-language/cwltool",
ext_modules=ext_modules,
# platforms='', # empty as is conveyed by the classifier below
# license='', # empty as is conveyed by the classifier below
packages=["cwltool", "cwltool.tests"],
package_dir={"cwltool.tests": "tests"},
include_package_data=True,
install_requires=[
"setuptools",
"requests >= 2.6.1", # >= 2.6.1 to workaround
# https://github.com/ionrock/cachecontrol/issues/137
"ruamel.yaml >= 0.15, < 0.17.12",
"rdflib >= 4.2.2, < 6.1.0",
"shellescape >= 3.4.1, < 3.9",
# 7.1.20210518142926 or later required due to
# https://github.com/common-workflow-language/schema_salad/issues/385
"schema-salad >= 8.1.20210627200047, < 9",
"mypy-extensions",
"psutil >= 5.6.6",
"prov == 1.5.1",
"bagit >= 1.6.4",
"typing-extensions",
"coloredlogs",
"pydot >= 1.4.1",
"argcomplete",
],
extras_require={
"deps": ["galaxy-tool-util >= 21.1.0"],
"docs": [
"sphinx >= 2.2",
"sphinx-rtd-theme",
"sphinx-autoapi",
"sphinx-autodoc-typehints",
"typed_ast;python_version<'3.8'",
],
},
python_requires=">=3.6, <4",
setup_requires=PYTEST_RUNNER,
test_suite="tests",
tests_require=[
"pytest >= 6.2, < 6.3",
"mock >= 2.0.0",
"pytest-mock >= 1.10.0",
"arcp >= 0.2.0",
"rdflib-jsonld >= 0.4.0",
],
entry_points={"console_scripts": ["cwltool=cwltool.main:run"]},
zip_safe=True,
cmdclass={"egg_info": Tagger},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Healthcare Industry",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Astronomy",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: System :: Distributed Computing",
"Topic :: Utilities",
],
)
| 36.097143 | 95 | 0.613582 |
793fb4a6240b031a0e20818b7239425e787466f5 | 888 | py | Python | conanfile.py | uilianries/sequences | 94d78c5b50fda4df8daa785a604601334d7f9610 | [
"MIT"
] | null | null | null | conanfile.py | uilianries/sequences | 94d78c5b50fda4df8daa785a604601334d7f9610 | [
"MIT"
] | null | null | null | conanfile.py | uilianries/sequences | 94d78c5b50fda4df8daa785a604601334d7f9610 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake
class SequencesConan(ConanFile):
name = "sequences"
description = "C++11 header-only library that provides efficient algorithms to generate and work on variadic templates and std::integer_sequence"
homepage = "https://github.com/taocpp/sequences"
url = homepage
license = "MIT"
author = "[email protected]"
settings = "compiler", "arch"
exports = "LICENSE"
exports_sources = "include/*", "CMakeLists.txt"
no_copy_source = True
def build(self):
pass
def package(self):
cmake = CMake(self)
cmake.definitions["TAOCPP_SEQUENCES_BUILD_TESTS"] = "OFF"
cmake.definitions["TAOCPP_SEQUENCES_INSTALL_DOC_DIR"] = "licenses"
cmake.configure()
cmake.install()
def package_id(self):
self.info.header_only()
| 28.645161 | 149 | 0.664414 |
793fb4b5d78ba58dea866154c805eb02f9f7a5e5 | 191 | py | Python | iterators_and_generators/possible_permutations.py | lowrybg/PythonOOP | 1ef5023ca76645d5d96b8c4fb9a54d0f431a1947 | [
"MIT"
] | null | null | null | iterators_and_generators/possible_permutations.py | lowrybg/PythonOOP | 1ef5023ca76645d5d96b8c4fb9a54d0f431a1947 | [
"MIT"
] | null | null | null | iterators_and_generators/possible_permutations.py | lowrybg/PythonOOP | 1ef5023ca76645d5d96b8c4fb9a54d0f431a1947 | [
"MIT"
] | null | null | null | from itertools import permutations
def possible_permutations(my_list):
for p in list(permutations(my_list)):
yield list(p)
[print(n) for n in possible_permutations([1, 2, 3])] | 23.875 | 52 | 0.717277 |
793fb5c60af23d1b5145028e58eb32661df50cb0 | 3,598 | py | Python | nflgame/sched.py | RyanJDick/nflgame | 0261ff8b73fcb05c521483084f8ef051eb6eff87 | [
"Unlicense"
] | null | null | null | nflgame/sched.py | RyanJDick/nflgame | 0261ff8b73fcb05c521483084f8ef051eb6eff87 | [
"Unlicense"
] | 1 | 2020-02-16T14:27:26.000Z | 2020-02-16T14:27:26.000Z | nflgame/sched.py | RyanJDick/nflgame | 0261ff8b73fcb05c521483084f8ef051eb6eff87 | [
"Unlicense"
] | null | null | null | from collections import OrderedDict
import datetime
import json
import os.path
__pdoc__ = {}
_sched_json_file = os.path.join(os.path.dirname(__file__), 'schedule.json')
def calc_desired_weeks(year, phase):
desired_weeks = []
for week in range(5):
desired_weeks.append(tuple([year, 'PRE', week]))
for week in range(1,18):
desired_weeks.append(tuple([year,'REG',week]))
if phase == 'POST':
for week in range(1,5):
desired_weeks.append(tuple([year, 'POST', week]))
return desired_weeks
def check_missing_weeks(sched, year, phase):
missing_weeks = calc_desired_weeks(year, phase)
stored_weeks = set()
for info in sched.values():
if info['year'] != year:
continue
stored_week = (year, info['season_type'], info['week'])
stored_weeks.add(stored_week)
for stored_week in stored_weeks:
missing_weeks.remove(stored_week)
return missing_weeks
def order_weeks_to_update(missing_weeks, current_week):
if current_week in missing_weeks:
missing_weeks.remove(current_week)
missing_weeks.insert(0, current_week)
return missing_weeks
def _create_schedule(jsonf=None):
"""
Returns an ordered dict of schedule data from the schedule.json
file, where games are ordered by the date and time that they
started. Keys in the dictionary are GSIS ids and values are
dictionaries with the following keys: week, month, year, home,
away, wday, gamekey, season_type, time.
"""
day = 60 * 60 * 24
if jsonf is None:
jsonf = _sched_json_file
try:
data = json.loads(open(jsonf).read())
except IOError:
return OrderedDict(), datetime.datetime.utcnow()
sched = OrderedDict()
for gsis_id, info in data.get('games', []):
sched[gsis_id] = info
last_updated = datetime.datetime.utcfromtimestamp(data.get('time', 0))
if (datetime.datetime.utcnow() - last_updated).total_seconds() >= day:
# Only try to update if we can write to the schedule file.
if os.access(jsonf, os.W_OK):
import nflgame.live
import nflgame.update_sched
year, week = nflgame.live.current_year_and_week()
phase = nflgame.live._cur_season_phase
current_week = (year, phase, week)
missing_weeks = check_missing_weeks(sched, year, phase)
weeks_to_update = order_weeks_to_update(missing_weeks, current_week)
for week_to_update in weeks_to_update:
print(('Updating {}').format(week_to_update))
year, phase, week = week_to_update
week_was_updated = nflgame.update_sched.update_week(sched, year, phase, week)
if not week_was_updated:
print(("Week {}{} of {} was either empty, or it couldn't be fetched from NFL.com. Aborting.")\
.format(phase , week, year))
break
nflgame.update_sched.write_schedule(jsonf, sched)
last_updated = datetime.datetime.utcnow()
return sched, last_updated
games, last_updated = _create_schedule()
__pdoc__['nflgame.sched.games'] = """
An ordered dict of schedule data, where games are ordered by the date
and time that they started. Keys in the dictionary are GSIS ids and
values are dictionaries with the following keys: week, month, year,
home, away, wday, gamekey, season_type, time.
"""
__pdoc__['nflgame.sched.last_updated'] = """
A `datetime.datetime` object representing the last time the schedule
was updated.
"""
| 32.414414 | 114 | 0.660645 |
793fb62224354c92ed538b54120e7514d3ef01d0 | 29,326 | py | Python | vunit/test/unit/test_verilog_preprocessor.py | barri/vunit | 5d825c91d4ee8d1efa3190b06bb81852a2a6085d | [
"Artistic-2.0"
] | 1 | 2020-08-30T08:30:02.000Z | 2020-08-30T08:30:02.000Z | vunit/test/unit/test_verilog_preprocessor.py | smgl9/vunit | 9933d9a1ae600cc241894244361282dd7f7227d7 | [
"Artistic-2.0"
] | null | null | null | vunit/test/unit/test_verilog_preprocessor.py | smgl9/vunit | 9933d9a1ae600cc241894244361282dd7f7227d7 | [
"Artistic-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, Lars Asplund [email protected]
# pylint: disable=too-many-public-methods
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
"""
Test of the Verilog preprocessor
"""
from os.path import join, dirname, exists
import os
from unittest import TestCase
import shutil
from vunit.ostools import renew_path, write_file
from vunit.parsing.verilog.preprocess import VerilogPreprocessor, Macro
from vunit.parsing.verilog.tokenizer import VerilogTokenizer
from vunit.parsing.tokenizer import Token
from vunit.test.mock_2or3 import mock
class TestVerilogPreprocessor(TestCase):
"""
Test of the Verilog preprocessor
"""
def setUp(self):
self.output_path = join(dirname(__file__), "test_verilog_preprocessor_out")
renew_path(self.output_path)
self.cwd = os.getcwd()
os.chdir(self.output_path)
def tearDown(self):
os.chdir(self.cwd)
shutil.rmtree(self.output_path)
def test_non_preprocess_tokens_are_kept(self):
result = self.preprocess('"hello"ident/*comment*///comment')
result.assert_has_tokens('"hello"ident/*comment*///comment')
result.assert_no_defines()
def test_preprocess_define_without_value(self):
result = self.preprocess("`define foo")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo")})
result = self.preprocess("`define foo\nkeep")
result.assert_has_tokens("keep")
result.assert_has_defines({"foo": Macro("foo")})
def test_preprocess_define_with_value(self):
result = self.preprocess("`define foo bar \"abc\"")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("bar \"abc\""))})
def test_preprocess_define_with_lpar_value(self):
result = self.preprocess("`define foo (bar)")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("(bar)"))})
def test_preprocess_define_with_one_arg(self):
result = self.preprocess("`define foo(arg)arg 123")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("arg 123"), args=("arg",))})
def test_preprocess_define_with_one_arg_ignores_initial_space(self):
result = self.preprocess("`define foo(arg) arg 123")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("arg 123"), args=("arg",))})
def test_preprocess_define_with_multiple_args(self):
result = self.preprocess("`define foo( arg1, arg2)arg1 arg2")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo", tokenize("arg1 arg2"), args=("arg1", "arg2"))})
def test_preprocess_define_with_default_values(self):
result = self.preprocess("`define foo(arg1, arg2=default)arg1 arg2")
result.assert_has_tokens("")
result.assert_has_defines({"foo": Macro("foo",
tokenize("arg1 arg2"),
args=("arg1", "arg2"),
defaults={"arg2": tokenize("default")})})
def test_preprocess_substitute_define_without_args(self):
result = self.preprocess("""\
`define foo bar \"abc\"
`foo""")
result.assert_has_tokens("bar \"abc\"")
def test_preprocess_substitute_define_with_one_arg(self):
result = self.preprocess("""\
`define foo(arg)arg 123
`foo(hello hey)""")
result.assert_has_tokens("hello hey 123")
def test_preprocess_substitute_define_with_space_before_arg(self):
result = self.preprocess("""\
`define foo(arg) arg
`foo (hello)""")
result.assert_has_tokens("hello")
def test_preprocess_substitute_define_no_args(self):
result = self.preprocess("""\
`define foo bar
`foo (hello)""")
result.assert_has_tokens("bar (hello)")
def test_preprocess_substitute_define_with_multile_args(self):
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1 2, hello)""")
result.assert_has_tokens("1 2, hello")
def test_preprocess_substitute_define_with_default_values(self):
result = self.preprocess("""\
`define foo(arg1, arg2=default)arg1 arg2
`foo(1)""")
result.assert_has_tokens("1 default")
def test_preprocess_include_directive(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('`include "include.svh"',
include_paths=[self.output_path])
result.assert_has_tokens("hello hey")
result.assert_included_files([join(self.output_path, "include.svh")])
def test_detects_circular_includes(self):
self.write_file("include1.svh", '`include "include2.svh"')
self.write_file("include2.svh", '`include "include1.svh"')
result = self.preprocess('`include "include1.svh"',
include_paths=[self.output_path])
result.logger.error.assert_called_once_with(
'Circular `include of include2.svh detected\n%s',
'from fn.v line 1:\n'
'`include "include1.svh"\n'
'~~~~~~~~\n'
'from include1.svh line 1:\n'
'`include "include2.svh"\n'
'~~~~~~~~\n'
'from include2.svh line 1:\n'
'`include "include1.svh"\n'
'~~~~~~~~\n'
'at include1.svh line 1:\n'
'`include "include2.svh"\n'
' ~~~~~~~~~~~~~~')
def test_detects_circular_include_of_self(self):
self.write_file("include.svh", '`include "include.svh"')
result = self.preprocess('`include "include.svh"',
include_paths=[self.output_path])
result.logger.error.assert_called_once_with(
'Circular `include of include.svh detected\n%s',
'from fn.v line 1:\n'
'`include "include.svh"\n'
'~~~~~~~~\n'
'from include.svh line 1:\n'
'`include "include.svh"\n'
'~~~~~~~~\n'
'at include.svh line 1:\n'
'`include "include.svh"\n'
' ~~~~~~~~~~~~~')
def test_does_not_detect_non_circular_includes(self):
self.write_file("include3.svh", 'keep')
self.write_file("include1.svh", '`include "include3.svh"\n`include "include2.svh"')
self.write_file("include2.svh", '`include "include3.svh"')
result = self.preprocess('`include "include1.svh"\n`include "include2.svh"',
include_paths=[self.output_path])
result.assert_no_log()
def test_detects_circular_macro_expansion_of_self(self):
result = self.preprocess('''
`define foo `foo
`foo
''')
result.logger.error.assert_called_once_with(
'Circular macro expansion of foo detected\n%s',
'from fn.v line 3:\n'
'`foo\n'
'~~~~\n'
'from fn.v line 2:\n'
'`define foo `foo\n'
' ~~~~\n'
'at fn.v line 2:\n'
'`define foo `foo\n'
' ~~~~')
def test_detects_circular_macro_expansion(self):
result = self.preprocess('''
`define foo `bar
`define bar `foo
`foo
''')
result.logger.error.assert_called_once_with(
'Circular macro expansion of bar detected\n%s',
'from fn.v line 4:\n'
'`foo\n'
'~~~~\n'
'from fn.v line 2:\n'
'`define foo `bar\n'
' ~~~~\n'
'from fn.v line 3:\n'
'`define bar `foo\n'
' ~~~~\n'
'at fn.v line 2:\n'
'`define foo `bar\n'
' ~~~~')
def test_does_not_detect_non_circular_macro_expansion(self):
result = self.preprocess('''
`define foo bar
`foo
`foo
''')
result.assert_no_log()
def test_preprocess_include_directive_from_define(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('''\
`define inc "include.svh"
`include `inc''',
include_paths=[self.output_path])
result.assert_has_tokens('hello hey')
result.assert_included_files([join(self.output_path, "include.svh")])
def test_preprocess_include_directive_from_define_with_args(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('''\
`define inc(a) a
`include `inc("include.svh")''', include_paths=[self.output_path])
result.assert_has_tokens('hello hey')
result.assert_included_files([join(self.output_path, "include.svh")])
def test_preprocess_macros_are_recursively_expanded(self):
result = self.preprocess('''\
`define foo `bar
`define bar xyz
`foo
`define bar abc
`foo
''',
include_paths=[self.output_path])
result.assert_has_tokens('xyz\nabc\n')
def test_ifndef_taken(self):
result = self.preprocess('''\
`ifndef foo
taken
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_taken(self):
result = self.preprocess('''\
`define foo
`ifdef foo
taken
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_else_taken(self):
result = self.preprocess('''\
`define foo
`ifdef foo
taken
`else
else
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_not_taken(self):
result = self.preprocess('''\
`ifdef foo
taken
`endif
keep''')
result.assert_has_tokens("keep")
def test_ifdef_else_not_taken(self):
result = self.preprocess('''\
`ifdef foo
taken
`else
else
`endif
keep''')
result.assert_has_tokens("else\nkeep")
def test_ifdef_elsif_taken(self):
result = self.preprocess('''\
`define foo
`ifdef foo
taken
`elsif bar
elsif_taken
`else
else_taken
`endif
keep''')
result.assert_has_tokens("taken\nkeep")
def test_ifdef_elsif_elseif_taken(self):
result = self.preprocess('''\
`define bar
`ifdef foo
taken
`elsif bar
elsif_taken
`else
else_taken
`endif
keep''')
result.assert_has_tokens("elsif_taken\nkeep")
def test_ifdef_elsif_else_taken(self):
result = self.preprocess('''\
`ifdef foo
taken
`elsif bar
elsif_taken
`else
else_taken
`endif
keep''')
result.assert_has_tokens("else_taken\nkeep")
def test_nested_ifdef(self):
result = self.preprocess('''\
`define foo
`ifdef foo
outer_before
`ifdef bar
inner_ifndef
`else
inner_else
`endif
`ifdef bar
inner_ifndef
`elsif foo
inner_elsif
`endif
outer_after
`endif
keep''')
result.assert_has_tokens("outer_before\n"
"inner_else\n"
"inner_elsif\n"
"outer_after\n"
"keep")
def test_preprocess_broken_define(self):
result = self.preprocess("`define")
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"Verilog `define without argument\n%s",
"at fn.v line 1:\n"
"`define\n"
"~~~~~~~")
def test_preprocess_broken_define_first_argument(self):
result = self.preprocess('`define "foo"')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"Verilog `define invalid name\n%s",
"at fn.v line 1:\n"
'`define "foo"\n'
" ~~~~~")
def test_preprocess_broken_define_argument_list(self):
result = self.preprocess('`define foo(')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(\n'
" ~")
result = self.preprocess('`define foo(a')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a\n'
" ~")
result = self.preprocess('`define foo(a=')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a=\n'
" ~")
result = self.preprocess('`define foo(a=b')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a=b\n'
" ~")
result = self.preprocess('`define foo(a=)')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo(a=)\n'
" ~")
result = self.preprocess('`define foo("a"')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo("a"\n'
" ~")
result = self.preprocess('`define foo("a"=')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define argument list\n%s",
"at fn.v line 1:\n"
'`define foo("a"=\n'
" ~")
def test_preprocess_substitute_define_broken_args(self):
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1 2)""")
result.assert_has_tokens("")
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo""")
result.assert_has_tokens("")
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(""")
result.assert_has_tokens("")
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1""")
result.assert_has_tokens("")
def test_preprocess_substitute_define_missing_argument(self):
result = self.preprocess("""\
`define foo(arg1, arg2)arg1,arg2
`foo(1)""")
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"Missing value for argument arg2\n%s",
"at fn.v line 2:\n"
'`foo(1)\n'
"~~~~")
def test_preprocess_substitute_define_too_many_argument(self):
result = self.preprocess("""\
`define foo(arg1)arg1
`foo(1, 2)""")
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"Too many arguments got 2 expected 1\n%s",
"at fn.v line 2:\n"
'`foo(1, 2)\n'
"~~~~")
def test_preprocess_substitute_define_with_nested_argument(self):
result = self.preprocess(
"`define foo(arg1, arg2)arg1\n"
"`foo([1, 2], 3)")
self.assertFalse(result.logger.warning.called)
result.assert_has_tokens("[1, 2]")
result = self.preprocess(
"`define foo(arg1, arg2)arg1\n"
"`foo({1, 2}, 3)")
self.assertFalse(result.logger.warning.called)
result.assert_has_tokens("{1, 2}")
result = self.preprocess(
"`define foo(arg1, arg2)arg1\n"
"`foo((1, 2), 3)")
self.assertFalse(result.logger.warning.called)
result.assert_has_tokens("(1, 2)")
result = self.preprocess(
"`define foo(arg1)arg1\n"
"`foo((1, 2))")
self.assertFalse(result.logger.warning.called)
result.assert_has_tokens("(1, 2)")
# Not OK in simulator but we let the simulator
# tell the user that this is a problem
result = self.preprocess(
"`define foo(arg1)arg1\n"
"`foo([1, 2)")
self.assertFalse(result.logger.warning.called)
result.assert_has_tokens("[1, 2")
def test_preprocess_substitute_define_eof(self):
result = self.preprocess(
"`define foo(arg1, arg2)arg1,arg2\n"
"`foo(1 2")
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define actuals\n%s",
"at fn.v line 2:\n"
'`foo(1 2\n'
"~~~~")
result = self.preprocess(
"`define foo(arg1, arg2)arg1,arg2\n"
"`foo((1 2)")
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `define actuals\n%s",
"at fn.v line 2:\n"
'`foo((1 2)\n'
"~~~~")
def test_substitute_undefined(self):
result = self.preprocess('`foo')
result.assert_has_tokens("")
# Debug since there are many custon `names in tools
result.logger.debug.assert_called_once_with(
"Verilog undefined name\n%s",
"at fn.v line 1:\n"
'`foo\n'
"~~~~")
def test_preprocess_include_directive_missing_file(self):
result = self.preprocess('`include "missing.svh"',
include_paths=[self.output_path])
result.assert_has_tokens("")
result.assert_included_files([])
# Is debug message since there are so many builtin includes in tools
result.logger.debug.assert_called_once_with(
"Could not find `include file missing.svh\n%s",
"at fn.v line 1:\n"
'`include "missing.svh"\n'
" ~~~~~~~~~~~~~")
def test_preprocess_include_directive_missing_argument(self):
result = self.preprocess('`include',
include_paths=[self.output_path])
result.assert_has_tokens("")
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `include argument\n%s",
"at fn.v line 1:\n"
'`include\n'
"~~~~~~~~")
def test_preprocess_include_directive_bad_argument(self):
self.write_file("include.svh", "hello hey")
result = self.preprocess('`include foo "include.svh"',
include_paths=[self.output_path])
result.assert_has_tokens(' "include.svh"')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include bad argument\n%s",
"at fn.v line 1:\n"
'`include foo "include.svh"\n'
" ~~~")
def test_preprocess_include_directive_from_define_bad_argument(self):
result = self.preprocess('''\
`define inc foo
`include `inc
keep''',
include_paths=[self.output_path])
result.assert_has_tokens('\nkeep')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include has bad argument\n%s",
"from fn.v line 2:\n"
'`include `inc\n'
' ~~~~\n'
"at fn.v line 1:\n"
'`define inc foo\n'
" ~~~")
def test_preprocess_include_directive_from_empty_define(self):
result = self.preprocess('''\
`define inc
`include `inc
keep''', include_paths=[self.output_path])
result.assert_has_tokens('\nkeep')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include has bad argument, empty define `inc\n%s",
"at fn.v line 2:\n"
'`include `inc\n'
" ~~~~")
def test_preprocess_include_directive_from_define_not_defined(self):
result = self.preprocess('`include `inc', include_paths=[self.output_path])
result.assert_has_tokens('')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include argument not defined\n%s",
"at fn.v line 1:\n"
'`include `inc\n'
" ~~~~")
def test_preprocess_error_in_include_file(self):
self.write_file("include.svh", '`include foo')
result = self.preprocess('\n\n`include "include.svh"',
include_paths=[self.output_path])
result.assert_has_tokens('\n\n')
result.assert_included_files([join(self.output_path, "include.svh")])
result.logger.warning.assert_called_once_with(
"Verilog `include bad argument\n%s",
"from fn.v line 3:\n"
'`include "include.svh"\n'
"~~~~~~~~\n"
"at include.svh line 1:\n"
'`include foo\n'
' ~~~')
def test_preprocess_error_in_expanded_define(self):
result = self.preprocess('''\
`define foo `include wrong
`foo
''', include_paths=[self.output_path])
result.assert_has_tokens('\n')
result.assert_included_files([])
result.logger.warning.assert_called_once_with(
"Verilog `include bad argument\n%s",
"from fn.v line 2:\n"
'`foo\n'
'~~~~\n'
"at fn.v line 1:\n"
'`define foo `include wrong\n'
" ~~~~~")
def test_ifdef_eof(self):
result = self.preprocess('''\
`ifdef foo
taken''')
result.assert_has_tokens("")
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `ifdef\n%s",
"at fn.v line 1:\n"
'`ifdef foo\n'
'~~~~~~')
def test_ifdef_bad_argument(self):
result = self.preprocess('''\
`ifdef "hello"
keep''')
result.assert_has_tokens("\nkeep")
result.logger.warning.assert_called_once_with(
"Bad argument to `ifdef\n%s",
"at fn.v line 1:\n"
'`ifdef "hello"\n'
' ~~~~~~~')
def test_elsif_bad_argument(self):
result = self.preprocess('''\
`ifdef bar
`elsif "hello"
keep''')
result.assert_has_tokens("\nkeep")
result.logger.warning.assert_called_once_with(
"Bad argument to `elsif\n%s",
"at fn.v line 2:\n"
'`elsif "hello"\n'
' ~~~~~~~')
def test_undefineall(self):
result = self.preprocess('''\
`define foo keep
`define bar keep2
`foo
`undefineall''')
result.assert_has_tokens("keep\n")
result.assert_no_defines()
def test_resetall(self):
result = self.preprocess('''\
`define foo keep
`define bar keep2
`foo
`resetall''')
result.assert_has_tokens("keep\n")
result.assert_no_defines()
def test_undef(self):
result = self.preprocess('''\
`define foo keep
`define bar keep2
`foo
`undef foo''')
result.assert_has_tokens("keep\n")
result.assert_has_defines({"bar": Macro("bar", tokenize("keep2"))})
def test_undef_eof(self):
result = self.preprocess('`undef')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"EOF reached when parsing `undef\n%s",
"at fn.v line 1:\n"
'`undef\n'
'~~~~~~')
def test_undef_bad_argument(self):
result = self.preprocess('`undef "foo"')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"Bad argument to `undef\n%s",
"at fn.v line 1:\n"
'`undef "foo"\n'
' ~~~~~')
def test_undef_not_defined(self):
result = self.preprocess('`undef foo')
result.assert_has_tokens("")
result.assert_no_defines()
result.logger.warning.assert_called_once_with(
"`undef argument was not previously defined\n%s",
"at fn.v line 1:\n"
'`undef foo\n'
' ~~~')
def test_ignores_celldefine(self):
result = self.preprocess('`celldefine`endcelldefine keep')
result.assert_has_tokens(" keep")
result.assert_no_log()
def test_ignores_timescale(self):
result = self.preprocess('`timescale 1 ns / 1 ps\nkeep')
result.assert_has_tokens("\nkeep")
result.assert_no_log()
def test_ignores_default_nettype(self):
result = self.preprocess('`default_nettype none\nkeep')
result.assert_has_tokens("\nkeep")
result.assert_no_log()
def test_ignores_nounconnected_drive(self):
result = self.preprocess('`nounconnected_drive keep')
result.assert_has_tokens(" keep")
result.assert_no_log()
def test_ignores_protected_region(self):
result = self.preprocess("""\
keep_before
`pragma protect begin_protected
ASDADSJAKSJDKSAJDISA
`pragma protect end_protected
keep_end""")
result.assert_has_tokens("keep_before\n\nkeep_end")
result.assert_no_log()
def preprocess(self, code, file_name="fn.v", include_paths=None):
"""
Tokenize & Preprocess
"""
tokenizer = VerilogTokenizer()
preprocessor = VerilogPreprocessor(tokenizer)
write_file(file_name, code)
tokens = tokenizer.tokenize(code, file_name=file_name)
defines = {}
included_files = []
with mock.patch("vunit.parsing.verilog.preprocess.LOGGER", autospec=True) as logger:
tokens = preprocessor.preprocess(tokens, defines, include_paths, included_files)
return PreprocessResult(self, tokens, defines,
[file_name for _, file_name in included_files if file_name is not None],
logger)
def write_file(self, file_name, contents):
"""
Write file with contents into output path
"""
full_name = join(self.output_path, file_name)
full_path = dirname(full_name)
if not exists(full_path):
os.makedirs(full_path)
with open(full_name, "w") as fptr:
fptr.write(contents)
class PreprocessResult(object):
"""
Helper object to test preprocessing
"""
def __init__(self, # pylint: disable=too-many-arguments
test, tokens, defines, included_files, logger):
self.test = test
self.tokens = tokens
self.defines = defines
self.included_files = included_files
self.logger = logger
def assert_has_tokens(self, code, noloc=True):
"""
Check that tokens are the same as code
"""
expected = tokenize(code)
if noloc:
self.test.assertEqual(strip_loc(self.tokens), strip_loc(expected))
else:
self.test.assertEqual(self.tokens, expected)
return self
def assert_no_defines(self):
"""
Assert that there were no defines
"""
self.test.assertEqual(self.defines, {})
def assert_included_files(self, included_files):
"""
Assert that these files where included
"""
self.test.assertEqual(self.included_files, included_files)
def assert_has_defines(self, defines):
"""
Assert that these defines were made
"""
self.test.assertEqual(self.defines.keys(), defines.keys())
def macro_strip_loc(define):
"""
Strip location information from a Macro
"""
define.tokens = strip_loc(define.tokens)
for key, value in define.defaults.items():
define.defaults[key] = strip_loc(value)
for key in self.defines:
self.test.assertEqual(macro_strip_loc(self.defines[key]),
macro_strip_loc(defines[key]))
def assert_no_log(self):
"""
Assert that no log call were made
"""
self.test.assertEqual(self.logger.debug.mock_calls, [])
self.test.assertEqual(self.logger.info.mock_calls, [])
self.test.assertEqual(self.logger.warning.mock_calls, [])
self.test.assertEqual(self.logger.error.mock_calls, [])
def tokenize(code, file_name="fn.v"):
"""
Tokenize
"""
tokenizer = VerilogTokenizer()
return tokenizer.tokenize(code, file_name=file_name)
def strip_loc(tokens):
"""
Strip location information
"""
return [Token(token.kind, token.value, None) for token in tokens]
| 33.249433 | 104 | 0.592341 |
793fb6edb9bbd5c1868f75cc7074d3428e713e0d | 5,543 | py | Python | topboard_sdk/model/easy_flow/deploy_batch_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | topboard_sdk/model/easy_flow/deploy_batch_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | topboard_sdk/model/easy_flow/deploy_batch_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: deploy_batch.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topboard_sdk.model.easy_flow import deploy_target_pb2 as topboard__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='deploy_batch.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x12\x64\x65ploy_batch.proto\x12\teasy_flow\x1a\x30topboard_sdk/model/easy_flow/deploy_target.proto\"\xbe\x01\n\x0b\x44\x65ployBatch\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x10\n\x08\x62\x61tchNum\x18\x02 \x01(\x05\x12\x15\n\rbatchInterval\x18\x03 \x01(\x05\x12/\n\x07\x62\x61tches\x18\x04 \x03(\x0b\x32\x1e.easy_flow.DeployBatch.Batches\x12\x12\n\nfailedStop\x18\x05 \x01(\x08\x1a\x33\n\x07\x42\x61tches\x12(\n\x07targets\x18\x01 \x03(\x0b\x32\x17.easy_flow.DeployTargetBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[topboard__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2.DESCRIPTOR,])
_DEPLOYBATCH_BATCHES = _descriptor.Descriptor(
name='Batches',
full_name='easy_flow.DeployBatch.Batches',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='easy_flow.DeployBatch.Batches.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=223,
serialized_end=274,
)
_DEPLOYBATCH = _descriptor.Descriptor(
name='DeployBatch',
full_name='easy_flow.DeployBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployBatch.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployBatch.batchNum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployBatch.batchInterval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batches', full_name='easy_flow.DeployBatch.batches', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployBatch.failedStop', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYBATCH_BATCHES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=274,
)
_DEPLOYBATCH_BATCHES.fields_by_name['targets'].message_type = topboard__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2._DEPLOYTARGET
_DEPLOYBATCH_BATCHES.containing_type = _DEPLOYBATCH
_DEPLOYBATCH.fields_by_name['batches'].message_type = _DEPLOYBATCH_BATCHES
DESCRIPTOR.message_types_by_name['DeployBatch'] = _DEPLOYBATCH
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeployBatch = _reflection.GeneratedProtocolMessageType('DeployBatch', (_message.Message,), {
'Batches' : _reflection.GeneratedProtocolMessageType('Batches', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYBATCH_BATCHES,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch.Batches)
})
,
'DESCRIPTOR' : _DEPLOYBATCH,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch)
})
_sym_db.RegisterMessage(DeployBatch)
_sym_db.RegisterMessage(DeployBatch.Batches)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.762238 | 575 | 0.76096 |
793fb93c819ac90c2f3bdfcacc2bef5971e3033e | 1,336 | py | Python | netforce_ui/setup.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 27 | 2015-09-30T23:53:30.000Z | 2021-06-07T04:56:25.000Z | netforce_ui/setup.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 191 | 2015-10-08T11:46:30.000Z | 2019-11-14T02:24:36.000Z | netforce_ui/setup.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 32 | 2015-10-01T03:59:43.000Z | 2022-01-13T07:31:05.000Z | #!/usr/bin/env python3
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from setuptools import setup
setup(
name="netforce_ui",
version="3.1.0",
description="Netforce web UI module",
packages=["netforce_ui"],
package_data={"netforce_ui": ["static/img/*"]},
)
| 43.096774 | 80 | 0.75524 |
793fb9c10c4e2496f833b1fde884be826bce4f2c | 1,276 | py | Python | example/migrations/0001_initial.py | ruben3185/baseDRF | 5e20f2ca784945f63a5d8fd7cdff0438afc50c88 | [
"MIT"
] | null | null | null | example/migrations/0001_initial.py | ruben3185/baseDRF | 5e20f2ca784945f63a5d8fd7cdff0438afc50c88 | [
"MIT"
] | 5 | 2021-03-19T00:03:09.000Z | 2021-09-22T18:38:21.000Z | example/migrations/0001_initial.py | ruben3185/baseDRF | 5e20f2ca784945f63a5d8fd7cdff0438afc50c88 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-02-25 02:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Example',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Creado el')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Actualizado el')),
('active', models.BooleanField(default=True)),
('description', models.TextField(blank=True, default=None, null=True, verbose_name='Description')),
('name', models.TextField(blank=True, default=None, null=True, verbose_name='Name')),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Creado por')),
],
options={
'abstract': False,
},
),
]
| 38.666667 | 163 | 0.628527 |
793fbab10caf7d6dd242615f525ad88a524edb86 | 63 | py | Python | vpnporthole/__init__.py | sourcesimian/vpn-porthole | de225ce9c730a40a0358f59c09fc5e16330e9dd7 | [
"MIT"
] | 15 | 2016-07-29T17:44:48.000Z | 2021-09-18T14:43:49.000Z | vpnporthole/__init__.py | sourcesimian/vpn-porthole | de225ce9c730a40a0358f59c09fc5e16330e9dd7 | [
"MIT"
] | 2 | 2017-02-16T10:23:07.000Z | 2018-07-24T12:54:36.000Z | vpnporthole/__init__.py | sourcesimian/vpn-porthole | de225ce9c730a40a0358f59c09fc5e16330e9dd7 | [
"MIT"
] | 3 | 2016-06-25T10:47:56.000Z | 2020-02-20T09:11:03.000Z | from vpnporthole.session import Session
__all__ = ['Session']
| 15.75 | 39 | 0.777778 |
793fbad7a7620c28a695de0cf9454b349331452a | 11,063 | py | Python | models/base_model.py | prasad4fun/neural_sequence_labeling | a580ba1b559e915cb5a69ac15b29d383dc1cf2fe | [
"MIT"
] | null | null | null | models/base_model.py | prasad4fun/neural_sequence_labeling | a580ba1b559e915cb5a69ac15b29d383dc1cf2fe | [
"MIT"
] | null | null | null | models/base_model.py | prasad4fun/neural_sequence_labeling | a580ba1b559e915cb5a69ac15b29d383dc1cf2fe | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import viterbi_decode, crf_log_likelihood
from tensorflow.python.ops.rnn_cell import LSTMCell, GRUCell, MultiRNNCell
from utils import CoNLLeval, load_dataset, get_logger, process_batch_data, align_data
from data.common import word_convert, UNK
import os
class BaseModel:
def __init__(self, config):
self.cfg = config
self._initialize_config()
self.sess, self.saver = None, None
self._add_placeholders()
self._build_embedding_op()
self._build_model_op()
self._build_loss_op()
self._build_train_op()
print('params number: {}'.format(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
self.initialize_session()
def _initialize_config(self):
# create folders and logger
if not os.path.exists(self.cfg["checkpoint_path"]):
os.makedirs(self.cfg["checkpoint_path"])
if not os.path.exists(self.cfg["summary_path"]):
os.makedirs(self.cfg["summary_path"])
self.logger = get_logger(os.path.join(self.cfg["checkpoint_path"], "log.txt"))
# load dictionary
dict_data = load_dataset(self.cfg["vocab"])
self.word_dict, self.char_dict = dict_data["word_dict"], dict_data["char_dict"]
self.tag_dict = dict_data["tag_dict"]
del dict_data
self.word_vocab_size = len(self.word_dict)
self.char_vocab_size = len(self.char_dict)
self.tag_vocab_size = len(self.tag_dict)
self.rev_word_dict = dict([(idx, word) for word, idx in self.word_dict.items()])
self.rev_char_dict = dict([(idx, char) for char, idx in self.char_dict.items()])
self.rev_tag_dict = dict([(idx, tag) for tag, idx in self.tag_dict.items()])
def initialize_session(self):
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_config)
self.saver = tf.train.Saver(max_to_keep=self.cfg["max_to_keep"])
self.sess.run(tf.global_variables_initializer())
def restore_last_session(self, ckpt_path=None):
if ckpt_path is not None:
ckpt = tf.train.get_checkpoint_state(ckpt_path)
else:
ckpt = tf.train.get_checkpoint_state(self.cfg["checkpoint_path"]) # get checkpoint state
if ckpt and ckpt.model_checkpoint_path: # restore session
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
def save_session(self, epoch):
self.saver.save(self.sess, self.cfg["checkpoint_path"] + self.cfg["model_name"], global_step=epoch)
def close_session(self):
self.sess.close()
def _add_summary(self):
self.summary = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(self.cfg["summary_path"] + "train", self.sess.graph)
self.test_writer = tf.summary.FileWriter(self.cfg["summary_path"] + "test")
def reinitialize_weights(self, scope_name=None):
"""Reinitialize parameters in a scope"""
if scope_name is None:
self.sess.run(tf.global_variables_initializer())
else:
variables = tf.contrib.framework.get_variables(scope_name)
self.sess.run(tf.variables_initializer(variables))
@staticmethod
def variable_summaries(variable, name=None):
with tf.name_scope(name or "summary"):
mean = tf.reduce_mean(variable)
tf.summary.scalar("mean", mean) # add mean value
stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
tf.summary.scalar("stddev", stddev) # add standard deviation value
tf.summary.scalar("max", tf.reduce_max(variable)) # add maximal value
tf.summary.scalar("min", tf.reduce_min(variable)) # add minimal value
tf.summary.histogram("histogram", variable) # add histogram
@staticmethod
def viterbi_decode(logits, trans_params, seq_len):
viterbi_sequences = []
for logit, lens in zip(logits, seq_len):
logit = logit[:lens] # keep only the valid steps
viterbi_seq, viterbi_score = viterbi_decode(logit, trans_params)
viterbi_sequences += [viterbi_seq]
return viterbi_sequences
def _create_single_rnn_cell(self, num_units):
cell = GRUCell(num_units) if self.cfg["cell_type"] == "gru" else LSTMCell(num_units)
return cell
def _create_rnn_cell(self):
if self.cfg["num_layers"] is None or self.cfg["num_layers"] <= 1:
return self._create_single_rnn_cell(self.cfg["num_units"])
else:
MultiRNNCell([self._create_single_rnn_cell(self.cfg["num_units"]) for _ in range(self.cfg["num_layers"])])
def _add_placeholders(self):
raise NotImplementedError("To be implemented...")
def _get_feed_dict(self, data):
raise NotImplementedError("To be implemented...")
def _build_embedding_op(self):
raise NotImplementedError("To be implemented...")
def _build_model_op(self):
raise NotImplementedError("To be implemented...")
def _build_loss_op(self):
if self.cfg["use_crf"]:
crf_loss, self.trans_params = crf_log_likelihood(self.logits, self.tags, self.seq_len)
self.loss = tf.reduce_mean(-crf_loss)
else: # using softmax
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.tags)
mask = tf.sequence_mask(self.seq_len)
self.loss = tf.reduce_mean(tf.boolean_mask(losses, mask))
tf.summary.scalar("loss", self.loss)
def _build_train_op(self):
with tf.variable_scope("train_step"):
if self.cfg["optimizer"] == 'adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr)
elif self.cfg["optimizer"] == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
elif self.cfg["optimizer"] == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr)
elif self.cfg["optimizer"] == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(learning_rate=self.lr)
else: # default adam optimizer
if self.cfg["optimizer"] != 'adam':
print('Unsupported optimizing method {}. Using default adam optimizer.'
.format(self.cfg["optimizer"]))
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
if self.cfg["grad_clip"] is not None and self.cfg["grad_clip"] > 0:
grads, vs = zip(*optimizer.compute_gradients(self.loss))
grads, _ = tf.clip_by_global_norm(grads, self.cfg["grad_clip"])
self.train_op = optimizer.apply_gradients(zip(grads, vs))
else:
self.train_op = optimizer.minimize(self.loss)
def _predict_op(self, data):
feed_dict = self._get_feed_dict(data)
if self.cfg["use_crf"]:
logits, trans_params, seq_len = self.sess.run([self.logits, self.trans_params, self.seq_len],
feed_dict=feed_dict)
return self.viterbi_decode(logits, trans_params, seq_len)
else:
pred_logits = tf.cast(tf.argmax(self.logits, axis=-1), tf.int32)
logits = self.sess.run(pred_logits, feed_dict=feed_dict)
return logits
def train_epoch(self, train_set, valid_data, epoch):
raise NotImplementedError("To be implemented...")
def train(self, train_set, valid_data, valid_set, test_set):
self.logger.info("Start training...")
best_f1, no_imprv_epoch, init_lr = -np.inf, 0, self.cfg["lr"]
self._add_summary()
for epoch in range(1, self.cfg["epochs"] + 1):
self.logger.info('Epoch {}/{}:'.format(epoch, self.cfg["epochs"]))
self.train_epoch(train_set, valid_data, epoch) # train epochs
self.evaluate(valid_set, "dev")
score = self.evaluate(test_set, "test")
if self.cfg["use_lr_decay"]: # learning rate decay
self.cfg["lr"] = max(init_lr / (1.0 + self.cfg["lr_decay"] * epoch), self.cfg["minimal_lr"])
if score["FB1"] > best_f1:
best_f1 = score["FB1"]
no_imprv_epoch = 0
self.save_session(epoch)
self.logger.info(' -- new BEST score on test dataset: {:04.2f}'.format(best_f1))
else:
no_imprv_epoch += 1
if no_imprv_epoch >= self.cfg["no_imprv_tolerance"]:
self.logger.info('early stop at {}th epoch without improvement, BEST score on testset: {:04.2f}'
.format(epoch, best_f1))
break
self.train_writer.close()
self.test_writer.close()
def evaluate(self, dataset, name):
save_path = os.path.join(self.cfg["checkpoint_path"], "result.txt")
predictions, groundtruth, words_list = list(), list(), list()
for data in dataset:
predicts = self._predict_op(data)
for tags, preds, words, seq_len in zip(data["tags"], predicts, data["words"], data["seq_len"]):
tags = [self.rev_tag_dict[x] for x in tags[:seq_len]]
preds = [self.rev_tag_dict[x] for x in preds[:seq_len]]
words = [self.rev_word_dict[x] for x in words[:seq_len]]
predictions.append(preds)
groundtruth.append(tags)
words_list.append(words)
ce = CoNLLeval()
score = ce.conlleval(predictions, groundtruth, words_list, save_path)
self.logger.info("{} dataset -- acc: {:04.2f}, pre: {:04.2f}, rec: {:04.2f}, FB1: {:04.2f}"
.format(name, score["accuracy"], score["precision"], score["recall"], score["FB1"]))
return score
def words_to_indices(self, words):
"""
Convert input words into batchnized word/chars indices for inference
:param words: input words
:return: batchnized word indices
"""
chars_idx = []
for word in words:
chars = [self.char_dict[char] if char in self.char_dict else self.char_dict[UNK] for char in word]
chars_idx.append(chars)
words = [word_convert(word, language=self.cfg["language"]) for word in words]
words_idx = [self.word_dict[word] if word in self.word_dict else self.word_dict[UNK] for word in words]
return process_batch_data([words_idx], [chars_idx])
def inference(self, sentence):
words = sentence.lstrip().rstrip().split(" ")
data = self.words_to_indices(words)
predicts = self._predict_op(data)
predicts = [self.rev_tag_dict[idx] for idx in list(predicts[0])]
results = align_data({"input": words, "output": predicts})
return "{}\n{}".format(results["input"], results["output"])
| 48.52193 | 119 | 0.62813 |
793fbafd294f5509c4810829139b7c529bf8dc42 | 4,610 | py | Python | src/command_modules/azure-cli-documentdb/azure/cli/command_modules/documentdb/custom.py | henrypan/azure-cli | 8de0ab5216ed3dc700546ae9a3c485710322376b | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-documentdb/azure/cli/command_modules/documentdb/custom.py | henrypan/azure-cli | 8de0ab5216ed3dc700546ae9a3c485710322376b | [
"MIT"
] | 2 | 2021-03-25T21:38:56.000Z | 2021-11-15T17:46:45.000Z | src/command_modules/azure-cli-documentdb/azure/cli/command_modules/documentdb/custom.py | Visual-Studio-China/azure-cli-int | 48c7c7f371a0ecc4ebfd4dcfdc72764beddf5c31 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.mgmt.documentdb.models import (
ConsistencyPolicy,
DatabaseAccountCreateUpdateParameters,
Location
)
from azure.mgmt.documentdb.models.document_db_enums import DatabaseAccountKind
# pylint:disable=too-many-arguments
def cli_documentdb_create(client,
resource_group_name,
account_name,
locations=None,
kind=DatabaseAccountKind.global_document_db.value,
default_consistency_level=None,
max_staleness_prefix=100,
max_interval=5,
ip_range_filter=None):
# pylint:disable=line-too-long
"""Create a new Azure DocumentDB database account.
"""
consistency_policy = None
if default_consistency_level is not None:
consistency_policy = ConsistencyPolicy(default_consistency_level, max_staleness_prefix, max_interval)
from azure.mgmt.resource import ResourceManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
resource_client = get_mgmt_service_client(ResourceManagementClient)
rg = resource_client.resource_groups.get(resource_group_name)
resource_group_location = rg.location # pylint: disable=no-member
if not locations:
locations.append(Location(location_name=resource_group_location, failover_priority=0))
params = DatabaseAccountCreateUpdateParameters(
resource_group_location,
locations,
kind=kind,
consistency_policy=consistency_policy,
ip_range_filter=ip_range_filter)
async_docdb_create = client.create_or_update(resource_group_name, account_name, params)
docdb_account = async_docdb_create.result()
docdb_account = client.get(resource_group_name, account_name) # Workaround
return docdb_account
def cli_documentdb_update(client,
resource_group_name,
account_name,
locations=None,
default_consistency_level=None,
max_staleness_prefix=None,
max_interval=None,
ip_range_filter=None):
# pylint:disable=line-too-long
"""Update an existing Azure DocumentDB database account.
"""
existing = client.get(resource_group_name, account_name)
update_consistency_policy = False
if max_interval is not None or max_staleness_prefix is not None or default_consistency_level is not None:
update_consistency_policy = True
if max_staleness_prefix is None:
max_staleness_prefix = existing.consistency_policy.max_staleness_prefix
if max_interval is None:
max_interval = existing.consistency_policy.max_interval_in_seconds
if default_consistency_level is None:
default_consistency_level = existing.consistency_policy.default_consistency_level
consistency_policy = None
if update_consistency_policy:
consistency_policy = ConsistencyPolicy(default_consistency_level, max_staleness_prefix, max_interval)
else:
consistency_policy = existing.consistency_policy
if not locations:
for loc in existing.read_locations:
locations.append(Location(location_name=loc.location_name, failover_priority=loc.failover_priority))
if ip_range_filter is None:
ip_range_filter = existing.ip_range_filter
params = DatabaseAccountCreateUpdateParameters(
existing.location,
locations,
kind=existing.kind,
consistency_policy=consistency_policy,
ip_range_filter=ip_range_filter)
async_docdb_create = client.create_or_update(resource_group_name, account_name, params)
docdb_account = async_docdb_create.result()
docdb_account = client.get(resource_group_name, account_name) # Workaround
return docdb_account
def cli_documentdb_list(client,
resource_group_name=None):
"""Lists all Azure DocumentDB database accounts within a given resource group or subscription.
"""
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
else:
return client.list()
| 40.438596 | 112 | 0.678308 |
793fbcf257a721f25d504940eba6d751200c5792 | 4,103 | py | Python | src/tests/test_notes.py | testdrivenio/fastapi-crud-sync | 3bdc52175cc2250ea7251ae73d91a26c1ee97327 | [
"MIT"
] | 46 | 2020-01-14T16:43:40.000Z | 2022-03-30T08:33:41.000Z | src/tests/test_notes.py | devanl/fastapi-crud | 4a0e1a254c4be1ba136f69f35e1161bad98ac0db | [
"MIT"
] | 1 | 2021-04-21T00:04:13.000Z | 2021-04-21T00:04:13.000Z | src/tests/test_notes.py | devanl/fastapi-crud | 4a0e1a254c4be1ba136f69f35e1161bad98ac0db | [
"MIT"
] | 16 | 2020-02-27T09:42:29.000Z | 2022-01-06T07:55:21.000Z | import json
import pytest
from app.api import crud
def test_create_note(test_app, monkeypatch):
test_data = {"title": "something", "description": "something else", "id": 1}
def mock_post(db_session, payload):
return test_data
monkeypatch.setattr(crud, "post", mock_post)
response = test_app.post("/notes/", data=json.dumps(test_data),)
assert response.status_code == 201
assert response.json() == test_data
def test_create_note_invalid_json(test_app):
response = test_app.post("/notes/", data=json.dumps({"title": "something"}))
assert response.status_code == 422
response = test_app.post(
"/notes/", data=json.dumps({"title": "1", "description": "2"})
)
assert response.status_code == 422
def test_read_note(test_app, monkeypatch):
test_data = {"title": "something", "description": "something else", "id": 1}
def mock_get(db_session, id):
return test_data
monkeypatch.setattr(crud, "get", mock_get)
response = test_app.get("/notes/1")
assert response.status_code == 200
assert response.json() == test_data
def test_read_note_incorrect_id(test_app, monkeypatch):
def mock_get(db_session, id):
return None
monkeypatch.setattr(crud, "get", mock_get)
response = test_app.get("/notes/999")
assert response.status_code == 404
assert response.json()["detail"] == "Note not found"
response = test_app.get("/notes/0")
assert response.status_code == 422
def test_read_all_notes(test_app, monkeypatch):
test_data = [
{"title": "something", "description": "something else", "id": 1},
{"title": "someone", "description": "someone else", "id": 2},
]
def mock_get_all(db_session):
return test_data
monkeypatch.setattr(crud, "get_all", mock_get_all)
response = test_app.get("/notes/")
assert response.status_code == 200
assert response.json() == test_data
def test_update_note(test_app, monkeypatch):
test_data = {"title": "something", "description": "something else", "id": 1}
test_update_data = {"title": "someone", "description": "someone else", "id": 1}
def mock_get(db_session, id):
return test_data
monkeypatch.setattr(crud, "get", mock_get)
def mock_put(db_session, note, title, description):
return test_update_data
monkeypatch.setattr(crud, "put", mock_put)
response = test_app.put("/notes/1/", data=json.dumps(test_update_data),)
assert response.status_code == 200
assert response.json() == test_update_data
@pytest.mark.parametrize(
"id, payload, status_code",
[
[1, {}, 422],
[1, {"description": "bar"}, 422],
[999, {"title": "foo", "description": "bar"}, 404],
[1, {"title": "1", "description": "bar"}, 422],
[1, {"title": "foo", "description": "1"}, 422],
[0, {"title": "foo", "description": "bar"}, 422],
],
)
def test_update_note_invalid(test_app, monkeypatch, id, payload, status_code):
def mock_get(db_session, id):
return None
monkeypatch.setattr(crud, "get", mock_get)
response = test_app.put(f"/notes/{id}/", data=json.dumps(payload),)
assert response.status_code == status_code
def test_remove_note(test_app, monkeypatch):
test_data = {"title": "something", "description": "something else", "id": 1}
def mock_get(db_session, id):
return test_data
monkeypatch.setattr(crud, "get", mock_get)
def mock_delete(db_session, id):
return test_data
monkeypatch.setattr(crud, "delete", mock_delete)
response = test_app.delete("/notes/1/")
assert response.status_code == 200
assert response.json() == test_data
def test_remove_note_incorrect_id(test_app, monkeypatch):
def mock_get(db_session, id):
return None
monkeypatch.setattr(crud, "get", mock_get)
response = test_app.delete("/notes/999/")
assert response.status_code == 404
assert response.json()["detail"] == "Note not found"
response = test_app.delete("/notes/0/")
assert response.status_code == 422
| 28.493056 | 83 | 0.656105 |
793fbd69df34b9330fb884a391405aa49fda29fb | 1,270 | py | Python | music21/stream/enums.py | cuthbertLab/music21 | 1be16c255460107c10d7b4bc8eb77f0d115b5eac | [
"MIT"
] | 1,449 | 2015-01-09T15:53:56.000Z | 2022-03-31T18:24:46.000Z | venv/Lib/site-packages/music21/stream/enums.py | alimirzazadeh/wolfGANg | 5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c | [
"MIT"
] | 1,179 | 2015-01-07T17:07:54.000Z | 2022-03-31T16:46:02.000Z | music21/stream/enums.py | cuthbertLab/music21 | 1be16c255460107c10d7b4bc8eb77f0d115b5eac | [
"MIT"
] | 393 | 2015-01-03T20:38:16.000Z | 2022-03-25T16:51:22.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: stream/enums.py
# Purpose: enumerations for streams
#
# Authors: Michael Scott Cuthbert
#
# Copyright: Copyright © 2020 Michael Scott Cuthbert and the music21 Project
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
import enum
class StaffType(enum.Enum):
'''
These are the same as MusicXML staff-type, except uppercase and "other"
which reflects any other type. Probably the best way of using "other"
is to designate what it means with a .editorial.staffTypeExplanation = 'other'
>>> stream.enums.StaffType.OSSIA
<StaffType.OSSIA: 'ossia'>
To get the musicxml name:
>>> stream.enums.StaffType.OSSIA.value
'ossia'
>>> stream.enums.StaffType('cue')
<StaffType.CUE: 'cue'>
>>> stream.enums.StaffType('tiny')
Traceback (most recent call last):
ValueError: 'tiny' is not a valid StaffType
'''
REGULAR = 'regular'
OSSIA = 'ossia'
CUE = 'cue'
EDITORIAL = 'editorial'
ALTERNATE = 'alternate'
OTHER = 'other'
if __name__ == '__main__':
from music21 import mainTest
mainTest()
| 28.222222 | 82 | 0.570079 |
793fbdf41b053abb4bfd2a8c388bfc8afba22a8d | 1,197 | py | Python | samples/client/petstore/python/test/test_category.py | deerghayu/swagger-codegen | 1e33403e381be1db91b1427d3f70f7476cb4da67 | [
"Apache-2.0"
] | null | null | null | samples/client/petstore/python/test/test_category.py | deerghayu/swagger-codegen | 1e33403e381be1db91b1427d3f70f7476cb4da67 | [
"Apache-2.0"
] | 2 | 2021-10-05T14:41:26.000Z | 2022-03-28T16:07:30.000Z | samples/client/petstore/python/test/test_category.py | deerghayu/swagger-codegen | 1e33403e381be1db91b1427d3f70f7476cb4da67 | [
"Apache-2.0"
] | 5 | 2020-04-01T02:37:35.000Z | 2021-04-07T08:04:07.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.category import Category
class TestCategory(unittest.TestCase):
""" Category unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCategory(self):
"""
Test Category
"""
model = swagger_client.models.category.Category()
if __name__ == '__main__':
unittest.main() | 24.428571 | 75 | 0.718463 |
793fbe46fb3c743581d99e192778e45c7e8c5a02 | 2,580 | py | Python | app/notify_util.py | banyancheung/lottery-number-tracking | 0dad04300ed6e15f2bf35c52bf6ba1365ac34382 | [
"MIT"
] | 8 | 2020-08-24T02:20:53.000Z | 2022-02-24T09:56:13.000Z | app/notify_util.py | banyancheung/lottery-number-tracking | 0dad04300ed6e15f2bf35c52bf6ba1365ac34382 | [
"MIT"
] | null | null | null | app/notify_util.py | banyancheung/lottery-number-tracking | 0dad04300ed6e15f2bf35c52bf6ba1365ac34382 | [
"MIT"
] | 2 | 2020-08-24T02:46:10.000Z | 2020-11-02T06:55:00.000Z | import base64
import hashlib
import hmac
import json
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import time
import urllib
import requests
def notify(config, origin_content):
for channel, setting in config.items():
content = format_content(channel, origin_content)
if channel == 'smtp':
notify_email(setting, origin_content['prize_msg'], content)
if channel == 'dingtalk':
notify_dingtalk(setting, content)
def format_content(channel, origin_content):
text = f"""
{origin_content['prize_msg']}\n
开奖号码:{origin_content['lottery_res']}\n
你的号码: \n
{origin_content['lottery_num']}
\n
"""
if origin_content['is_prize']:
text += "中奖信息:\n"
for prize in origin_content['lottery_prize']:
text += f"{prize['prize_name']}:[{prize['prize_require']}],{prize['prize_num']}注,估算奖金:{prize['prize_money']}\n"
if channel == 'dingtalk':
content = json.dumps({
'msgtype': 'text',
'text': {
'content': text
}
})
elif channel == 'smtp':
content = text
return content
def notify_email(config, subject, content):
from_addr = config['sender'] # 邮件发送账号
to_addrs = config['receive'] # 接收邮件账号
qq_code = config['code'] # 授权码(这个要填自己获取到的)
smtp_server = 'smtp.qq.com' # 固定写死
smtp_port = 465 # 固定端口
# 配置服务器
stmp = smtplib.SMTP_SSL(smtp_server, smtp_port)
stmp.login(from_addr, qq_code)
# 组装发送内容
message = MIMEText(content, 'plain', 'utf-8') # 发送的内容
message['From'] = Header("开奖通知", 'utf-8') # 发件人
message['To'] = Header("订阅者", 'utf-8') # 收件人
message['Subject'] = Header(subject, 'utf-8') # 邮件标题
try:
stmp.sendmail(from_addr, to_addrs, message.as_string())
except Exception as e:
print('邮件发送失败--' + str(e))
def notify_dingtalk(config, content):
timestamp = str(round(time.time() * 1000))
secret_enc = config['sign'].encode('utf-8')
string_to_sign = '{}\n{}'.format(timestamp, config['sign'])
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
query_param = {'access_token': config['access_token'], 'timestamp': timestamp, 'sign': sign}
url = 'https://oapi.dingtalk.com/robot/send'
headers = {
"content-type": "application/json"
}
requests.post(url, headers=headers, params=query_param, data=content)
| 31.851852 | 123 | 0.64031 |
793fbf80d60f24705c468334a38466b31c1797ad | 1,342 | py | Python | tests/template_tests/filter_tests/test_autoescape.py | arachnegl/django | c4e796aa1bab70ee66151d24a061af811e08ad08 | [
"BSD-3-Clause"
] | 1 | 2019-01-14T10:58:43.000Z | 2019-01-14T10:58:43.000Z | tests/template_tests/filter_tests/test_autoescape.py | arachnegl/django | c4e796aa1bab70ee66151d24a061af811e08ad08 | [
"BSD-3-Clause"
] | null | null | null | tests/template_tests/filter_tests/test_autoescape.py | arachnegl/django | c4e796aa1bab70ee66151d24a061af811e08ad08 | [
"BSD-3-Clause"
] | 1 | 2020-05-25T08:55:19.000Z | 2020-05-25T08:55:19.000Z | from django.test import SimpleTestCase
from ..utils import setup, SafeClass, UnsafeClass
class AutoescapeStringfilterTests(SimpleTestCase):
"""
Filters decorated with stringfilter still respect is_safe.
"""
@setup({'autoescape-stringfilter01': '{{ unsafe|capfirst }}'})
def test_autoescape_stringfilter01(self):
output = self.engine.render_to_string('autoescape-stringfilter01', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You & me')
@setup({'autoescape-stringfilter02': '{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter02(self):
output = self.engine.render_to_string('autoescape-stringfilter02', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You & me')
@setup({'autoescape-stringfilter03': '{{ safe|capfirst }}'})
def test_autoescape_stringfilter03(self):
output = self.engine.render_to_string('autoescape-stringfilter03', {'safe': SafeClass()})
self.assertEqual(output, 'You > me')
@setup({'autoescape-stringfilter04': '{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter04(self):
output = self.engine.render_to_string('autoescape-stringfilter04', {'safe': SafeClass()})
self.assertEqual(output, 'You > me')
| 44.733333 | 105 | 0.695976 |
793fc03da33156c0602b7c9b96b9b664c70652c8 | 2,082 | py | Python | api/infrastructure/fileTXT.py | MayaraMachado/rota_viagem | 8131e63964600e1eb5271f107c6ebcbd10f30346 | [
"MIT"
] | null | null | null | api/infrastructure/fileTXT.py | MayaraMachado/rota_viagem | 8131e63964600e1eb5271f107c6ebcbd10f30346 | [
"MIT"
] | null | null | null | api/infrastructure/fileTXT.py | MayaraMachado/rota_viagem | 8131e63964600e1eb5271f107c6ebcbd10f30346 | [
"MIT"
] | null | null | null | class FileTXT:
'''
Class responsible for managing the reading and writing of the query file.
'''
def __init__(self, file):
'''
Instance and makes the call to read the file.
Args:
----
- file (str) filepath to access
'''
self.lines = []
self.filepath = file
self.valid = self.__read_file()
def __format_line(self, line, format_list=True):
'''
Transcribes the structure of the line, it can be either as the result
of reading a file to make it a list, or writing it to a file, to make
the list string.
Args:
----
- line (list or str) object representing the line that was executed.
- format_list (boolean) indicates whether to transform to list.
Returns:
-----
- (list or str) the result of the formatting
'''
if format_list:
return line.split(',')
else:
return ','.join(line)
def __read_file(self):
'''
Read the file.
Returns:
---
- boolean indicating whether the reading was completed.
Raises:
----
- FileNotFoundError:
- If the file is not found while attempting to read.
'''
archive = open(self.filepath, 'r')
lines = []
for line in archive:
line = line.strip()
line = self.__format_line(line)
lines.append(line)
archive.close()
self.lines = lines
return True
def get_lines(self):
'''
Gets the lines from the read file
Returns:
----
- list of lists
'''
return self.lines
def write_file(self, lines):
'''
Write at the end of an existing file.
Args:
-----
- lines (list of list) representing the lines to be written in the file.
Returns:
---
- boolean indicating whether the writing was completed.
Raises:
----
- FileNotFoundError:
- If the file is not found while attempting to write.
'''
with open(self.filepath, "a+") as file_object:
file_object.seek(0)
data = file_object.read(100)
if len(data) > 0 :
file_object.write("\n")
for line in lines:
line_formated = self.__format_line(line, format_list=False)
file_object.write(line_formated)
return True
| 20.411765 | 75 | 0.644092 |
793fc1b6a7d7abdb94935dd47f00c7fec5de8b94 | 877 | py | Python | src/download_data.py | shyan0903/522_Ramen | 3f09087fda543568d3d7c3e9759e92a815479cfc | [
"MIT"
] | 2 | 2021-11-28T03:33:45.000Z | 2021-11-28T03:33:52.000Z | src/download_data.py | shyan0903/522_Ramen | 3f09087fda543568d3d7c3e9759e92a815479cfc | [
"MIT"
] | 18 | 2021-11-19T03:29:31.000Z | 2021-12-12T02:15:25.000Z | src/download_data.py | shyan0903/522_Ramen | 3f09087fda543568d3d7c3e9759e92a815479cfc | [
"MIT"
] | 3 | 2021-11-20T07:35:05.000Z | 2021-11-20T23:03:25.000Z | # author: Irene and Anthea Chen
# date: 2021-11-24
"""Download needed dataframe
Usage: download_data.py --url=<url> --out_file=<out_file>
Options:
--url=<url> the url for the data set
--out_file=<out_file> Path (including filename) of where to locally write the file
"""
from docopt import docopt
import requests
import os
import pandas as pd
opt = docopt(__doc__)
def main(url, out_file):
try:
request = requests.get(url)
request.status_code == 200
except Exception as req:
print("Website at the provided url does not exist.")
print(req)
data = pd.read_excel(url, header=0)
try:
data.to_csv(out_file, index = False)
except:
os.makedirs(os.path.dirname(out_file))
data.to_csv(out_file, index = False)
if __name__ == "__main__":
main(opt["--url"], opt["--out_file"])
| 23.078947 | 85 | 0.645382 |
793fc2230eeaa8f1090349f1c9faa553c18d655d | 2,999 | py | Python | tests/integrational/native_threads/test_state.py | Versature/pubnub-python | a558d212a44ada6fbf2793a32e93685c959b8b22 | [
"MIT"
] | null | null | null | tests/integrational/native_threads/test_state.py | Versature/pubnub-python | a558d212a44ada6fbf2793a32e93685c959b8b22 | [
"MIT"
] | null | null | null | tests/integrational/native_threads/test_state.py | Versature/pubnub-python | a558d212a44ada6fbf2793a32e93685c959b8b22 | [
"MIT"
] | null | null | null | import logging
import threading
import unittest
import pubnub
from pubnub.models.consumer.presence import PNSetStateResult, PNGetStateResult
from pubnub.pubnub import PubNub
from tests.helper import pnconf_copy
from tests.integrational.vcr_helper import pn_vcr
pubnub.set_stream_logger('pubnub', logging.DEBUG)
class TestPubNubState(unittest.TestCase):
def setUp(self):
self.event = threading.Event()
def callback(self, response, status):
self.response = response
self.status = status
self.event.set()
@pn_vcr.use_cassette('tests/integrational/fixtures/native_threads/state/state_of_single_channel.yaml',
filter_query_parameters=['uuid', 'pnsdk'], match_on=['state_object_in_query'])
def test_single_channel(self):
ch = "state-native-sync-ch"
pubnub = PubNub(pnconf_copy())
pubnub.config.uuid = "state-native-sync-uuid"
state = {"name": "Alex", "count": 5}
pubnub.set_state() \
.channels(ch) \
.state(state) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNSetStateResult)
assert self.response.state['name'] == "Alex"
assert self.response.state['count'] == 5
self.event.clear()
pubnub.get_state() \
.channels(ch) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNGetStateResult)
assert self.response.channels[ch]['name'] == "Alex"
assert self.response.channels[ch]['count'] == 5
@pn_vcr.use_cassette('tests/integrational/fixtures/native_threads/state/state_of_multiple_channels.yaml',
filter_query_parameters=['uuid', 'pnsdk'], match_on=['state_object_in_query'])
def test_multiple_channels(self):
ch1 = "state-native-sync-ch-1"
ch2 = "state-native-sync-ch-2"
pubnub = PubNub(pnconf_copy())
pubnub.config.uuid = "state-native-sync-uuid"
state = {"name": "Alex", "count": 5}
pubnub.set_state() \
.channels([ch1, ch2]) \
.state(state) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNSetStateResult)
assert self.response.state['name'] == "Alex"
assert self.response.state['count'] == 5
self.event.clear()
pubnub.get_state() \
.channels([ch1, ch2]) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNGetStateResult)
assert self.response.channels[ch1]['name'] == "Alex"
assert self.response.channels[ch1]['count'] == 5
assert self.response.channels[ch2]['name'] == "Alex"
assert self.response.channels[ch2]['count'] == 5
| 35.282353 | 109 | 0.628876 |
793fc27b359019f4f9c3cfdc1bb0915e25881d95 | 10,858 | py | Python | echolect/filtering/filtering.py | ryanvolz/echolect | ec2594925f34fdaea69b64e725fccb0c99665a55 | [
"BSD-3-Clause"
] | 1 | 2022-03-24T22:48:12.000Z | 2022-03-24T22:48:12.000Z | echolect/filtering/filtering.py | scivision/echolect | ec2594925f34fdaea69b64e725fccb0c99665a55 | [
"BSD-3-Clause"
] | 1 | 2015-03-25T20:41:24.000Z | 2015-03-25T20:41:24.000Z | echolect/filtering/filtering.py | scivision/echolect | ec2594925f34fdaea69b64e725fccb0c99665a55 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2014, Ryan Volz
# All rights reserved.
#
# Distributed under the terms of the BSD 3-Clause ("BSD New") license.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
try:
import numba
except ImportError:
HAS_NUMBA = False
else:
HAS_NUMBA = True
del numba
from . import filters
from . import dopplerbanks
from . import util
from .util import *
__all__ = ['measure_filters', 'Filter', 'filter', 'doppler_coefs',
'measure_doppler_banks', 'DopplerBank', 'DopplerBankMax',
'doppler_bank', 'doppler_bank_max',
'matched_coefs', 'Matched', 'MatchedDoppler', 'MatchedDopplerMax',
'matched', 'matched_doppler', 'matched_doppler_max',
'inverse_coefs', 'Inverse', 'InverseDoppler', 'InverseDopplerMax',
'inverse', 'inverse_doppler', 'inverse_doppler_max']
__all__.extend(util.__all__)
# ******** Filter functions ********
def measure_filters(h, M, xdtype=np.complex_, number=100, disp=True,
meas_all=False):
flist = [
filters.CythonConv(h, M, xdtype),
filters.FFTW(h, M, xdtype, powerof2=True),
]
if HAS_NUMBA:
flist.extend([
filters.NumbaConv(h, M, xdtype),
filters.NumbaFFTW(h, M, xdtype, powerof2=True),
])
if meas_all:
flist.extend([
filters.Conv(h, M),
filters.StridedConv(h, M),
filters.SparseConv(h, M),
filters.FFTPack(h, M, xdtype, powerof2=True),
filters.NumpyFFT(h, M, xdtype, powerof2=True),
])
x = np.empty(M, xdtype)
x.real = 2*np.random.rand(M) - 1
if np.iscomplexobj(x):
x.imag = 2*np.random.rand(M) - 1
times = time_filters(flist, x, number)
# sort in order of times
tups = zip(times, flist)
tups.sort()
if disp:
for time, filt in tups:
print(filt.func_name + ': {0} s per call'.format(time/number))
times, flist = zip(*tups)
return times, flist
def Filter(h, M, xdtype=np.complex_, measure=True):
if measure is True:
times, flist = measure_filters(h, M, xdtype, number=10, disp=False,
meas_all=False)
filt = flist[np.argmin(times)]
else:
filt = filters.FFTW(h, M, xdtype, powerof2=True)
return filt
def filter(h, x, mode=None):
xshape = x.shape
filt = Filter(h, xshape[-1], measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
if mode is None or mode == 'full':
return res
else:
slc = getattr(filt, mode)
return res[..., slc]
def doppler_coefs(h, f):
"""Doppler shift the given filter h to match normalized frequency f.
The Doppler-shifted filter applied to a signal x will give the same result
(except for a constant phase shift) as the original filter applied to a
Doppler-shifted x, where x has been modulated by the complex exponential
with normalized frequency f.
The result of using this filter will also be equivalent to the k'th filter
of a N-filter Doppler bank if f == k/N.
Parameters
----------
h : 1-D ndarray
Coefficients of the filter to be Doppler-shifted.
f : float, typically in [-0.5, 0.5]
Normalized frequency (true frequency = f/T Hz) of Doppler shift.
Returns
-------
hd : 1-D ndarray
Coefficients of the Doppler-shifted filter.
"""
L = len(h)
hd = h*np.exp(-2*np.pi*1j*np.arange(L)*f)[::-1]
return hd
# ******** Doppler bank functions ********
def measure_doppler_banks(h, N, M, xdtype=np.complex_, number=100, disp=True,
meas_all=False):
flist = [
dopplerbanks.ShiftConvFFT(h, N, M, xdtype, powerof2=True),
dopplerbanks.SweepSpectraCython(h, N, M, xdtype),
dopplerbanks.SweepSpectraStridedInput(h, N, M, xdtype),
]
if HAS_NUMBA:
flist.extend([dopplerbanks.SweepSpectraNumba(h, N, M, xdtype)])
if meas_all:
flist.extend([
dopplerbanks.ShiftConv(h, N, M),
dopplerbanks.ShiftConvSparse(h, N, M),
dopplerbanks.ShiftConvSparseMod(h, N, M),
dopplerbanks.SweepSpectraStridedTaps(h, N, M),
dopplerbanks.SweepSpectraStridedTapsMod(h, N, M),
])
if HAS_NUMBA:
flist.extend([dopplerbanks.ShiftConvNumbaFFT(h, N, M, xdtype,
powerof2=True)])
x = np.empty(M, xdtype)
x.real = 2*np.random.rand(M) - 1
if np.iscomplexobj(x):
x.imag = 2*np.random.rand(M) - 1
times = time_filters(flist, x, number)
# sort in order of times
tups = zip(times, flist)
tups.sort()
if disp:
for time, filt in tups:
print(filt.func_name + ': {0} s per call'.format(time/number))
times, flist = zip(*tups)
return times, flist
def DopplerBank(h, N, M, xdtype=np.complex_, measure=True):
if measure is True:
times, flist = measure_doppler_banks(h, N, M, xdtype, number=10,
disp=False, meas_all=False)
bank = flist[np.argmin(times)]
else:
bank = dopplerbanks.SweepSpectraStridedInput(h, N, M, xdtype)
return bank
def DopplerBankMax(h, N, M, xdtype=np.complex_, measure=True):
bank = DopplerBank(h, N, M, xdtype, measure)
def doppler_bank_max(x):
"""Apply a Doppler filter bank to the input, selecting frequency of
maximum response.
Parameters
----------
x : 1-D ndarray
Values to be filtered.
Returns
-------
y : 1-D ndarray
Filtered values for frequency with maximum response.
f : float
Normalized frequency (true frequency = f/T Hz) of maximum response
"""
y = bank(x)
shift = np.unravel_index(np.argmax(y.real**2 + y.imag**2), y.shape)[0]
f = float(shift)/N
return y[shift], f
doppler_bank_max.__dict__.update(bank.__dict__)
doppler_bank_max.bank = bank
return doppler_bank_max
def doppler_bank(h, N, x, mode=None):
xshape = x.shape
filt = DopplerBank(h, N, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return apply_filter_mode(filt, res, mode)
def doppler_bank_max(h, N, x, mode=None):
xshape = x.shape
filt = DopplerBankMax(h, N, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return (apply_filter_mode(filt, res[0], mode), res[1])
# ******** Matched filter functions ********
def matched_coefs(s):
return s.conj()[::-1].copy('C')
def Matched(s, M, xdtype=np.complex_, measure=True):
h = matched_coefs(s)
filt = Filter(h, M, xdtype, measure)
filt.nodelay = slice(filt.L - 1, None)
return filt
def MatchedDoppler(s, N, M, xdtype=np.complex_, measure=True):
h = matched_coefs(s)
filt = DopplerBank(h, N, M, xdtype, measure)
filt.nodelay = slice(filt.L - 1, None)
return filt
def MatchedDopplerMax(s, N, M, xdtype=np.complex_, measure=True):
h = matched_coefs(s)
filt = DopplerBankMax(h, N, M, xdtype, measure)
filt.nodelay = slice(filt.L - 1, None)
return filt
def matched(s, x, mode=None):
xshape = x.shape
filt = Matched(s, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return apply_filter_mode(filt, res, mode)
def matched_doppler(s, N, x, mode=None):
xshape = x.shape
filt = MatchedDoppler(s, N, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return apply_filter_mode(filt, res, mode)
def matched_doppler_max(s, N, x, mode=None):
xshape = x.shape
filt = MatchedDopplerMax(s, N, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return (apply_filter_mode(filt, res[0], mode), res[1])
# ******** Inverse filter functions ********
def inverse_coefs(s, ntaps):
S = np.fft.fft(s, n=ntaps)
# q is the output we want from the inverse filter, a delta with proper delay
q = np.zeros(ntaps, dtype=s.dtype)
# delay = (ntaps + len(s) - 1)//2 places delta in middle of output with
# outlen = ntaps + len(s) - 1
# this ensures that the non-circular convolution that we use to apply this
# filter gives a result as close as possible to the ideal inverse circular
# convolution
q[(ntaps + len(s) - 1)//2] = 1
Q = np.fft.fft(q)
H = Q/S
h = np.fft.ifft(H)
if not np.iscomplexobj(s):
h = h.real.copy('C') # copy needed so h is C-contiguous
return h
def Inverse(s, ntaps, M, xdtype=np.complex_, measure=True):
h = inverse_coefs(s, ntaps)
filt = Filter(h, M, xdtype, measure)
delay = (filt.L + len(s) - 1)//2
# using delay of the ideal output we defined in inverse_coefs
filt.nodelay = slice(delay, None)
return filt
def InverseDoppler(s, ntaps, N, M, xdtype=np.complex_, measure=True):
h = inverse_coefs(s, ntaps)
filt = DopplerBank(h, N, M, xdtype, measure)
delay = (filt.L + len(s) - 1)//2
# using delay of the ideal output we defined in inverse_coefs
filt.nodelay = slice(delay, None)
return filt
def InverseDopplerMax(s, ntaps, N, M, xdtype=np.complex_, measure=True):
h = inverse_coefs(s, ntaps)
filt = DopplerBankMax(h, N, M, xdtype, measure)
delay = (filt.L + len(s) - 1)//2
# using delay of the ideal output we defined in inverse_coefs
filt.nodelay = slice(delay, None)
return filt
def inverse(s, ntaps, x, mode=None):
xshape = x.shape
filt = Inverse(s, ntaps, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return apply_filter_mode(filt, res, mode)
def inverse_doppler(s, ntaps, N, x, mode=None):
xshape = x.shape
filt = InverseDoppler(s, ntaps, N, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return apply_filter_mode(filt, res, mode)
def inverse_doppler_max(s, ntaps, N, x, mode=None):
xshape = x.shape
filt = InverseDopplerMax(s, ntaps, N, xshape[-1], x.dtype, measure=False)
if len(xshape) > 1:
res = apply_to_2d(filt, x)
else:
res = filt(x)
return (apply_filter_mode(filt, res[0], mode), res[1])
| 30.672316 | 80 | 0.596519 |
793fc2a2183f4dc3ea2a2e5fde1195b39a7aaec7 | 2,104 | py | Python | dnacentersdk/models/validators/v1_3_0/jsd_58a3699e489b9529.py | daxm/dnacentersdk | 5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b | [
"MIT"
] | 4 | 2019-08-05T23:08:06.000Z | 2019-09-19T14:16:31.000Z | dnacentersdk/models/validators/v1_3_0/jsd_58a3699e489b9529.py | daxm/dnacentersdk | 5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b | [
"MIT"
] | 1 | 2020-01-11T18:12:14.000Z | 2020-07-13T09:14:55.000Z | dnacentersdk/models/validators/v1_3_0/jsd_58a3699e489b9529.py | daxm/dnacentersdk | 5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b | [
"MIT"
] | 4 | 2020-02-28T15:44:04.000Z | 2020-11-01T19:35:08.000Z | # -*- coding: utf-8 -*-
"""DNA Center Get Credential sub type by credential Id data model.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator58A3699E489B9529(object):
"""Get Credential sub type by credential Id request schema
definition."""
def __init__(self):
super(JSONSchemaValidator58A3699E489B9529, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"additionalProperties": false,
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| 35.661017 | 78 | 0.71673 |
793fc3a5a1439856410fabedd68305be87121987 | 409 | py | Python | test/test_tokenizer.py | ArielTriana/battle-sim | 75205bbff62024d28b42fd25ce268440ecc6f009 | [
"MIT"
] | 2 | 2021-11-23T15:47:07.000Z | 2022-03-03T01:38:19.000Z | test/test_tokenizer.py | ArielTriana/battle-sim | 75205bbff62024d28b42fd25ce268440ecc6f009 | [
"MIT"
] | 11 | 2021-11-05T15:47:39.000Z | 2022-02-07T05:05:11.000Z | test/test_tokenizer.py | ArielTriana/battle-sim | 75205bbff62024d28b42fd25ce268440ecc6f009 | [
"MIT"
] | 1 | 2021-12-07T00:00:48.000Z | 2021-12-07T00:00:48.000Z | import pytest
from src.language.tokenizer import Tokenizer
def test_1():
case1 = "number a = 3.34"
tokens = Tokenizer()(case1)
assert len(tokens) == 5
assert tokens[3].lexeme == "3.34"
def test_2():
case2 = "function number fibo(number n) -> {\nif n lte 1 -> {\n return 1 \n} return fibo(n- 1) + fibo(n - 2)}"
tokens = Tokenizer()(case2)
assert len(tokens) == 34
| 18.590909 | 114 | 0.596577 |
793fc5736cb4e6d539aa5675230d750e3cc1a963 | 7,454 | py | Python | transformer-xh/main.py | maximzubkov/Transformer-XH | 1712c195b75473ba50779baf92e291b1d29ad248 | [
"MIT"
] | null | null | null | transformer-xh/main.py | maximzubkov/Transformer-XH | 1712c195b75473ba50779baf92e291b1d29ad248 | [
"MIT"
] | null | null | null | transformer-xh/main.py | maximzubkov/Transformer-XH | 1712c195b75473ba50779baf92e291b1d29ad248 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import argparse
import os
import json
import numpy as np
from model import Model_Hotpot, Model_FEVER
import data
import logging
import random
import torch.nn as nn
import torch.distributed as dist
from tqdm import tqdm
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers import AdamW, WarmupLinearSchedule
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
import torch.nn.functional as F
from Trainer import train_hotpot, train_fever
from Evaluator import evaluation_hotpot, evaluation_fever
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
def parse_args():
parser = argparse.ArgumentParser("Transformer-XH")
parser.add_argument("--config-file", "--cf",
help="pointer to the configuration file of the experiment", type=str, required=True)
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--checkpoint',
type=int,
default=2500)
parser.add_argument('--loss_scale',
type=float, default=0,
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--test',
default=False,
action='store_true',
help="Whether on test mode")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
config = json.load(open(args.config_file, 'r', encoding="utf-8"))
base_dir = config['system']['base_dir']
os.makedirs(os.path.join(base_dir, config['name']), exist_ok=True)
os.makedirs(os.path.join(base_dir, config['name'], "saved_models/"), exist_ok=True)
logging.info("********* Model Configuration ************")
args.config = config
args.task = config['task']
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available()
and not args.no_cuda else "cpu")
else:
device = torch.device("cuda", args.local_rank)
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
if args.fp16:
args.fp16 = True
#### here we only support single GPU training
n_gpu = 1
logging.info("device: {} n_gpu: {}, 16-bits training: {}".format(
device, n_gpu, args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(
config["training"]["train_batch_size"] / args.gradient_accumulation_steps)
args.max_seq_length = config["model"]["bert_max_len"]
# Setting all the seeds so that the task is random but same accross processes
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
# set device
args.device = device
args.n_gpu = n_gpu
# Loading Tokenizer
tokenizer = BertTokenizer.from_pretrained(config["bert_token_file"])
args.tokenizer = tokenizer
if config['task'] == 'hotpotqa':
model = Model_Hotpot(args, config)
elif config['task'] == 'fever':
model = Model_FEVER(args, config)
if args.fp16:
model.half()
model.network.to(device)
### Model Evaluation
if args.test:
model.load(os.path.join(base_dir, config['name'], "saved_models/model_finetuned_epoch_{}.pt".format(0)))
model.eval()
eval_file = config["system"]['test_data']
if config['task'] == 'hotpotqa':
final_pred = evaluation_hotpot(model, eval_file, config, args)
json.dump(final_pred, open("out_dev.json", "w"))
elif config['task'] == 'fever':
auc, pred_dict = evaluation_fever(model, eval_file, config, args)
### Model Training
else:
# Prepare Optimizer
param_optimizer = list(model.network.named_parameters())
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=config["training"]["learning_rate"])
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=config["training"]["warmup_proportion"], t_total=config["training"]["total_training_steps"])
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer, FusedAdam
except:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=config["training"]["learning_rate"],
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(
optimizer, static_loss_scale=args.loss_scale)
best_score = -1
if config['task'] == 'hotpotqa':
for index in range(config['training']['epochs']):
best_score = train_hotpot(model, index, config, args, best_score, optimizer, scheduler)
elif config['task'] == 'fever':
for index in range(config['training']['epochs']):
best_score = train_fever(model, index, config, args, best_score, optimizer, scheduler)
| 39.231579 | 157 | 0.612825 |
793fc5b1b9ea75fe301eb2c7c0a5f703090ee4e2 | 1,294 | py | Python | alchemy/scripts/update_existing_annotation_context.py | edu-gp/annotation_tool | c383cfeca9f5773e540deaad8d06996ecbb95b32 | [
"Apache-2.0"
] | 2 | 2020-07-17T11:45:05.000Z | 2020-07-17T13:39:14.000Z | alchemy/scripts/update_existing_annotation_context.py | georgianpartners/annotation_tool | c383cfeca9f5773e540deaad8d06996ecbb95b32 | [
"Apache-2.0"
] | 46 | 2020-09-30T14:40:45.000Z | 2021-02-26T14:31:14.000Z | alchemy/scripts/update_existing_annotation_context.py | edu-gp/annotation_tool | c383cfeca9f5773e540deaad8d06996ecbb95b32 | [
"Apache-2.0"
] | null | null | null | import logging
from alchemy.db.config import DevelopmentConfig
from alchemy.db.model import AnnotationRequest, ClassificationAnnotation, Database, User
if __name__ == "__main__":
logging.root.setLevel(logging.INFO)
db = Database(DevelopmentConfig.SQLALCHEMY_DATABASE_URI)
annotations = (
db.session.query(ClassificationAnnotation)
.join(User)
.filter(User.username != "salesforce_bot")
.all()
)
entities = [annotation.entity for annotation in annotations]
annotation_request_context_and_entity = (
db.session.query(AnnotationRequest.context, AnnotationRequest.entity)
.filter(AnnotationRequest.entity.in_(entities))
.all()
)
entity_to_context = {
item[1]: item[0] for item in annotation_request_context_and_entity
}
to_update = []
for annotation in annotations:
if annotation.entity in entity_to_context:
logging.info("Updating Entity {}".format(annotation.entity))
annotation.context = entity_to_context[annotation.entity]
to_update.append(annotation)
else:
logging.info("Did not find request for entity {}".format(annotation.entity))
db.session.add_all(to_update)
db.session.commit()
db.session.close()
| 34.052632 | 88 | 0.693199 |
793fc6d1994b86f86b9993b914d736042ab5617d | 12,413 | py | Python | insomniac/action_runners/interact/action_handle_blogger.py | edukessen/Insomniac | 44334aa5991c506282f5a5e6432d602774569eae | [
"MIT"
] | null | null | null | insomniac/action_runners/interact/action_handle_blogger.py | edukessen/Insomniac | 44334aa5991c506282f5a5e6432d602774569eae | [
"MIT"
] | null | null | null | insomniac/action_runners/interact/action_handle_blogger.py | edukessen/Insomniac | 44334aa5991c506282f5a5e6432d602774569eae | [
"MIT"
] | null | null | null | from functools import partial
from insomniac.action_runners.actions_runners_manager import ActionState
from insomniac.actions_impl import interact_with_user, InteractionStrategy
from insomniac.actions_types import LikeAction, FollowAction, InteractAction, GetProfileAction, StoryWatchAction, \
BloggerInteractionType, CommentAction, FilterAction, SourceType
from insomniac.limits import process_limits
from insomniac.report import print_short_report, print_interaction_types
from insomniac.sleeper import sleeper
from insomniac.softban_indicator import softban_indicator
from insomniac.storage import FollowingStatus
from insomniac.utils import *
from insomniac.views import TabBarView, ProfileView
def extract_blogger_instructions(source):
split_idx = source.find('-')
if split_idx == -1:
print("There is no special interaction-instructions for " + source + ". Working with " + source + " followers.")
return source, BloggerInteractionType.FOLLOWERS
selected_instruction = None
source_profile_name = source[:split_idx]
interaction_instructions_str = source[split_idx+1:]
for blogger_instruction in BloggerInteractionType:
if blogger_instruction.value == interaction_instructions_str:
selected_instruction = blogger_instruction
break
if selected_instruction is None:
print("Couldn't use interaction-instructions " + interaction_instructions_str +
". Working with " + source + " followers.")
selected_instruction = BloggerInteractionType.FOLLOWERS
return source_profile_name, selected_instruction
def handle_blogger(device,
username,
instructions,
session_state,
likes_count,
stories_count,
follow_percentage,
like_percentage,
comment_percentage,
comments_list,
storage,
on_action,
is_limit_reached,
is_passed_filters,
action_status):
is_myself = username == session_state.my_username
source_type = f'{SourceType.BLOGGER.value}-{instructions.value}'
interaction = partial(interact_with_user,
device=device,
user_source=username,
source_type=source_type,
my_username=session_state.my_username,
on_action=on_action)
search_view = TabBarView(device).navigate_to_search()
blogger_profile_view = search_view.navigate_to_username(username, on_action)
if blogger_profile_view is None:
return
sleeper.random_sleep()
is_profile_empty = softban_indicator.detect_empty_profile(device)
if is_profile_empty:
return
followers_following_list_view = None
if instructions == BloggerInteractionType.FOLLOWERS:
followers_following_list_view = blogger_profile_view.navigate_to_followers()
elif instructions == BloggerInteractionType.FOLLOWING:
followers_following_list_view = blogger_profile_view.navigate_to_following()
if is_myself:
followers_following_list_view.scroll_to_bottom()
followers_following_list_view.scroll_to_top()
def pre_conditions(follower_name, follower_name_view):
if storage.is_user_in_blacklist(follower_name):
print("@" + follower_name + " is in blacklist. Skip.")
return False
elif storage.check_user_was_filtered(follower_name):
print("@" + follower_name + ": already filtered in past. Skip.")
return False
elif not is_myself and storage.check_user_was_interacted(follower_name):
print("@" + follower_name + ": already interacted. Skip.")
return False
elif is_myself and storage.check_user_was_interacted_recently(follower_name):
print("@" + follower_name + ": already interacted in the last week. Skip.")
return False
return True
def interact_with_follower(follower_name, follower_name_view):
"""
:return: whether we should continue interaction with other users after this one
"""
is_interact_limit_reached, interact_reached_source_limit, interact_reached_session_limit = \
is_limit_reached(InteractAction(source_name=username, source_type=source_type, user=follower_name, succeed=True), session_state)
if not process_limits(is_interact_limit_reached, interact_reached_session_limit,
interact_reached_source_limit, action_status, "Interaction"):
return False
is_get_profile_limit_reached, get_profile_reached_source_limit, get_profile_reached_session_limit = \
is_limit_reached(GetProfileAction(user=follower_name), session_state)
if not process_limits(is_get_profile_limit_reached, get_profile_reached_session_limit,
get_profile_reached_source_limit, action_status, "Get-Profile"):
return False
is_all_filters_satisfied = False
if is_passed_filters is not None:
print_debug(f"Running filter-ahead on @{follower_name}")
should_continue, is_all_filters_satisfied = is_passed_filters(device, follower_name, reset=True,
filters_tags=['BEFORE_PROFILE_CLICK'])
if not should_continue:
on_action(FilterAction(user=follower_name))
return True
if not is_all_filters_satisfied:
print_debug("Not all filters are satisfied with filter-ahead, continue filtering inside the profile-page")
print("@" + follower_name + ": interact")
follower_name_view.click()
on_action(GetProfileAction(user=follower_name))
sleeper.random_sleep()
is_profile_empty = softban_indicator.detect_empty_profile(device)
if is_profile_empty:
print("Back to followers list")
device.back()
return True
follower_profile_view = ProfileView(device, follower_name == session_state.my_username)
if is_passed_filters is not None:
if not is_all_filters_satisfied:
should_continue, _ = is_passed_filters(device, follower_name, reset=False)
if not should_continue:
on_action(FilterAction(user=follower_name))
# Continue to next follower
print("Back to profiles list")
device.back()
return True
is_like_limit_reached, like_reached_source_limit, like_reached_session_limit = \
is_limit_reached(LikeAction(source_name=username, source_type=source_type, user=follower_name), session_state)
is_follow_limit_reached, follow_reached_source_limit, follow_reached_session_limit = \
is_limit_reached(FollowAction(source_name=username, source_type=source_type, user=follower_name), session_state)
is_watch_limit_reached, watch_reached_source_limit, watch_reached_session_limit = \
is_limit_reached(StoryWatchAction(source_name=username, source_type=source_type,user=follower_name), session_state)
is_comment_limit_reached, comment_reached_source_limit, comment_reached_session_limit = \
is_limit_reached(CommentAction(source_name=username, source_type=source_type, user=follower_name, comment=""), session_state)
is_private = follower_profile_view.is_private_account()
if is_private:
if is_passed_filters is None:
print(COLOR_OKGREEN + "@" + follower_name + " has private account, won't interact." + COLOR_ENDC)
on_action(FilterAction(user=follower_name))
on_action(InteractAction(source_name=username, source_type=source_type, user=follower_name, succeed=False))
print("Back to profiles list")
device.back()
return True
print("@" + follower_name + ": Private account - images wont be liked.")
do_have_stories = follower_profile_view.is_story_available()
if not do_have_stories:
print("@" + follower_name + ": seems there are no stories to be watched.")
is_likes_enabled = likes_count != '0'
is_stories_enabled = stories_count != '0'
is_follow_enabled = follow_percentage != 0
is_comment_enabled = comment_percentage != 0
likes_value = get_value(likes_count, "Likes count: {}", 2, max_count=12)
stories_value = get_value(stories_count, "Stories to watch: {}", 1)
can_like = not is_like_limit_reached and not is_private and likes_value > 0
can_follow = (not is_follow_limit_reached) and storage.get_following_status(follower_name) == FollowingStatus.NONE and follow_percentage > 0
can_watch = (not is_watch_limit_reached) and do_have_stories and stories_value > 0
can_comment = (not is_comment_limit_reached) and not is_private and comment_percentage > 0
can_interact = can_like or can_follow or can_watch or can_comment
if not can_interact:
print("@" + follower_name + ": Cant be interacted (due to limits / already followed). Skip.")
on_action(InteractAction(source_name=username, source_type=source_type, user=follower_name, succeed=False))
else:
print_interaction_types(follower_name, can_like, can_follow, can_watch, can_comment)
interaction_strategy = InteractionStrategy(do_like=can_like,
do_follow=can_follow,
do_story_watch=can_watch,
do_comment=can_comment,
likes_count=likes_value,
follow_percentage=follow_percentage,
like_percentage=like_percentage,
stories_count=stories_value,
comment_percentage=comment_percentage,
comments_list=comments_list)
is_liked, is_followed, is_watch, is_commented = interaction(username=follower_name, interaction_strategy=interaction_strategy)
if is_liked or is_followed or is_watch or is_commented:
on_action(InteractAction(source_name=username, source_type=source_type, user=follower_name, succeed=True))
print_short_report(f"@{username}", session_state)
else:
on_action(InteractAction(source_name=username, source_type=source_type, user=follower_name, succeed=False))
can_continue = True
if ((is_like_limit_reached and is_likes_enabled) or not is_likes_enabled) and \
((is_follow_limit_reached and is_follow_enabled) or not is_follow_enabled) and \
((is_comment_limit_reached and is_comment_enabled) or not is_comment_enabled) and \
((is_watch_limit_reached and is_stories_enabled) or not is_stories_enabled):
# If one of the limits reached for source-limit, move to next source
if (like_reached_source_limit is not None and like_reached_session_limit is None) or \
(follow_reached_source_limit is not None and follow_reached_session_limit is None):
can_continue = False
action_status.set_limit(ActionState.SOURCE_LIMIT_REACHED)
# If all of the limits reached for session-limit, finish the session
if ((like_reached_session_limit is not None and is_likes_enabled) or not is_likes_enabled) and \
((follow_reached_session_limit is not None and is_follow_enabled) or not is_follow_enabled):
can_continue = False
action_status.set_limit(ActionState.SESSION_LIMIT_REACHED)
print("Back to profiles list")
device.back()
return can_continue
followers_following_list_view.iterate_over_followers(is_myself, interact_with_follower, pre_conditions)
| 51.082305 | 148 | 0.66664 |
793fc8ef05e7ebe219efb57209d7931cd48a6d8d | 15,791 | py | Python | scrapli/ssh_config.py | dmfigol/scrapli | d1f8641a1e3f490dab6bae704f092255efe6655c | [
"MIT"
] | null | null | null | scrapli/ssh_config.py | dmfigol/scrapli | d1f8641a1e3f490dab6bae704f092255efe6655c | [
"MIT"
] | null | null | null | scrapli/ssh_config.py | dmfigol/scrapli | d1f8641a1e3f490dab6bae704f092255efe6655c | [
"MIT"
] | null | null | null | """scrapli.ssh_config"""
import os
import re
import shlex
import sys
from copy import deepcopy
from typing import Dict, Optional
from scrapli.exceptions import ScrapliTypeError
if sys.version_info >= (3, 8):
Match = re.Match
else:
from typing import Match # pragma: no cover
HOST_ATTRS = (
"port",
"user",
"address_family",
"bind_address",
"connect_timeout",
"identities_only",
"identity_file",
"keyboard_interactive",
"password_authentication",
"preferred_authentication",
)
class SSHConfig:
def __init__(self, ssh_config_file: str) -> None:
"""
Initialize SSHConfig Object
Parse OpenSSH config file
Try to load the following data for all entries in config file:
Host
HostName
Port
User
*AddressFamily
*BindAddress
*ConnectTimeout
IdentitiesOnly
IdentityFile
*KbdInteractiveAuthentication
*PasswordAuthentication
*PreferredAuthentications
* items are mostly ready to load but are unused in scrapli right now so are not being set
at this point.
NOTE: this does *not* accept duplicate "*" entries -- the final "*" entry will overwrite any
previous "*" entries. In general for system transport this shouldn't matter much because
scrapli only cares about parsing the config file to see if a key (any key) exists for a
given host (we care about that because ideally we use "pipes" auth, but this is only an
option if we have a key to auth with).
Args:
ssh_config_file: string path to ssh configuration file
Returns:
None
Raises:
ScrapliTypeError: if non-string value provided for ssh_config_file
"""
if not isinstance(ssh_config_file, str):
raise ScrapliTypeError(f"`ssh_config_file` expected str, got {type(ssh_config_file)}")
self.ssh_config_file = os.path.expanduser(ssh_config_file)
if self.ssh_config_file:
with open(self.ssh_config_file, "r") as f:
self.ssh_config = f.read()
self.hosts = self._parse()
if not self.hosts:
self.hosts = {}
if "*" not in self.hosts.keys():
self.hosts["*"] = Host()
self.hosts["*"].hosts = "*"
else:
self.hosts = {}
self.hosts["*"] = Host()
self.hosts["*"].hosts = "*"
# finally merge all args from less specific hosts into the more specific hosts, preserving
# the options from the more specific hosts of course
self._merge_hosts()
def __str__(self) -> str:
"""
Magic str method for SSHConfig class
Args:
N/A
Returns:
str: string representation of object
Raises:
N/A
"""
return "SSHConfig Object"
def __repr__(self) -> str:
"""
Magic repr method for SSHConfig class
Args:
N/A
Returns:
str: repr for class object
Raises:
N/A
"""
class_dict = self.__dict__.copy()
del class_dict["ssh_config"]
return f"SSHConfig {class_dict}"
def __bool__(self) -> bool:
"""
Magic bool method; return True if ssh_config_file
Args:
N/A
Returns:
bool: True/False if ssh_config_file
Raises:
N/A
"""
if self.ssh_config:
return True
return False
@staticmethod
def _strip_comments(line: str) -> str:
"""
Strip out comments from ssh config file lines
Args:
line: to strip comments from
Returns:
str: rejoined ssh config file line after stripping comments
Raises:
N/A
"""
line = " ".join(shlex.split(line, comments=True))
return line
def _parse(self) -> Dict[str, "Host"]:
"""
Parse SSH configuration file
Args:
N/A
Returns:
discovered_hosts: dict of host objects discovered in ssh config file
Raises:
N/A
"""
# uncomment next line and handle global patterns (stuff before hosts) at some point
# global_config_pattern = re.compile(r"^.*?\b(?=host)", flags=re.I | re.S)
# use word boundaries with a positive lookahead to get everything between the word host
# need to do this as whitespace/formatting is not really a thing in ssh_config file
# match host\s to ensure we don't pick up hostname and split things there accidentally
host_pattern = re.compile(r"\bhost.*?\b(?=host\s|\s+$|$)", flags=re.I | re.S)
host_entries = re.findall(pattern=host_pattern, string=self.ssh_config)
discovered_hosts: Dict[str, Host] = {}
if not host_entries:
return discovered_hosts
# do we need to add whitespace between match and end of line to ensure we match correctly?
hosts_pattern = re.compile(r"^\s*host[\s=]+(.*)$", flags=re.I | re.M)
hostname_pattern = re.compile(r"^\s*hostname[\s=]+([\w.-]*)$", flags=re.I | re.M)
port_pattern = re.compile(r"^\s*port[\s=]+([\d]*)$", flags=re.I | re.M)
user_pattern = re.compile(r"^\s*user[\s=]+([\w]*)$", flags=re.I | re.M)
# address_family_pattern = None
# bind_address_pattern = None
# connect_timeout_pattern = None
identities_only_pattern = re.compile(
r"^\s*identitiesonly[\s=]+(yes|no)$", flags=re.I | re.M
)
identity_file_pattern = re.compile(
r"^\s*identityfile[\s=]+([\w.\/\@~-]*)$", flags=re.I | re.M
)
# keyboard_interactive_pattern = None
# password_authentication_pattern = None
# preferred_authentication_pattern = None
for host_entry in host_entries:
host = Host()
host_line = re.search(pattern=hosts_pattern, string=host_entry)
if isinstance(host_line, Match):
host.hosts = self._strip_comments(host_line.groups()[0])
else:
host.hosts = ""
hostname = re.search(pattern=hostname_pattern, string=host_entry)
if isinstance(hostname, Match):
host.hostname = self._strip_comments(hostname.groups()[0])
port = re.search(pattern=port_pattern, string=host_entry)
if isinstance(port, Match):
host.port = int(self._strip_comments(port.groups()[0]))
user = re.search(pattern=user_pattern, string=host_entry)
if isinstance(user, Match):
host.user = self._strip_comments(user.groups()[0])
# address_family = re.search(user_pattern, host_entry[0])
# bind_address = re.search(user_pattern, host_entry[0])
# connect_timeout = re.search(user_pattern, host_entry[0])
identities_only = re.search(pattern=identities_only_pattern, string=host_entry)
if isinstance(identities_only, Match):
host.identities_only = self._strip_comments(identities_only.groups()[0])
identity_file = re.search(pattern=identity_file_pattern, string=host_entry)
if isinstance(identity_file, Match):
host.identity_file = os.path.expanduser(
self._strip_comments(identity_file.groups()[0])
)
# keyboard_interactive = re.search(user_pattern, host_entry[0])
# password_authentication = re.search(user_pattern, host_entry[0])
# preferred_authentication = re.search(user_pattern, host_entry[0])
discovered_hosts[host.hosts] = host
return discovered_hosts
def _merge_hosts(self) -> None:
"""
Merge less specific host pattern data into a given host
Args:
N/A
Returns:
None
Raises:
N/A
"""
for host in self.hosts:
_current_hosts = deepcopy(self.hosts)
while True:
fuzzy_match = self._lookup_fuzzy_match(host=host, hosts=_current_hosts)
for attr in HOST_ATTRS:
if not getattr(self.hosts[host], attr):
setattr(self.hosts[host], attr, getattr(self.hosts[fuzzy_match], attr))
try:
_current_hosts.pop(fuzzy_match)
except KeyError:
# this means we hit the "*" entry twice and we can bail out
break
def _lookup_fuzzy_match(self, host: str, hosts: Optional[Dict[str, "Host"]] = None) -> str:
"""
Look up fuzzy matched hosts
Get the best match ssh config Host entry for a given host; this allows for using
the splat and question-mark operators in ssh config file
Args:
host: host to lookup in discovered_hosts dict
hosts: hosts dict to operate on; used for passing in partial dict of hosts while
performing merge operations
Returns:
str: Nearest match (if applicable) host or `*` if none found
Raises:
N/A
"""
hosts = hosts or self.hosts
possible_matches = []
for host_entry in hosts.keys():
host_list = host_entry.split()
for host_pattern in host_list:
# replace periods with literal period
# replace asterisk (match 0 or more things) with appropriate regex
# replace question mark (match one thing) with appropriate regex
cleaned_host_pattern = (
host_pattern.replace(".", r"\.").replace("*", r"(.*)").replace("?", r"(.)")
)
# compile with case insensitive
search_pattern = re.compile(cleaned_host_pattern, flags=re.I)
result = re.search(pattern=search_pattern, string=host)
# if we get a result, append it and the original pattern to the possible matches
if result:
possible_matches.append((result, host_entry))
# initialize a None best match
current_match = None
for match in possible_matches:
if current_match is None:
current_match = match
# count how many chars were replaced to get regex to work
chars_replaced = 0
for start_char, end_char in match[0].regs[1:]:
chars_replaced += end_char - start_char
# count how many chars were replaced to get regex to work on best match
best_match_chars_replaced = 0
for start_char, end_char in current_match[0].regs[1:]:
best_match_chars_replaced += end_char - start_char
# if match replaced less chars than "best_match" we have a new best match
if chars_replaced < best_match_chars_replaced:
current_match = match
if current_match is not None:
best_match = current_match[1]
else:
best_match = "*"
return best_match
def lookup(self, host: str) -> "Host":
"""
Lookup a given host
Args:
host: host to lookup in discovered_hosts dict
Returns:
Host: best matched host from parsed ssh config file hosts, "*" if no better match found
Raises:
N/A
"""
# return exact 1:1 match if exists
if host in self.hosts.keys():
return self.hosts[host]
# return match if given host is an exact match for a host entry
for host_entry in self.hosts:
host_list = host_entry.split()
if host in host_list:
return self.hosts[host_entry]
# otherwise need to select the most correct host entry
fuzzy_match = self._lookup_fuzzy_match(host)
return self.hosts[fuzzy_match]
class Host:
def __init__(self) -> None:
"""
Host Object
Create a Host object based on ssh config file information
"""
self.hosts: str = ""
self.hostname: Optional[str] = None
self.port: Optional[int] = None
self.user: str = ""
self.address_family: Optional[str] = None
self.bind_address: Optional[str] = None
self.connect_timeout: Optional[str] = None
self.identities_only: Optional[str] = None
self.identity_file: Optional[str] = None
self.keyboard_interactive: Optional[str] = None
self.password_authentication: Optional[str] = None
self.preferred_authentication: Optional[str] = None
def __str__(self) -> str:
"""
Magic str method for HostEntry class
Args:
N/A
Returns:
str: string for class object
Raises:
N/A
"""
return f"Host: {self.hosts}"
def __repr__(self) -> str:
"""
Magic repr method for HostEntry class
Args:
N/A
Returns:
str: repr for class object
Raises:
N/A
"""
class_dict = self.__dict__.copy()
return f"Host {class_dict}"
class SSHKnownHosts:
def __init__(self, ssh_known_hosts_file: str) -> None:
"""
Initialize SSHKnownHosts Object
Parse OpenSSH known hosts file
Try to load the following data for all entries in known hosts file:
Host
Key Type
Public Key
Args:
ssh_known_hosts_file: string path to ssh known hosts file
Returns:
None
Raises:
TypeError: if non-string value provided for ssh_known_hosts
"""
if not isinstance(ssh_known_hosts_file, str):
raise TypeError(
f"`ssh_known_hosts_file` expected str, got {type(ssh_known_hosts_file)}"
)
self.ssh_known_hosts_file = os.path.expanduser(ssh_known_hosts_file)
if self.ssh_known_hosts_file:
with open(self.ssh_known_hosts_file, "r") as f:
self.ssh_known_hosts = f.read()
self.hosts = self._parse()
if not self.hosts:
self.hosts = {}
else:
self.hosts = {}
def _parse(self) -> Dict[str, Dict[str, str]]:
"""
Parse SSH configuration file
Args:
N/A
Returns:
discovered_hosts: dict of host objects discovered in known hosts file
Raises:
N/A
"""
# match any non whitespace from start of the line... this should cover v4/v6/names
# skip a space and match any word (also w/ hyphen) to get key type, lastly
# match any non whitespace to the end of the line to get the public key
host_pattern = re.compile(r"^\S+\s[\w\-]+\s\S+$", flags=re.I | re.M)
host_entries = re.findall(pattern=host_pattern, string=self.ssh_known_hosts)
known_hosts: Dict[str, Dict[str, str]] = {}
for host_entry in host_entries:
host, key_type, public_key = host_entry.split()
# to simplify lookups down the line, split any list of hosts and just create a unique
# entry per host
for individual_host in host.split(","):
known_hosts[individual_host] = {}
known_hosts[individual_host]["key_type"] = key_type
known_hosts[individual_host]["public_key"] = public_key
return known_hosts
| 33.244211 | 100 | 0.577924 |
793fc923949fa2f76c918e844dcca1f5da124d0e | 1,227 | py | Python | {{cookiecutter.project_slug}}/workspace/app/scripts/backend_pre_start.py | Arya-ai/thresher | 11c7208e1c2ec553192b20dc87af779b673171e1 | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/workspace/app/scripts/backend_pre_start.py | Arya-ai/thresher | 11c7208e1c2ec553192b20dc87af779b673171e1 | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/workspace/app/scripts/backend_pre_start.py | Arya-ai/thresher | 11c7208e1c2ec553192b20dc87af779b673171e1 | [
"MIT"
] | 1 | 2020-11-10T11:15:55.000Z | 2020-11-10T11:15:55.000Z | import pika
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_fixed
from app.utils.tenacity import custom_before_log, custom_after_log
from app.scripts.healthcheck import db_healthcheck, broker_healthcheck
max_tries = 60 * 5 # 5 minutes
wait_seconds = 1
@retry(
stop=stop_after_attempt(max_tries),
wait=wait_fixed(wait_seconds),
before=custom_before_log('Test DB Connection', logger, logger.level("DEBUG")),
after=custom_after_log('Test DB Connection', logger, logger.level("WARNING"))
)
def test_db_connection():
status, err = db_healthcheck()
if not status:
raise err
@retry(
stop=stop_after_attempt(max_tries),
wait=wait_fixed(wait_seconds),
before=custom_before_log('Test RabbitMQ Connection', logger, logger.level("DEBUG")),
after=custom_after_log('Test RabbitMQ Connection', logger, logger.level("WARNING"))
)
def test_queue_connection():
status, err = broker_healthcheck()
if not status:
raise err
def init():
test_db_connection()
test_queue_connection()
def main():
logger.info("Initializing service")
init()
logger.info("Service finished initializing")
if __name__ == "__main__":
main()
| 24.54 | 88 | 0.730236 |
793fcadff384346bf9cbea591885cda5b47c3eec | 377 | py | Python | portfolio/blog/migrations/0009_auto_20150224_1648.py | adambeagle/portfoliosite | 634eac1793b4f583e28613a61e5e592cfdcd2e68 | [
"MIT"
] | null | null | null | portfolio/blog/migrations/0009_auto_20150224_1648.py | adambeagle/portfoliosite | 634eac1793b4f583e28613a61e5e592cfdcd2e68 | [
"MIT"
] | null | null | null | portfolio/blog/migrations/0009_auto_20150224_1648.py | adambeagle/portfoliosite | 634eac1793b4f583e28613a61e5e592cfdcd2e68 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0008_auto_20150224_1629'),
]
operations = [
migrations.RenameField(
model_name='tag',
old_name='name',
new_name='slug',
),
]
| 18.85 | 44 | 0.583554 |
793fcb5314a33a26c006d264176476ccfc6004c6 | 919 | py | Python | stubs/micropython-v1_12-pyboard/_thread.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_12-pyboard/_thread.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_12-pyboard/_thread.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
Module: '_thread' on micropython-v1.12-pyboard
"""
# MCU: {'ver': 'v1.12', 'port': 'pyboard', 'arch': 'armv7emsp', 'sysname': 'pyboard', 'release': '1.12.0', 'name': 'micropython', 'mpy': 7685, 'version': '1.12.0', 'machine': 'PYBv1.1 with STM32F405RG', 'build': '', 'nodename': 'pyboard', 'platform': 'pyboard', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
class LockType:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def acquire(self, *args, **kwargs) -> Any:
...
def locked(self, *args, **kwargs) -> Any:
...
def release(self, *args, **kwargs) -> Any:
...
def allocate_lock(*args, **kwargs) -> Any:
...
def exit(*args, **kwargs) -> Any:
...
def get_ident(*args, **kwargs) -> Any:
...
def stack_size(*args, **kwargs) -> Any:
...
def start_new_thread(*args, **kwargs) -> Any:
...
| 20.886364 | 286 | 0.535365 |
793fcc3cf9be089274552ef9f6cae53fe239a5cd | 186 | py | Python | 03/03_P22.py | monikuri/2110101_Com_Prog | bed16297685571edd7101b3e911d74ae3c62310c | [
"MIT"
] | null | null | null | 03/03_P22.py | monikuri/2110101_Com_Prog | bed16297685571edd7101b3e911d74ae3c62310c | [
"MIT"
] | null | null | null | 03/03_P22.py | monikuri/2110101_Com_Prog | bed16297685571edd7101b3e911d74ae3c62310c | [
"MIT"
] | null | null | null | import math
x = int(input())
ans = -1
for a in range(1,int(x/3)):
for b in range(1,x-a):
c = x - (a+b)
if c**2 == a**2 + b**2:
ans = max(ans,c)
print(ans) | 20.666667 | 31 | 0.451613 |
793fce5660795395ee2757952f1b8955b9ef1ef3 | 1,647 | py | Python | jsb_web/leases/urls.py | kojeachon/jsb_web | 93c0d9590adc01a2fa1934b7ef92e8a2d1264042 | [
"MIT"
] | null | null | null | jsb_web/leases/urls.py | kojeachon/jsb_web | 93c0d9590adc01a2fa1934b7ef92e8a2d1264042 | [
"MIT"
] | null | null | null | jsb_web/leases/urls.py | kojeachon/jsb_web | 93c0d9590adc01a2fa1934b7ef92e8a2d1264042 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = "leases"
urlpatterns = [
path('', views.index, name='index'),
path('roomcreate/',views.room_create,name='room_create'),
path('create/',views.lease_create,name='lease_create'),
path('list/',views.lease_list,name='lease_list'),
path("roomcreate2/", views.RoomCreate.as_view(), name="room_create2"),
path("roomlist/", views.RoomList.as_view(), name="room_list"),
path("room/<int:pk>", views.RoomDetailView.as_view(), name="room_detail"),
path("room_edit/<int:pk>/edit", views.RoomUpdate.as_view(), name="room_edit"),
path("leaselist/", views.LeaseList.as_view(), name="leaselist"),
path("lease/<int:pk>", views.LeaseDetailView.as_view(), name="lease_detail"),
# API Json Data
path('test/',views.test,name='test'),
path('type1-json/',views.get_json_type1_data,name='type1-json'),
path('type2-json/<str:type1>/', views.get_json_type2_data, name='type2-json'),
# path('type2-json/', views.get_json_type2_data, name='type2-json'),
path('roomnumber-json/<str:type2>/', views.get_json_roomnumber_data, name='roomnumber-json'),
path('save-lease-create/', views.save_lease_create, name='save-lease-create'),
path('leases-json/',views.get_json_leases_data,name='leases-json'),
# path('<int:post_id>/update', views.post_update, name="post_update"),
# path('<int:post_id>/comment_create', views.comment_create, name='comment_create'),
# path('<int:comment_id>/comment_delete', views.comment_delete, name="comment_delete"),
path("lease_edit/<int:pk>/edit", views.LeaseUpdate.as_view(), name="lease_edit"),
] | 49.909091 | 97 | 0.697632 |
793fce61a55c5a66fe3af6753d117a376039c817 | 10,950 | py | Python | tests/test_retrieval.py | hoytak/treedict | d9fc18834ae87df1ea6b94682f536ab62da2e8f0 | [
"BSD-3-Clause"
] | 13 | 2015-07-01T12:21:10.000Z | 2022-01-14T09:09:05.000Z | tests/test_retrieval.py | hoytak/treedict | d9fc18834ae87df1ea6b94682f536ab62da2e8f0 | [
"BSD-3-Clause"
] | 6 | 2015-01-22T21:49:37.000Z | 2020-07-03T23:24:38.000Z | tests/test_retrieval.py | hoytak/treedict | d9fc18834ae87df1ea6b94682f536ab62da2e8f0 | [
"BSD-3-Clause"
] | 8 | 2016-02-25T22:06:14.000Z | 2022-02-19T20:56:03.000Z | #!/usr/bin/env python
# Copyright (c) 2009-2011, Hoyt Koepke ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name 'treedict' nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Hoyt Koepke ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Hoyt Koepke BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import random, unittest, collections
from treedict import TreeDict, getTree
import treedict
from copy import deepcopy, copy
from hashlib import md5
import random
from treedict.treedict import _ldist
from common import *
class TestRetrieval(unittest.TestCase):
def test_existance_01(self):
p = sample_tree()
self.assert_("123" not in p)
self.assert_(not p.has_key("123"))
def test_existance_02(self):
p = sample_tree()
self.assert_(123 not in p)
self.assert_(not p.has_key(123))
def test_existance_03(self):
p = sample_tree()
self.assert_(None not in p)
self.assert_(not p.has_key(None))
def test_existance_04(self):
p = sample_tree()
self.assert_("si3dkdkdmmd" not in p)
self.assert_(not p.has_key("si3dkdkdmmd"))
def test_existance_05(self):
p = sample_tree()
self.assert_(p not in p)
self.assert_(not p.has_key(p))
def test_existance_06_dangling_node(self):
p = makeTDInstance('roor')
p.a
self.assert_('a' not in p)
def test_existance_06b_dangling_node(self):
p = makeTDInstance('roor')
p.b = 123
p.a
self.assert_('b' in p)
self.assert_('a' not in p)
def test_existance_06c_dangling_node(self):
p = makeTDInstance('roor')
p.b = 123
p.a
p.aa.b.c
p.bb.c.d = None
self.assert_('a' not in p)
self.assert_('b' in p)
self.assert_('aa' not in p)
self.assert_('bb' in p)
def testExistanceThroughLink(self):
p = makeTDInstance()
p.a.b.link = p.d
p.d.v = 1
self.assert_('a.b.link.v' in p)
def testContains_01(self):
p1 = makeTDInstance()
p1.a.b = 123
self.assert_('a' in p1)
self.assert_('a.b' in p1)
self.assert_('b' not in p1)
self.assert_('b' in p1.a)
def testContains_02(self):
p1 = makeTDInstance()
p1.a.b = 123
p1.d
self.assert_('a' in p1)
self.assert_('a.b' in p1)
self.assert_('d' not in p1)
def testForwardReference(self):
p = makeTDInstance('test')
p.a.b.c = p.d.e.f
p.d.e.f.g = 10
self.assert_(p.a.b.c.g == 10)
def testDefaultValue_01(self):
p = makeTDInstance()
p.a.b = 123
self.assert_(p.get('a.b') == 123)
self.assert_(p.get('a.b', default_value = 1) == 123)
self.assert_(p.get('a.c', default_value = 1) == 1)
self.assert_(p.get('a.c', default_value = None) is None)
def testRetrieve_01_NonExistantBranchFromFrozenTree(self):
p = makeTDInstance()
p.a.b.c = 1
p.freeze()
self.assert_(p.a.b.c == 1)
self.assertRaises(AttributeError, lambda: p.a.d)
def testRetrieve_02_NonExistantBranchFromFrozenTree_control(self):
p = makeTDInstance()
p.a.b.c = 1
# control; no freeze
self.assert_(p.a.b.c == 1)
p.a.d
def testRetrieve_03_ThroughLink(self):
p = makeTDInstance()
p.a.b.link = p.d
p.d.v = 1
self.assert_(p["a.b.link.v"] == 1)
self.assert_(p.get("a.b.link.v") == 1)
def testRetrieve_04_ThroughMultipleLinks(self):
p = makeTDInstance()
p.l7.v = 1
p.l6 = p.l7
p.l5 = p.l6
p.l4 = p.l5
p.l3 = p.l4
p.l2 = p.l3
p.l1 = p.l2
self.assert_(p.l7.v == 1)
self.assert_(p.l6.v == 1)
self.assert_(p.l5.v == 1)
self.assert_(p.l4.v == 1)
self.assert_(p.l3.v == 1)
self.assert_(p.l2.v == 1)
self.assert_(p.l1.v == 1)
def testNonexistantValues_01(self):
p = makeTDInstance()
self.assertRaises(KeyError, lambda: p["a"])
def testNonexistantValues_02(self):
p = makeTDInstance()
self.assertRaises(KeyError, lambda: p[0])
def testNonexistantValues_03(self):
p = makeTDInstance()
self.assertRaises(KeyError, lambda: p[None])
def testNonexistantValues_04(self):
p = makeTDInstance()
p.freeze()
self.assertRaises(AttributeError, lambda: p.a)
##################################################
# Some of the format specifying stuff
def testConvertTo_01(self):
t = makeTDInstance()
t.z = 3
t.a.x = 1
t.a.y = 2
# Test default
self.assert_(t.convertTo() == t.convertTo('nested_dict'))
self.assert_(t.convertTo() == {'a' : {'x' : 1, 'y' : 2}, 'z' : 3})
def testConvertTo_02(self):
t = random_tree(200)
d = t.convertTo('nested_dict')
t2 = TreeDict.fromdict(d, expand_nested = True)
self.assert_(t == t2)
def testConvertTo_03_self_linked_01(self):
t = makeTDInstance()
t.makeBranch('b')
t.a.b = t.b
t.b.a = t.a
d = t.convertTo('nested_dict')
self.assert_(type(d['a']) is dict)
self.assert_(type(d['b']) is dict)
self.assert_(d['a']['b'] is d['b'])
self.assert_(d['b']['a'] is d['a'])
def testConvertTo_03_self_linked_02(self):
t = random_selflinked_tree(0, 1)
d = t.convertTo('nested_dict')
t2 = TreeDict.fromdict(d, expand_nested = True)
t.attach(recursive = True)
t2.attach(recursive = True)
self.assert_(t == t2)
def testConvertTo_04_root_linked_01(self):
t = makeTDInstance()
t.a = t
d = t.convertTo('nested_dict')
self.assert_(d['a'] is d)
def testConvertTo_04_root_linked_02(self):
t = makeTDInstance()
t.a.b.c = t
t.a.b.x = t.a
d = t.convertTo('nested_dict')
self.assert_(d['a']['b']['c'] is d)
self.assert_(d['a']['b']['x'] is d['a'])
def testConvertTo_05_only_local_as_values_01(self):
t = makeTDInstance()
t.x.y = 1
t.a.b.c = 1
t.a.b.d = t.a.b
t.a.b.xl = t.x
t.a.xl = t.x
d = t.a.convertTo('nested_dict', convert_values = False)
self.assert_(type(d['b']['d']) is dict)
self.assert_(type(d['b']) is dict)
self.assert_(d['b'] is d['b']['d'])
self.assert_(d['b']['c'] == 1)
# TreeDict values are only converted if they are a branch somewhere in the
self.assert_(isinstance(d['b']['xl'], TreeDict))
self.assert_(isinstance(d['xl'], TreeDict))
self.assert_(d['xl'] is d['b']['xl'])
def testConvertTo_05_only_local_as_values_01_control(self):
t = makeTDInstance()
t.x.y = 1
t.a.b.c = 1
t.a.b.d = t.a.b
t.a.b.xl = t.x
t.a.xl = t.x
d = t.a.convertTo('nested_dict', convert_values = True)
self.assert_(type(d['b']['d']) is dict)
self.assert_(type(d['b']) is dict)
self.assert_(d['b'] is d['b']['d'])
self.assert_(d['b']['c'] == 1)
# TreeDict values are only converted if they are a branch somewhere in the
self.assert_(type(d['b']['xl']) is dict)
self.assert_(type(d['xl']) is dict)
self.assert_(d['xl'] is d['b']['xl'])
def testConvertTo_05_only_local_as_values_02(self):
t = makeTDInstance()
t.x.y = 1
t.a.b.c = 1
a_refs = random_node_list(0, 100, 0.5)
x_refs = random_node_list(1, 100, 0.5)
for n in a_refs:
t.a[n] = t.a.b
for n in x_refs:
t.a[n] = t.x
d = t.a.convertTo('nested_dict', convert_values = False)
def get_value(d, n):
for n in n.split('.'):
d = d[n]
return d
for n in a_refs:
self.assert_(type(get_value(d, n)) is dict)
self.assert_(get_value(d, n) is d['b'])
for n in x_refs:
self.assert_(isinstance(get_value(d, n), TreeDict))
self.assert_(get_value(d, n) is t.x)
def testConvertTo_06_prune_empty_01(self):
t = makeTDInstance()
t.makeBranch('a')
d = t.convertTo('nested_dict', prune_empty = True)
self.assert_(d == {})
def testConvertTo_06_prune_empty_02(self):
t = makeTDInstance()
t.a.x = 1
t.a.makeBranch('b')
d = t.convertTo('nested_dict', prune_empty = False)
self.assert_(d == {'a' : {'x' : 1, 'b' : {} } } )
d2 = t.convertTo('nested_dict', prune_empty = True)
self.assert_(d2 == {'a' : {'x' : 1 } } )
def testConvertTo_07_lists(self):
t = makeTDInstance()
t.a.b = [1, makeTDInstance(x = 1)]
d = t.convertTo('nested_dict', expand_lists = False)
self.assert_(d == {'a' : {'b' : [1, makeTDInstance(x = 1)]}})
d2 = t.convertTo('nested_dict', expand_lists = True)
self.assert_(d2 == {'a' : {'b' : [1, {'x' : 1} ]}})
def testConvertTo_08_self_referencing_lists(self):
t = makeTDInstance()
t.a = [t]
d = t.convertTo('nested_dict', expand_lists = False)
self.assert_(d['a'][0] is t)
d2 = t.convertTo('nested_dict', expand_lists = True)
self.assert_(d2['a'][0] is d2)
if __name__ == '__main__':
unittest.main()
| 26.57767 | 83 | 0.567306 |
793fce9019cb6950eb166e098919813588853388 | 56 | py | Python | bridger/tasks.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 2 | 2020-03-17T00:53:23.000Z | 2020-07-16T07:00:33.000Z | bridger/tasks.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 76 | 2019-12-05T01:15:57.000Z | 2021-09-07T16:47:27.000Z | bridger/tasks.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 1 | 2020-02-05T15:09:47.000Z | 2020-02-05T15:09:47.000Z | from .notifications.tasks import send_mail, send_system
| 28 | 55 | 0.857143 |
793fced048431943fa6da52a33446dbac171c17a | 109,819 | py | Python | tools/ci_build/build.py | waikuen2010/onnxruntime | dbe1b57a71248946ade29c762817699fd1adc3ff | [
"MIT"
] | null | null | null | tools/ci_build/build.py | waikuen2010/onnxruntime | dbe1b57a71248946ade29c762817699fd1adc3ff | [
"MIT"
] | null | null | null | tools/ci_build/build.py | waikuen2010/onnxruntime | dbe1b57a71248946ade29c762817699fd1adc3ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import contextlib
import glob
import os
import re
import shlex
import shutil
import subprocess
import sys
import platform
from amd_hipify import amd_hipify
from distutils.version import LooseVersion
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.insert(0, os.path.join(REPO_DIR, "tools", "python"))
from util import ( # noqa: E402
run,
is_windows, is_macOS, is_linux,
get_logger)
import util.android as android # noqa: E402
log = get_logger("build")
class BaseError(Exception):
"""Base class for errors originating from build.py."""
pass
class BuildError(BaseError):
"""Error from running build steps."""
def __init__(self, *messages):
super().__init__("\n".join(messages))
class UsageError(BaseError):
"""Usage related error."""
def __init__(self, message):
super().__init__(message)
def _check_python_version():
# According to the BUILD.md, python 3.5+ is required:
# Python 2 is definitely not supported and it should be safer to consider
# it won't run with python 4:
if sys.version_info[0] != 3:
raise BuildError(
"Bad python major version: expecting python 3, found version "
"'{}'".format(sys.version))
if sys.version_info[1] < 6:
raise BuildError(
"Bad python minor version: expecting python 3.6+, found version "
"'{}'".format(sys.version))
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Need bool; got %r' % s)
return {'true': True, 'false': False}[s.lower()]
_check_python_version()
def _openvino_verify_device_type(device_read):
choices = ["CPU_FP32", "GPU_FP32", "GPU_FP16", "VAD-M_FP16", "MYRIAD_FP16", "VAD-F_FP32"]
choices1 = ["CPU_FP32_NO_PARTITION", "GPU_FP32_NO_PARTITION", "GPU_FP16_NO_PARTITION",
"VAD-M_FP16_NO_PARTITION", "MYRIAD_FP16_NO_PARTITION", "VAD-F_FP32_NO_PARTITION"]
status_hetero = True
res = False
if (device_read in choices):
res = True
elif (device_read in choices1):
res = True
elif (device_read.startswith("HETERO:") or device_read.startswith("MULTI:")):
res = True
comma_separated_devices = device_read.split(":")
comma_separated_devices = comma_separated_devices[1].split(',')
if (len(comma_separated_devices) < 2):
print("At least two devices required in Hetero Mode")
status_hetero = False
dev_options = ["CPU", "GPU", "MYRIAD", "FPGA", "HDDL"]
for dev in comma_separated_devices:
if (dev not in dev_options):
status_hetero = False
break
def invalid_hetero_build():
print("\n" + "If trying to build Hetero or Multi, specifiy the supported devices along with it." + + "\n")
print("specify the keyword HETERO or MULTI followed by the devices ")
print("in the order of priority you want to build" + "\n")
print("The different hardware devices that can be added in HETERO or MULTI")
print("are ['CPU','GPU','MYRIAD','FPGA','HDDL']" + "\n")
print("An example of how to specify the hetero build type. Ex: HETERO:GPU,CPU" + "\n")
print("An example of how to specify the MULTI build type. Ex: MULTI:MYRIAD,CPU" + "\n")
sys.exit("Wrong Build Type selected")
if (res is False):
print("\n" + "You have selcted wrong configuration for the build.")
print("pick the build type for specific Hardware Device from following options: ", choices)
print("(or) from the following options with graph partitioning disabled: ", choices1)
print("\n")
if not (device_read.startswith("HETERO:") or device_read.startswith("MULTI:")):
invalid_hetero_build()
sys.exit("Wrong Build Type selected")
if (status_hetero is False):
invalid_hetero_build()
return device_read
def parse_arguments():
parser = argparse.ArgumentParser(
description="ONNXRuntime CI build driver.",
usage=""" # noqa
Default behavior is --update --build --test for native architecture builds.
Default behavior is --update --build for cross-compiled builds.
The Update phase will update git submodules, and run cmake to generate makefiles.
The Build phase will build all projects.
The Test phase will run all unit tests, and optionally the ONNX tests.
Use the individual flags to only run the specified stages.
""")
# Main arguments
parser.add_argument(
"--build_dir", required=True, help="Path to the build directory.")
parser.add_argument(
"--config", nargs="+", default=["Debug"],
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration(s) to build.")
parser.add_argument(
"--update", action='store_true', help="Update makefiles.")
parser.add_argument("--build", action='store_true', help="Build.")
parser.add_argument(
"--clean", action='store_true',
help="Run 'cmake --build --target clean' for the selected config/s.")
parser.add_argument(
"--parallel", nargs='?', const='0', default='1', type=int,
help="Use parallel build. The optional value specifies the maximum number of parallel jobs. "
"If the optional value is 0 or unspecified, it is interpreted as the number of CPUs.")
parser.add_argument("--test", action='store_true', help="Run unit tests.")
parser.add_argument("--skip_tests", action='store_true', help="Skip all tests.")
# Training options
parser.add_argument(
"--enable_nvtx_profile", action='store_true', help="Enable NVTX profile in ORT.")
parser.add_argument(
"--enable_memory_profile", action='store_true', help="Enable memory profile in ORT.")
parser.add_argument(
"--enable_training", action='store_true', help="Enable training in ORT.")
parser.add_argument(
"--enable_training_ops", action='store_true', help="Enable training ops in inference graph.")
parser.add_argument(
"--enable_training_torch_interop", action='store_true', help="Enable training kernels interop with torch.")
parser.add_argument(
"--disable_nccl", action='store_true', help="Disable Nccl.")
parser.add_argument(
"--mpi_home", help="Path to MPI installation dir")
parser.add_argument(
"--nccl_home", help="Path to NCCL installation dir")
parser.add_argument(
"--use_mpi", nargs='?', default=True, const=True, type=_str_to_bool)
# enable ONNX tests
parser.add_argument(
"--enable_onnx_tests", action='store_true',
help="""When running the Test phase, run onnx_test_running against
available test data directories.""")
parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe.")
parser.add_argument(
"--fuzz_testing", action='store_true', help="Enable Fuzz testing of the onnxruntime.")
parser.add_argument(
"--enable_symbolic_shape_infer_tests", action='store_true',
help="""When running the Test phase, run symbolic shape inference against
available test data directories.""")
# generate documentation
parser.add_argument("--gen_doc", nargs='?', const='yes', type=str,
help="Generate documentation listing standard ONNX operators and types implemented by "
"various execution providers and contrib operator schemas. "
"Use `--gen_doc validate` to validate these match the current contents in /docs.")
parser.add_argument(
"--gen-api-doc", action='store_true',
help="Generate API documentation for PyTorch frontend")
# CUDA related
parser.add_argument("--use_cuda", action='store_true', help="Enable CUDA.")
parser.add_argument(
"--cuda_version", help="The version of CUDA toolkit to use. "
"Auto-detect if not specified. e.g. 9.0")
parser.add_argument(
"--cuda_home", help="Path to CUDA home."
"Read from CUDA_HOME environment variable if --use_cuda is true and "
"--cuda_home is not specified.")
parser.add_argument(
"--cudnn_home", help="Path to CUDNN home. "
"Read from CUDNN_HOME environment variable if --use_cuda is true and "
"--cudnn_home is not specified.")
parser.add_argument(
"--enable_cuda_line_info", action='store_true', help="Enable CUDA line info.")
# Python bindings
parser.add_argument(
"--enable_pybind", action='store_true', help="Enable Python Bindings.")
parser.add_argument(
"--build_wheel", action='store_true', help="Build Python Wheel.")
parser.add_argument(
"--wheel_name_suffix", help="Suffix to append to created wheel names. "
"This value is currently only used for nightly builds.")
parser.add_argument(
"--numpy_version", help="Installs a specific version of numpy "
"before building the python binding.")
parser.add_argument(
"--skip-keras-test", action='store_true',
help="Skip tests with Keras if keras is installed")
# C-Sharp bindings
parser.add_argument(
"--build_csharp", action='store_true',
help="Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. "
"For building C# bindings and packaging them into nuget package use --build_nuget arg.")
parser.add_argument(
"--build_nuget", action='store_true',
help="Build C#.Net DLL and NuGet package on the local machine. "
"Currently only Windows and Linux platforms are supported.")
# Java bindings
parser.add_argument(
"--build_java", action='store_true', help="Build Java bindings.")
# Node.js binding
parser.add_argument(
"--build_nodejs", action='store_true',
help="Build Node.js binding and NPM package.")
# Objective-C binding
parser.add_argument(
"--build_objc", action='store_true',
help="Build Objective-C binding.")
# Build a shared lib
parser.add_argument(
"--build_shared_lib", action='store_true',
help="Build a shared library for the ONNXRuntime.")
# Build a shared lib
parser.add_argument(
"--build_apple_framework", action='store_true',
help="Build a macOS/iOS framework for the ONNXRuntime.")
# Build options
parser.add_argument(
"--cmake_extra_defines", nargs="+",
help="Extra definitions to pass to CMake during build system "
"generation. These are just CMake -D options without the leading -D.")
parser.add_argument(
"--target",
help="Build a specific target, e.g. winml_dll")
# This flag is needed when :
# 1. The OS is 64 bits Windows
# 2. And the target binary is for 32 bits Windows
# 3. And the python used for running this script is 64 bits.
# But if you can get a 32 bits python, the build will run better and you won't need this flag.
parser.add_argument(
"--x86", action='store_true',
help="[cross-compiling] Create Windows x86 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm", action='store_true',
help="[cross-compiling] Create ARM makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm64", action='store_true',
help="[cross-compiling] Create ARM64 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm64ec", action='store_true',
help="[cross-compiling] Create ARM64EC makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
parser.add_argument("--android", action='store_true', help='Build for Android')
parser.add_argument(
"--android_abi", default="arm64-v8a",
choices=["armeabi-v7a", "arm64-v8a", "x86", "x86_64"],
help="Specify the target Android Application Binary Interface (ABI)")
parser.add_argument("--android_api", type=int, default=27, help='Android API Level, e.g. 21')
parser.add_argument(
"--android_sdk_path", type=str, default=os.environ.get("ANDROID_HOME", ""),
help="Path to the Android SDK")
parser.add_argument(
"--android_ndk_path", type=str, default=os.environ.get("ANDROID_NDK_HOME", ""),
help="Path to the Android NDK")
parser.add_argument("--android_cpp_shared", action="store_true",
help="Build with shared libc++ instead of the default static libc++.")
parser.add_argument("--android_run_emulator", action="store_true",
help="Start up an Android emulator if needed.")
parser.add_argument("--ios", action='store_true', help="build for ios")
parser.add_argument(
"--ios_sysroot", default="",
help="Specify the location name of the macOS platform SDK to be used")
parser.add_argument(
"--ios_toolchain_dir", default="",
help="Path to ios toolchain binaries")
parser.add_argument(
"--ios_toolchain_file", default="",
help="Path to ios toolchain file, "
"or cmake/onnxruntime_ios.toolchain.cmake will be used")
parser.add_argument(
"--xcode_code_signing_team_id", default="",
help="The development team ID used for code signing in Xcode")
parser.add_argument(
"--xcode_code_signing_identity", default="",
help="The development identity used for code signing in Xcode")
parser.add_argument(
"--use_xcode", action='store_true',
help="Use Xcode as cmake generator, this is only supported on MacOS.")
parser.add_argument(
"--osx_arch",
default="arm64" if platform.machine() == "arm64" else "x86_64",
choices=["arm64", "arm64e", "x86_64"],
help="Specify the Target specific architectures for macOS and iOS, This is only supported on MacOS")
parser.add_argument(
"--apple_deploy_target", type=str,
help="Specify the minimum version of the target platform "
"(e.g. macOS or iOS)"
"This is only supported on MacOS")
# WebAssembly build
parser.add_argument("--build_wasm", action='store_true', help="Build for WebAssembly")
parser.add_argument("--enable_wasm_simd", action='store_true', help="Enable WebAssembly SIMD")
parser.add_argument(
"--disable_wasm_exception_catching", action='store_true',
help="Disable exception catching in WebAssembly.")
parser.add_argument(
"--enable_wasm_exception_throwing_override", action='store_true',
help="Enable exception throwing in WebAssembly, this will override default disabling exception throwing "
"behavior when disable exceptions.")
parser.add_argument(
"--enable_wasm_threads", action='store_true',
help="Enable WebAssembly multi-threads support")
parser.add_argument(
"--enable_wasm_profiling", action='store_true',
help="Enable WebAsselby profiling and preserve function names")
parser.add_argument(
"--enable_wasm_debug_info", action='store_true',
help="Build WebAssembly with DWARF format debug info")
parser.add_argument(
"--wasm_malloc", default="dlmalloc", help="Specify memory allocator for WebAssembly")
parser.add_argument(
"--emsdk_version", default="2.0.26", help="Specify version of emsdk")
# Enable onnxruntime-extensions
parser.add_argument(
"--use_extensions", action='store_true',
help="Enable custom operators in onnxruntime-extensions, use git submodule onnxruntime-extensions "
"in path cmake/external/onnxruntime-extensions by default.")
parser.add_argument(
"--extensions_overridden_path", type=str,
help="Path to pre-pulled onnxruntime-extensions, will override default onnxruntime-extensions path.")
# Arguments needed by CI
parser.add_argument(
"--cmake_path", default="cmake", help="Path to the CMake program.")
parser.add_argument(
"--ctest_path", default="ctest", help="Path to the CTest program. It can be an empty string. If it is empty, "
"we will use this script driving the test programs directly.")
parser.add_argument(
"--skip_submodule_sync", action='store_true', help="Don't do a "
"'git submodule update'. Makes the Update phase faster.")
parser.add_argument(
"--use_vstest", action='store_true',
help="Use use_vstest for running unitests.")
parser.add_argument(
"--use_mimalloc", default=['none'],
choices=['none', 'stl', 'arena', 'all'], help="Use mimalloc.")
parser.add_argument(
"--use_dnnl", action='store_true', help="Build with DNNL.")
parser.add_argument(
"--dnnl_gpu_runtime", action='store', default='', type=str.lower,
help="e.g. --dnnl_gpu_runtime ocl")
parser.add_argument(
"--dnnl_opencl_root", action='store', default='',
help="Path to OpenCL SDK. "
"e.g. --dnnl_opencl_root \"C:/Program Files (x86)/IntelSWTools/sw_dev_tools/OpenCL/sdk\"")
parser.add_argument(
"--use_openvino", nargs="?", const="CPU_FP32",
type=_openvino_verify_device_type,
help="Build with OpenVINO for specific hardware.")
parser.add_argument(
"--use_coreml", action='store_true', help="Build with CoreML support.")
parser.add_argument(
"--use_nnapi", action='store_true', help="Build with NNAPI support.")
parser.add_argument(
"--nnapi_min_api", type=int,
help="Minimum Android API level to enable NNAPI, should be no less than 27")
parser.add_argument(
"--use_rknpu", action='store_true', help="Build with RKNPU.")
parser.add_argument(
"--use_preinstalled_eigen", action='store_true',
help="Use pre-installed Eigen.")
parser.add_argument("--eigen_path", help="Path to pre-installed Eigen.")
parser.add_argument(
"--use_openmp", action='store_true', help="Build with OpenMP")
parser.add_argument(
"--enable_msinternal", action="store_true",
help="Enable for Microsoft internal builds only.")
parser.add_argument("--llvm_path", help="Path to llvm dir")
parser.add_argument(
"--use_vitisai", action='store_true', help="Build with Vitis-AI")
parser.add_argument(
"--use_nuphar", action='store_true', help="Build with nuphar")
parser.add_argument(
"--use_tensorrt", action='store_true', help="Build with TensorRT")
parser.add_argument(
"--tensorrt_home", help="Path to TensorRT installation dir")
parser.add_argument(
"--use_migraphx", action='store_true', help="Build with MIGraphX")
parser.add_argument(
"--migraphx_home", help="Path to MIGraphX installation dir")
parser.add_argument(
"--use_full_protobuf", action='store_true',
help="Use the full protobuf library")
parser.add_argument("--skip_onnx_tests", action='store_true',
help="Explicitly disable all onnx related tests. Note: Use --skip_tests to skip all tests.")
parser.add_argument("--skip_winml_tests", action='store_true', help="Explicitly disable all WinML related tests")
parser.add_argument("--skip_nodejs_tests", action='store_true', help="Explicitly disable all Node.js binding tests")
parser.add_argument(
"--enable_msvc_static_runtime", action='store_true',
help="Enable static linking of MSVC runtimes.")
parser.add_argument(
"--enable_language_interop_ops", action='store_true',
help="Enable operator implemented in language other than cpp")
parser.add_argument(
"--cmake_generator",
choices=['Visual Studio 15 2017', 'Visual Studio 16 2019', 'Visual Studio 17 2022', 'Ninja'],
default='Visual Studio 16 2019' if is_windows() else None,
help="Specify the generator that CMake invokes. "
"This is only supported on Windows")
parser.add_argument(
"--enable_multi_device_test", action='store_true',
help="Test with multi-device. Mostly used for multi-device GPU")
parser.add_argument(
"--use_dml", action='store_true', help="Build with DirectML.")
parser.add_argument(
"--use_winml", action='store_true', help="Build with WinML.")
parser.add_argument(
"--winml_root_namespace_override", type=str,
help="Specify the namespace that WinML builds into.")
parser.add_argument(
"--use_telemetry", action='store_true',
help="Only official builds can set this flag to enable telemetry.")
parser.add_argument(
"--enable_wcos", action='store_true',
help="Build for Windows Core OS.")
parser.add_argument(
"--enable_windows_store", action='store_true',
help="Build for Windows Store")
parser.add_argument(
"--enable_lto", action='store_true',
help="Enable Link Time Optimization")
parser.add_argument(
"--enable_transformers_tool_test", action='store_true',
help="Enable transformers tool test")
parser.add_argument(
"--use_acl", nargs="?", const="ACL_1905",
choices=["ACL_1902", "ACL_1905", "ACL_1908", "ACL_2002"],
help="Build with ACL for ARM architectures.")
parser.add_argument(
"--acl_home", help="Path to ACL home dir")
parser.add_argument(
"--acl_libs", help="Path to ACL libraries")
parser.add_argument(
"--use_armnn", action='store_true',
help="Enable ArmNN Execution Provider.")
parser.add_argument(
"--armnn_relu", action='store_true',
help="Use the Relu operator implementation from the ArmNN EP.")
parser.add_argument(
"--armnn_bn", action='store_true',
help="Use the Batch Normalization operator implementation from the ArmNN EP.")
parser.add_argument(
"--armnn_home", help="Path to ArmNN home dir")
parser.add_argument(
"--armnn_libs", help="Path to ArmNN libraries")
parser.add_argument(
"--build_micro_benchmarks", action='store_true',
help="Build ONNXRuntime micro-benchmarks.")
# options to reduce binary size
parser.add_argument("--minimal_build", default=None, nargs='*', type=str.lower,
help="Create a build that only supports ORT format models. "
"See /docs/ONNX_Runtime_Format_Model_Usage.md for more information. "
"RTTI is automatically disabled in a minimal build. "
"To enable execution providers that compile kernels at runtime (e.g. NNAPI) pass 'extended' "
"as a parameter. e.g. '--minimal_build extended'. "
"To enable support for custom operators pass 'custom_ops' as a parameter. "
"e.g. '--minimal_build custom_ops'. This can be combined with an 'extended' build by passing "
"'--minimal_build extended custom_ops'")
parser.add_argument("--include_ops_by_config", type=str,
help="Include ops from config file. "
"See /docs/Reduced_Operator_Kernel_build.md for more information.")
parser.add_argument("--enable_reduced_operator_type_support", action='store_true',
help='If --include_ops_by_config is specified, and the configuration file has type reduction '
'information, limit the types individual operators support where possible to further '
'reduce the build size. '
'See /docs/Reduced_Operator_Kernel_build.md for more information.')
parser.add_argument("--disable_contrib_ops", action='store_true',
help="Disable contrib ops (reduces binary size)")
parser.add_argument("--disable_ml_ops", action='store_true',
help="Disable traditional ML ops (reduces binary size)")
parser.add_argument("--disable_rtti", action='store_true', help="Disable RTTI (reduces binary size)")
parser.add_argument("--disable_exceptions", action='store_true',
help="Disable exceptions to reduce binary size. Requires --minimal_build.")
parser.add_argument("--disable_ort_format_load", action='store_true',
help='Disable support for loading ORT format models in a non-minimal build.')
parser.add_argument(
"--rocm_version", help="The version of ROCM stack to use. ")
parser.add_argument("--use_rocm", action='store_true', help="Build with ROCm")
parser.add_argument("--rocm_home", help="Path to ROCm installation dir")
# Code coverage
parser.add_argument("--code_coverage", action='store_true',
help="Generate code coverage when targetting Android (only).")
parser.add_argument(
"--ms_experimental", action='store_true', help="Build microsoft experimental operators.")\
# eager mode
parser.add_argument(
"--build_eager_mode", action='store_true',
help="Build ONNXRuntime micro-benchmarks.")
parser.add_argument('--eager_customop_module', default=None,
help='Module containing custom op mappings for eager mode.')
parser.add_argument('--eager_customop_header', default=None,
help='Header containing custom op definitions for eager mode.')
parser.add_argument(
"--enable_external_custom_op_schemas", action='store_true',
help="Enable registering user defined custom operation schemas at shared library load time.\
This feature is only supported/available on Ubuntu.")
return parser.parse_args()
def is_reduced_ops_build(args):
return args.include_ops_by_config is not None
def resolve_executable_path(command_or_path):
"""Returns the absolute path of an executable."""
if command_or_path and command_or_path.strip():
executable_path = shutil.which(command_or_path)
if executable_path is None:
raise BuildError("Failed to resolve executable path for "
"'{}'.".format(command_or_path))
return os.path.abspath(executable_path)
else:
return None
def get_linux_distro():
try:
with open('/etc/os-release', 'r') as f:
dist_info = dict(
line.strip().split('=', 1) for line in f.readlines())
return dist_info.get('NAME', '').strip('"'), dist_info.get(
'VERSION', '').strip('"')
except (IOError, ValueError):
return '', ''
def is_ubuntu_1604():
dist, ver = get_linux_distro()
return dist == 'Ubuntu' and ver.startswith('16.04')
def get_config_build_dir(build_dir, config):
# build directory per configuration
return os.path.join(build_dir, config)
def run_subprocess(args, cwd=None, capture_stdout=False, dll_path=None,
shell=False, env={}, python_path=None):
if isinstance(args, str):
raise ValueError("args should be a sequence of strings, not a string")
my_env = os.environ.copy()
if dll_path:
if is_windows():
if "PATH" in my_env:
my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
else:
my_env["PATH"] = dll_path
else:
if "LD_LIBRARY_PATH" in my_env:
my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
else:
my_env["LD_LIBRARY_PATH"] = dll_path
if python_path:
if "PYTHONPATH" in my_env:
my_env["PYTHONPATH"] += os.pathsep + python_path
else:
my_env["PYTHONPATH"] = python_path
my_env.update(env)
return run(*args, cwd=cwd, capture_stdout=capture_stdout, shell=shell, env=my_env)
def update_submodules(source_dir):
run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
run_subprocess(["git", "submodule", "update", "--init", "--recursive"],
cwd=source_dir)
def is_docker():
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv') or
os.path.isfile(path) and any('docker' in line for line in open(path))
)
def install_python_deps(numpy_version=""):
dep_packages = ['setuptools', 'wheel', 'pytest']
dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version
else 'numpy>=1.16.6')
dep_packages.append('sympy>=1.1')
dep_packages.append('packaging')
dep_packages.append('cerberus')
run_subprocess([sys.executable, '-m', 'pip', 'install'] + dep_packages)
def setup_test_data(build_dir, configs):
# create a shortcut for test models if there is a 'models'
# folder in build_dir
if is_windows():
src_model_dir = os.path.join(build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
src_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', src_model_dir))
run_subprocess(['mklink', '/D', '/J', src_model_dir,
'C:\\local\\models'], shell=True)
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
dest_model_dir = os.path.join(config_build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
'C:\\local\\models'], shell=True)
elif os.path.exists(src_model_dir) and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
src_model_dir, dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
src_model_dir], shell=True)
def use_dev_mode(args):
if args.use_acl:
return 'OFF'
if args.use_armnn:
return 'OFF'
if args.ios and is_macOS():
return 'OFF'
SYSTEM_COLLECTIONURI = os.getenv('SYSTEM_COLLECTIONURI')
if SYSTEM_COLLECTIONURI and not SYSTEM_COLLECTIONURI == 'https://dev.azure.com/onnxruntime/':
return 'OFF'
return 'ON'
def add_cmake_define_without_override(cmake_extra_defines, key, value):
for x in cmake_extra_defines:
if x.startswith(key + "="):
return cmake_extra_defines
cmake_extra_defines.append(key + "=" + value)
def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home, rocm_home,
mpi_home, nccl_home, tensorrt_home, migraphx_home, acl_home, acl_libs, armnn_home, armnn_libs,
path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):
log.info("Generating CMake build tree")
cmake_dir = os.path.join(source_dir, "cmake")
cmake_args = [
cmake_path, cmake_dir,
"-Donnxruntime_RUN_ONNX_TESTS=" + ("ON" if args.enable_onnx_tests else "OFF"),
"-Donnxruntime_BUILD_WINML_TESTS=" + ("OFF" if args.skip_winml_tests else "ON"),
"-Donnxruntime_GENERATE_TEST_REPORTS=ON",
# There are two ways of locating python C API header file. "find_package(PythonLibs 3.5 REQUIRED)"
# and "find_package(Python 3.5 COMPONENTS Development.Module)". The first one is deprecated and it
# depends on the "PYTHON_EXECUTABLE" variable. The second needs "Python_EXECUTABLE". Here we set both
# of them to get the best compatibility.
"-DPython_EXECUTABLE=" + sys.executable,
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-Donnxruntime_ROCM_VERSION=" + (args.rocm_version if args.use_rocm else ""),
"-Donnxruntime_USE_MIMALLOC_STL_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "stl" or args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_USE_MIMALLOC_ARENA_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "arena" or args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_ENABLE_PYTHON=" + ("ON" if args.enable_pybind else "OFF"),
"-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
"-Donnxruntime_BUILD_JAVA=" + ("ON" if args.build_java else "OFF"),
"-Donnxruntime_BUILD_NODEJS=" + ("ON" if args.build_nodejs else "OFF"),
"-Donnxruntime_BUILD_OBJC=" + ("ON" if args.build_objc else "OFF"),
"-Donnxruntime_BUILD_SHARED_LIB=" + ("ON" if args.build_shared_lib else "OFF"),
"-Donnxruntime_BUILD_APPLE_FRAMEWORK=" + ("ON" if args.build_apple_framework else "OFF"),
"-Donnxruntime_USE_DNNL=" + ("ON" if args.use_dnnl else "OFF"),
"-Donnxruntime_DNNL_GPU_RUNTIME=" + (args.dnnl_gpu_runtime if args.use_dnnl else ""),
"-Donnxruntime_DNNL_OPENCL_ROOT=" + (args.dnnl_opencl_root if args.use_dnnl else ""),
"-Donnxruntime_USE_NNAPI_BUILTIN=" + ("ON" if args.use_nnapi else "OFF"),
"-Donnxruntime_USE_RKNPU=" + ("ON" if args.use_rknpu else "OFF"),
"-Donnxruntime_USE_OPENMP=" + (
"ON" if args.use_openmp and not (
args.use_nnapi or
args.android or (args.ios and is_macOS())
or args.use_rknpu)
else "OFF"),
"-Donnxruntime_USE_TVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_LLVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + ("ON" if args.enable_msinternal else "OFF"),
"-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
"-Donnxruntime_USE_NUPHAR=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
"-Donnxruntime_TENSORRT_HOME=" + (tensorrt_home if args.use_tensorrt else ""),
# set vars for migraphx
"-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
"-Donnxruntime_MIGRAPHX_HOME=" + (migraphx_home if args.use_migraphx else ""),
# By default - we currently support only cross compiling for ARM/ARM64
# (no native compilation supported through this script).
"-Donnxruntime_CROSS_COMPILING=" + ("ON" if args.arm64 or args.arm64ec or args.arm else "OFF"),
"-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
"-Donnxruntime_DISABLE_ML_OPS=" + ("ON" if args.disable_ml_ops else "OFF"),
"-Donnxruntime_DISABLE_RTTI=" + ("ON" if args.disable_rtti else "OFF"),
"-Donnxruntime_DISABLE_EXCEPTIONS=" + ("ON" if args.disable_exceptions else "OFF"),
"-Donnxruntime_DISABLE_ORT_FORMAT_LOAD=" + ("ON" if args.disable_ort_format_load else "OFF"),
# Need to use 'is not None' with minimal_build check as it could be an empty list.
"-Donnxruntime_MINIMAL_BUILD=" + ("ON" if args.minimal_build is not None else "OFF"),
"-Donnxruntime_EXTENDED_MINIMAL_BUILD=" + ("ON" if args.minimal_build and 'extended' in args.minimal_build
else "OFF"),
"-Donnxruntime_MINIMAL_BUILD_CUSTOM_OPS=" + ("ON" if (args.minimal_build is not None and ('custom_ops' in
args.minimal_build or args.use_extensions))
else "OFF"),
"-Donnxruntime_REDUCED_OPS_BUILD=" + ("ON" if is_reduced_ops_build(args) else "OFF"),
# enable pyop if it is nightly build
"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=" + ("ON" if args.enable_language_interop_ops else "OFF"),
"-Donnxruntime_USE_DML=" + ("ON" if args.use_dml else "OFF"),
"-Donnxruntime_USE_WINML=" + ("ON" if args.use_winml else "OFF"),
"-Donnxruntime_BUILD_MS_EXPERIMENTAL_OPS=" + ("ON" if args.ms_experimental else "OFF"),
"-Donnxruntime_USE_TELEMETRY=" + ("ON" if args.use_telemetry else "OFF"),
"-Donnxruntime_ENABLE_LTO=" + ("ON" if args.enable_lto else "OFF"),
"-Donnxruntime_ENABLE_TRANSFORMERS_TOOL_TEST=" + ("ON" if args.enable_transformers_tool_test else "OFF"),
"-Donnxruntime_USE_ACL=" + ("ON" if args.use_acl else "OFF"),
"-Donnxruntime_USE_ACL_1902=" + ("ON" if args.use_acl == "ACL_1902" else "OFF"),
"-Donnxruntime_USE_ACL_1905=" + ("ON" if args.use_acl == "ACL_1905" else "OFF"),
"-Donnxruntime_USE_ACL_1908=" + ("ON" if args.use_acl == "ACL_1908" else "OFF"),
"-Donnxruntime_USE_ACL_2002=" + ("ON" if args.use_acl == "ACL_2002" else "OFF"),
"-Donnxruntime_USE_ARMNN=" + ("ON" if args.use_armnn else "OFF"),
"-Donnxruntime_ARMNN_RELU_USE_CPU=" + ("OFF" if args.armnn_relu else "ON"),
"-Donnxruntime_ARMNN_BN_USE_CPU=" + ("OFF" if args.armnn_bn else "ON"),
# Training related flags
"-Donnxruntime_ENABLE_NVTX_PROFILE=" + ("ON" if args.enable_nvtx_profile else "OFF"),
"-Donnxruntime_ENABLE_TRAINING=" + ("ON" if args.enable_training else "OFF"),
"-Donnxruntime_ENABLE_TRAINING_OPS=" + ("ON" if args.enable_training_ops else "OFF"),
"-Donnxruntime_ENABLE_TRAINING_TORCH_INTEROP=" + ("ON" if args.enable_training_torch_interop else "OFF"),
# Enable advanced computations such as AVX for some traininig related ops.
"-Donnxruntime_ENABLE_CPU_FP16_OPS=" + ("ON" if args.enable_training else "OFF"),
"-Donnxruntime_USE_NCCL=" + ("OFF" if args.disable_nccl else "ON"),
"-Donnxruntime_BUILD_BENCHMARKS=" + ("ON" if args.build_micro_benchmarks else "OFF"),
"-Donnxruntime_USE_ROCM=" + ("ON" if args.use_rocm else "OFF"),
"-Donnxruntime_ROCM_HOME=" + (rocm_home if args.use_rocm else ""),
"-DOnnxruntime_GCOV_COVERAGE=" + ("ON" if args.code_coverage else "OFF"),
"-Donnxruntime_USE_MPI=" + ("ON" if args.use_mpi else "OFF"),
"-Donnxruntime_ENABLE_MEMORY_PROFILE=" + ("ON" if args.enable_memory_profile else "OFF"),
"-Donnxruntime_ENABLE_CUDA_LINE_NUMBER_INFO=" + ("ON" if args.enable_cuda_line_info else "OFF"),
"-Donnxruntime_BUILD_WEBASSEMBLY=" + ("ON" if args.build_wasm else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_SIMD=" + ("ON" if args.enable_wasm_simd else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_EXCEPTION_CATCHING=" + ("OFF" if args.disable_wasm_exception_catching
else "ON"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_EXCEPTION_THROWING=" + ("ON" if args.enable_wasm_exception_throwing_override
else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_THREADS=" + ("ON" if args.enable_wasm_threads else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_DEBUG_INFO=" + ("ON" if args.enable_wasm_debug_info else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_PROFILING=" + ("ON" if args.enable_wasm_profiling else "OFF"),
"-Donnxruntime_WEBASSEMBLY_MALLOC=" + args.wasm_malloc,
"-Donnxruntime_ENABLE_EAGER_MODE=" + ("ON" if args.build_eager_mode else "OFF"),
"-Donnxruntime_ENABLE_EXTERNAL_CUSTOM_OP_SCHEMAS=" + ("ON" if args.enable_external_custom_op_schemas
else "OFF"),
]
# It should be default ON in CI build pipelines, and OFF in packaging pipelines.
# And OFF for the people who are not actively developing onnx runtime.
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_DEV_MODE", use_dev_mode(args))
if args.use_cuda:
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_USE_CUDA", "ON")
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_CUDA_VERSION", args.cuda_version)
# TODO: this variable is not really needed
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_CUDA_HOME", cuda_home)
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_CUDNN_HOME", cudnn_home)
if is_windows():
if args.enable_msvc_static_runtime:
add_cmake_define_without_override(cmake_extra_defines, "CMAKE_MSVC_RUNTIME_LIBRARY",
"MultiThreaded$<$<CONFIG:Debug>:Debug>")
add_cmake_define_without_override(cmake_extra_defines, "ONNX_USE_MSVC_STATIC_RUNTIME", "ON")
add_cmake_define_without_override(cmake_extra_defines, "protobuf_MSVC_STATIC_RUNTIME", "ON")
add_cmake_define_without_override(cmake_extra_defines, "gtest_force_shared_crt", "OFF")
else:
# CMAKE_MSVC_RUNTIME_LIBRARY is default to MultiThreaded$<$<CONFIG:Debug>:Debug>DLL
add_cmake_define_without_override(cmake_extra_defines, "ONNX_USE_MSVC_STATIC_RUNTIME", "OFF")
add_cmake_define_without_override(cmake_extra_defines, "protobuf_MSVC_STATIC_RUNTIME", "OFF")
add_cmake_define_without_override(cmake_extra_defines, "gtest_force_shared_crt", "ON")
if acl_home and os.path.exists(acl_home):
cmake_args += ["-Donnxruntime_ACL_HOME=" + acl_home]
if acl_libs and os.path.exists(acl_libs):
cmake_args += ["-Donnxruntime_ACL_LIBS=" + acl_libs]
if armnn_home and os.path.exists(armnn_home):
cmake_args += ["-Donnxruntime_ARMNN_HOME=" + armnn_home]
if armnn_libs and os.path.exists(armnn_libs):
cmake_args += ["-Donnxruntime_ARMNN_LIBS=" + armnn_libs]
if mpi_home and os.path.exists(mpi_home):
if args.use_mpi:
cmake_args += ["-Donnxruntime_MPI_HOME=" + mpi_home]
else:
log.warning("mpi_home is supplied but use_mpi is set to false."
" Build will continue without linking MPI libraries.")
if nccl_home and os.path.exists(nccl_home):
cmake_args += ["-Donnxruntime_NCCL_HOME=" + nccl_home]
if args.winml_root_namespace_override:
cmake_args += ["-Donnxruntime_WINML_NAMESPACE_OVERRIDE=" +
args.winml_root_namespace_override]
if args.use_openvino:
cmake_args += ["-Donnxruntime_USE_OPENVINO=ON",
"-Donnxruntime_USE_OPENVINO_MYRIAD=" + (
"ON" if args.use_openvino == "MYRIAD_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32=" + (
"ON" if args.use_openvino == "GPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16=" + (
"ON" if args.use_openvino == "GPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32=" + (
"ON" if args.use_openvino == "CPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_M=" + (
"ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_F=" + (
"ON" if args.use_openvino == "VAD-F_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_MYRIAD_NP=" + (
"ON" if args.use_openvino == "MYRIAD_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32_NP=" + (
"ON" if args.use_openvino == "GPU_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16_NP=" + (
"ON" if args.use_openvino == "GPU_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32_NP=" + (
"ON" if args.use_openvino == "CPU_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_M_NP=" + (
"ON" if args.use_openvino == "VAD-M_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_F_NP=" + (
"ON" if args.use_openvino == "VAD-F_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_HETERO=" + (
"ON" if args.use_openvino.startswith("HETERO") else "OFF"),
"-Donnxruntime_USE_OPENVINO_DEVICE=" + (args.use_openvino),
"-Donnxruntime_USE_OPENVINO_MULTI=" + (
"ON" if args.use_openvino.startswith("MULTI") else "OFF")]
# TensorRT and OpenVINO providers currently only support
# full_protobuf option.
if (args.use_full_protobuf or args.use_tensorrt or
args.use_openvino or args.use_vitisai or args.gen_doc):
cmake_args += [
"-Donnxruntime_USE_FULL_PROTOBUF=ON",
"-DProtobuf_USE_STATIC_LIBS=ON"
]
if args.use_nuphar and args.llvm_path is not None:
cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
if args.use_cuda and not is_windows():
nvml_stub_path = cuda_home + "/lib64/stubs"
cmake_args += ["-DCUDA_CUDA_LIBRARY=" + nvml_stub_path]
if args.use_preinstalled_eigen:
cmake_args += ["-Donnxruntime_USE_PREINSTALLED_EIGEN=ON",
"-Deigen_SOURCE_PATH=" + args.eigen_path]
if args.nnapi_min_api:
cmake_args += ["-Donnxruntime_NNAPI_MIN_API=" + str(args.nnapi_min_api)]
if args.android:
if not args.android_ndk_path:
raise BuildError("android_ndk_path required to build for Android")
if not args.android_sdk_path:
raise BuildError("android_sdk_path required to build for Android")
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE=" + os.path.join(
args.android_ndk_path, 'build', 'cmake', 'android.toolchain.cmake'),
"-DANDROID_PLATFORM=android-" + str(args.android_api),
"-DANDROID_ABI=" + str(args.android_abi),
"-DANDROID_MIN_SDK=" + str(args.android_api),
]
if args.android_cpp_shared:
cmake_args += ["-DANDROID_STL=c++_shared"]
if is_macOS() and not args.android:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES=" + args.osx_arch]
# since cmake 3.19, it uses the xcode latest buildsystem, which is not supported by this project.
cmake_verstr = subprocess.check_output(['cmake', '--version']).decode('utf-8').split()[2]
if args.use_xcode and LooseVersion(cmake_verstr) >= LooseVersion('3.19.0'):
cmake_args += ["-T", "buildsystem=1"]
if args.apple_deploy_target:
cmake_args += ["-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target]
# Code sign the binaries, if the code signing development identity and/or team id are provided
if args.xcode_code_signing_identity:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY=" + args.xcode_code_signing_identity]
if args.xcode_code_signing_team_id:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=" + args.xcode_code_signing_team_id]
if args.use_coreml:
cmake_args += ["-Donnxruntime_USE_COREML=ON"]
if args.ios:
if is_macOS():
needed_args = [
args.use_xcode,
args.ios_sysroot,
args.apple_deploy_target,
]
arg_names = [
"--use_xcode " +
"<need use xcode to cross build iOS on MacOS>",
"--ios_sysroot " +
"<the location or name of the macOS platform SDK>",
"--apple_deploy_target " +
"<the minimum version of the target platform>",
]
if not all(needed_args):
raise BuildError(
"iOS build on MacOS canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
cmake_args += [
"-DCMAKE_SYSTEM_NAME=iOS",
"-Donnxruntime_BUILD_SHARED_LIB=ON",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target,
# we do not need protoc binary for ios cross build
"-Dprotobuf_BUILD_PROTOC_BINARIES=OFF",
"-DCMAKE_TOOLCHAIN_FILE=" + (
args.ios_toolchain_file if args.ios_toolchain_file
else "../cmake/onnxruntime_ios.toolchain.cmake")
]
else:
# TODO: the cross compiling on Linux is not officially supported by Apple
# and is already broken with the latest codebase, so it should be removed.
# We are cross compiling on Linux
needed_args = [
args.ios_sysroot,
args.arm64 or args.arm,
args.ios_toolchain_dir
]
arg_names = [
"--ios_sysroot <path to sysroot>",
"--arm or --arm64",
"--ios_toolchain_dir <path to toolchain>"
]
if not all(needed_args):
raise BuildError(
"iOS build canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
compilers = sorted(
glob.glob(args.ios_toolchain_dir + "/bin/*-clang*"))
os.environ["PATH"] = os.path.join(
args.ios_toolchain_dir, "bin") + os.pathsep + os.environ.get(
"PATH", "")
os.environ["LD_LIBRARY_PATH"] = os.path.join(
args.ios_toolchain_dir, "/lib") + os.pathsep + os.environ.get(
"LD_LIBRARY_PATH", "")
if len(compilers) != 2:
raise BuildError(
"error identifying compilers in ios_toolchain_dir")
cmake_args += [
"-DCMAKE_OSX_ARCHITECTURES=" +
("arm64" if args.arm64 else "arm"),
"-DCMAKE_SYSTEM_NAME=iOSCross",
"-Donnxruntime_BUILD_UNIT_TESTS=OFF",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_C_COMPILER=" + compilers[0],
"-DCMAKE_CXX_COMPILER=" + compilers[1]
]
if args.build_wasm:
emsdk_dir = os.path.join(cmake_dir, "external", "emsdk")
emscripten_cmake_toolchain_file = os.path.join(emsdk_dir, "upstream", "emscripten", "cmake", "Modules",
"Platform", "Emscripten.cmake")
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE=" + emscripten_cmake_toolchain_file
]
if args.disable_wasm_exception_catching:
# WebAssembly unittest requires exception catching to work. If this feature is disabled, we do not build
# unit test.
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=OFF",
]
# Append onnxruntime-extensions cmake options
if args.use_extensions:
cmake_args += ["-Donnxruntime_USE_EXTENSIONS=ON"]
# default path of onnxruntime-extensions, using git submodule
onnxruntime_extensions_path = os.path.join(cmake_dir, "external", "onnxruntime-extensions")
if args.extensions_overridden_path and os.path.exists(args.extensions_overridden_path):
# use absolute path here because onnxruntime-extensions is outside onnxruntime
onnxruntime_extensions_path = os.path.abspath(args.extensions_overridden_path)
cmake_args += [
"-Donnxruntime_EXTENSIONS_PATH=" + onnxruntime_extensions_path]
print('[onnxruntime-extensions] onnxruntime_extensions_path: ', onnxruntime_extensions_path)
if is_reduced_ops_build(args):
operators_config_file = os.path.abspath(args.include_ops_by_config)
cmake_tool_dir = os.path.join(onnxruntime_extensions_path, 'tools')
# generate _selectedoplist.cmake by operators config file
run_subprocess([sys.executable, 'gen_selectedops.py', operators_config_file], cwd=cmake_tool_dir)
if path_to_protoc_exe:
cmake_args += [
"-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s" % path_to_protoc_exe]
if args.fuzz_testing:
if not (args.build_shared_lib and
is_windows() and
args.cmake_generator == 'Visual Studio 16 2019' and
args.use_full_protobuf):
raise BuildError(
"Fuzz test has only be tested with build shared libs option using MSVC on windows")
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=ON",
"-Donnxruntime_FUZZ_TEST=ON",
"-Donnxruntime_USE_FULL_PROTOBUF=ON"]
if args.gen_doc:
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_PYBIND_EXPORT_OPSCHEMA", "ON")
else:
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_PYBIND_EXPORT_OPSCHEMA", "OFF")
if args.build_eager_mode:
import torch
cmake_args += ["-Donnxruntime_PREBUILT_PYTORCH_PATH=%s" % os.path.dirname(torch.__file__)]
cmake_args += ["-D{}".format(define) for define in cmake_extra_defines]
cmake_args += cmake_extra_args
# ADO pipelines will store the pipeline build number
# (e.g. 191101-2300.1.master) and source version in environment
# variables. If present, use these values to define the
# WinML/ORT DLL versions.
build_number = os.getenv('Build_BuildNumber')
source_version = os.getenv('Build_SourceVersion')
if build_number and source_version:
build_matches = re.fullmatch(
r"(\d\d)(\d\d)(\d\d)(\d\d)\.(\d+)", build_number)
if build_matches:
YY = build_matches.group(2)
MM = build_matches.group(3)
DD = build_matches.group(4)
# Get ORT major and minor number
with open(os.path.join(source_dir, 'VERSION_NUMBER')) as f:
first_line = f.readline()
ort_version_matches = re.match(r"(\d+).(\d+)", first_line)
if not ort_version_matches:
raise BuildError("Couldn't read version from VERSION_FILE")
ort_major = ort_version_matches.group(1)
ort_minor = ort_version_matches.group(2)
# Example (BuildNumber: 191101-2300.1.master,
# SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)
# MajorPart = 1
# MinorPart = 0
# BuildPart = 1911
# PrivatePart = 123
# String = 191101-2300.1.master.0bce7ae
cmake_args += [
"-DVERSION_MAJOR_PART={}".format(ort_major),
"-DVERSION_MINOR_PART={}".format(ort_minor),
"-DVERSION_BUILD_PART={}".format(YY),
"-DVERSION_PRIVATE_PART={}{}".format(MM, DD),
"-DVERSION_STRING={}.{}.{}.{}".format(
ort_major, ort_minor, build_number,
source_version[0:7])
]
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
if args.use_nuphar:
os.environ["PATH"] = os.path.join(
config_build_dir, "external", "tvm",
config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
run_subprocess(
cmake_args + [
"-Donnxruntime_ENABLE_MEMLEAK_CHECKER=" +
("ON" if config.lower() == 'debug' and not args.use_nuphar and not
args.use_openvino and not
args.enable_msvc_static_runtime
else "OFF"), "-DCMAKE_BUILD_TYPE={}".format(config)],
cwd=config_build_dir)
def clean_targets(cmake_path, build_dir, configs):
for config in configs:
log.info("Cleaning targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config,
"--target", "clean"]
run_subprocess(cmd_args)
def build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, target=None):
for config in configs:
log.info("Building targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config]
if target:
cmd_args.extend(['--target', target])
build_tool_args = []
if num_parallel_jobs != 1:
if is_windows() and args.cmake_generator != 'Ninja' and not args.build_wasm:
build_tool_args += [
"/maxcpucount:{}".format(num_parallel_jobs),
# if nodeReuse is true, msbuild processes will stay around for a bit after the build completes
"/nodeReuse:False",
]
elif (is_macOS() and args.use_xcode):
# CMake will generate correct build tool args for Xcode
cmd_args += ["--parallel", str(num_parallel_jobs)]
else:
build_tool_args += ["-j{}".format(num_parallel_jobs)]
if build_tool_args:
cmd_args += ["--"]
cmd_args += build_tool_args
env = {}
if args.android:
env['ANDROID_SDK_ROOT'] = args.android_sdk_path
env['ANDROID_NDK_HOME'] = args.android_ndk_path
run_subprocess(cmd_args, env=env)
def add_dir_if_exists(directory, dir_list):
if os.path.isdir(directory):
dir_list.append(directory)
def setup_cuda_vars(args):
cuda_home = ""
cudnn_home = ""
if args.use_cuda:
cuda_home = args.cuda_home if args.cuda_home else os.getenv(
"CUDA_HOME")
cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv(
"CUDNN_HOME")
cuda_home_valid = (cuda_home is not None and os.path.exists(cuda_home))
cudnn_home_valid = (cudnn_home is not None and os.path.exists(
cudnn_home))
if not cuda_home_valid or not cudnn_home_valid:
raise BuildError(
"cuda_home and cudnn_home paths must be specified and valid.",
"cuda_home='{}' valid={}. cudnn_home='{}' valid={}"
.format(
cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))
return cuda_home, cudnn_home
def setup_tensorrt_vars(args):
tensorrt_home = ""
if args.use_tensorrt:
tensorrt_home = (args.tensorrt_home if args.tensorrt_home
else os.getenv("TENSORRT_HOME"))
tensorrt_home_valid = (tensorrt_home is not None and
os.path.exists(tensorrt_home))
if not tensorrt_home_valid:
raise BuildError(
"tensorrt_home paths must be specified and valid.",
"tensorrt_home='{}' valid={}."
.format(tensorrt_home, tensorrt_home_valid))
# Set maximum workspace size in byte for
# TensorRT (1GB = 1073741824 bytes).
os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = "1073741824"
# Set maximum number of iterations to detect unsupported nodes
# and partition the models for TensorRT.
os.environ["ORT_TENSORRT_MAX_PARTITION_ITERATIONS"] = "1000"
# Set minimum subgraph node size in graph partitioning
# for TensorRT.
os.environ["ORT_TENSORRT_MIN_SUBGRAPH_SIZE"] = "1"
# Set FP16 flag
os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
return tensorrt_home
def setup_migraphx_vars(args):
migraphx_home = None
if (args.use_migraphx):
print("migraphx_home = {}".format(args.migraphx_home))
migraphx_home = args.migraphx_home or os.getenv("MIGRAPHX_HOME") or None
migraphx_home_not_valid = (migraphx_home and not os.path.exists(migraphx_home))
if (migraphx_home_not_valid):
raise BuildError("migraphx_home paths must be specified and valid.",
"migraphx_home='{}' valid={}."
.format(migraphx_home, migraphx_home_not_valid))
return migraphx_home or ''
def setup_dml_build(args, cmake_path, build_dir, configs):
if args.use_dml:
for config in configs:
# Run the RESTORE_PACKAGES target to perform the initial
# NuGet setup.
cmd_args = [cmake_path,
"--build", get_config_build_dir(build_dir, config),
"--config", config,
"--target", "RESTORE_PACKAGES"]
run_subprocess(cmd_args)
def setup_rocm_build(args, configs):
rocm_home = None
if (args.use_rocm):
print("rocm_home = {}".format(args.rocm_home))
rocm_home = args.rocm_home or None
rocm_home_not_valid = (rocm_home and not os.path.exists(rocm_home))
if (rocm_home_not_valid):
raise BuildError("rocm_home paths must be specified and valid.",
"rocm_home='{}' valid={}."
.format(rocm_home, rocm_home_not_valid))
for config in configs:
amd_hipify(get_config_build_dir(args.build_dir, config))
return rocm_home or ''
def run_android_tests(args, source_dir, build_dir, config, cwd):
sdk_tool_paths = android.get_sdk_tool_paths(args.android_sdk_path)
device_dir = '/data/local/tmp'
def adb_push(src, dest, **kwargs):
return run_subprocess([sdk_tool_paths.adb, 'push', src, dest], **kwargs)
def adb_shell(*args, **kwargs):
return run_subprocess([sdk_tool_paths.adb, 'shell', *args], **kwargs)
def adb_install(*args, **kwargs):
return run_subprocess([sdk_tool_paths.adb, 'install', *args], **kwargs)
def run_adb_shell(cmd):
# GCOV_PREFIX_STRIP specifies the depth of the directory hierarchy to strip and
# GCOV_PREFIX specifies the root directory
# for creating the runtime code coverage files.
if args.code_coverage:
adb_shell(
'cd {0} && GCOV_PREFIX={0} GCOV_PREFIX_STRIP={1} {2}'.format(
device_dir, cwd.count(os.sep) + 1, cmd))
else:
adb_shell('cd {} && {}'.format(device_dir, cmd))
if args.android_abi == 'x86_64':
with contextlib.ExitStack() as context_stack:
if args.android_run_emulator:
avd_name = "ort_android"
system_image = "system-images;android-{};google_apis;{}".format(
args.android_api, args.android_abi)
android.create_virtual_device(sdk_tool_paths, system_image, avd_name)
emulator_proc = context_stack.enter_context(
android.start_emulator(
sdk_tool_paths=sdk_tool_paths,
avd_name=avd_name,
extra_args=[
"-partition-size", "2047",
"-wipe-data"]))
context_stack.callback(android.stop_emulator, emulator_proc)
adb_push('testdata', device_dir, cwd=cwd)
adb_push(
os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'),
device_dir, cwd=cwd)
adb_push('onnxruntime_test_all', device_dir, cwd=cwd)
adb_shell('chmod +x {}/onnxruntime_test_all'.format(device_dir))
adb_push('onnx_test_runner', device_dir, cwd=cwd)
adb_shell('chmod +x {}/onnx_test_runner'.format(device_dir))
run_adb_shell('{0}/onnxruntime_test_all'.format(device_dir))
if args.build_java:
gradle_executable = 'gradle'
# use the gradle wrapper if it exists, the gradlew should be setup under <repo root>/java
gradlew_path = os.path.join(source_dir, 'java',
'gradlew.bat' if is_windows() else 'gradlew')
if os.path.exists(gradlew_path):
gradle_executable = gradlew_path
android_test_path = os.path.join(cwd, "java", "androidtest", "android")
run_subprocess([gradle_executable, '--no-daemon',
'-DminSdkVer={}'.format(args.android_api),
'clean', 'connectedDebugAndroidTest'],
cwd=android_test_path)
if args.use_nnapi:
adb_shell('cd {0} && {0}/onnx_test_runner -e nnapi {0}/test'.format(device_dir))
else:
adb_shell('cd {0} && {0}/onnx_test_runner {0}/test'.format(device_dir))
# run shared_lib_test if necessary
if args.build_shared_lib:
adb_push('libonnxruntime.so', device_dir, cwd=cwd)
adb_push('onnxruntime_shared_lib_test', device_dir, cwd=cwd)
adb_shell('chmod +x {}/onnxruntime_shared_lib_test'.format(device_dir))
run_adb_shell(
'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} && {0}/onnxruntime_shared_lib_test'.format(
device_dir))
def run_ios_tests(args, source_dir, config, cwd):
run_subprocess(["xcodebuild", "test-without-building", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_test_all_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
run_subprocess(["xcodebuild", "test-without-building", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_shared_lib_test_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
if args.build_apple_framework:
package_test_py = os.path.join(source_dir, 'tools', 'ci_build', 'github', 'apple', 'test_ios_packages.py')
framework_info_file = os.path.join(cwd, 'framework_info.json')
dynamic_framework_dir = os.path.join(cwd, config + '-' + args.ios_sysroot)
static_framework_dir = os.path.join(cwd, config + '-' + args.ios_sysroot, 'static_framework')
# test dynamic framework
run_subprocess([sys.executable, package_test_py,
'--c_framework_dir', dynamic_framework_dir,
'--framework_info_file', framework_info_file], cwd=cwd)
# test static framework
run_subprocess([sys.executable, package_test_py,
'--c_framework_dir', static_framework_dir,
'--framework_info_file', framework_info_file], cwd=cwd)
def run_orttraining_test_orttrainer_frontend_separately(cwd):
class TestNameCollecterPlugin:
def __init__(self):
self.collected = set()
def pytest_collection_modifyitems(self, items):
for item in items:
print('item.name: ', item.name)
test_name = item.name
start = test_name.find('[')
if start > 0:
test_name = test_name[:start]
self.collected.add(test_name)
import pytest
plugin = TestNameCollecterPlugin()
test_script_filename = os.path.join(cwd, "orttraining_test_orttrainer_frontend.py")
pytest.main(['--collect-only', test_script_filename], plugins=[plugin])
for test_name in plugin.collected:
run_subprocess([
sys.executable, '-m', 'pytest',
'orttraining_test_orttrainer_frontend.py', '-v', '-k', test_name], cwd=cwd)
def run_training_python_frontend_tests(cwd):
# have to disable due to (with torchvision==0.9.1+cu102 which is required by ortmodule):
# Downloading http://yann.lecun.com/exdb/mnist/
# https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz
# Failed to download (trying next):
# HTTP Error 404: Not Found
# run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer.py'], cwd=cwd)
run_subprocess([sys.executable, 'onnxruntime_test_training_unit_tests.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_dict_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_and_dict_input'], cwd=cwd)
# TODO: use run_orttraining_test_orttrainer_frontend_separately to work around a sporadic segfault.
# shall revert to run_subprocess call once the segfault issue is resolved.
run_orttraining_test_orttrainer_frontend_separately(cwd)
# run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_frontend.py'], cwd=cwd)
run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_bert_toy_onnx.py'], cwd=cwd)
run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_checkpoint_storage.py'], cwd=cwd)
run_subprocess([
sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_checkpoint_functions.py'], cwd=cwd)
# Not technically training related, but it needs torch to be installed.
run_subprocess([
sys.executable, '-m', 'pytest', '-sv', 'test_pytorch_export_contrib_ops.py'], cwd=cwd)
def run_training_python_frontend_e2e_tests(cwd):
# frontend tests are to be added here:
log.info("Running python frontend e2e tests.")
run_subprocess(
[sys.executable, 'orttraining_run_frontend_batch_size_test.py', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
import torch
ngpus = torch.cuda.device_count()
if ngpus > 1:
bert_pretrain_script = 'orttraining_run_bert_pretrain.py'
# TODO: this test will be replaced with convergence test ported from backend
log.debug('RUN: mpirun -n {} ''-x' 'NCCL_DEBUG=INFO'' {} {} {}'.format(
ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'], cwd=cwd)
log.debug('RUN: mpirun -n {} {} orttraining_run_glue.py'.format(ngpus, sys.executable))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable, 'orttraining_run_glue.py'], cwd=cwd)
# with orttraining_run_glue.py.
# 1. we like to force to use single GPU (with CUDA_VISIBLE_DEVICES)
# for fine-tune tests.
# 2. need to run test separately (not to mix between fp16
# and full precision runs. this need to be investigated).
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_multiple_choice.py', 'ORTMultipleChoiceTest.test_bert_fp16_with_swag', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer_with_mixed_precision.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_mixed_precision'], cwd=cwd)
def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
for config in configs:
log.info("Running tests for %s configuration", config)
cwd = get_config_build_dir(build_dir, config)
cwd = os.path.abspath(cwd)
if args.android:
run_android_tests(args, source_dir, build_dir, config, cwd)
continue
elif args.ios:
run_ios_tests(args, source_dir, config, cwd)
continue
dll_path_list = []
if args.use_nuphar:
dll_path_list.append(os.path.join(
build_dir, config, "external", "tvm", config))
if args.use_tensorrt:
dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))
dll_path = None
if len(dll_path_list) > 0:
dll_path = os.pathsep.join(dll_path_list)
if not ctest_path:
if is_windows():
# Get the "Google Test Adapter" for vstest.
if not os.path.exists(os.path.join(cwd,
'googletestadapter.0.17.1')):
run_subprocess(
['nuget.exe', 'restore',
os.path.join(source_dir, 'packages.config'),
'-ConfigFile', os.path.join(source_dir, 'NuGet.config'),
'-PackagesDirectory', cwd])
cwd2 = os.path.join(cwd, config)
executables = ['onnxruntime_test_all.exe', 'onnxruntime_mlas_test.exe']
if args.build_shared_lib:
executables.append('onnxruntime_shared_lib_test.exe')
executables.append('onnxruntime_global_thread_pools_test.exe')
executables.append('onnxruntime_api_tests_without_env.exe')
run_subprocess(
['vstest.console.exe', '--parallel',
'--TestAdapterPath:..\\googletestadapter.0.17.1\\build\\_common', # noqa
'/Logger:trx', '/Enablecodecoverage', '/Platform:x64',
"/Settings:%s" % os.path.join(
source_dir, 'cmake\\codeconv.runsettings')] + executables,
cwd=cwd2, dll_path=dll_path)
else:
executables = ['onnxruntime_test_all', 'onnxruntime_mlas_test']
if args.build_shared_lib:
executables.append('onnxruntime_shared_lib_test')
executables.append('onnxruntime_global_thread_pools_test')
executables.append('onnxruntime_api_tests_without_env')
for exe in executables:
run_subprocess([os.path.join(cwd, exe)], cwd=cwd, dll_path=dll_path)
else:
ctest_cmd = [ctest_path, "--build-config", config, "--verbose", "--timeout", "7200"]
run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)
if args.enable_pybind:
# Disable python tests for TensorRT because many tests are
# not supported yet.
if args.use_tensorrt:
return
# Disable python tests in a reduced build as we don't know which ops have been included and which
# models can run.
if is_reduced_ops_build(args) or args.minimal_build is not None:
return
if is_windows():
cwd = os.path.join(cwd, config)
run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_contrib_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_sparse_matmul.py'],
cwd=cwd, dll_path=dll_path)
if args.enable_symbolic_shape_infer_tests:
run_subprocess([sys.executable, 'onnxruntime_test_python_symbolic_shape_infer.py'],
cwd=cwd, dll_path=dll_path)
# For CUDA enabled builds test IOBinding feature
if args.use_cuda:
# We need to have Torch installed to test the IOBinding feature
# which currently uses Torch's allocator to allocate GPU memory for testing
log.info("Testing IOBinding feature")
run_subprocess([sys.executable, 'onnxruntime_test_python_iobinding.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_mlops.py'], cwd=cwd, dll_path=dll_path)
if args.enable_training and args.use_cuda:
# run basic frontend tests
run_training_python_frontend_tests(cwd=cwd)
if args.build_eager_mode:
# run eager mode test
args_list = [sys.executable, os.path.join(cwd, 'eager_test')]
run_subprocess(args_list, cwd=cwd, dll_path=dll_path, python_path=cwd)
try:
import onnx # noqa
onnx_test = True
except ImportError as error:
log.exception(error)
log.warning("onnx is not installed. The ONNX tests will be skipped.")
onnx_test = False
if onnx_test:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_contrib_ops:
run_subprocess([sys.executable, '-m', 'unittest', 'discover', '-s', 'quantization'],
cwd=cwd, dll_path=dll_path)
if args.enable_transformers_tool_test:
import numpy
import google.protobuf
numpy_init_version = numpy.__version__
pb_init_version = google.protobuf.__version__
run_subprocess([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'],
cwd=SCRIPT_DIR)
run_subprocess([sys.executable, '-m', 'pytest', 'transformers'], cwd=cwd)
# Restore initial numpy/protobuf version in case other tests use it
run_subprocess([sys.executable, '-m', 'pip', 'install', 'numpy==' + numpy_init_version])
run_subprocess([sys.executable, '-m', 'pip', 'install', 'protobuf==' + pb_init_version])
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend_mlops.py'],
cwd=cwd, dll_path=dll_path)
run_subprocess([sys.executable,
os.path.join(source_dir, 'onnxruntime', 'test', 'onnx', 'gen_test_models.py'),
'--output_dir', 'test_models'], cwd=cwd)
if not args.skip_onnx_tests:
run_subprocess([os.path.join(cwd, 'onnx_test_runner'), 'test_models'], cwd=cwd)
if config != 'Debug':
run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)
if not args.skip_keras_test:
try:
import onnxmltools # noqa
import keras # noqa
onnxml_test = True
except ImportError:
log.warning(
"onnxmltools and keras are not installed. "
"The keras tests will be skipped.")
onnxml_test = False
if onnxml_test:
run_subprocess(
[sys.executable, 'onnxruntime_test_python_keras.py'],
cwd=cwd, dll_path=dll_path)
def nuphar_run_python_tests(build_dir, configs):
for config in configs:
if config == 'Debug':
continue
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
dll_path = os.path.join(build_dir, config, "external", "tvm", config)
run_subprocess(
[sys.executable, 'onnxruntime_test_python_nuphar.py'],
cwd=cwd, dll_path=dll_path)
def run_nodejs_tests(nodejs_binding_dir):
args = ['npm', 'test', '--', '--timeout=10000']
if is_windows():
args = ['cmd', '/c'] + args
run_subprocess(args, cwd=nodejs_binding_dir)
def build_python_wheel(
source_dir, build_dir, configs, use_cuda, cuda_version, use_rocm, rocm_version, use_dnnl,
use_tensorrt, use_openvino, use_nuphar, use_vitisai, use_acl, use_armnn, use_dml,
wheel_name_suffix, enable_training, nightly_build=False, default_training_package_device=False,
use_ninja=False, build_eager_mode=False):
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows() and not use_ninja:
cwd = os.path.join(cwd, config)
args = [sys.executable, os.path.join(source_dir, 'setup.py'),
'bdist_wheel']
# Any combination of the following arguments can be applied
if nightly_build:
args.append('--nightly_build')
if default_training_package_device:
args.append('--default_training_package_device')
if wheel_name_suffix:
args.append('--wheel_name_suffix={}'.format(wheel_name_suffix))
if enable_training:
args.append("--enable_training")
if build_eager_mode:
args.append("--disable_auditwheel_repair")
# The following arguments are mutually exclusive
if use_tensorrt:
args.append('--use_tensorrt')
elif use_cuda:
# The following line assumes no other EP is enabled
args.append('--wheel_name_suffix=gpu')
if cuda_version:
args.append('--cuda_version={}'.format(cuda_version))
elif use_rocm:
args.append('--use_rocm')
if rocm_version:
args.append('--rocm_version={}'.format(rocm_version))
elif use_openvino:
args.append('--use_openvino')
elif use_dnnl:
args.append('--use_dnnl')
elif use_nuphar:
args.append('--use_nuphar')
elif use_vitisai:
args.append('--use_vitisai')
elif use_acl:
args.append('--use_acl')
elif use_armnn:
args.append('--use_armnn')
elif use_dml:
args.append('--wheel_name_suffix=directml')
run_subprocess(args, cwd=cwd)
def derive_linux_build_property():
if is_windows():
return "/p:IsLinuxBuild=\"false\""
else:
return "/p:IsLinuxBuild=\"true\""
def build_nuget_package(source_dir, build_dir, configs, use_cuda, use_openvino, use_tensorrt, use_dnnl, use_nuphar):
if not (is_windows() or is_linux()):
raise BuildError(
'Currently csharp builds and nuget package creation is only supportted '
'on Windows and Linux platforms.')
csharp_build_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# derive package name and execution provider based on the build args
execution_provider = "/p:ExecutionProvider=\"None\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime\""
if use_openvino:
execution_provider = "/p:ExecutionProvider=\"openvino\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.OpenVino\""
elif use_tensorrt:
execution_provider = "/p:ExecutionProvider=\"tensorrt\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.TensorRT\""
elif use_dnnl:
execution_provider = "/p:ExecutionProvider=\"dnnl\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.DNNL\""
elif use_cuda:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.Gpu\""
elif use_nuphar:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.Nuphar\""
else:
pass
# set build directory based on build_dir arg
native_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_dir + "\""
# dotnet restore
cmd_args = ["dotnet", "restore", "OnnxRuntime.CSharp.sln", "--configfile", "Nuget.CSharp.config"]
run_subprocess(cmd_args, cwd=csharp_build_dir)
# build csharp bindings and create nuget package for each config
for config in configs:
if is_linux():
native_build_dir = os.path.join(native_dir, config)
cmd_args = ["make", "install", "DESTDIR=.//nuget-staging"]
run_subprocess(cmd_args, cwd=native_build_dir)
configuration = "/p:Configuration=\"" + config + "\""
cmd_args = ["dotnet", "msbuild", "OnnxRuntime.CSharp.sln", configuration, package_name, is_linux_build,
ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
cmd_args = [
"dotnet", "msbuild", "OnnxRuntime.CSharp.proj", "/t:CreatePackage",
package_name, configuration, execution_provider, is_linux_build, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
def run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl):
# Currently only running tests on windows.
if not is_windows():
return
csharp_source_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# define macros based on build args
macros = ""
if use_openvino:
macros += "USE_OPENVINO;"
if use_tensorrt:
macros += "USE_TENSORRT;"
if use_dnnl:
macros += "USE_DNNL;"
if use_cuda:
macros += "USE_CUDA;"
define_constants = ""
if macros != "":
define_constants = "/p:DefineConstants=\"" + macros + "\""
# set build directory based on build_dir arg
native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_build_dir + "\""
# Skip pretrained models test. Only run unit tests as part of the build
# add "--verbosity", "detailed" to this command if required
cmd_args = ["dotnet", "test", "test\\Microsoft.ML.OnnxRuntime.Tests\\Microsoft.ML.OnnxRuntime.Tests.csproj",
"--filter", "FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels",
is_linux_build, define_constants, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_source_dir)
def is_cross_compiling_on_apple(args):
if not is_macOS():
return False
if args.ios:
return True
if args.osx_arch != platform.machine():
return True
return False
def build_protoc_for_host(cmake_path, source_dir, build_dir, args):
if (args.arm or args.arm64 or args.arm64ec or args.enable_windows_store) and \
not (is_windows() or is_cross_compiling_on_apple(args)):
raise BuildError(
'Currently only support building protoc for Windows host while '
'cross-compiling for ARM/ARM64/Store and linux cross-compiling iOS')
log.info(
"Building protoc for host to be used in cross-compiled build process")
protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')
os.makedirs(protoc_build_dir, exist_ok=True)
# Generate step
cmd_args = [
cmake_path,
os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),
'-Dprotobuf_BUILD_TESTS=OFF',
'-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',
'-Dprotobuf_BUILD_SHARED_LIBS=OFF'
]
is_ninja = args.cmake_generator == 'Ninja'
if args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmd_args += ['-G', args.cmake_generator]
if is_windows():
if not is_ninja:
cmd_args += ['-T', 'host=x64']
elif is_macOS():
if args.use_xcode:
cmd_args += ['-G', 'Xcode']
# CMake < 3.18 has a bug setting system arch to arm64 (if not specified) for Xcode 12,
# protoc for host should be built using host architecture
# Explicitly specify the CMAKE_OSX_ARCHITECTURES for x86_64 Mac.
cmd_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(
'arm64' if platform.machine() == 'arm64' else 'x86_64')]
run_subprocess(cmd_args, cwd=protoc_build_dir)
# Build step
cmd_args = [cmake_path,
"--build", protoc_build_dir,
"--config", "Release",
"--target", "protoc"]
run_subprocess(cmd_args)
# Absolute protoc path is needed for cmake
config_dir = ''
suffix = ''
if (is_windows() and not is_ninja) or (is_macOS() and args.use_xcode):
config_dir = 'Release'
if is_windows():
suffix = '.exe'
expected_protoc_path = os.path.join(protoc_build_dir, config_dir, 'protoc' + suffix)
if not os.path.exists(expected_protoc_path):
raise BuildError("Couldn't find {}. Host build of protoc failed.".format(expected_protoc_path))
return expected_protoc_path
def generate_documentation(source_dir, build_dir, configs, validate):
# Randomly choose one build config
config = next(iter(configs))
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
contrib_op_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')
opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')
shutil.copy(os.path.join(source_dir, 'tools', 'python', 'gen_contrib_doc.py'), cwd)
shutil.copy(os.path.join(source_dir, 'tools', 'python', 'gen_opkernel_doc.py'), cwd)
# limit to just com.microsoft (excludes purely internal stuff like com.microsoft.nchwc).
run_subprocess([sys.executable, 'gen_contrib_doc.py', '--output_path', contrib_op_doc_path,
'--domains', 'com.microsoft'], cwd=cwd)
# we currently limit the documentation created by a build to the CPU and CUDA EPs.
# Run get_opkernel_doc.py directly if you need/want documentation from other EPs that are enabled in the build.
run_subprocess([sys.executable, 'gen_opkernel_doc.py', '--output_path', opkernel_doc_path,
'--providers', 'CPU', 'CUDA'], cwd=cwd)
if validate:
try:
have_diff = False
def diff_file(path, regenerate_qualifiers=''):
diff = subprocess.check_output(['git', 'diff', path], cwd=source_dir)
if diff:
nonlocal have_diff
have_diff = True
log.warning('The updated document {} is different from the checked in version. '
'Please regenerate the file{}, or copy the updated version from the '
'CI build\'s published artifacts if applicable.'.format(path, regenerate_qualifiers))
log.debug('diff:\n' + str(diff))
diff_file(opkernel_doc_path, ' with CPU and CUDA execution providers enabled')
diff_file(contrib_op_doc_path)
if have_diff:
# Output for the CI to publish the updated md files as an artifact
print('##vso[task.setvariable variable=DocUpdateNeeded]true')
raise BuildError('Generated documents have diffs. Check build output for details.')
except subprocess.CalledProcessError:
raise BuildError('git diff returned non-zero error code')
def main():
log.debug("Command line arguments:\n {}".format(" ".join(shlex.quote(arg) for arg in sys.argv[1:])))
args = parse_arguments()
cmake_extra_defines = (args.cmake_extra_defines
if args.cmake_extra_defines else [])
cross_compiling = args.arm or args.arm64 or args.arm64ec or args.android
# If there was no explicit argument saying what to do, default
# to update, build and test (for native builds).
if not (args.update or args.clean or args.build or args.test):
log.debug("Defaulting to running update, build [and test for native builds].")
args.update = True
args.build = True
if cross_compiling:
args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'
else:
args.test = True
if args.skip_tests:
args.test = False
if is_reduced_ops_build(args) and args.update:
from reduce_op_kernels import reduce_ops
reduce_ops(
config_path=args.include_ops_by_config,
enable_type_reduction=args.enable_reduced_operator_type_support,
use_cuda=args.use_cuda)
if args.use_tensorrt:
args.use_cuda = True
if args.build_wheel or args.gen_doc:
args.enable_pybind = True
if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
args.build_shared_lib = True
if args.build_nuget and cross_compiling:
raise BuildError('Currently nuget package creation is not supported while cross-compiling')
if args.enable_pybind and args.disable_rtti:
raise BuildError("Python bindings use typeid so you can't disable RTTI")
if args.enable_pybind and args.disable_exceptions:
raise BuildError('Python bindings require exceptions to be enabled.')
if args.minimal_build is not None and args.disable_ort_format_load:
raise BuildError('Minimal build requires loading ORT format models.')
if args.nnapi_min_api:
if not args.use_nnapi:
raise BuildError("Using --nnapi_min_api requires --use_nnapi")
if args.nnapi_min_api < 27:
raise BuildError("--nnapi_min_api should be 27+")
if args.build_wasm:
if not args.disable_wasm_exception_catching and args.disable_exceptions:
# When '--disable_exceptions' is set, we set '--disable_wasm_exception_catching' as well
args.disable_wasm_exception_catching = True
if args.test and args.disable_wasm_exception_catching and not args.minimal_build:
raise BuildError("WebAssembly tests need exception catching enabled to run if it's not minimal build")
if args.test and args.enable_wasm_debug_info:
# With flag --enable_wasm_debug_info, onnxruntime_test_all.wasm will be very huge (>1GB). This will fail
# Node.js when trying to load the .wasm file.
# To debug ONNX Runtime WebAssembly, use ONNX Runtime Web to debug ort-wasm.wasm in browsers.
raise BuildError("WebAssembly tests cannot be enabled with flag --enable_wasm_debug_info")
if args.code_coverage and not args.android:
raise BuildError("Using --code_coverage requires --android")
if args.gen_api_doc and len(args.config) != 1:
raise BuildError('Using --get-api-doc requires a single build config')
# Disabling unit tests for VAD-F as FPGA only supports
# models with NCHW layout
if args.use_openvino == "VAD-F_FP32":
args.test = False
# Disabling unit tests for GPU and MYRIAD on nuget creation
if args.use_openvino != "CPU_FP32" and args.build_nuget:
args.test = False
configs = set(args.config)
# setup paths and directories
# cmake_path and ctest_path can be None. For example, if a person only wants to run the tests, he/she doesn't need
# to have cmake/ctest.
cmake_path = resolve_executable_path(args.cmake_path)
ctest_path = None if args.use_vstest else resolve_executable_path(
args.ctest_path)
build_dir = args.build_dir
script_dir = os.path.realpath(os.path.dirname(__file__))
source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
# if using cuda, setup cuda paths and env vars
cuda_home, cudnn_home = setup_cuda_vars(args)
mpi_home = args.mpi_home
nccl_home = args.nccl_home
acl_home = args.acl_home
acl_libs = args.acl_libs
armnn_home = args.armnn_home
armnn_libs = args.armnn_libs
# if using tensorrt, setup tensorrt paths
tensorrt_home = setup_tensorrt_vars(args)
# if using migraphx, setup migraphx paths
migraphx_home = setup_migraphx_vars(args)
# if using rocm, setup rocm paths
rocm_home = setup_rocm_build(args, configs)
if args.update or args.build:
os.makedirs(build_dir, exist_ok=True)
log.info("Build started")
if args.update:
cmake_extra_args = []
path_to_protoc_exe = args.path_to_protoc_exe
if not args.skip_submodule_sync:
update_submodules(source_dir)
if is_windows():
cpu_arch = platform.architecture()[0]
if args.build_wasm:
cmake_extra_args = ['-G', 'Ninja']
elif args.cmake_generator == 'Ninja':
if cpu_arch == '32bit' or args.arm or args.arm64 or args.arm64ec:
raise BuildError(
"To cross-compile with Ninja, load the toolset "
"environment for the target processor (e.g. Cross "
"Tools Command Prompt for VS)")
cmake_extra_args = ['-G', args.cmake_generator]
elif args.arm or args.arm64 or args.arm64ec:
# Cross-compiling for ARM(64) architecture
# First build protoc for host to use during cross-compilation
if path_to_protoc_exe is None:
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if args.arm:
cmake_extra_args = ['-A', 'ARM']
elif args.arm64:
cmake_extra_args = ['-A', 'ARM64']
elif args.arm64ec:
cmake_extra_args = ['-A', 'ARM64EC']
cmake_extra_args += ['-G', args.cmake_generator]
# Cannot test on host build machine for cross-compiled
# builds (Override any user-defined behaviour for test if any)
if args.test:
log.warning(
"Cannot test on host build machine for cross-compiled "
"ARM(64) builds. Will skip test running after build.")
args.test = False
elif cpu_arch == '32bit' or args.x86:
cmake_extra_args = [
'-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator
]
else:
if args.msvc_toolset:
toolset = 'host=x64,version=' + args.msvc_toolset
else:
toolset = 'host=x64'
if args.cuda_version:
toolset += ',cuda=' + args.cuda_version
cmake_extra_args = [
'-A', 'x64', '-T', toolset, '-G', args.cmake_generator
]
if args.enable_windows_store:
cmake_extra_defines.append(
'CMAKE_TOOLCHAIN_FILE=' + os.path.join(
source_dir, 'cmake', 'store_toolchain.cmake'))
if args.enable_wcos:
cmake_extra_defines.append('CMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')
elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmake_extra_args += ['-G', args.cmake_generator]
elif is_macOS():
if args.use_xcode:
cmake_extra_args += ['-G', 'Xcode']
if not args.ios and not args.android and \
args.osx_arch == 'arm64' and platform.machine() == 'x86_64':
if args.test:
log.warning(
"Cannot test ARM64 build on X86_64. Will skip test running after build.")
args.test = False
if args.build_wasm:
emsdk_version = args.emsdk_version
emsdk_dir = os.path.join(source_dir, "cmake", "external", "emsdk")
emsdk_file = os.path.join(emsdk_dir, "emsdk.bat") if is_windows() else os.path.join(emsdk_dir, "emsdk")
log.info("Installing emsdk...")
run_subprocess([emsdk_file, "install", emsdk_version], cwd=emsdk_dir)
log.info("Activating emsdk...")
run_subprocess([emsdk_file, "activate", emsdk_version], cwd=emsdk_dir)
if (args.android or args.ios or args.enable_windows_store or args.build_wasm
or is_cross_compiling_on_apple(args)) and args.path_to_protoc_exe is None:
# Cross-compiling for Android, iOS, and WebAssembly
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if is_ubuntu_1604():
if (args.arm or args.arm64):
raise BuildError(
"Only Windows ARM(64) cross-compiled builds supported "
"currently through this script")
if not is_docker() and not args.use_acl and not args.use_armnn:
install_python_deps()
if args.enable_pybind and is_windows():
install_python_deps(args.numpy_version)
if args.enable_onnx_tests:
setup_test_data(build_dir, configs)
if args.use_cuda and args.cuda_version is None:
if is_windows():
# cuda_version is used while generating version_info.py on Windows.
raise BuildError("cuda_version must be specified on Windows.")
else:
args.cuda_version = ""
if args.use_rocm and args.rocm_version is None:
args.rocm_version = ""
if args.build_eager_mode:
eager_root_dir = os.path.join(source_dir, "orttraining", "orttraining", "eager")
if args.eager_customop_module and not args.eager_customop_header:
raise Exception('eager_customop_header must be provided when eager_customop_module is')
elif args.eager_customop_header and not args.eager_customop_module:
raise Exception('eager_customop_module must be provided when eager_customop_header is')
def gen_ops(gen_cpp_name: str, header_file: str, ops_module: str, custom_ops: bool):
gen_cpp_scratch_name = gen_cpp_name + '.working'
print(f'Generating ORT ATen overrides (output_file: {gen_cpp_name}, header_file: {header_file},'
f'ops_module: {ops_module}), custom_ops: {custom_ops}')
cmd = [sys.executable, os.path.join(os.path.join(eager_root_dir, 'opgen', 'opgen.py')),
'--output_file', gen_cpp_scratch_name,
'--ops_module', ops_module,
'--header_file', header_file]
if custom_ops:
cmd += ["--custom_ops"]
subprocess.check_call(cmd)
import filecmp
if (not os.path.isfile(gen_cpp_name) or
not filecmp.cmp(gen_cpp_name, gen_cpp_scratch_name, shallow=False)):
os.rename(gen_cpp_scratch_name, gen_cpp_name)
else:
os.remove(gen_cpp_scratch_name)
def gen_ort_ops():
# generate native aten ops
import torch
regdecs_path = os.path.join(os.path.dirname(torch.__file__), 'include/ATen/RegistrationDeclarations.h')
ops_module = os.path.join(eager_root_dir, 'opgen/opgen/atenops.py')
gen_ops(os.path.join(eager_root_dir, 'ort_aten.g.cpp'), regdecs_path, ops_module, False)
# generate custom ops
if not args.eager_customop_header:
args.eager_customop_header = os.path.realpath(os.path.join(
eager_root_dir,
"opgen",
"CustomOpDeclarations.h"))
if not args.eager_customop_module:
args.eager_customop_module = os.path.join(eager_root_dir, 'opgen/opgen/custom_ops.py')
gen_ops(os.path.join(eager_root_dir, 'ort_customops.g.cpp'),
args.eager_customop_header, args.eager_customop_module, True)
gen_ort_ops()
if args.enable_external_custom_op_schemas and not is_linux():
raise BuildError("Registering external custom op schemas is only supported on Linux.")
generate_build_tree(
cmake_path, source_dir, build_dir, cuda_home, cudnn_home, rocm_home, mpi_home, nccl_home,
tensorrt_home, migraphx_home, acl_home, acl_libs, armnn_home, armnn_libs,
path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args)
if args.clean:
clean_targets(cmake_path, build_dir, configs)
# if using DML, perform initial nuget package restore
setup_dml_build(args, cmake_path, build_dir, configs)
if args.build:
if args.parallel < 0:
raise BuildError("Invalid parallel job count: {}".format(args.parallel))
num_parallel_jobs = os.cpu_count() if args.parallel == 0 else args.parallel
build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, args.target)
if args.test:
run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)
if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
nuphar_run_python_tests(build_dir, configs)
# run node.js binding tests
if args.build_nodejs and not args.skip_nodejs_tests:
nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "js", "node"))
run_nodejs_tests(nodejs_binding_dir)
# Build packages after running the tests.
# NOTE: if you have a test that rely on a file which only get copied/generated during packaging step, it could
# fail unexpectedly. Similar, if your packaging step forgot to copy a file into the package, we don't know it
# either.
if args.build:
if args.build_wheel:
nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')
default_training_package_device = bool(os.getenv('DEFAULT_TRAINING_PACKAGE_DEVICE') == '1')
build_python_wheel(
source_dir,
build_dir,
configs,
args.use_cuda,
args.cuda_version,
args.use_rocm,
args.rocm_version,
args.use_dnnl,
args.use_tensorrt,
args.use_openvino,
args.use_nuphar,
args.use_vitisai,
args.use_acl,
args.use_armnn,
args.use_dml,
args.wheel_name_suffix,
args.enable_training,
nightly_build=nightly_build,
default_training_package_device=default_training_package_device,
use_ninja=(args.cmake_generator == 'Ninja'),
build_eager_mode=args.build_eager_mode
)
if args.build_nuget:
build_nuget_package(
source_dir,
build_dir,
configs,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl,
args.use_nuphar
)
if args.test and args.build_nuget:
run_csharp_tests(
source_dir,
build_dir,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl)
if args.gen_doc and (args.build or args.test):
generate_documentation(source_dir, build_dir, configs, args.gen_doc == 'validate')
if args.gen_api_doc and (args.build or args.test):
print('Generating Python doc for ORTModule...')
docbuild_dir = os.path.join(source_dir, 'tools', 'doc')
run_subprocess(['bash', 'builddoc.sh', os.path.dirname(sys.executable),
source_dir, build_dir, args.config[0]], cwd=docbuild_dir)
log.info("Build complete")
if __name__ == "__main__":
try:
sys.exit(main())
except BaseError as e:
log.error(str(e))
sys.exit(1)
| 46.791223 | 120 | 0.624537 |
793fcf64d7e17d555e8c265819b3c4bec07e230c | 6,743 | py | Python | demo_fit_teapot.py | apoorvjain25/opendr-1 | 5467263c9ef46118399686b56150556bb2a9fcb9 | [
"MIT"
] | 7 | 2019-12-22T08:36:47.000Z | 2020-11-20T15:28:42.000Z | demo_fit_teapot.py | apoorvjain25/opendr-1 | 5467263c9ef46118399686b56150556bb2a9fcb9 | [
"MIT"
] | null | null | null | demo_fit_teapot.py | apoorvjain25/opendr-1 | 5467263c9ef46118399686b56150556bb2a9fcb9 | [
"MIT"
] | 2 | 2019-07-03T08:57:57.000Z | 2020-03-23T11:16:45.000Z | __author__ = 'pol'
import matplotlib.pyplot as plt
import glfw
import generative_models
from utils import *
import OpenGL.GL as GL
from utils import *
plt.ion()
from OpenGL import contextdata
import sys
#__GL_THREADED_OPTIMIZATIONS
#Main script options:r
glModes = ['glfw','mesa']
glMode = glModes[0]
np.random.seed(1)
width, height = (128, 128)
numPixels = width*height
shapeIm = [width, height,3]
win = -1
clip_start = 0.01
clip_end = 10
frustum = {'near': clip_start, 'far': clip_end, 'width': width, 'height': height}
if glMode == 'glfw':
#Initialize base GLFW context for the Demo and to share context among all renderers.
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.DEPTH_BITS,32)
glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)
win = glfw.create_window(width, height, "Demo", None, None)
glfw.make_context_current(win)
else:
from OpenGL.raw.osmesa._types import *
from OpenGL.raw.osmesa import mesa
winShared = None
gtCamElevation = np.pi/3
gtCamHeight = 0.3 #meters
chLightAzimuthGT = ch.Ch([0])
chLightElevationGT = ch.Ch([np.pi/3])
chLightIntensityGT = ch.Ch([1])
chGlobalConstantGT = ch.Ch([0.5])
chCamElGT = ch.Ch([gtCamElevation])
chCamHeightGT = ch.Ch([gtCamHeight])
focalLenght = 35 ##milimeters
chCamFocalLengthGT = ch.Ch([35/1000])
#Move camera backwards to match the elevation desired as it looks at origin:
# bottomElev = np.pi/2 - (gtCamElevation + np.arctan(17.5 / focalLenght ))
# ZshiftGT = ch.Ch(-gtCamHeight * np.tan(bottomElev)) #Move camera backwards to match the elevation desired as it looks at origin.
ZshiftGT = ch.Ch([-0.5])
# Baackground cube - add to renderer by default.
verticesCube, facesCube, normalsCube, vColorsCube, texturesListCube, haveTexturesCube = getCubeData()
uvCube = np.zeros([verticesCube.shape[0],2])
chCubePosition = ch.Ch([0, 0, 0])
chCubeScale = ch.Ch([10.0])
chCubeAzimuth = ch.Ch([0])
chCubeVCColors = ch.Ch(np.ones_like(vColorsCube) * 1) #white cube
v_transf, vn_transf = transformObject([verticesCube], [normalsCube], chCubeScale, chCubeAzimuth, chCubePosition)
v_scene = [v_transf]
f_list_scene = [[[facesCube]]]
vc_scene = [[chCubeVCColors]]
vn_scene = [vn_transf]
uv_scene = [[uvCube]]
haveTextures_list_scene = [haveTexturesCube]
textures_list_scene = [texturesListCube]
#Example object 1: teapot
chPositionGT = ch.Ch([0, 0, 0.])
# chPositionGT = ch.Ch([-0.23, 0.36, 0.])
chScaleGT = ch.Ch([1.0, 1.0, 1.0])
chColorGT = ch.Ch([1.0, 1.0, 1.0])
chAzimuthGT = ch.Ch([np.pi/3])
chVColorsGT = ch.Ch([0.7, 0.0, 0.0])
import shape_model
# %% Load data
#You can get the teapot data from here: https://drive.google.com/open?id=1JO5ZsXHb_KTsjFMFx7rxY0YVAwnM3TMY
filePath = 'data/teapotModel.pkl'
teapotModel = shape_model.loadObject(filePath)
faces = teapotModel['faces']
# %% Sample random shape Params
latentDim = np.shape(teapotModel['ppcaW'])[1]
shapeParams = np.zeros(latentDim)
chShapeParams = ch.Ch(shapeParams.copy())
meshLinearTransform = teapotModel['meshLinearTransform']
W = teapotModel['ppcaW']
b = teapotModel['ppcaB']
chVertices = shape_model.VerticesModel(chShapeParams=chShapeParams, meshLinearTransform=meshLinearTransform, W=W, b=b)
chVertices.init()
chVertices = ch.dot(geometry.RotateZ(-np.pi/2)[0:3, 0:3], chVertices.T).T
smFaces = [[faces]]
smVColors = [chVColorsGT * np.ones(chVertices.shape)]
smUVs = ch.Ch(np.zeros([chVertices.shape[0],2]))
smHaveTextures = [[False]]
smTexturesList = [[None]]
chVertices = chVertices - ch.mean(chVertices, axis=0)
# minZ = ch.min(chVertices[:, 2])
# chMinZ = ch.min(chVertices[:, 2])
# zeroZVerts = chVertices[:, 2] - chMinZ
# chVertices = ch.hstack([chVertices[:, 0:2], zeroZVerts.reshape([-1, 1])])
chVertices = chVertices * 0.09
smCenter = ch.array([0, 0, 0.1])
smVertices = [chVertices]
chNormals = shape_model.chGetNormals(chVertices, faces)
smNormals = [chNormals]
center = smCenter
UVs = smUVs
v = smVertices
vn = smNormals
Faces = smFaces
VColors = smVColors
HaveTextures = smHaveTextures
TexturesList = smTexturesList
v_transf, vn_transf = transformObject(v, vn, chScaleGT, chAzimuthGT, chPositionGT)
vc_illuminated = computeGlobalAndDirectionalLighting(vn_transf, VColors, chLightAzimuthGT, chLightElevationGT, chLightIntensityGT, chGlobalConstantGT)
v_scene += [v_transf]
f_list_scene += [smFaces]
vc_scene += [vc_illuminated]
vn_scene += [vn_transf]
uv_scene += [UVs]
haveTextures_list_scene += [HaveTextures]
textures_list_scene += [TexturesList]
#COnfigure lighting
lightParamsGT = {'chLightAzimuth': chLightAzimuthGT, 'chLightElevation': chLightElevationGT, 'chLightIntensity': chLightIntensityGT, 'chGlobalConstant':chGlobalConstantGT}
c0 = width/2 #principal point
c1 = height/2 #principal point
a1 = 3.657 #Aspect ratio / mm to pixels
a2 = 3.657 #Aspect ratio / mm to pixels
cameraParamsGT = {'Zshift':ZshiftGT, 'chCamEl': chCamElGT, 'chCamHeight':chCamHeightGT, 'chCamFocalLength':chCamFocalLengthGT, 'a':np.array([a1,a2]), 'width': width, 'height':height, 'c':np.array([c0, c1])}
#Create renderer object
renderer = createRenderer(glMode, cameraParamsGT, v_scene, vc_scene, f_list_scene, vn_scene, uv_scene, haveTextures_list_scene,
textures_list_scene, frustum, None)
# Initialize renderer
renderer.overdraw = True
renderer.nsamples = 8
renderer.msaa = True #Without anti-aliasing optimization often does not work.
renderer.initGL()
renderer.initGLTexture()
renderer.debug = False
winShared = renderer.win
plt.figure()
plt.title('GT object')
plt.imshow(renderer.r)
rendererGT = ch.Ch(renderer.r.copy()) #Fix the GT position
#Vary teapot PCA shape:
chShapeParams[0] = chShapeParams[0].r + 2
chShapeParams[1] = chShapeParams[1].r - 2
plt.figure()
plt.title('Init object')
renderer.r
plt.imshow(renderer.r)
variances = ch.Ch([0.3])**2
negLikModel = -ch.sum(generative_models.LogGaussianModel(renderer=renderer, groundtruth=rendererGT, variances=variances, useMask=True)) / numPixels
global iter
iter = 0
def cb(_):
pass
global method
methods = ['dogleg', 'minimize', 'BFGS', 'L-BFGS-B', 'Nelder-Mead'] #Nelder-mead is the finite difference simplex method
method = 1
options = {'disp': True, 'maxiter': 5}
ch.minimize({'raw': negLikModel}, bounds=None, method=methods[method], x0=chShapeParams, callback=cb, options=options)
plt.figure()
plt.title('Fitted object')
renderer.r
plt.imshow(renderer.r)
plt.show(0.1)
#Clean up.
renderer.makeCurrentContext()
renderer.clear()
contextdata.cleanupContext(contextdata.getContext())
# glfw.destroy_window(renderer.win)
del renderer | 30.511312 | 206 | 0.742844 |
793fcfeda5aedee4df07972fefdcf8715fa9f63c | 421 | py | Python | confirmation/migrations/0010_alter_confirmation_expiry_date.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | 17,004 | 2015-09-25T18:27:24.000Z | 2022-03-31T22:02:32.000Z | confirmation/migrations/0010_alter_confirmation_expiry_date.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | 20,344 | 2015-09-25T19:02:42.000Z | 2022-03-31T23:54:40.000Z | confirmation/migrations/0010_alter_confirmation_expiry_date.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | 7,271 | 2015-09-25T18:48:39.000Z | 2022-03-31T21:06:11.000Z | # Generated by Django 3.2.5 on 2021-08-02 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0009_confirmation_expiry_date_backfill"),
]
operations = [
migrations.AlterField(
model_name="confirmation",
name="expiry_date",
field=models.DateTimeField(db_index=True),
),
]
| 22.157895 | 67 | 0.631829 |
793fd02c861f289206ff7c130bc67e5d7521040f | 7,191 | py | Python | pycycle/elements/combustor.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | null | null | null | pycycle/elements/combustor.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | null | null | null | pycycle/elements/combustor.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | null | null | null | """ Class definition for Combustor."""
import numpy as np
import openmdao.api as om
from pycycle.constants import AIR_FUEL_ELEMENTS, AIR_ELEMENTS
from pycycle.thermo.thermo import Thermo
from pycycle.thermo.cea.thermo_add import ThermoAdd
from pycycle.thermo.cea.species_data import Properties, janaf
from pycycle.elements.duct import PressureLoss
from pycycle.flow_in import FlowIn
from pycycle.passthrough import PassThrough
class Combustor(om.Group):
"""
A combustor that adds a fuel to an incoming flow mixture and burns it
--------------
Flow Stations
--------------
Fl_I
Fl_O
-------------
Design
-------------
inputs
--------
Fl_I:FAR
dPqP
MN
outputs
--------
Wfuel
-------------
Off-Design
-------------
inputs
--------
Fl_I:FAR
dPqP
area
outputs
--------
Wfuel
"""
def initialize(self):
self.options.declare('inflow_thermo_data', default=None,
desc='Thermodynamic data set for incoming flow. This only needs to be set if different thermo data is used for incoming flow and outgoing flow.', recordable=False)
self.options.declare('thermo_data', default=janaf,
desc='Thermodynamic data set for flow. This is used for incoming and outgoing flow unless inflow_thermo_data is set, in which case it is used only for outgoing flow.', recordable=False)
self.options.declare('inflow_elements', default=AIR_ELEMENTS,
desc='set of elements present in the air flow')
self.options.declare('air_fuel_elements', default=AIR_FUEL_ELEMENTS,
desc='set of elements present in the fuel')
self.options.declare('design', default=True,
desc='Switch between on-design and off-design calculation.')
self.options.declare('statics', default=True,
desc='If True, calculate static properties.')
self.options.declare('fuel_type', default="JP-7",
desc='Type of fuel.')
def setup(self):
thermo_data = self.options['thermo_data']
if self.options['inflow_thermo_data'] is not None:
# Set the inflow thermodynamic data package if it is different from the outflow one
inflow_thermo_data = self.options['inflow_thermo_data']
else:
# Set the inflow thermodynamic data package if it is the same as the outflow one
inflow_thermo_data = thermo_data
inflow_elements = self.options['inflow_elements']
air_fuel_elements = self.options['air_fuel_elements']
design = self.options['design']
statics = self.options['statics']
fuel_type = self.options['fuel_type']
num_air_element = len(inflow_elements)
# Create combustor flow station
in_flow = FlowIn(fl_name='Fl_I')
self.add_subsystem('in_flow', in_flow, promotes=['Fl_I:tot:*', 'Fl_I:stat:*'])
# Perform combustor engineering calculations
self.add_subsystem('mix_fuel',
ThermoAdd(inflow_thermo_data=inflow_thermo_data, mix_thermo_data=thermo_data,
inflow_elements=inflow_elements, mix_elements=fuel_type),
promotes=['Fl_I:stat:W', ('mix:ratio', 'Fl_I:FAR'), 'Fl_I:tot:composition', 'Fl_I:tot:h', ('mix:W','Wfuel'), 'Wout'])
# Pressure loss
prom_in = [('Pt_in', 'Fl_I:tot:P'),'dPqP']
self.add_subsystem('p_loss', PressureLoss(), promotes_inputs=prom_in)
# Calculate vitiated flow station properties
vit_flow = Thermo(mode='total_hP', fl_name='Fl_O:tot',
method='CEA',
thermo_kwargs={'elements':air_fuel_elements,
'spec':thermo_data})
self.add_subsystem('vitiated_flow', vit_flow, promotes_outputs=['Fl_O:*'])
self.connect("mix_fuel.mass_avg_h", "vitiated_flow.h")
self.connect("mix_fuel.composition_out", "vitiated_flow.composition")
self.connect("p_loss.Pt_out","vitiated_flow.P")
if statics:
if design:
# Calculate static properties.
out_stat = Thermo(mode='static_MN', fl_name='Fl_O:stat',
method='CEA',
thermo_kwargs={'elements':air_fuel_elements,
'spec':thermo_data})
prom_in = ['MN']
prom_out = ['Fl_O:stat:*']
self.add_subsystem('out_stat', out_stat, promotes_inputs=prom_in,
promotes_outputs=prom_out)
self.connect("mix_fuel.composition_out", "out_stat.composition")
self.connect('Fl_O:tot:S', 'out_stat.S')
self.connect('Fl_O:tot:h', 'out_stat.ht')
self.connect('Fl_O:tot:P', 'out_stat.guess:Pt')
self.connect('Fl_O:tot:gamma', 'out_stat.guess:gamt')
self.connect('Wout','out_stat.W')
else:
# Calculate static properties.
out_stat = Thermo(mode='static_A', fl_name='Fl_O:stat',
method='CEA',
thermo_kwargs={'elements':air_fuel_elements,
'spec':thermo_data})
prom_in = ['area']
prom_out = ['Fl_O:stat:*']
self.add_subsystem('out_stat', out_stat, promotes_inputs=prom_in,
promotes_outputs=prom_out)
self.connect("mix_fuel.composition_out", "out_stat.composition")
self.connect('Fl_O:tot:S', 'out_stat.S')
self.connect('Fl_O:tot:h', 'out_stat.ht')
self.connect('Fl_O:tot:P', 'out_stat.guess:Pt')
self.connect('Fl_O:tot:gamma', 'out_stat.guess:gamt')
self.connect('Wout','out_stat.W')
else:
self.add_subsystem('W_passthru', PassThrough('Wout', 'Fl_O:stat:W', 1.0, units= "lbm/s"),
promotes=['*'])
if __name__ == "__main__":
p = om.Problem()
p.model = om.Group()
p.model.add_subsystem('comp', MixFuel(), promotes=['*'])
p.model.add_subsystem('d1', om.IndepVarComp('Fl_I:stat:W', val=1.0, units='lbm/s', desc='weight flow'),
promotes=['*'])
p.model.add_subsystem('d2', om.IndepVarComp('Fl_I:FAR', val=0.2, desc='Fuel to air ratio'), promotes=['*'])
p.model.add_subsystem('d3', om.IndepVarComp('Fl_I:tot:h', val=1.0, units='Btu/lbm', desc='total enthalpy'),
promotes=['*'])
p.model.add_subsystem('d4', om.IndepVarComp('fuel_Tt', val=518.0, units='degR', desc='fuel temperature'),
promotes=['*'])
p.setup(check=False, force_alloc_complex=True)
p.run_model()
p.check_partials(compact_print=True, method='cs')
| 40.398876 | 214 | 0.564317 |
793fd057b4691ab9593cae9d99d179188d733362 | 6,458 | py | Python | django_simple_api/fields.py | Django-Simple-API/django-simple-api | cbda3b9667b4b00cd01a85a54f2f5b17926dfef6 | [
"MIT"
] | 35 | 2021-02-25T09:57:05.000Z | 2022-02-18T09:12:45.000Z | django_simple_api/fields.py | AberSheeran/django-simple-api | 52c5efbbce930378178625511c7a3d39a1004d79 | [
"MIT"
] | 3 | 2021-02-23T15:14:32.000Z | 2021-12-19T22:42:45.000Z | django_simple_api/fields.py | AberSheeran/django-simple-api | 52c5efbbce930378178625511c7a3d39a1004d79 | [
"MIT"
] | 7 | 2020-12-15T12:44:48.000Z | 2021-02-01T06:34:30.000Z | from typing import Any, Optional
from pydantic.fields import NoArgAnyCallable, Undefined
from ._fields import BodyInfo, CookieInfo, HeaderInfo, PathInfo, QueryInfo
__all__ = ["Path", "Query", "Header", "Cookie", "Body"]
def Path(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
exclusive: bool = False,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param exclusive: decide whether this field receives all parameters
:param **extra: any additional keyword arguments will be added as is to the schema
"""
field_info = PathInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclusive=exclusive,
**extra,
)
field_info._validate()
return field_info
def Query(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
exclusive: bool = False,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param exclusive: decide whether this field receives all parameters
:param **extra: any additional keyword arguments will be added as is to the schema
"""
field_info = QueryInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclusive=exclusive,
**extra,
)
field_info._validate()
return field_info
def Header(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
exclusive: bool = False,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param exclusive: decide whether this field receives all parameters
:param **extra: any additional keyword arguments will be added as is to the schema
"""
field_info = HeaderInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclusive=exclusive,
**extra,
)
field_info._validate()
return field_info
def Cookie(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
exclusive: bool = False,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param exclusive: decide whether this field receives all parameters
:param **extra: any additional keyword arguments will be added as is to the schema
"""
field_info = CookieInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclusive=exclusive,
**extra,
)
field_info._validate()
return field_info
def Body(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
exclusive: bool = False,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param exclusive: decide whether this field receives all parameters
:param **extra: any additional keyword arguments will be added as is to the schema
"""
field_info = BodyInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclusive=exclusive,
**extra,
)
field_info._validate()
return field_info
| 34.351064 | 102 | 0.67126 |
793fd1885909289761761a7cbc45240a62c5b1ad | 8,556 | py | Python | wavefront_sdk/common/utils.py | susanjlindner/wavefront-sdk-python | c6c55765c059b563295614f2e39430ac597de465 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | wavefront_sdk/common/utils.py | susanjlindner/wavefront-sdk-python | c6c55765c059b563295614f2e39430ac597de465 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | wavefront_sdk/common/utils.py | susanjlindner/wavefront-sdk-python | c6c55765c059b563295614f2e39430ac597de465 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Utils module contains useful function for preparing and processing data.
@author: Hao Song ([email protected])
"""
import re
import io
from gzip import GzipFile
import threading
# pylint: disable=too-few-public-methods
class AtomicCounter(object):
"""An atomic, thread-safe incrementing counter."""
def __init__(self, initial=0):
"""
Construct Atomic Counter.
@param initial: Initial value of the counter
"""
self.value = initial
self._lock = threading.Lock()
def increment(self, num=1):
"""
Increment atomic counter value.
@param num: Num to be increased, 1 by default
@return: Current value after increment
"""
with self._lock:
self.value += num
return self.value
def get(self):
"""
Get current atomic counter value.
@return: Current atomic counter value.
@rtype: float or int
"""
return self.value
def chunks(data_list, batch_size):
"""
Split list of data into chunks with fixed batch size.
@param data_list: List of data
@param batch_size: Batch size of each chunk
@return: Return a lazy generator object for iteration
"""
for i in range(0, len(data_list), batch_size):
yield data_list[i:i + batch_size]
def gzip_compress(data, compresslevel=9):
"""
Compress data using GZIP.
@param data: Data to compress
@param compresslevel: Compress Level
@return: Compressed data
"""
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) \
as gzip_file:
gzip_file.write(data)
return buf.getvalue()
def sanitize(string):
"""
Sanitize a string, replace whitespace with "-".
@param string: Input string
@return: Sanitized string
"""
whitespace_sanitized = re.sub(r"[\s]+", "-", string)
if '"' in whitespace_sanitized:
return '"' + re.sub(r"[\"]+", '\\\\\"', whitespace_sanitized) + '"'
return '"' + whitespace_sanitized + '"'
def is_blank(string):
"""
Check is a string is black or not, either none or only contains whitespace.
@param string: String to be checked
@return: Is blank or not
"""
return string is None or len(string) == 0 or string.isspace()
# return len(re.sub(r"[\s]+", "", s)) == 0
# pylint: disable=too-many-arguments
def metric_to_line_data(name, value, timestamp, source, tags, default_source):
"""
Metric Data to String.
Wavefront Metrics Data format
<metricName> <metricValue> [<timestamp>] source=<source> [pointTags]
Example: "new-york.power.usage 42422 1533531013 source=localhost
datacenter=dc1"
@param name: Metric Name
@type name: str
@param value: Metric Value
@type value: float
@param timestamp: Timestamp
@type timestamp: long
@param source: Source
@type source: str
@param tags: Tags
@type tags: dict
@param default_source:
@type default_source: str
@return: String
"""
if is_blank(name):
raise ValueError("Metrics name cannot be blank")
if is_blank(source):
source = default_source
str_builder = [sanitize(name), str(float(value))]
if timestamp is not None:
str_builder.append(str(int(timestamp)))
str_builder.append("source=" + sanitize(source))
if tags is not None:
for key, val in tags.items():
if is_blank(key):
raise ValueError("Metric point tag key cannot be blank")
if is_blank(val):
raise ValueError("Metric point tag value cannot be blank")
str_builder.append(sanitize(key) + '=' + sanitize(val))
return ' '.join(str_builder) + '\n'
# pylint: disable=too-many-arguments
def histogram_to_line_data(name, centroids, histogram_granularities, timestamp,
source, tags, default_source):
"""
Wavefront Histogram Data format.
{!M | !H | !D} [<timestamp>] #<count> <mean> [centroids] <histogramName>
source=<source> [pointTags]
Example: "!M 1533531013 #20 30.0 #10 5.1 request.latency source=appServer1
region=us-west"
@param name: Histogram Name
@type name: str
@param centroids: List of centroids(pairs)
@type centroids: list
@param histogram_granularities: Histogram Granularities
@type histogram_granularities: set
@param timestamp: Timestamp
@type timestamp: long
@param source: Source
@type source: str
@param tags: Tags
@type tags: dict
@param default_source: Default Source
@type default_source: str
@return: String data of Histogram
"""
if is_blank(name):
raise ValueError("Histogram name cannot be blank")
if not histogram_granularities:
raise ValueError("Histogram granularities cannot be null or empty")
if not centroids:
raise ValueError("A distribution should have at least one centroid")
if is_blank(source):
source = default_source
line_builder = []
for histogram_granularity in histogram_granularities:
str_builder = [histogram_granularity]
if timestamp is not None:
str_builder.append(str(int(timestamp)))
for centroid_1, centroid_2 in centroids:
str_builder.append("#" + str(centroid_2))
str_builder.append(str(centroid_1))
str_builder.append(sanitize(name))
str_builder.append("source=" + sanitize(source))
if tags is not None:
for key in tags:
if is_blank(key):
raise ValueError("Histogram tag key cannot be blank")
if is_blank(tags[key]):
raise ValueError("Histogram tag value cannot be blank")
str_builder.append(sanitize(key) + '=' + sanitize(tags[key]))
line_builder.append(' '.join(str_builder))
return '\n'.join(line_builder) + '\n'
# pylint: disable=too-many-arguments,unused-argument,too-many-locals
def tracing_span_to_line_data(name, start_millis, duration_millis, source,
trace_id, span_id, parents, follows_from, tags,
span_logs, default_source):
"""
Wavefront Tracing Span Data format.
<tracingSpanName> source=<source> [pointTags] <start_millis>
<duration_milli_seconds>
Example: "getAllUsers source=localhost
traceId=7b3bf470-9456-11e8-9eb6-529269fb1459
spanId=0313bafe-9457-11e8-9eb6-529269fb1459
parent=2f64e538-9457-11e8-9eb6-529269fb1459
application=Wavefront http.method=GET
1533531013 343500"
@param name: Span Name
@type name: str
@param start_millis: Start time
@type start_millis: long
@param duration_millis: Duration time
@type duration_millis: long
@param source: Source
@type source: str
@param trace_id: Trace ID
@type trace_id: UUID
@param span_id: Span ID
@type span_id: UUID
@param parents: Parents Span ID
@type parents: List of UUID
@param follows_from: Follows Span ID
@type follows_from: List of UUID
@param tags: Tags
@type tags: list
@param span_logs: Span Log
@param default_source: Default Source
@type default_source: str
@return: String data of tracing span
"""
if is_blank(name):
raise ValueError("Span name cannot be blank")
if is_blank(source):
source = default_source
str_builder = [sanitize(name),
"source=" + sanitize(source),
"traceId=" + str(trace_id),
"spanId=" + str(span_id)]
if parents is not None:
for uuid in parents:
str_builder.append("parent=" + str(uuid))
if follows_from is not None:
for uuid in follows_from:
str_builder.append("followsFrom=" + str(uuid))
if tags is not None:
tag_set = set()
for key, val in tags:
if is_blank(key):
raise ValueError("Span tag key cannot be blank")
if is_blank(val):
raise ValueError("Span tag val cannot be blank")
cur_tag = sanitize(key) + "=" + sanitize(val)
if cur_tag not in tag_set:
str_builder.append(cur_tag)
tag_set.add(cur_tag)
str_builder.append(str(start_millis))
str_builder.append(str(duration_millis))
return ' '.join(str_builder) + '\n'
| 31 | 79 | 0.627396 |
793fd1ab7e2382de2a421c546eb3f0e13ce25082 | 21,388 | py | Python | train/trainer/run.py | mickuehl/artificial-podcast | ba3e0558863db989778bdfd8316665a6fe0129a8 | [
"MIT"
] | null | null | null | train/trainer/run.py | mickuehl/artificial-podcast | ba3e0558863db989778bdfd8316665a6fe0129a8 | [
"MIT"
] | null | null | null | train/trainer/run.py | mickuehl/artificial-podcast | ba3e0558863db989778bdfd8316665a6fe0129a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from . import utils
# setup Google Cloud Logging
# see https://googleapis.dev/python/logging/latest/stdlib-usage.html
#import google.cloud.logging
#from google.cloud.logging.handlers import CloudLoggingHandler
#loggerClient = google.cloud.logging.Client()
#handler = CloudLoggingHandler(loggerClient, name="train.art-podcast")
logger = logging.getLogger(__name__)
#logger.addHandler(handler)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": "Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(
default=None,
metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
block_size: Optional[int] = field(
default=None,
metadata={
"help": "Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
# remove the '--job-dir' attribute that gets inserted by the AI platform and that interfers with the argument parsing
try:
idx = sys.argv.index('--job-dir')
sys.argv.pop(idx)
sys.argv.pop(idx)
except:
print("No '--job-dir' found and removed.")
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
# added downloading of training and validations files from Google Cloud Storage
if data_args.train_file is not None:
if data_args.train_file.startswith('gs://'):
local_path = utils.download_files_from_gcs(data_args.train_file,'training.txt')[0]
data_files["train"] = local_path
else:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
if data_args.validation_file.startswith('gs://'):
local_path = utils.download_files_from_gcs(data_args.validation_file,'validation.txt')[0]
data_files["validation"] = local_path
else:
data_files["validation"] = data_args.validation_file
extension = (
data_args.train_file.split(".")[-1]
if data_args.train_file is not None
else data_args.validation_file.split(".")[-1]
)
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples[text_column_name])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
)
return output
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = lm_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = lm_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 42.776 | 152 | 0.670516 |
793fd3a6e376629495bf687dd8f8a0e936cf896c | 17,274 | py | Python | custom_components/unifiprotect/sensor.py | xp/unifiprotect | 6c63dab52db5c11ea0bae948f54c7aa3c07b828b | [
"MIT"
] | null | null | null | custom_components/unifiprotect/sensor.py | xp/unifiprotect | 6c63dab52db5c11ea0bae948f54c7aa3c07b828b | [
"MIT"
] | null | null | null | custom_components/unifiprotect/sensor.py | xp/unifiprotect | 6c63dab52db5c11ea0bae948f54c7aa3c07b828b | [
"MIT"
] | null | null | null | """This component provides sensors for UniFi Protect."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
import logging
from typing import Any
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_BYTES,
DATA_RATE_BYTES_PER_SECOND,
DATA_RATE_MEGABITS_PER_SECOND,
ELECTRIC_POTENTIAL_VOLT,
LIGHT_LUX,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
TIME_SECONDS,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from pyunifiprotect.data import NVR, Camera, Event
from pyunifiprotect.data.base import ProtectAdoptableDeviceModel
from pyunifiprotect.data.devices import Sensor
from .const import DOMAIN
from .data import ProtectData
from .entity import (
EventThumbnailMixin,
ProtectDeviceEntity,
ProtectNVREntity,
async_all_device_entities,
)
from .models import ProtectRequiredKeysMixin
_LOGGER = logging.getLogger(__name__)
OBJECT_TYPE_NONE = "none"
DEVICE_CLASS_DETECTION = "unifiprotect__detection"
@dataclass
class ProtectSensorEntityDescription(ProtectRequiredKeysMixin, SensorEntityDescription):
"""Describes UniFi Protect Sensor entity."""
precision: int | None = None
def get_ufp_value(self, obj: ProtectAdoptableDeviceModel | NVR) -> Any:
"""Return value from UniFi Protect device."""
value = super().get_ufp_value(obj)
if isinstance(value, float) and self.precision:
value = round(value, self.precision)
return value
def _get_uptime(obj: ProtectAdoptableDeviceModel | NVR) -> datetime | None:
if obj.up_since is None:
return None
# up_since can vary slightly over time
# truncate to ensure no extra state_change events fire
return obj.up_since.replace(second=0, microsecond=0)
def _get_nvr_recording_capacity(obj: Any) -> int:
assert isinstance(obj, NVR)
if obj.storage_stats.capacity is None:
return 0
return int(obj.storage_stats.capacity.total_seconds())
def _get_nvr_memory(obj: Any) -> float | None:
assert isinstance(obj, NVR)
memory = obj.system_info.memory
if memory.available is None or memory.total is None:
return None
return (1 - memory.available / memory.total) * 100
def _get_alarm_sound(obj: ProtectAdoptableDeviceModel | NVR) -> str:
assert isinstance(obj, Sensor)
alarm_type = OBJECT_TYPE_NONE
if (
obj.is_alarm_detected
and obj.last_alarm_event is not None
and obj.last_alarm_event.metadata is not None
):
alarm_type = obj.last_alarm_event.metadata.alarm_type or OBJECT_TYPE_NONE
return alarm_type.lower()
ALL_DEVICES_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
ProtectSensorEntityDescription(
key="uptime",
name="Uptime",
icon="mdi:clock",
device_class=SensorDeviceClass.TIMESTAMP,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
ufp_value_fn=_get_uptime,
),
ProtectSensorEntityDescription(
key="ble_signal",
name="Bluetooth Signal Strength",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="bluetooth_connection_state.signal_strength",
ufp_required_field="bluetooth_connection_state.signal_strength",
),
ProtectSensorEntityDescription(
key="phy_rate",
name="Link Speed",
native_unit_of_measurement=DATA_RATE_MEGABITS_PER_SECOND,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="wired_connection_state.phy_rate",
ufp_required_field="wired_connection_state.phy_rate",
),
ProtectSensorEntityDescription(
key="wifi_signal",
name="WiFi Signal Strength",
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="wifi_connection_state.signal_strength",
ufp_required_field="wifi_connection_state.signal_strength",
),
)
CAMERA_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
ProtectSensorEntityDescription(
key="oldest_recording",
name="Oldest Recording",
device_class=SensorDeviceClass.TIMESTAMP,
entity_category=EntityCategory.DIAGNOSTIC,
ufp_value="stats.video.recording_start",
),
ProtectSensorEntityDescription(
key="storage_used",
name="Storage Used",
native_unit_of_measurement=DATA_BYTES,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="stats.storage.used",
),
ProtectSensorEntityDescription(
key="write_rate",
name="Disk Write Rate",
native_unit_of_measurement=DATA_RATE_BYTES_PER_SECOND,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="stats.storage.rate",
precision=2,
),
ProtectSensorEntityDescription(
key="voltage",
name="Voltage",
device_class=SensorDeviceClass.VOLTAGE,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="voltage",
# no feature flag, but voltage will be null if device does not have voltage sensor
# (i.e. is not G4 Doorbell or not on 1.20.1+)
ufp_required_field="voltage",
precision=2,
),
)
CAMERA_DISABLED_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
ProtectSensorEntityDescription(
key="stats_rx",
name="Received Data",
native_unit_of_measurement=DATA_BYTES,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.TOTAL_INCREASING,
ufp_value="stats.rx_bytes",
),
ProtectSensorEntityDescription(
key="stats_tx",
name="Transferred Data",
native_unit_of_measurement=DATA_BYTES,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.TOTAL_INCREASING,
ufp_value="stats.tx_bytes",
),
)
SENSE_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
ProtectSensorEntityDescription(
key="battery_level",
name="Battery Level",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="battery_status.percentage",
),
ProtectSensorEntityDescription(
key="light_level",
name="Light Level",
native_unit_of_measurement=LIGHT_LUX,
device_class=SensorDeviceClass.ILLUMINANCE,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="stats.light.value",
ufp_enabled="is_light_sensor_enabled",
),
ProtectSensorEntityDescription(
key="humidity_level",
name="Humidity Level",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.HUMIDITY,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="stats.humidity.value",
ufp_enabled="is_humidity_sensor_enabled",
),
ProtectSensorEntityDescription(
key="temperature_level",
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="stats.temperature.value",
ufp_enabled="is_temperature_sensor_enabled",
),
ProtectSensorEntityDescription(
key="alarm_sound",
name="Alarm Sound Detected",
ufp_value_fn=_get_alarm_sound,
ufp_enabled="is_alarm_sensor_enabled",
),
)
NVR_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
ProtectSensorEntityDescription(
key="uptime",
name="Uptime",
icon="mdi:clock",
device_class=SensorDeviceClass.TIMESTAMP,
entity_category=EntityCategory.DIAGNOSTIC,
ufp_value_fn=_get_uptime,
),
ProtectSensorEntityDescription(
key="storage_utilization",
name="Storage Utilization",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:harddisk",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="storage_stats.utilization",
precision=2,
),
ProtectSensorEntityDescription(
key="record_rotating",
name="Type: Timelapse Video",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:server",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="storage_stats.storage_distribution.timelapse_recordings.percentage",
precision=2,
),
ProtectSensorEntityDescription(
key="record_timelapse",
name="Type: Continuous Video",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:server",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="storage_stats.storage_distribution.continuous_recordings.percentage",
precision=2,
),
ProtectSensorEntityDescription(
key="record_detections",
name="Type: Detections Video",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:server",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="storage_stats.storage_distribution.detections_recordings.percentage",
precision=2,
),
ProtectSensorEntityDescription(
key="resolution_HD",
name="Resolution: HD Video",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:cctv",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="storage_stats.storage_distribution.hd_usage.percentage",
precision=2,
),
ProtectSensorEntityDescription(
key="resolution_4K",
name="Resolution: 4K Video",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:cctv",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="storage_stats.storage_distribution.uhd_usage.percentage",
precision=2,
),
ProtectSensorEntityDescription(
key="resolution_free",
name="Resolution: Free Space",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:cctv",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="storage_stats.storage_distribution.free.percentage",
precision=2,
),
ProtectSensorEntityDescription(
key="record_capacity",
name="Recording Capacity",
native_unit_of_measurement=TIME_SECONDS,
icon="mdi:record-rec",
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value_fn=_get_nvr_recording_capacity,
),
)
NVR_DISABLED_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
ProtectSensorEntityDescription(
key="cpu_utilization",
name="CPU Utilization",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:speedometer",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="system_info.cpu.average_load",
),
ProtectSensorEntityDescription(
key="cpu_temperature",
name="CPU Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value="system_info.cpu.temperature",
),
ProtectSensorEntityDescription(
key="memory_utilization",
name="Memory Utilization",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:memory",
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
state_class=SensorStateClass.MEASUREMENT,
ufp_value_fn=_get_nvr_memory,
precision=2,
),
)
MOTION_SENSORS: tuple[ProtectSensorEntityDescription, ...] = (
ProtectSensorEntityDescription(
key="detected_object",
name="Detected Object",
device_class=DEVICE_CLASS_DETECTION,
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up sensors for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectDeviceSensor,
all_descs=ALL_DEVICES_SENSORS,
camera_descs=CAMERA_SENSORS + CAMERA_DISABLED_SENSORS,
sense_descs=SENSE_SENSORS,
)
entities += _async_motion_entities(data)
entities += _async_nvr_entities(data)
async_add_entities(entities)
@callback
def _async_motion_entities(
data: ProtectData,
) -> list[ProtectDeviceEntity]:
entities: list[ProtectDeviceEntity] = []
for device in data.api.bootstrap.cameras.values():
if not device.feature_flags.has_smart_detect:
continue
for description in MOTION_SENSORS:
entities.append(ProtectEventSensor(data, device, description))
_LOGGER.debug(
"Adding sensor entity %s for %s",
description.name,
device.name,
)
return entities
@callback
def _async_nvr_entities(
data: ProtectData,
) -> list[ProtectDeviceEntity]:
entities: list[ProtectDeviceEntity] = []
device = data.api.bootstrap.nvr
for description in NVR_SENSORS + NVR_DISABLED_SENSORS:
entities.append(ProtectNVRSensor(data, device, description))
_LOGGER.debug("Adding NVR sensor entity %s", description.name)
return entities
class ProtectDeviceSensor(ProtectDeviceEntity, SensorEntity):
"""A Ubiquiti UniFi Protect Sensor."""
entity_description: ProtectSensorEntityDescription
def __init__(
self,
data: ProtectData,
device: ProtectAdoptableDeviceModel,
description: ProtectSensorEntityDescription,
) -> None:
"""Initialize an UniFi Protect sensor."""
super().__init__(data, device, description)
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
self._attr_native_value = self.entity_description.get_ufp_value(self.device)
class ProtectNVRSensor(ProtectNVREntity, SensorEntity):
"""A Ubiquiti UniFi Protect Sensor."""
entity_description: ProtectSensorEntityDescription
def __init__(
self,
data: ProtectData,
device: NVR,
description: ProtectSensorEntityDescription,
) -> None:
"""Initialize an UniFi Protect sensor."""
super().__init__(data, device, description)
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
self._attr_native_value = self.entity_description.get_ufp_value(self.device)
class ProtectEventSensor(ProtectDeviceSensor, EventThumbnailMixin):
"""A UniFi Protect Device Sensor with access tokens."""
device: Camera
@callback
def _async_get_event(self) -> Event | None:
"""Get event from Protect device."""
event: Event | None = None
if (
self.device.is_smart_detected
and self.device.last_smart_detect_event is not None
and len(self.device.last_smart_detect_event.smart_detect_types) > 0
):
event = self.device.last_smart_detect_event
return event
@callback
def _async_update_device_from_protect(self) -> None:
# do not call ProtectDeviceSensor method since we want event to get value here
EventThumbnailMixin._async_update_device_from_protect(self)
if self._event is None:
self._attr_native_value = OBJECT_TYPE_NONE
else:
self._attr_native_value = self._event.smart_detect_types[0].value
| 34.071006 | 90 | 0.708 |
793fd451119f4447e83bf9343dec06062b75bb71 | 143 | py | Python | src/samstatic_flask/config.py | city10th/samstatic_flask | f8a8934f724d85ab2eb7ae680aa7d14747377a94 | [
"MIT"
] | null | null | null | src/samstatic_flask/config.py | city10th/samstatic_flask | f8a8934f724d85ab2eb7ae680aa7d14747377a94 | [
"MIT"
] | null | null | null | src/samstatic_flask/config.py | city10th/samstatic_flask | f8a8934f724d85ab2eb7ae680aa7d14747377a94 | [
"MIT"
] | null | null | null | from enum import Enum
class Option(str, Enum):
DEACTIVE = 'DEACTIVE'
ALL = 'ALL'
ALLOWED = 'ALLOWED'
DISALLOWED = 'DISALLOWED' | 20.428571 | 29 | 0.643357 |
793fd4d27dccea538610516cb505bd3ea81e61f8 | 7,220 | py | Python | misc/cvpr2020_challenge/train_baselines.py | ioanacroi/collaborative-experts | 063e85abe7127cce4c32b6953b20a8d0b7cc7635 | [
"Apache-2.0"
] | 237 | 2019-07-31T16:04:35.000Z | 2022-03-28T14:33:32.000Z | misc/cvpr2020_challenge/train_baselines.py | caijincen712/CE | ffd6c0bc8dc3c0375e40982bf5dce2c35359f1b6 | [
"Apache-2.0"
] | 26 | 2019-08-11T09:19:34.000Z | 2022-03-28T14:15:27.000Z | misc/cvpr2020_challenge/train_baselines.py | caijincen712/CE | ffd6c0bc8dc3c0375e40982bf5dce2c35359f1b6 | [
"Apache-2.0"
] | 46 | 2019-08-05T10:18:08.000Z | 2022-03-27T16:12:56.000Z | """Simple baselines for the CVPR2020 video pentathlon challenge
%run -i misc/cvpr2020_challenge/train_baselines.py --mini_train --train_single_epoch
ipy misc/cvpr2020_challenge/train_baselines.py -- --yaspify
"""
import sys
import json
import argparse
from typing import Dict, List
from pathlib import Path
from datetime import datetime
from subprocess import PIPE, Popen
from yaspi.yaspi import Yaspi
from typeguard import typechecked
from utils.util import filter_cmd_args
from misc.cvpr2020_challenge.prepare_submission import generate_predictions
@typechecked
def launch_and_monitor_cmd(cmd: str) -> List:
lines = []
with Popen(cmd.split(), stdout=PIPE, bufsize=1, universal_newlines=True) as proc:
for line in proc.stdout:
print(line, end='')
lines.append(line)
return lines
@typechecked
def dataset_name2json_key(dataset: str) -> str:
# Ensure that ActivityNet dataset key is stored with CamelCase convention when
# storing in jsons
json_key = {"activity-net": "ActivityNet"}.get(dataset, dataset)
return json_key
@typechecked
def parse_paths_from_logs(logs: List[str], queries: List[str],) -> Dict[str, str]:
prefixes = {
"predictions": "Saved similarity matrix predictions to",
"ckpts": "The best performing ckpt can be found at",
}
paths = {}
for key in queries:
prefix = prefixes[key]
matches = [x.startswith(prefix) for x in logs]
found = sum(matches)
assert found == 1, f"Expected to find one match for `{prefix}`, found {found}"
pos = matches.index(True)
paths[key] = logs[pos].rstrip().split(" ")[-1]
return paths
@typechecked
def train_baseline_for_dataset(
challenge_config_dir: Path,
mini_train: bool,
train_single_epoch: bool,
device: int,
dataset: str,
) -> Dict[str, str]:
folder = dataset.lower()
config_path = challenge_config_dir / folder / "baseline-public-trainval.json"
flags = f" --config {config_path} --device {device}"
if mini_train:
flags += f" --mini_train"
if train_single_epoch:
flags += f" --train_single_epoch"
cmd = f"python -u train.py {flags}"
print(f"Launching baseline for {dataset} with command: {cmd}")
logs = launch_and_monitor_cmd(cmd=cmd)
exp_paths = parse_paths_from_logs(logs, queries=["predictions", "ckpts"])
return exp_paths
@typechecked
def train_baselines(
dest_dir: Path,
challenge_config_dir: Path,
datasets: List[str],
slurm: bool,
mini_train: bool,
train_single_epoch: bool,
device: int,
timestamp: str,
aggregate: bool = False,
):
challenge_phase = "public_server_val"
fname = f"baselines-{timestamp}-{challenge_phase}-{'-'.join(datasets)}.json"
outputs = {key: {} for key in ("predictions", "ckpts")}
dest_dir.mkdir(exist_ok=True, parents=True)
dest_paths = {key: dest_dir / f"{key}-{fname}" for key in outputs}
if aggregate:
for dataset in datasets:
fname = f"baselines-{timestamp}-{challenge_phase}-{dataset}.json"
for key in outputs:
with open(dest_dir / f"{key}-{fname}", "r") as f:
outputs[key].update(json.load(f))
else:
for dataset in datasets:
exp_paths = train_baseline_for_dataset(
dataset=dataset,
device=device,
mini_train=mini_train,
train_single_epoch=train_single_epoch,
challenge_config_dir=challenge_config_dir,
)
dataset_key = dataset_name2json_key(dataset)
outputs["ckpts"][dataset_key] = exp_paths["ckpts"]
outputs["predictions"][dataset_key] = {challenge_phase:
exp_paths["predictions"]}
for key, dest_path in dest_paths.items():
print(f"Writing baseline {key} list to {dest_path}")
with open(dest_path, "w") as f:
json.dump(outputs[key], f, indent=4, sort_keys=True)
if not slurm:
generate_predictions(
refresh=True,
validate=True,
dest_dir=dest_dir,
challenge_phase=challenge_phase,
predictions_path=dest_paths["predictions"],
)
@typechecked
def train_baselines_with_yaspi(
yaspi_defaults_path: Path,
common_kwargs: Dict,
timestamp: str,
datasets: List[str],
):
with open(yaspi_defaults_path, "r") as f:
yaspi_defaults = json.load(f)
cmd_args = sys.argv
remove = ["--yaspify", "--datasets"]
cmd_args = filter_cmd_args(cmd_args, remove=remove)
cmd_args.extend(["--timestamp", timestamp])
base_cmd = f"python {' '.join(cmd_args)}"
job_name = f"baselines-{timestamp}"
job_queue = [f'"--datasets {dataset}"' for dataset in datasets]
job_queue = " ".join(job_queue)
job = Yaspi(
cmd=base_cmd,
job_queue=job_queue,
job_name=job_name,
job_array_size=len(datasets),
**yaspi_defaults,
)
job.submit(watch=True, conserve_resources=5)
train_baselines(**common_kwargs, aggregate=True)
def main():
parser = argparse.ArgumentParser(description="train baselines")
parser.add_argument("--debug", action="store_true", help="launch in debug mode")
parser.add_argument("--timestamp")
parser.add_argument("--yaspify", action="store_true", help="launch via slurm")
parser.add_argument("--slurm", action="store_true")
parser.add_argument("--device", type=int, default=0,
help="gpu device to use for training")
parser.add_argument("--datasets", nargs="+",
default=["MSRVTT", "MSVD", "DiDeMo", "YouCook2", "activity-net"],
help="The challenge datasets to train baselines for")
parser.add_argument('--mini_train', action="store_true")
parser.add_argument('--train_single_epoch', action="store_true")
parser.add_argument('--dest_dir', type=Path,
default="data/cvpr2020-challenge-submissions")
parser.add_argument('--challenge_config_dir', type=Path,
default="configs/cvpr2020-challenge")
parser.add_argument("--yaspi_defaults_path", type=Path,
default="misc/yaspi_gpu_defaults.json")
args = parser.parse_args()
if args.timestamp:
timestamp = args.timestamp
else:
timestamp = datetime.now().strftime(r"%Y-%m-%d_%H-%M-%S")
common_kwargs = dict(
device=args.device,
timestamp=timestamp,
dest_dir=args.dest_dir,
datasets=args.datasets,
slurm=args.slurm,
mini_train=args.mini_train,
train_single_epoch=args.train_single_epoch,
challenge_config_dir=args.challenge_config_dir,
)
if args.yaspify:
train_baselines_with_yaspi(
datasets=args.datasets,
timestamp=timestamp,
common_kwargs=common_kwargs,
yaspi_defaults_path=args.yaspi_defaults_path
)
else:
train_baselines(**common_kwargs)
if __name__ == "__main__":
main()
| 34.380952 | 89 | 0.637535 |
793fd4e654cd8d0c5cec88831a29444362d6642d | 3,708 | py | Python | pscheduler-test-latency/tests/spec-to-cli_test.py | igarny/pscheduler | 0ab6e68bb3adb808e1116bab0eb7438bf4c31e2c | [
"Apache-2.0"
] | 47 | 2016-09-28T14:19:10.000Z | 2022-03-21T13:26:47.000Z | pscheduler-test-latency/tests/spec-to-cli_test.py | igarny/pscheduler | 0ab6e68bb3adb808e1116bab0eb7438bf4c31e2c | [
"Apache-2.0"
] | 993 | 2016-07-07T19:30:32.000Z | 2022-03-21T10:25:52.000Z | pscheduler-test-latency/tests/spec-to-cli_test.py | mfeit-internet2/pscheduler-dev | d2cd4065a6fce88628b0ca63edc7a69f2672dad2 | [
"Apache-2.0"
] | 36 | 2016-09-15T09:39:45.000Z | 2021-06-23T15:05:13.000Z | """
tests for the spec-to-cli command
"""
import pscheduler
import unittest
class CliToSpecTest(pscheduler.TestSpecToCliUnitTest):
name = 'latency'
def test_srcdst_opts(self):
#dest-only
expected_args = {
'--dest': '10.0.0.1'
}
self.assert_spec_to_cli('{"schema": 1, "dest": "10.0.0.1"}', expected_args)
#source and dest
expected_args = {
'--source': '10.0.0.1',
'--dest': '10.0.0.2'
}
self.assert_spec_to_cli('{"schema": 1, "source": "10.0.0.1", "dest": "10.0.0.2"}', expected_args)
#source-node and dest-node
expected_args = {
'--source': '10.0.0.1',
'--dest': '10.0.0.2',
'--source-node': '10.1.1.1',
'--dest-node': '10.1.1.2'
}
self.assert_spec_to_cli("""
{ "schema": 1,
"source": "10.0.0.1",
"dest": "10.0.0.2",
"source-node": "10.1.1.1",
"dest-node": "10.1.1.2"}""", expected_args)
def test_packet_opts(self):
#packet-options
expected_args = {
'--source': '10.0.0.1',
'--dest': '10.0.0.2',
'--packet-count': 600,
'--packet-interval': '0.001',
'--packet-timeout': '1',
'--packet-padding': '1000',
'--bucket-width': '0.001'
}
self.assert_spec_to_cli("""
{ "schema": 1,
"source": "10.0.0.1",
"dest": "10.0.0.2",
"packet-count": 600,
"packet-interval": 0.001,
"packet-timeout": 1,
"packet-padding": 1000,
"bucket-width": 0.001}""", expected_args)
def test_ctrlport_opts(self):
#control ports
expected_args = {
'--source': '10.0.0.1',
'--dest': '10.0.0.2',
'--ctrl-port': 861,
}
self.assert_spec_to_cli("""
{ "schema": 1,
"source": "10.0.0.1",
"dest": "10.0.0.2",
"ctrl-port": 861}""", expected_args)
def test_dataports_opts(self):
#data ports
expected_args = {
'--source': '10.0.0.1',
'--dest': '10.0.0.2',
'--data-ports': '1000-2000',
}
self.assert_spec_to_cli("""
{ "schema": 1,
"source": "10.0.0.1",
"dest": "10.0.0.2",
"data-ports": {"lower": 1000, "upper": 2000}}""", expected_args)
def test_ip_opts(self):
#ip opts
expected_args = {
'--source': '10.0.0.1',
'--dest': '10.0.0.2',
'--ip-version': 4,
'--ip-tos': 128,
}
self.assert_spec_to_cli("""
{ "schema": 1,
"source": "10.0.0.1",
"dest": "10.0.0.2",
"ip-version": 4,
"ip-tos": 128}""", expected_args)
def test_boolean_opts(self):
#boolean opts
expected_args = {
'--source': '10.0.0.1',
'--dest': '10.0.0.2',
'--flip': None,
'--reverse': None,
'--output-raw': None,
}
self.assert_spec_to_cli("""
{ "schema": 2,
"source": "10.0.0.1",
"dest": "10.0.0.2",
"flip": true,
"reverse": true,
"output-raw": true}""", expected_args)
def test_failure(self):
#dest-only
self.run_cmd('{"garbage": "10.0.0.1"}', expected_status=1, json_out=False)
if __name__ == '__main__':
unittest.main()
| 29.903226 | 105 | 0.424757 |
793fd5f00182b17808a289e4bf2ae9dc48a9346d | 7,910 | py | Python | tests/models/commondb/measurement/measurement_integration_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 4 | 2018-08-26T09:11:36.000Z | 2019-05-24T12:01:02.000Z | tests/models/commondb/measurement/measurement_integration_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 52 | 2018-08-24T12:51:22.000Z | 2020-12-28T04:59:42.000Z | tests/models/commondb/measurement/measurement_integration_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 1 | 2018-10-11T07:57:55.000Z | 2018-10-11T07:57:55.000Z | # pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.commondb.measurement.measurement_integration_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing Measurement module integrations."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKMeasurementRecord
from ramstk.models.dbtables import RAMSTKMeasurementTable
from tests import (
SystemTestDeleteMethods,
SystemTestGetterSetterMethods,
SystemTestInsertMethods,
SystemTestSelectMethods,
)
@pytest.mark.usefixtures("test_attributes", "integration_test_table_model")
class TestSelectMeasurement(SystemTestSelectMethods):
"""Class for testing Measurement table do_select() and do_select_all() methods."""
__test__ = True
_do_select_msg = "request_select_measurement"
_record = RAMSTKMeasurementRecord
_tag = "measurement"
@pytest.mark.usefixtures("integration_test_table_model")
class TestUpdateMeasurement:
"""Class for testing Measurement table do_update() and do_update_all() methods."""
__test__ = True
_next_id = 0
_record = RAMSTKMeasurementRecord
_tag = "measurement"
_update_id = 1
def on_succeed_update(self, tree):
"""Listen for succeed_update messages."""
assert isinstance(tree, Tree)
assert tree.get_node(1).data["measurement"].description == "New Measurement"
assert tree.get_node(1).data["measurement"].code == "New Code"
print("\033[36m\nsucceed_update_measurement topic was broadcast")
def on_succeed_update_all(self):
"""Listen for succeed_update messages."""
print(
f"\033[36m\n\tsucceed_update_all topic was broadcast on update all "
f"{self._tag}s"
)
def on_fail_update_wrong_data_type(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == (
f"The value for one or more attributes for {self._tag} ID "
f"{self._update_id} was the wrong type."
)
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on wrong data "
f"type."
)
def on_fail_update_root_node_wrong_data_type(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == "Attempting to update the root node 0."
print(f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on root node.")
def on_fail_update_non_existent_id(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert (
message == f"Attempted to save non-existent {self._tag} with {self._tag} "
f"ID 100."
)
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on non-existent "
f"ID."
)
def on_fail_update_no_data_package(self, logger_name, message):
"""Listen for do_log_debug messages."""
assert logger_name == "DEBUG"
assert message == f"No data package found for {self._tag} ID {self._update_id}."
print(
f"\033[35m\n\tfail_update_{self._tag} topic was broadcast on no data "
f"package."
)
@pytest.mark.integration
def test_do_update(self, integration_test_table_model):
"""Should update record attribute."""
pub.subscribe(self.on_succeed_update, "succeed_update_measurement")
integration_test_table_model.tree.get_node(1).data[
"measurement"
].description = "New Measurement"
integration_test_table_model.tree.get_node(1).data[
"measurement"
].code = "New Code"
integration_test_table_model.do_update(1)
pub.unsubscribe(self.on_succeed_update, "succeed_update_measurement")
assert (
integration_test_table_model.tree.get_node(1)
.data["measurement"]
.description
== "New Measurement"
)
assert (
integration_test_table_model.tree.get_node(1).data["measurement"].code
== "New Code"
)
integration_test_table_model.tree.get_node(1).data[
"measurement"
].description = "Integrated Circuit"
integration_test_table_model.tree.get_node(1).data[
"measurement"
].code = "Old Code"
integration_test_table_model.do_update(1)
assert (
integration_test_table_model.tree.get_node(1)
.data["measurement"]
.description
== "Integrated Circuit"
)
assert (
integration_test_table_model.tree.get_node(1).data["measurement"].code
== "Old Code"
)
@pytest.mark.integration
def test_do_update_all(self, integration_test_table_model):
"""Should update all the records in the database."""
pub.subscribe(self.on_succeed_update_all, "succeed_update_all_measurement")
pub.sendMessage("request_update_all_measurement")
pub.unsubscribe(self.on_succeed_update_all, "succeed_update_all_measurement")
@pytest.mark.integration
def test_do_update_wrong_data_type(self, integration_test_table_model):
"""Should send the do_log_debug message with wrong attribute data type."""
pub.subscribe(self.on_fail_update_wrong_data_type, "fail_update_measurement")
integration_test_table_model.tree.get_node(1).data[
"measurement"
].mild_maxt_limit = "Hi ya"
integration_test_table_model.do_update(1)
pub.unsubscribe(self.on_fail_update_wrong_data_type, "fail_update_measurement")
@pytest.mark.integration
def test_do_update_root_node_wrong_data_type(self, integration_test_table_model):
"""Should send the do_log_debug message when attempting to update root node."""
pub.subscribe(
self.on_fail_update_root_node_wrong_data_type, "fail_update_measurement"
)
integration_test_table_model.tree.get_node(1).data[
"measurement"
].mild_maxt_limit = "Hey bud"
integration_test_table_model.do_update(0)
pub.unsubscribe(
self.on_fail_update_root_node_wrong_data_type, "fail_update_measurement"
)
@pytest.mark.integration
def test_do_update_non_existent_id(self, integration_test_table_model):
"""Should send the do_log_debug message with non-existent ID in tree."""
pub.subscribe(self.on_fail_update_non_existent_id, "fail_update_measurement")
integration_test_table_model.do_select_all({"measurement_id": 1})
integration_test_table_model.do_update("skullduggery")
pub.unsubscribe(self.on_fail_update_non_existent_id, "fail_update_measurement")
@pytest.mark.integration
def test_do_update_no_data_package(self, integration_test_table_model):
"""Should send the do_log_debug message with no data package in tree."""
pub.subscribe(self.on_fail_update_no_data_package, "fail_update_measurement")
integration_test_table_model.tree.get_node(1).data.pop("measurement")
integration_test_table_model.do_update(1)
pub.unsubscribe(self.on_fail_update_no_data_package, "fail_update_measurement")
@pytest.mark.usefixtures("integration_test_table_model")
class TestGetterSetterMeasurement(SystemTestGetterSetterMethods):
"""Class for testing Measurement table getter and setter methods."""
__test__ = True
_package = {"measurement_type": "Some Kind"}
_record = RAMSTKMeasurementRecord
_tag = "measurement"
_test_id = 1
| 36.62037 | 88 | 0.686726 |
793fd67c6f9aa26ae717d7e944d871e174fb9186 | 3,800 | py | Python | tests/test_docker_swarm_action.py | rycus86/webhook-proxy | d8471c7fbced3cf5823b6ff1b85e056fe5011a0f | [
"MIT"
] | 18 | 2018-03-12T14:07:09.000Z | 2021-05-29T21:47:29.000Z | tests/test_docker_swarm_action.py | rycus86/webhook-proxy | d8471c7fbced3cf5823b6ff1b85e056fe5011a0f | [
"MIT"
] | 3 | 2018-07-07T14:39:54.000Z | 2019-04-19T21:06:39.000Z | tests/test_docker_swarm_action.py | rycus86/webhook-proxy | d8471c7fbced3cf5823b6ff1b85e056fe5011a0f | [
"MIT"
] | 6 | 2018-07-07T14:32:13.000Z | 2021-04-18T10:10:07.000Z | from unittest_helper import ActionTestBase
from actions.action_docker_swarm import DockerSwarmAction
class DockerSwarmActionTest(ActionTestBase):
def setUp(self):
self.mock_client = MockClient()
DockerSwarmAction.client = self.mock_client
def test_restart(self):
self._invoke({'docker-swarm': {'$restart': {'service_id': 'mock-service'}}})
self.verify('force_update', 1)
self.mock_client.service_attributes = {
'Spec': {'TaskTemplate': {'ForceUpdate': 12}}
}
self._invoke({'docker-swarm': {'$restart': {'service_id': 'fake'}}})
self.verify('force_update', 13)
def test_scale(self):
self._invoke({'docker-swarm': {'$scale': {'service_id': 'mocked', 'replicas': 12}}})
self.verify('mode', {'replicated': {'Replicas': 12}})
def test_update(self):
self._invoke({'docker-swarm': {'$update': {
'service_id': 'updating',
'image': 'test-image:1.0.y'
}}})
self.verify('image', 'test-image:1.0.y')
self._invoke({'docker-swarm': {'$update': {
'service_id': 'updating',
'container_labels': [{'test.label': 'test', 'mock.label': 'mock'}]
}}})
self.verify('container_labels',
[{'test.label': 'test', 'mock.label': 'mock'}])
self._invoke({'docker-swarm': {'$update': {
'service_id': 'updating',
'labels': [{'service.label': 'testing'}]
}}})
self.verify('labels', [{'service.label': 'testing'}])
self._invoke({'docker-swarm': {'$update': {
'service_id': 'updating',
'resources': {'mem_limit': 512}
}}})
self.verify('resources', {'mem_limit': 512})
def verify(self, key, value):
def assertPropertyEquals(data, prop):
self.assertIsNotNone(data)
if '.' in prop:
current, remainder = prop.split('.', 1)
assertPropertyEquals(data.get(current), remainder)
else:
self.assertEqual(data.get(prop), value,
msg='%s != %s for %s' % (data.get(prop), value, key))
assertPropertyEquals(self.mock_client.last_update, key)
class MockClient(object):
def __init__(self):
self.last_update = dict()
self.service_attributes = None
@property
def services(self):
return self
def get(self, *args, **kwargs):
details = Mock(attrs={
'ID': 'testId',
'Version': {'Index': 12},
'Spec': {
'Name': args[0],
'Mode': {'Replicated': {'Replicas': 1}},
'TaskTemplate': {
'ContainerSpec': {
'Image': 'alpine:mock'
},
'ForceUpdate': 0
}
}
},
reload=lambda: True,
update=self.update_service,
decode=lambda: details)
if self.service_attributes:
self._merge_attributes(details.attrs, self.service_attributes)
return details
def _merge_attributes(self, details, overwrite):
for key, value in overwrite.items():
if key not in details:
details[key] = value
elif isinstance(value, dict):
self._merge_attributes(details[key], overwrite[key])
else:
details[key] = value
def update_service(self, **kwargs):
self.last_update = kwargs
return True
class Mock(dict):
def __getattr__(self, name):
return self.get(name)
def update(self, *args, **kwargs):
return self['update'](*args, **kwargs)
| 30.4 | 92 | 0.526842 |
793fd715869eb0561b00dc2731effff0ef291dc4 | 11,163 | py | Python | tests/patterns/test_patterns_Ppar_send.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | tests/patterns/test_patterns_Ppar_send.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | tests/patterns/test_patterns_Ppar_send.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | import pytest
import uqbar.strings
import supriya.patterns
pattern_01 = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
amplitude=1.0,
duration=1.0,
frequency=supriya.patterns.Pseq([1001, 1002, 1003], 1),
)
]
)
pattern_02 = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
amplitude=1.0,
duration=1.0,
frequency=supriya.patterns.Pseq([1001, 1002], 1),
),
supriya.patterns.Pmono(
amplitude=1.0,
duration=0.75,
frequency=supriya.patterns.Pseq([2001, 2002, 2003], 1),
),
]
)
pattern_06 = supriya.patterns.Ppar(
[
supriya.patterns.Pgpar(
[
[
supriya.patterns.Pbind(
delta=10,
duration=10,
frequency=supriya.patterns.Pseq([1001, 1002, 1003]),
),
supriya.patterns.Pbind(
delta=12,
duration=10,
frequency=supriya.patterns.Pseq([2001, 2002, 2003]),
),
]
]
),
supriya.patterns.Pgpar(
[
[
supriya.patterns.Pbind(
delta=10,
duration=10,
frequency=supriya.patterns.Pseq([3001, 3002]),
),
supriya.patterns.Pbind(
delta=12,
duration=10,
frequency=supriya.patterns.Pseq([4001, 4002]),
),
]
]
),
]
)
def test_send_01():
events = pytest.helpers.setup_pattern_send(pattern_01, iterations=1)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_01, iterations=2)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1002,
uuid=UUID('B'),
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_01, iterations=3)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1002,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=1.0,
duration=1.0,
frequency=1003,
uuid=UUID('C'),
)
"""
)
def test_send_02():
events = pytest.helpers.setup_pattern_send(pattern_02, iterations=1)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_02, iterations=2)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2001,
is_stop=False,
uuid=UUID('B'),
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_02, iterations=3)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2001,
is_stop=False,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=0.25,
duration=0.75,
frequency=2002,
is_stop=False,
uuid=UUID('B'),
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_02, iterations=4)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2001,
is_stop=False,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=0.25,
duration=0.75,
frequency=2002,
is_stop=False,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=0.5,
duration=1.0,
frequency=1002,
uuid=UUID('C'),
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_02, iterations=5)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
amplitude=1.0,
delta=0.0,
duration=1.0,
frequency=1001,
uuid=UUID('A'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2001,
is_stop=False,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=0.25,
duration=0.75,
frequency=2002,
is_stop=False,
uuid=UUID('B'),
)
NoteEvent(
amplitude=1.0,
delta=0.5,
duration=1.0,
frequency=1002,
uuid=UUID('C'),
)
NoteEvent(
amplitude=1.0,
delta=0.75,
duration=0.75,
frequency=2003,
uuid=UUID('B'),
)
"""
)
def test_send_06():
events = pytest.helpers.setup_pattern_send(pattern_06, iterations=1)
# This is odd, but in practice you wouldn't stop on this event.
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
),
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_06, iterations=2)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=1001,
target_node=UUID('A'),
uuid=UUID('B'),
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
),
is_stop=True,
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_06, iterations=3)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=1001,
target_node=UUID('A'),
uuid=UUID('B'),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=2001,
target_node=UUID('A'),
uuid=UUID('C'),
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
),
is_stop=True,
)
"""
)
events = pytest.helpers.setup_pattern_send(pattern_06, iterations=4)
# This is odd, but in practice you wouldn't stop on this event.
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('A'),
),
),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=1001,
target_node=UUID('A'),
uuid=UUID('B'),
)
NoteEvent(
delta=0.0,
duration=10,
frequency=2001,
target_node=UUID('A'),
uuid=UUID('C'),
)
CompositeEvent(
events=(
GroupEvent(
add_action=AddAction.ADD_TO_TAIL,
uuid=UUID('D'),
),
),
)
CompositeEvent(
events=(
NullEvent(
delta=0.25,
),
GroupEvent(
is_stop=True,
uuid=UUID('A'),
),
),
is_stop=True,
)
"""
)
| 26.020979 | 76 | 0.430171 |
793fd9750b4af7e7b1a515937f9dd18adfa88fbb | 10,990 | py | Python | Boatwright_et_al.,2018/assembly_and_qc_scripts/identify_duplicates_fasta_files.py | BBarbazukLab/papers | fc77bcae17d475da99d758407be3ff7f9b298c3d | [
"MIT"
] | 3 | 2018-09-18T15:22:25.000Z | 2019-07-10T17:57:42.000Z | Boatwright_et_al.,2018/assembly_and_qc_scripts/identify_duplicates_fasta_files.py | BBarbazukLab/papers | fc77bcae17d475da99d758407be3ff7f9b298c3d | [
"MIT"
] | null | null | null | Boatwright_et_al.,2018/assembly_and_qc_scripts/identify_duplicates_fasta_files.py | BBarbazukLab/papers | fc77bcae17d475da99d758407be3ff7f9b298c3d | [
"MIT"
] | 4 | 2018-12-01T15:05:15.000Z | 2019-12-17T13:43:55.000Z | #!/usr/bin/env python
import cProfile
import argparse
import logging
from Bio import SeqIO
import collections
import re
import math
import os
import itertools
def getOptions():
"""Function to pull in command line arguments"""
parser = argparse.ArgumentParser(description='Tool for identifying duplicates and creating various useful output')
parser.add_argument('-i','--input',dest='fa', action='store', required=True, help='A list of fa file [Required]')
parser.add_argument('-o','--out', dest='out', action='store', required=True, help='Output file for counts in csv format [Required]')
parser.add_argument('--pixel', dest='pix', action='store', default=100, required=False, help='Number of pixels to consider a read as an optical duplicate [Default:100]')
parser.add_argument('-a', dest='append', action='store_true', help='This flag will cause the output dataset to be appended too.')
parser.add_argument('-t','--table', dest='table', action='store', required=False, help='Output table with the duplicate counts for each uniq sequence')
parser.add_argument('-f','--faOut', dest='faOut', action='store', required=False, help='Output a FASTA file optical duplicates are reduced')
parser.add_argument('-g','--log',dest='log',action='store',required=False, help='Create an error log')
args = parser.parse_args()
return(args)
def setLogger(fname,loglevel):
"""Function for handling error logging"""
logging.basicConfig(filename=fname, level=loglevel, format='%(asctime)s - %(levelname)s - %(message)s')
def readFASTA(fname):
"""Read a fasta file and store information into two different dictionaries.
The first is mySeqDict which has the sequence as the key and a list of
headers with that have that sequence as the values. The second is
myReadDict, which has read name as the key and coordinate infromation as
values."""
logging.info("Reading the FASTA file.")
mySeqDict = collections.defaultdict(list)
myReadDict = dict()
with open(fname,'r') as FA:
for record in SeqIO.parse(FA, 'fasta'):
# Create mySeqDict
mySeqDict[str(record.seq)].append(record.name)
# Parse Header and create myReadDict
match = parseHeader(record.name)
if len(match) == 4: # if there is no PE read information, append a 1
match.append(1)
myReadDict[record.name] = {'lane':match[0], 'tile':match[1],'coord':(match[2],match[3]),'read':match[4]}
logging.info("Finished reading the FASTA file.")
return(mySeqDict,myReadDict)
def parseHeader(faName):
"""Function to parse the FASTA header line. This does a simple regex to
pull out different parts of the header line. Of importance are:
[0] = Lane
[1] = tile
[2] = x-coord
[3] = y-coord
[4] = read number"""
match = re.search('.*:?([0-9]):([0-9]+):([0-9]+):([0-9]+).*\/?([1-2])*',faName)
matchList = filter(None,match.groups()) # Removes any empty strings, ie if there is no PE information
matchIntList = [int(x) for x in matchList]
return(matchIntList)
def checkOptical(myList, readDict, pix):
"""Given a list of FASTA headers, this function will check if reads are
within a certain 'args.pix' distance of each other. If reads are within
this distance, then they will be called optical duplicates."""
# Create a storage dictionary
myDict = dict()
# Create a list of sets, where each set contains headers that are optical duplicates.
listOfSets = identifySets(myList, readDict, pix)
# reduce the set list so that each set is a group of optical duplicates.
redSetList = reduceSet(listOfSets)
return(redSetList)
def identifySets(myList, readDict, pix):
"""This function steps through a list of headers and creates sets of those
that fall within the args.pix range. The resulting sets may overlap so
they need reduced."""
# Create list to store results
setList = list()
# Compare each header and create sets of headers that overlap.
for index, item1 in enumerate(myList):
# Grab coordinate information from readDict
item1Set = {item1}
lane1 = readDict[item1]['lane']
tile1 = readDict[item1]['tile']
coord1 = readDict[item1]['coord']
for item2 in myList[index+1:]:
# Grab coordinate information from readDict
lane2 = readDict[item2]['lane']
tile2 = readDict[item2]['tile']
coord2 = readDict[item2]['coord']
if lane1 == lane2 and tile1 == tile2:
eucDist = math.sqrt((coord1[0]-coord2[0])**2 + (coord1[1]-coord2[1])**2 )
if eucDist <= pix:
item1Set.add(item2)
setList.append(item1Set)
return(setList)
def reduceSet(setList):
"""Step through a list of sets and combine sets that overlap.
This will create a unique list of sets, where each set contains the headers
of reads that optical duplicates."""
setList2 = [setList[0]]
for item1 in setList[1:]:
inSetList2 = False
for item2 in setList2:
if item1 & item2: # If there is an intersection
item2 |= item1 # Combine sets and go to next item in setList
inSetList2 = True
break
if not inSetList2: # If I could not find any overlaps then append set to setList2
setList2.append(item1)
return(setList2)
def dupCounts(setList,pdup_cnt,opdup_cnt,opdupset_cnt,flagFA,dropOp):
"""This function calculates various counts and returns a list of counts."""
dupset_num = 0
for item in setList:
if len(item) > 1:
opdup_cnt += len(item)
opdupset_cnt += 1
dupset_num += 1
if flagFA:
# If the user asks for FASTA output, store a set of optical
# duplicate headers. I will keep one optical duplicate from
# each set, to create a reduced list and not completely remove
# reads that are optical duplicates.
myList = list(item)
dropOp |= set(myList[1:])
pdup_cnt += len(setList) - dupset_num
return(pdup_cnt,opdup_cnt,opdupset_cnt,dropOp)
def writeOutput(handle, myList):
"""Function to write output from a list to a csv"""
handle.write(','.join(str(x) for x in myList) + "\n")
def writeCount(args,myOutHeader,myOut):
"""Write a summary table of counts. If the append flag is added, check if
the output file exists and try to append to it."""
if args.append:
if os.path.exists(args.out):
try:
with open(args.out, 'a') as handle:
writeOutput(handle,myOut)
except:
logging.error("Could not open output file, it must be busy.")
else:
try:
with open(args.out, 'w') as handle:
writeOutput(handle,myOutHeader)
writeOutput(handle,myOut)
except:
logging.error("Could not open output file, do I have write permission?")
else:
try:
with open(args.out, 'w') as handle:
writeOutput(handle,myOutHeader)
writeOutput(handle,myOut)
except:
logging.error("Could not open output file, do I have write permission?")
def writeTable(table, mySeqDict):
"""Write a table with how many time each sequence is duplicated."""
myDict = dict()
for seq in mySeqDict:
myDict[seq] = len(mySeqDict[seq])
with open(table,'w') as handle:
for item in sorted(myDict,key=myDict.get,reverse=True):
writeOutput(handle, [myDict[item],item])
def writeFAout(faIn,faOut,dropOp):
"""Output a FASTA file that reduces the optical duplicates, so that only a
single read is left from an optical duplicate set."""
with open(faIn, 'r') as FA:
with open(faOut, 'w') as FO:
for record in SeqIO.parse(FA, 'fasta'):
if not {record.name} & dropOp:
SeqIO.write(record,FO,"fasta")
def main():
args = getOptions()
if args.log:
setLogger(args.log,logging.INFO)
## READ IN DATA ##
# Read in the FASTA file and return a dictionary with SEQ as the key and
# list(HEADERS) as the values. Also return the total number of reads.
logging.info("Starting to read in FASTA file and creating dictionary.")
mySeq, myRead = readFASTA(args.fa)
logging.info("Finished reading FASTA file.")
# Simple Counts
total_read = len(myRead) # total number of reads
uniq_seq = len(mySeq) # number of uniq sequences
# Initialize counts
uniq_read = 0
pdupCnt = 0
opdupCnt = 0
opdupSetCnt = 0
# Initialize a set to store headers of reads I may want to drop from my
# FASTA file
dropOp = set()
## Loop Through Data and identify duplicates ##
# Loop through each sequence in the dictionary and examine it for
# duplicates and optical duplicates.
logging.info("Starting to identify duplicates.")
for key in mySeq:
# Copy the dictionary value using list() so that I don't modify the
# dictionary value later in the script.
myValue = list(mySeq[str(key)])
myLength = len(myValue)
if myLength == 1:
# If there is only one header for a given sequence, then this is a unique read.
uniq_read += 1
elif myLength > 1:
# If there are more than one header for a given, then these are reads.
total_dup = myLength
# Identify if the duplicates are optical dups or something else.
readSet = checkOptical(myValue, myRead, args.pix)
(pdupCnt, opdupCnt, opdupSetCnt, dropOp) = dupCounts(readSet,pdupCnt, opdupCnt, opdupSetCnt, args.faOut, dropOp)
else:
logging.error("There is something wrong with the fasta dictionary at key %s" % key)
logging.info("Finished identifying duplicates.")
## Write desired output ##
per_uniq_read = round(float(uniq_read)/total_read * 100, 2)
myOutHeader= ["File_Name","Total_Reads", "Num_Unique_Reads", "Per_Uniq_Reads", "Num_Unique_Seq", "Num_PCR_Dups", "Num_OP_Dups", "Num_OP_Dup_Sets"]
myOut = [args.fa,total_read, uniq_read, per_uniq_read, uniq_seq, pdupCnt, opdupCnt, opdupSetCnt]
# Test if we want to append the file, if so handle the headers correctly.
logging.info("Writing summary counts table.")
writeCount(args,myOutHeader,myOut)
if args.table:
logging.info("Writing sequence counts table.")
writeTable(args.table,mySeq)
if args.faOut:
logging.info("Writing reduced fasta file.")
writeFAout(args.faOut,dropOp)
logging.info("Script Finished.")
if __name__ == '__main__':
main()
| 40.553506 | 173 | 0.638217 |
793fd9c23598e3aa3a6e0132ea296c3aa5ae4f6f | 3,162 | py | Python | MaxLatestVersion/session.py | getlove555/getbotline | 639e157495849e12ac7dd4bae6012841cf511892 | [
"MIT"
] | 6 | 2020-05-23T21:47:52.000Z | 2021-03-30T00:19:08.000Z | MaxLatestVersion/session.py | getlove555/getbotline | 639e157495849e12ac7dd4bae6012841cf511892 | [
"MIT"
] | 4 | 2020-08-01T10:10:14.000Z | 2021-01-03T00:55:05.000Z | MaxLatestVersion/session.py | LOUREN03/lourenelle | 5448a8634d438f35df98e43ad135f232cf74d2b1 | [
"MIT"
] | 20 | 2020-05-11T08:53:30.000Z | 2021-07-16T09:50:20.000Z | # -*- coding: utf-8 -*-
from .transport import THttpClient
from thrift.protocol import TCompactProtocol
from TEAM_BOT_MAX import AuthService, TalkService, ChannelService, CallService, SquareService, ShopService, LiffService
class Session:
def __init__(self, url, headers, path='', customThrift=False):
self.host = url + path
self.headers = headers
self.customThrift = customThrift
def Auth(self, isopen=True):
self.transport = THttpClient(self.host, customThrift=self.customThrift)
self.transport.setCustomHeaders(self.headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._auth = AuthService.Client(self.protocol)
if isopen:
self.transport.open()
return self._auth
def Talk(self, isopen=True):
self.transport = THttpClient(self.host, customThrift=self.customThrift)
self.transport.setCustomHeaders(self.headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._talk = TalkService.Client(self.protocol)
if isopen:
self.transport.open()
return self._talk
def Channel(self, isopen=True):
self.transport = THttpClient(self.host, customThrift=self.customThrift)
self.transport.setCustomHeaders(self.headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._channel = ChannelService.Client(self.protocol)
if isopen:
self.transport.open()
return self._channel
def Call(self, isopen=True):
self.transport = THttpClient(self.host, customThrift=self.customThrift)
self.transport.setCustomHeaders(self.headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._call = CallService.Client(self.protocol)
if isopen:
self.transport.open()
return self._call
def Square(self, isopen=True):
self.transport = THttpClient(self.host, customThrift=self.customThrift)
self.transport.setCustomHeaders(self.headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._square = SquareService.Client(self.protocol)
if isopen:
self.transport.open()
return self._square
def Liff(self, isopen=True):
self.transport = THttpClient(self.host, customThrift=self.customThrift)
self.transport.setCustomHeaders(self.headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._liff = LiffService.Client(self.protocol)
if isopen:
self.transport.open()
return self._liff
def Shop(self, isopen=True):
self.transport = THttpClient(self.host, customThrift=self.customThrift)
self.transport.setCustomHeaders(self.headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._shop = ShopService.Client(self.protocol)
if isopen:
self.transport.open()
return self._shop | 33.284211 | 119 | 0.66888 |
793fdae2f6f13efbacdcab8c4d2ddb3686884ff7 | 847 | py | Python | examples/mesh_slice.py | almarklein/visvis2 | 728a4b7434879d3606f32836eda3971ed73905f8 | [
"BSD-2-Clause"
] | 7 | 2019-12-20T14:15:53.000Z | 2020-05-03T17:43:02.000Z | examples/mesh_slice.py | almarklein/visvis2 | 728a4b7434879d3606f32836eda3971ed73905f8 | [
"BSD-2-Clause"
] | 30 | 2020-02-10T08:59:30.000Z | 2020-06-16T21:21:33.000Z | examples/mesh_slice.py | almarklein/visvis2 | 728a4b7434879d3606f32836eda3971ed73905f8 | [
"BSD-2-Clause"
] | null | null | null | """
Example showing off the mesh slice material.
"""
from wgpu.gui.auto import WgpuCanvas, run
import pygfx as gfx
canvas = WgpuCanvas()
renderer = gfx.renderers.WgpuRenderer(canvas)
scene = gfx.Scene()
geometry = gfx.torus_knot_geometry(1, 0.3, 128, 16)
material1 = gfx.MeshPhongMaterial(color=(0.5, 0.5, 0.5, 1.0))
material2 = gfx.MeshSliceMaterial(thickness=8, color=(1, 1, 0, 1), plane=(0, 0, 1, 0))
obj1 = gfx.Mesh(geometry, material1)
obj2 = gfx.Mesh(geometry, material2)
scene.add(obj1)
scene.add(obj2)
camera = gfx.PerspectiveCamera(70, 2)
camera.position.z = 4
def animate():
dist = material2.plane[3]
dist += 0.02
if dist > 1:
dist = -1.5
material2.plane = 1, 0, 1, dist
renderer.render(scene, camera)
canvas.request_draw()
if __name__ == "__main__":
canvas.request_draw(animate)
run()
| 21.175 | 86 | 0.678867 |
793fdb89d0677662c004f6e9880c893623cc6304 | 13,728 | py | Python | python/paddle/tensor/__init__.py | IcarusWizard/Paddle | c07b632cd4c7faac3150e8454d5800643faff4c9 | [
"Apache-2.0"
] | 1 | 2021-12-27T02:39:31.000Z | 2021-12-27T02:39:31.000Z | python/paddle/tensor/__init__.py | IcarusWizard/Paddle | c07b632cd4c7faac3150e8454d5800643faff4c9 | [
"Apache-2.0"
] | null | null | null | python/paddle/tensor/__init__.py | IcarusWizard/Paddle | c07b632cd4c7faac3150e8454d5800643faff4c9 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .attribute import rank # noqa: F401
from .attribute import shape # noqa: F401
from .attribute import real # noqa: F401
from .attribute import imag # noqa: F401
from .creation import to_tensor # noqa: F401
from .creation import diag # noqa: F401
from .creation import diagflat # noqa: F401
from .creation import eye # noqa: F401
from .creation import linspace # noqa: F401
from .creation import ones # noqa: F401
from .creation import ones_like # noqa: F401
from .creation import zeros # noqa: F401
from .creation import zeros_like # noqa: F401
from .creation import arange # noqa: F401
from .creation import full # noqa: F401
from .creation import full_like # noqa: F401
from .creation import triu # noqa: F401
from .creation import tril # noqa: F401
from .creation import meshgrid # noqa: F401
from .creation import empty # noqa: F401
from .creation import empty_like # noqa: F401
from .linalg import matmul # noqa: F401
from .linalg import dot # noqa: F401
from .linalg import norm # noqa: F401
from .linalg import cond # noqa: F401
from .linalg import transpose # noqa: F401
from .linalg import dist # noqa: F401
from .linalg import t # noqa: F401
from .linalg import cross # noqa: F401
from .linalg import cholesky # noqa: F401
from .linalg import bmm # noqa: F401
from .linalg import histogram # noqa: F401
from .linalg import bincount # noqa: F401
from .linalg import mv # noqa: F401
from .linalg import eig # noqa: F401
from .linalg import matrix_power # noqa: F401
from .linalg import qr # noqa: F401
from .linalg import eigvals # noqa: F401
from .linalg import multi_dot # noqa: F401
from .linalg import svd # noqa: F401
from .linalg import eigvalsh # noqa: F401
from .linalg import eigh # noqa: F401
from .linalg import pinv # noqa: F401
from .linalg import solve # noqa: F401
from .logic import equal # noqa: F401
from .logic import greater_equal # noqa: F401
from .logic import greater_than # noqa: F401
from .logic import is_empty # noqa: F401
from .logic import less_equal # noqa: F401
from .logic import less_than # noqa: F401
from .logic import logical_and # noqa: F401
from .logic import logical_not # noqa: F401
from .logic import logical_or # noqa: F401
from .logic import logical_xor # noqa: F401
from .logic import bitwise_and # noqa: F401
from .logic import bitwise_or # noqa: F401
from .logic import bitwise_xor # noqa: F401
from .logic import bitwise_not # noqa: F401
from .logic import not_equal # noqa: F401
from .logic import allclose # noqa: F401
from .logic import isclose # noqa: F401
from .logic import equal_all # noqa: F401
from .logic import is_tensor # noqa: F401
from .manipulation import cast # noqa: F401
from .manipulation import concat # noqa: F401
from .manipulation import expand # noqa: F401
from .manipulation import broadcast_to # noqa: F401
from .manipulation import broadcast_tensors # noqa: F401
from .manipulation import expand_as # noqa: F401
from .manipulation import tile # noqa: F401
from .manipulation import flatten # noqa: F401
from .manipulation import flatten_ # noqa: F401
from .manipulation import gather # noqa: F401
from .manipulation import gather_nd # noqa: F401
from .manipulation import reshape # noqa: F401
from .manipulation import reshape_ # noqa: F401
from .manipulation import flip as reverse # noqa: F401
from .manipulation import scatter # noqa: F401
from .manipulation import scatter_ # noqa: F401
from .manipulation import scatter_nd_add # noqa: F401
from .manipulation import scatter_nd # noqa: F401
from .manipulation import shard_index # noqa: F401
from .manipulation import slice # noqa: F401
from .manipulation import split # noqa: F401
from .manipulation import squeeze # noqa: F401
from .manipulation import squeeze_ # noqa: F401
from .manipulation import stack # noqa: F401
from .manipulation import strided_slice # noqa: F401
from .manipulation import unique # noqa: F401
from .manipulation import unique_consecutive # noqa: F401
from .manipulation import unsqueeze # noqa: F401
from .manipulation import unsqueeze_ # noqa: F401
from .manipulation import unstack # noqa: F401
from .manipulation import flip # noqa: F401
from .manipulation import rot90 # noqa: F401
from .manipulation import unbind # noqa: F401
from .manipulation import roll # noqa: F401
from .manipulation import chunk # noqa: F401
from .manipulation import tensordot # noqa: F401
from .math import abs # noqa: F401
from .math import acos # noqa: F401
from .math import asin # noqa: F401
from .math import atan # noqa: F401
from .math import ceil # noqa: F401
from .math import ceil_ # noqa: F401
from .math import cos # noqa: F401
from .math import tan # noqa: F401
from .math import cosh # noqa: F401
from .math import cumsum # noqa: F401
from .math import cumprod # noqa: F401
from .math import exp # noqa: F401
from .math import exp_ # noqa: F401
from .math import expm1 # noqa: F401
from .math import floor # noqa: F401
from .math import floor_ # noqa: F401
from .math import increment # noqa: F401
from .math import log # noqa: F401
from .math import multiplex # noqa: F401
from .math import pow # noqa: F401
from .math import reciprocal # noqa: F401
from .math import reciprocal_ # noqa: F401
from .math import round # noqa: F401
from .math import round_ # noqa: F401
from .math import rsqrt # noqa: F401
from .math import rsqrt_ # noqa: F401
from .math import scale # noqa: F401
from .math import scale_ # noqa: F401
from .math import sign # noqa: F401
from .math import sin # noqa: F401
from .math import sinh # noqa: F401
from .math import sqrt # noqa: F401
from .math import sqrt_ # noqa: F401
from .math import square # noqa: F401
from .math import stanh # noqa: F401
from .math import sum # noqa: F401
from .math import tanh # noqa: F401
from .math import tanh_ # noqa: F401
from .math import add_n # noqa: F401
from .math import max # noqa: F401
from .math import maximum # noqa: F401
from .math import min # noqa: F401
from .math import minimum # noqa: F401
from .math import mm # noqa: F401
from .math import divide # noqa: F401
from .math import floor_divide # noqa: F401
from .math import remainder # noqa: F401
from .math import mod # noqa: F401
from .math import floor_mod # noqa: F401
from .math import multiply # noqa: F401
from .math import add # noqa: F401
from .math import add_ # noqa: F401
from .math import subtract # noqa: F401
from .math import subtract_ # noqa: F401
from .math import atan2 # noqa: F401
from .math import logsumexp # noqa: F401
from .math import inverse # noqa: F401
from .math import log2 # noqa: F401
from .math import log10 # noqa: F401
from .math import log1p # noqa: F401
from .math import erf # noqa: F401
from .math import addmm # noqa: F401
from .math import clip # noqa: F401
from .math import clip_ # noqa: F401
from .math import trace # noqa: F401
from .math import kron # noqa: F401
from .math import isfinite # noqa: F401
from .math import isinf # noqa: F401
from .math import isnan # noqa: F401
from .math import prod # noqa: F401
from .math import all # noqa: F401
from .math import any # noqa: F401
from .math import broadcast_shape # noqa: F401
from .math import conj # noqa: F401
from .math import trunc # noqa: F401
from .math import digamma # noqa: F401
from .math import neg # noqa: F401
from .math import lgamma # noqa: F401
from .math import diagonal # noqa: F401
from .math import rad2deg # noqa: F401
from .math import deg2rad # noqa: F401
from .math import diff # noqa: F401
from .math import angle # noqa: F401
from .random import multinomial # noqa: F401
from .random import standard_normal # noqa: F401
from .random import normal # noqa: F401
from .random import uniform # noqa: F401
from .random import uniform_ # noqa: F401
from .random import randn # noqa: F401
from .random import rand # noqa: F401
from .random import randint # noqa: F401
from .random import randint_like # noqa: F401
from .random import randperm # noqa: F401
from .search import argmax # noqa: F401
from .search import argmin # noqa: F401
from .search import argsort # noqa: F401
from .search import searchsorted # noqa: F401
from .search import topk # noqa: F401
from .search import where # noqa: F401
from .search import index_select # noqa: F401
from .search import nonzero # noqa: F401
from .search import sort # noqa: F401
from .search import index_sample # noqa: F401
from .search import masked_select # noqa: F401
from .stat import mean # noqa: F401
from .stat import std # noqa: F401
from .stat import var # noqa: F401
from .stat import numel # noqa: F401
from .stat import median # noqa: F401
from .to_string import set_printoptions # noqa: F401
from .array import array_length # noqa: F401
from .array import array_read # noqa: F401
from .array import array_write # noqa: F401
from .array import create_array # noqa: F401
from .einsum import einsum # noqa: F401
#this list used in math_op_patch.py for _binary_creator_
tensor_method_func = [ #noqa
'matmul',
'dot',
'norm',
'cond',
'transpose',
'dist',
't',
'cross',
'cholesky',
'bmm',
'histogram',
'bincount',
'mv',
'matrix_power',
'qr',
'eigvals',
'eigvalsh',
'abs',
'acos',
'all',
'any',
'asin',
'atan',
'ceil',
'ceil_',
'cos',
'cosh',
'cumsum',
'cumprod',
'exp',
'exp_',
'floor',
'floor_',
'increment',
'log',
'log2',
'log10',
'logsumexp',
'multiplex',
'pow',
'prod',
'reciprocal',
'reciprocal_',
'round',
'round_',
'rsqrt',
'rsqrt_',
'scale',
'scale_',
'sign',
'sin',
'sinh',
'sqrt',
'sqrt_',
'square',
'stanh',
'sum',
'tanh',
'tanh_',
'add_n',
'max',
'maximum',
'min',
'minimum',
'mm',
'divide',
'floor_divide',
'remainder',
'mod',
'floor_mod',
'multiply',
'add',
'add_',
'subtract',
'subtract_',
'atan',
'logsumexp',
'inverse',
'log1p',
'erf',
'addmm',
'clip',
'clip_',
'trace',
'kron',
'isfinite',
'isinf',
'isnan',
'broadcast_shape',
'conj',
'neg',
'lgamma',
'equal',
'equal_all',
'greater_equal',
'greater_than',
'is_empty',
'less_equal',
'less_than',
'logical_and',
'logical_not',
'logical_or',
'logical_xor',
'not_equal',
'allclose',
'isclose',
'is_tensor',
'cast',
'concat',
'expand',
'broadcast_to',
'expand_as',
'flatten',
'flatten_',
'gather',
'gather_nd',
'reshape',
'reshape_',
'reverse',
'scatter',
'scatter_',
'scatter_nd_add',
'scatter_nd',
'shard_index',
'slice',
'split',
'chunk',
'tensordot',
'squeeze',
'squeeze_',
'stack',
'strided_slice',
'transpose',
'unique',
'unique_consecutive',
'unsqueeze',
'unsqueeze_',
'unstack',
'flip',
'rot90',
'unbind',
'roll',
'tile',
'argmax',
'argmin',
'argsort',
'masked_select',
'topk',
'where',
'index_select',
'nonzero',
'sort',
'index_sample',
'mean',
'std',
'var',
'numel',
'median',
'rank',
'shape',
'real',
'imag',
'digamma',
'diagonal',
'trunc',
'bitwise_and',
'bitwise_or',
'bitwise_xor',
'bitwise_not',
'broadcast_tensors',
'eig',
'uniform_',
'multi_dot',
'solve',
'triangular_solve',
'diff',
'angle',
]
#this list used in math_op_patch.py for magic_method bind
magic_method_func = [
('__and__', 'bitwise_and'),
('__or__', 'bitwise_or'),
('__xor__', 'bitwise_xor'),
('__invert__', 'bitwise_not'),
]
| 32.608076 | 74 | 0.617424 |
793fdc0cd101b7e4abb6d5b783fbac54f4a95683 | 1,762 | py | Python | setup.py | harper25/pipenv-pipes | ece06e3b9260d70ea7036811565cf862ee53f777 | [
"MIT"
] | 128 | 2018-05-03T07:08:15.000Z | 2022-02-01T17:26:29.000Z | setup.py | harper25/pipenv-pipes | ece06e3b9260d70ea7036811565cf862ee53f777 | [
"MIT"
] | 27 | 2018-05-03T00:38:02.000Z | 2021-06-01T22:15:42.000Z | setup.py | harper25/pipenv-pipes | ece06e3b9260d70ea7036811565cf862ee53f777 | [
"MIT"
] | 11 | 2018-05-03T07:40:35.000Z | 2021-12-13T05:24:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import os
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0', # Required
'colorama>=0.3', # Note required, but nice as default
]
setup_requirements = [
'pytest-runner'
]
test_requirements = [
'click',
'pytest',
'pytest-lazy-fixture',
'pytest-cov',
]
setup(
name='pipenv_pipes',
license="MIT license",
keywords='pipenv_pipes',
url='https://github.com/gtalarico/pipenv-pipes',
version='0.7.1',
author="Gui Talarico",
author_email='[email protected]',
long_description=readme + '\n\n' + history,
description="CLI Tool to help manage Pipenv Enviroments "
"and corresponding Project Directories.",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
entry_points={
'console_scripts': [
'pipes=pipenv_pipes.cli:pipes',
],
},
install_requires=requirements,
include_package_data=True,
packages=find_packages(
include=[
'pipenv_pipes',
'pipenv_pipes.picker'
]),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
zip_safe=False,
)
| 25.536232 | 61 | 0.609535 |
793fdee07a839e5c59c136fb3e8cdb39b3823a48 | 2,679 | py | Python | SLpackage/private/pacbio/pythonpkgs/kineticstools/lib/python2.7/site-packages/kineticsTools/BasicLdaEnricher.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 5 | 2022-02-20T07:10:02.000Z | 2022-03-18T17:47:53.000Z | SLpackage/private/pacbio/pythonpkgs/kineticstools/lib/python2.7/site-packages/kineticsTools/BasicLdaEnricher.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null | SLpackage/private/pacbio/pythonpkgs/kineticstools/lib/python2.7/site-packages/kineticsTools/BasicLdaEnricher.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
# Basic LDA Enricher class
from math import sqrt
import math
import scipy.stats as s
import array as a
from scipy.optimize import fminbound
from scipy.special import gammaln as gamln
from numpy import log, pi, log10, e, log1p, exp
import numpy as np
from .MultiSiteCommon import MultiSiteCommon
from .MixtureEstimationMethods import MixtureEstimationMethods
class BasicLdaEnricher(MultiSiteCommon):
def __init__(self, gbmModel, sequence, rawKinetics, identifyFlag, modsToCall=['H', 'J', 'K']):
MultiSiteCommon.__init__(self, gbmModel, sequence, rawKinetics)
# FIXME: For debugging LDA, load in parameters for forward and reverse strands:
self.fwd_model = np.genfromtxt("/home/UNIXHOME/obanerjee/nat_fwd_model_expanded.csv", delimiter=',')
self.rev_model = np.genfromtxt("/home/UNIXHOME/obanerjee/nat_rev_model_expanded.csv", delimiter=',')
if identifyFlag:
if 'K' in modsToCall:
self.fwd_model = np.genfromtxt("/home/UNIXHOME/obanerjee/tet_fwd_model_expanded.csv", delimiter=',')
self.rev_model = np.genfromtxt("/home/UNIXHOME/obanerjee/tet_rev_model_expanded.csv", delimiter=',')
# write a method to take perSiteResults dictionary in and add a column Ca5C
def useLDAmodel(self, kinetics, pos, model, up, down):
""" Test out LDA model """
res = np.zeros((up + down + 1, 5))
ind = 0
# range from -down to +up
for offset in range(-down, (up + 1)):
a = pos + offset
# res[ind,] = [kinetics[a]["tMean"], kinetics[a]["modelPrediction"], kinetics[a]["tErr"], kinetics[a]["coverage"]]
res[ind, ] = [kinetics[a]["tMean"], kinetics[a]["modelPrediction"], kinetics[a]["tErr"], kinetics[a]["coverage"], np.exp(kinetics[a]["tStatistic"]) - 0.01]
ind += 1
apply = np.hstack(np.log(res + 0.01).transpose())
tmp = sum(np.multiply(apply, model[1:])) + model[0]
return tmp
def callLDAstrand(self, kinetics, strand, model, up, down):
tmp = [d for d in kinetics if d["strand"] == strand]
tmp.sort(key=lambda x: x["tpl"])
L = len(tmp)
for pos in range(down, (L - up)):
if tmp[pos]["base"] == 'C':
tmp[pos]["Ca5C"] = self.useLDAmodel(tmp, pos, model, up, down)
return tmp
def callEnricherFunction(self, kinetics, up=10, down=10):
fwd = self.callLDAstrand(kinetics, 0, self.fwd_model, up, down)
rev = self.callLDAstrand(kinetics, 1, self.rev_model, up, down)
res = fwd + rev
res.sort(key=lambda x: x["tpl"])
return res
| 38.271429 | 167 | 0.639418 |
793fdf552bde99568787bb29a29c9af30933f25e | 1,124 | py | Python | ecommerce/templatetags/general_tags.py | diptimoymandal/django-ecommerce | eb30ee51561feec0a614d964e3b7b1a9ceb2af4a | [
"MIT"
] | null | null | null | ecommerce/templatetags/general_tags.py | diptimoymandal/django-ecommerce | eb30ee51561feec0a614d964e3b7b1a9ceb2af4a | [
"MIT"
] | null | null | null | ecommerce/templatetags/general_tags.py | diptimoymandal/django-ecommerce | eb30ee51561feec0a614d964e3b7b1a9ceb2af4a | [
"MIT"
] | null | null | null | from django import template
from django.utils.safestring import mark_safe
from django.template.exceptions import TemplateSyntaxError
register = template.Library()
company = {
'name': 'Nawoka',
'address': 'Lille',
'domain': 'nawoka.fr',
'email': '[email protected]',
'telephone': '',
'services': [
],
'available_days': [
],
'shipping_detay': '',
'shipping_company': 'EMA',
'return_delay': '14 jours'
}
@register.simple_tag
def company_details(key, urlize=False):
try:
value = company[key]
except KeyError:
available_keys = company.keys()
raise TemplateSyntaxError(('Could not get the following key "%s".'
' Available keys are %s' % (key, ', '.join(available_keys))))
else:
if key == 'domain':
value = f"https://{value}/"
if key == 'email' and urlize:
value = mark_safe(f"<a href='mailto:{value}'>{value}</a>")
if key == 'domain' and urlize == True:
value = mark_safe(f"<a href='{value}'>{value}</a>")
return value
| 26.139535 | 97 | 0.570285 |
793fe02ba87907e731140ed462f00dc1b12570c2 | 848 | py | Python | tests/test_op.py | nmandery/h3ronpy | 58602a294bbb916f16c2080fb13aef0e5bd569a2 | [
"MIT"
] | 9 | 2021-11-02T00:41:28.000Z | 2022-03-10T20:08:16.000Z | tests/test_op.py | nmandery/h3ronpy | 58602a294bbb916f16c2080fb13aef0e5bd569a2 | [
"MIT"
] | 4 | 2021-09-01T16:16:27.000Z | 2022-02-17T11:08:53.000Z | tests/test_op.py | nmandery/h3ronpy | 58602a294bbb916f16c2080fb13aef0e5bd569a2 | [
"MIT"
] | null | null | null | import h3.api.numpy_int as h3
import numpy as np
from h3ronpy.op import kring_distances_agg, kring_distances
def test_kring_distances_agg():
h3indexes = np.array([h3.geo_to_h3(10.2, 45.5, 8), h3.geo_to_h3(10.3, 45.1, 8)], dtype=np.uint64)
k_max = 4
df = kring_distances_agg(h3indexes, k_max, aggregation_method='min')
assert len(df) > 100
assert df['k'].min() == 0
assert df['k'].max() == k_max
assert len(np.unique(df["h3index"])) == len(df)
def test_kring_distances():
h3indexes = np.array([h3.geo_to_h3(10.2, 45.5, 8), h3.geo_to_h3(10.3, 45.1, 8)], dtype=np.uint64)
k_max = 4
k_min = 2
df = kring_distances(h3indexes, 4, k_min=k_min)
assert len(df) > 100
assert df['ring_k'].min() == k_min
assert df['ring_k'].max() == k_max
assert 'h3index' in df
assert 'ring_h3index' in df
| 31.407407 | 101 | 0.654481 |
793fe16238699f0330f6f43aa51bb6283b7bb71d | 2,800 | py | Python | daemon/wakeserver/network.py | opiopan/wakeserver | 31ecba34dbe62c9db4745b007996e5bf662e08b7 | [
"MIT"
] | 1 | 2019-05-22T06:58:19.000Z | 2019-05-22T06:58:19.000Z | daemon/wakeserver/network.py | opiopan/wakeserver | 31ecba34dbe62c9db4745b007996e5bf662e08b7 | [
"MIT"
] | null | null | null | daemon/wakeserver/network.py | opiopan/wakeserver | 31ecba34dbe62c9db4745b007996e5bf662e08b7 | [
"MIT"
] | 1 | 2017-06-02T02:44:39.000Z | 2017-06-02T02:44:39.000Z | import os
import sys
import time
import json
import socket
import subprocess
import requests
import monitoring
MASTER_SERVICE = '_wakeserver._tcp'
SLAVE_SERVICE = '_wakeserver_slave._tcp'
LISTSERVICE = '/var/www/wakeserver/bin/listservice'
HOSTNAME = socket.gethostname() + '.local'
MASTER_PORT = ':8080'
SLAVE_PORT = ':8081'
HOST_KEY = 'host'
SERVERS_KEY = 'servers'
NAME_KEY = 'name'
ISON_KEY = 'isOn'
HTTPTIMEOUT = 10
isMaster = True
remotes = []
def applyRemote(data):
global remotes
if HOST_KEY in data:
newhost = data[HOST_KEY]
needToAppend = True
for host in remotes:
if newhost == host:
needToAppend = False
break
if needToAppend:
print 'NETWORK: new remote: {}'.format(newhost)
remotes.append(newhost)
else:
return None
if SERVERS_KEY in data:
print 'NETWORK: apply data from: {}'.format(newhost)
for server in data[SERVERS_KEY]:
name = server[NAME_KEY] if NAME_KEY in server else None
status = server[ISON_KEY] if ISON_KEY in server else None
if monitoring.monitor:
monitoring.monitor.setStatus(name, status)
return makeSyncData()
def makeSyncData(server = None):
global isMaster
data = {HOST_KEY: HOSTNAME + (MASTER_PORT if isMaster else SLAVE_PORT)}
servers = [server] if server else monitoring.monitor.servers
if not isMaster:
hosts = []
for server in servers:
sdata = {NAME_KEY: server['name'],
ISON_KEY: server['status'] == 'on'}
hosts.append(sdata)
data[SERVERS_KEY] = hosts
return data
def syncRemote(server = None):
global remotes
global isMaster
body = makeSyncData(server)
for remote in remotes:
try:
url = 'http://' + remote + '/remote'
print 'NETWORK: synchronizing with {0}'.format(remote)
resp = requests.post(url, json = body, timeout = HTTPTIMEOUT)
if resp.status_code == requests.codes.ok and isMaster:
applyRemote(resp.json())
except:
print 'NETWORK: error while accessing to {0}'.format(remote)
def initNetwork(ismaster):
global remotes
global isMaster
isMaster = ismaster
proc = subprocess.Popen([LISTSERVICE,
SLAVE_SERVICE if isMaster else MASTER_SERVICE],
stdout = subprocess.PIPE)
while proc:
line = proc.stdout.readline()
if len(line) == 0:
proc.wait()
break
remotes.append(line[:-1])
print 'NETWORK: detected {0} remotes:'.format(len(remotes))
for name in remotes:
print ' {0}'.format(name)
| 27.45098 | 76 | 0.604286 |
793fe1f40bc61c6b1598840561520ba04c056031 | 2,068 | py | Python | KaiRoS/utils.py | rentruewang/kairos | e9fa82d6ba51dcd9bb2ff44fe63c81f9c616a78a | [
"MIT"
] | 1 | 2021-04-08T14:35:21.000Z | 2021-04-08T14:35:21.000Z | KaiRoS/utils.py | rentruewang/KaiRoS | e9fa82d6ba51dcd9bb2ff44fe63c81f9c616a78a | [
"MIT"
] | null | null | null | KaiRoS/utils.py | rentruewang/KaiRoS | e9fa82d6ba51dcd9bb2ff44fe63c81f9c616a78a | [
"MIT"
] | null | null | null | import os
import sys
import termios
import tty
from datetime import datetime, timedelta
from os import path
import requests
from requests.exceptions import ConnectionError
def print_help():
print(
"""
usage: kairos [h] [n] [f]
optional arguments:
h, -h, help, --help
show this help message and exit
n, -n, now, --now
Displays the current weather
f, -f, fore, --fore, forecast, --forecast
Displays the weather forecast
"""
)
def handle_connectionerror():
print("ConnectionError :: please first connect to the Internet!")
exit()
# adapted from https://github.com/recantha/EduKit3-RC-Keyboard/blob/master/rc_keyboard.py
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def clear_scr():
os.system("clear")
def println():
print(flush=True)
def wind_degrees(deg):
return int((8 * (deg + 360 / 16) / 360) % 8)
def time_from_now(time):
from_now = datetime.strptime(time, "%Y-%m-%d %H:%M:%S") - datetime.now()
days = from_now.days
hours = from_now.seconds // (60 * 60)
minutes = (from_now.seconds % (60 * 60)) // 60
seconds = from_now.seconds % 60
dy = "day" if days <= 1 else "days"
hr = "hour" if hours <= 1 else "hours"
mn = "minute" if minutes <= 1 else "minutes"
sc = "second" if seconds <= 1 else "seconds"
return "{D} {dy}, {H} {hr}, {M} {mn}, and {S} {sc} from now".format(
D=from_now.days, H=hours, M=minutes, S=seconds, dy=dy, hr=hr, mn=mn, sc=sc
)
def from_future(time):
from_now = datetime.strptime(time, "%Y-%m-%d %H:%M:%S") - datetime.now()
days = from_now.days
hours = from_now.seconds // (60 * 60)
minutes = (from_now.seconds % (60 * 60)) // 60
seconds = from_now.seconds % 60
return all((days >= 0, hours >= 0, minutes >= 0, seconds >= 0))
def C2F(c):
return c * (9 / 5) + 32
| 24.329412 | 89 | 0.612669 |
793fe350ea27b70e5491d166ba85a305efd37729 | 2,509 | py | Python | bitex/api/WSS/gdax.py | no777/bitex | 68042a02419b7032a37f090d1c0a192d9bb34f4c | [
"MIT"
] | null | null | null | bitex/api/WSS/gdax.py | no777/bitex | 68042a02419b7032a37f090d1c0a192d9bb34f4c | [
"MIT"
] | null | null | null | bitex/api/WSS/gdax.py | no777/bitex | 68042a02419b7032a37f090d1c0a192d9bb34f4c | [
"MIT"
] | null | null | null | # Import Built-Ins
import logging
import json
import threading
import time
# Import Third-Party
from websocket import create_connection, WebSocketTimeoutException
import requests
# Import Homebrew
from bitex.api.WSS.base import WSSAPI
from datetime import datetime
# Init Logging Facilities
log = logging.getLogger(__name__)
class GDAXWSS(WSSAPI):
def __init__(self):
super(GDAXWSS, self).__init__('wss://ws-feed.gdax.com', 'GDAX')
self.conn = None
r = requests.get('https://api.gdax.com/products').json()
self.pairs = [x['id'] for x in r]
self._data_thread = None
def start(self):
super(GDAXWSS, self).start()
self._data_thread = threading.Thread(target=self._process_data)
self._data_thread.daemon = True
self._data_thread.start()
def stop(self):
if self.running:
super(GDAXWSS, self).stop()
if self._data_thread:
self._data_thread.join()
self._data_thread = None
def _process_data(self):
self.conn = create_connection(self.addr)
payload = json.dumps({'type': 'subscribe', 'product_ids': self.pairs})
self.conn.send(payload)
while self.running:
try:
data = json.loads(self.conn.recv())
except (WebSocketTimeoutException, ConnectionResetError):
log.warning("restarted")
self._controller_q.put('restart')
type=data['type']
# reason = data['reason']
if type=='match':
product_id = data['product_id']
# log.info(product_id)
if product_id=='BTC-USD':
log.debug(data)
amount = float(data['size'])
if data['side']=="sell":
amount = -amount
date_str = (data['time'])
# //2018-12-03T14:38:33.665000Z
ts = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ')
timestamp = (ts - datetime(1970, 1, 1)).total_seconds()
# print("ts %s" % timestamp)
self.data_q.put(('trades',
timestamp,amount,data['price'],))
# if 'product_id' in data:
# self.data_q.put(('order_book', data['product_id'],
# data, time.time()))
self.conn = None | 32.584416 | 78 | 0.534874 |
793fe38ec2eda9ae0f1fa4f47302a7de484f81af | 8,381 | py | Python | cachier/core.py | fx-kirin/cachier | 0cbe7efcd1e7dfa7d4df7245cc90d4263cc75e86 | [
"MIT"
] | null | null | null | cachier/core.py | fx-kirin/cachier | 0cbe7efcd1e7dfa7d4df7245cc90d4263cc75e86 | [
"MIT"
] | null | null | null | cachier/core.py | fx-kirin/cachier | 0cbe7efcd1e7dfa7d4df7245cc90d4263cc75e86 | [
"MIT"
] | null | null | null | """Persistent, stale-free memoization decorators for Python."""
# This file is part of Cachier.
# https://github.com/shaypal5/cachier
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Shay Palachy <[email protected]>
# python 2 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from functools import wraps
import datetime
from concurrent.futures import ThreadPoolExecutor
from .pickle_core import _PickleCore
from .mongo_core import _MongoCore, RecalculationNeeded
MAX_WORKERS_ENVAR_NAME = 'CACHIER_MAX_WORKERS'
DEFAULT_MAX_WORKERS = 8
def _max_workers():
try:
return int(os.environ[MAX_WORKERS_ENVAR_NAME])
except KeyError:
os.environ[MAX_WORKERS_ENVAR_NAME] = str(DEFAULT_MAX_WORKERS)
return DEFAULT_MAX_WORKERS
def _set_max_workers(max_workers):
os.environ[MAX_WORKERS_ENVAR_NAME] = str(max_workers)
_get_executor(True)
def _get_executor(reset=False):
if reset:
_get_executor.executor = ThreadPoolExecutor(_max_workers())
try:
return _get_executor.executor
except AttributeError:
_get_executor.executor = ThreadPoolExecutor(_max_workers())
return _get_executor.executor
def _function_thread(core, key, func, args, kwds):
try:
func_res = func(*args, **kwds)
core.set_entry(key, func_res)
except BaseException as exc: # pylint: disable=W0703
print(
'Function call failed with the following exception:\n{}'.format(
exc
)
)
def _calc_entry(core, key, func, args, kwds):
try:
core.mark_entry_being_calculated(key)
# _get_executor().submit(core.mark_entry_being_calculated, key)
func_res = func(*args, **kwds)
core.set_entry(key, func_res)
# _get_executor().submit(core.set_entry, key, func_res)
return func_res
finally:
core.mark_entry_not_calculated(key)
def cachier(
stale_after=None,
next_time=False,
pickle_reload=True,
mongetter=None,
cache_dir=None,
):
"""A persistent, stale-free memoization decorator.
The positional and keyword arguments to the wrapped function must be
hashable (i.e. Python's immutable built-in objects, not mutable
containers). Also, notice that since objects which are instances of
user-defined classes are hashable but all compare unequal (their hash
value is their id), equal objects across different sessions will not yield
identical keys.
Arguments
---------
stale_after : datetime.timedelta, optional
The time delta afterwhich a cached result is considered stale. Calls
made after the result goes stale will trigger a recalculation of the
result, but whether a stale or fresh result will be returned is
determined by the optional next_time argument.
next_time : bool, optional
If set to True, a stale result will be returned when finding one, not
waiting for the calculation of the fresh result to return. Defaults to
False.
pickle_reload : bool, optional
If set to True, in-memory cache will be reloaded on each cache read,
enabling different threads to share cache. Should be set to False for
faster reads in single-thread programs. Defaults to True.
mongetter : callable, optional
A callable that takes no arguments and returns a pymongo.Collection
object with writing permissions. If unset a local pickle cache is used
instead.
cache_dir : str, optional
A fully qualified path to a file directory to be used for cache files.
The running process must have running permissions to this folder. If
not provided, a default directory at `~/.cachier/` is used.
"""
# print('Inside the wrapper maker')
# print('mongetter={}'.format(mongetter))
# print('stale_after={}'.format(stale_after))
# print('next_time={}'.format(next_time))
if mongetter:
core = _MongoCore(mongetter, stale_after, next_time)
else:
core = _PickleCore( # pylint: disable=R0204
stale_after=stale_after,
next_time=next_time,
reload=pickle_reload,
cache_dir=cache_dir,
)
def _cachier_decorator(func):
core.set_func(func)
@wraps(func)
def func_wrapper(*args, **kwds): # pylint: disable=C0111,R0911
# print('Inside general wrapper for {}.'.format(func.__name__))
ignore_cache = kwds.pop('ignore_cache', False)
overwrite_cache = kwds.pop('overwrite_cache', False)
verbose_cache = kwds.pop('verbose_cache', False)
_print = lambda x: None # skipcq: FLK-E731 # noqa: E731
if verbose_cache:
_print = print
if ignore_cache:
return func(*args, **kwds)
key, entry = core.get_entry(args, kwds)
if overwrite_cache:
return _calc_entry(core, key, func, args, kwds)
if entry is not None: # pylint: disable=R0101
_print('Entry found.')
if entry.get('value', None) is not None:
_print('Cached result found.')
if stale_after:
now = datetime.datetime.now()
if now - entry['time'] > stale_after:
_print('But it is stale... :(')
if entry['being_calculated']:
if next_time:
_print('Returning stale.')
return entry['value'] # return stale val
_print('Already calc. Waiting on change.')
try:
return core.wait_on_entry_calc(key)
except RecalculationNeeded:
return _calc_entry(
core, key, func, args, kwds
)
if next_time:
_print('Async calc and return stale')
try:
core.mark_entry_being_calculated(key)
_get_executor().submit(
_function_thread,
core,
key,
func,
args,
kwds,
)
finally:
core.mark_entry_not_calculated(key)
return entry['value']
_print('Calling decorated function and waiting')
return _calc_entry(core, key, func, args, kwds)
_print('And it is fresh!')
return entry['value']
if entry['being_calculated']:
_print('No value but being calculated. Waiting.')
try:
return core.wait_on_entry_calc(key)
except RecalculationNeeded:
return _calc_entry(core, key, func, args, kwds)
_print('No entry found. No current calc. Calling like a boss.')
return _calc_entry(core, key, func, args, kwds)
def clear_cache():
"""Clear the cache."""
core.clear_cache()
def clear_being_calculated():
"""Marks all entries in this cache as not being calculated."""
core.clear_being_calculated()
def cache_dpath():
"""Returns the path to the cache dir, if exists; None if not."""
try:
return core.expended_cache_dir
except AttributeError:
return None
func_wrapper.clear_cache = clear_cache
func_wrapper.clear_being_calculated = clear_being_calculated
func_wrapper.cache_dpath = cache_dpath
return func_wrapper
return _cachier_decorator
| 38.800926 | 78 | 0.577258 |
793fe3e852c6e1b0f9d58d4e1c54ad17f4435b59 | 1,356 | py | Python | CalibTracker/SiStripCommon/python/theBigNtupleMC_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CalibTracker/SiStripCommon/python/theBigNtupleMC_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CalibTracker/SiStripCommon/python/theBigNtupleMC_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTrackClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowRechitClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowSimhitClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTracksProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowSimTracksProducer_cfi import *
shallowTree = cms.EDAnalyzer("ShallowTree",
outputCommands = cms.untracked.vstring(
'drop *',
'keep *_shallowEventRun_*_*',
'keep *_shallowClusters_*_*',
'keep *_shallowRechitClusters_*_*',
'keep *_shallowTracks_*_*',
'keep *_shallowTrackClusters_*_*',
'keep *_shallowSimhitClusters_*_*',
'keep *_shallowSimTracks_*_*'
))
theBigNtuple = cms.Sequence( (shallowEventRun +
shallowClusters +
shallowRechitClusters +
shallowSimhitClusters +
shallowTrackClusters +
shallowTracks +
shallowSimTracks) *
shallowTree
)
| 42.375 | 74 | 0.65118 |
793fe4c1a39c93ff7b03e71fb83a4355e1337f67 | 7,445 | py | Python | bot/cogs/utils.py | atmishra/bot | ad1a33e80152343a81eeeabf0117ced76b83e273 | [
"MIT"
] | 1 | 2022-01-01T17:33:48.000Z | 2022-01-01T17:33:48.000Z | bot/cogs/utils.py | atmishra/bot | ad1a33e80152343a81eeeabf0117ced76b83e273 | [
"MIT"
] | null | null | null | bot/cogs/utils.py | atmishra/bot | ad1a33e80152343a81eeeabf0117ced76b83e273 | [
"MIT"
] | 1 | 2020-11-01T19:57:00.000Z | 2020-11-01T19:57:00.000Z | import logging
import re
import unicodedata
from asyncio import TimeoutError, sleep
from email.parser import HeaderParser
from io import StringIO
from typing import Tuple
from dateutil import relativedelta
from discord import Colour, Embed, Message, Role
from discord.ext.commands import Bot, Cog, Context, command
from bot.constants import Channels, MODERATION_ROLES, Mention, STAFF_ROLES
from bot.decorators import in_channel, with_role
from bot.utils.time import humanize_delta
log = logging.getLogger(__name__)
class Utils(Cog):
"""A selection of utilities which don't have a clear category."""
def __init__(self, bot: Bot):
self.bot = bot
self.base_pep_url = "http://www.python.org/dev/peps/pep-"
self.base_github_pep_url = "https://raw.githubusercontent.com/python/peps/master/pep-"
@command(name='pep', aliases=('get_pep', 'p'))
async def pep_command(self, ctx: Context, pep_number: str) -> None:
"""Fetches information about a PEP and sends it to the channel."""
if pep_number.isdigit():
pep_number = int(pep_number)
else:
await ctx.invoke(self.bot.get_command("help"), "pep")
return
possible_extensions = ['.txt', '.rst']
found_pep = False
for extension in possible_extensions:
# Attempt to fetch the PEP
pep_url = f"{self.base_github_pep_url}{pep_number:04}{extension}"
log.trace(f"Requesting PEP {pep_number} with {pep_url}")
response = await self.bot.http_session.get(pep_url)
if response.status == 200:
log.trace("PEP found")
found_pep = True
pep_content = await response.text()
# Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
pep_header = HeaderParser().parse(StringIO(pep_content))
# Assemble the embed
pep_embed = Embed(
title=f"**PEP {pep_number} - {pep_header['Title']}**",
description=f"[Link]({self.base_pep_url}{pep_number:04})",
)
pep_embed.set_thumbnail(url="https://www.python.org/static/opengraph-icon-200x200.png")
# Add the interesting information
if "Status" in pep_header:
pep_embed.add_field(name="Status", value=pep_header["Status"])
if "Python-Version" in pep_header:
pep_embed.add_field(name="Python-Version", value=pep_header["Python-Version"])
if "Created" in pep_header:
pep_embed.add_field(name="Created", value=pep_header["Created"])
if "Type" in pep_header:
pep_embed.add_field(name="Type", value=pep_header["Type"])
elif response.status != 404:
# any response except 200 and 404 is expected
found_pep = True # actually not, but it's easier to display this way
log.trace(f"The user requested PEP {pep_number}, but the response had an unexpected status code: "
f"{response.status}.\n{response.text}")
error_message = "Unexpected HTTP error during PEP search. Please let us know."
pep_embed = Embed(title="Unexpected error", description=error_message)
pep_embed.colour = Colour.red()
break
if not found_pep:
log.trace("PEP was not found")
not_found = f"PEP {pep_number} does not exist."
pep_embed = Embed(title="PEP not found", description=not_found)
pep_embed.colour = Colour.red()
await ctx.message.channel.send(embed=pep_embed)
@command()
@in_channel(Channels.bot, bypass_roles=STAFF_ROLES)
async def charinfo(self, ctx: Context, *, characters: str) -> None:
"""Shows you information on up to 25 unicode characters."""
match = re.match(r"<(a?):(\w+):(\d+)>", characters)
if match:
embed = Embed(
title="Non-Character Detected",
description=(
"Only unicode characters can be processed, but a custom Discord emoji "
"was found. Please remove it and try again."
)
)
embed.colour = Colour.red()
await ctx.send(embed=embed)
return
if len(characters) > 25:
embed = Embed(title=f"Too many characters ({len(characters)}/25)")
embed.colour = Colour.red()
await ctx.send(embed=embed)
return
def get_info(char: str) -> Tuple[str, str]:
digit = f"{ord(char):x}"
if len(digit) <= 4:
u_code = f"\\u{digit:>04}"
else:
u_code = f"\\U{digit:>08}"
url = f"https://www.compart.com/en/unicode/U+{digit:>04}"
name = f"[{unicodedata.name(char, '')}]({url})"
info = f"`{u_code.ljust(10)}`: {name} - {char}"
return info, u_code
charlist, rawlist = zip(*(get_info(c) for c in characters))
embed = Embed(description="\n".join(charlist))
embed.set_author(name="Character Info")
if len(characters) > 1:
embed.add_field(name='Raw', value=f"`{''.join(rawlist)}`", inline=False)
await ctx.send(embed=embed)
@command()
@with_role(*MODERATION_ROLES)
async def mention(self, ctx: Context, *, role: Role) -> None:
"""Set a role to be mentionable for a limited time."""
if role.mentionable:
await ctx.send(f"{role} is already mentionable!")
return
await role.edit(reason=f"Role unlocked by {ctx.author}", mentionable=True)
human_time = humanize_delta(relativedelta.relativedelta(seconds=Mention.message_timeout))
await ctx.send(
f"{role} has been made mentionable. I will reset it in {human_time}, or when someone mentions this role."
)
def check(m: Message) -> bool:
"""Checks that the message contains the role mention."""
return role in m.role_mentions
try:
msg = await self.bot.wait_for("message", check=check, timeout=Mention.message_timeout)
except TimeoutError:
await role.edit(mentionable=False, reason="Automatic role lock - timeout.")
await ctx.send(f"{ctx.author.mention}, you took too long. I have reset {role} to be unmentionable.")
return
if any(r.id in MODERATION_ROLES for r in msg.author.roles):
await sleep(Mention.reset_delay)
await role.edit(mentionable=False, reason=f"Automatic role lock by {msg.author}")
await ctx.send(
f"{ctx.author.mention}, I have reset {role} to be unmentionable as "
f"{msg.author if msg.author != ctx.author else 'you'} sent a message mentioning it."
)
return
await role.edit(mentionable=False, reason=f"Automatic role lock - unauthorised use by {msg.author}")
await ctx.send(
f"{ctx.author.mention}, I have reset {role} to be unmentionable "
f"as I detected unauthorised use by {msg.author} (ID: {msg.author.id})."
)
def setup(bot: Bot) -> None:
"""Utils cog load."""
bot.add_cog(Utils(bot))
log.info("Cog loaded: Utils")
| 40.906593 | 117 | 0.593553 |
793fe52bbf404e4b39983c50a6fe64148ac6370d | 8,304 | py | Python | nicos_mlz/toftof/devices/chopper/base.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/toftof/devices/chopper/base.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 4 | 2019-11-08T10:18:16.000Z | 2021-01-13T13:07:29.000Z | nicos_mlz/toftof/devices/chopper/base.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <[email protected]>
#
# *****************************************************************************
"""Base classes for the TOFTOF chopper system."""
from nicos.core import Attach, HasLimits, HasTimeout, Moveable, Override, \
Param, Readable, intrange, listof, oneof, status, tupleof
from nicos_mlz.toftof.devices import calculations as calc
class BaseChopperController(HasTimeout, Readable):
parameters = {
'ch5_90deg_offset': Param('Whether chopper 5 is mounted the right way '
'(= 0) or with 90deg offset (= 1)',
type=intrange(0, 1), default=0,
category='general',
),
'phase_accuracy': Param('Required accuracy of the chopper phases',
settable=True, default=10, type=float,),
'speed_accuracy': Param('Required accuracy of the chopper speeds',
settable=True, default=2, type=float,),
'resolution': Param('Current energy resolution',
volatile=True, type=tupleof(float, float),),
'wavelength': Param('Selected wavelength',
unit='AA', settable=True, type=float,
default=4.5),
'speed': Param('Disk speed',
unit='rpm', userparam=False, type=int, default=6000),
'ratio': Param('Frame-overlap ratio',
type=int, settable=True, default=1),
'crc': Param('Counter-rotating mode',
type=int, settable=True, default=1),
'slittype': Param('Slit type',
type=int, settable=True, default=1),
'phases': Param('Current phases',
type=listof(float), internal=True, default=[0] * 8),
'changetime': Param('Time of last change',
userparam=False, type=float,),
'frametime': Param('Time between neutron pulses',
unit='s', type=float, mandatory=False,
volatile=True, category='general',),
}
parameter_overrides = {
'timeout': Override(default=90),
}
def _readspeeds(self):
return [0] * 8
def _getparams(self):
return (self.wavelength, self.speed, self.ratio,
self.crc, self.slittype)
def _change(self, name, value):
raise NotImplementedError('please use a proper derived class and '
'implement "_change" method!')
def doRead(self, maxage=0):
"""Read average speed from all choppers."""
speeds = self._readspeeds()
speed = 0.0
for ch in [1, 2, 3, 4, 6, 7]:
speed += speeds[ch - 1]
if self.ratio is not None:
if self.ratio == 1:
speed += speeds[5 - 1]
elif self.ratio < 9:
speed += speeds[5 - 1] * self.ratio / (self.ratio - 1.)
else:
speed += speeds[5 - 1] * self.ratio / 7.
return speed / 7.
else:
return speed / 6.
def doReadResolution(self):
return calc.Eres1(self.wavelength, self.speed)
def doReadFrametime(self):
_, chspeed, chratio, _, _ = self._getparams()
return calc.calculateFrameTime(chspeed, chratio)
class SpeedReadout(Readable):
"""The current speed readout device of the chopper."""
attached_devices = {
'chopper': Attach('Chopper controller', BaseChopperController),
}
parameter_overrides = {
'unit': Override(mandatory=False, default='rpm'),
}
def doRead(self, maxage=0):
return [v / 279.618375
for v in self._attached_chopper._readspeeds_actual()]
def doStatus(self, maxage=0):
stat = self._attached_chopper.status(maxage)
if stat[0] != status.OK:
return stat[0], 'changing'
return status.OK, 'idle'
class PropertyChanger(Moveable):
"""This is essentially a ParamDevice
and can be replace once Controller uses single setters
(NICOS-style interface).
"""
attached_devices = {
'chopper': Attach('Chopper controller', BaseChopperController),
'chdelay': Attach('Setting chopper delay', Moveable),
}
def doStatus(self, maxage=0):
stat = self._attached_chopper.status(maxage)
if stat[0] != status.OK:
return stat[0], 'changing'
return status.OK, 'idle'
def doRead(self, maxage=0):
return getattr(self._attached_chopper, self._prop)
def doStart(self, target):
ch5_90deg_offset = self._attached_chopper.ch5_90deg_offset
chwl, chspeed, chratio, chst = self._chopper_params(target)
_chdelay = calc.calculateChopperDelay(chwl, chspeed, chratio, chst,
ch5_90deg_offset)
self.log.debug('setting chopper delay to: %d', _chdelay)
self._attached_chdelay.move(_chdelay)
self._attached_chopper._change(self._prop, target)
def doReadTarget(self):
return getattr(self._attached_chopper, self._prop)
class Wavelength(HasLimits, PropertyChanger):
"""The wave length parameter device of the chopper."""
_prop = 'wavelength'
parameter_overrides = {
'unit': Override(mandatory=False, default='AA'),
}
valuetype = float
def _chopper_params(self, target):
_, chspeed, chratio, _, chst = self._attached_chopper._getparams()
return target, chspeed, chratio, chst
class Speed(HasLimits, PropertyChanger):
"""The speed parameter device of the chopper."""
_prop = 'speed'
parameter_overrides = {
'unit': Override(mandatory=False, default='rpm'),
}
valuetype = float
def _chopper_params(self, target):
chwl, _, chratio, _, chst = self._attached_chopper._getparams()
return chwl, target, chratio, chst
class Ratio(PropertyChanger):
"""The ratio parameter device of the chopper."""
_prop = 'ratio'
parameter_overrides = {
'unit': Override(mandatory=False, default=''),
'fmtstr': Override(default='%d'),
}
valuetype = oneof(*range(1, 11))
def _chopper_params(self, target):
chwl, chspeed, _, _, chst = self._attached_chopper._getparams()
return chwl, chspeed, target, chst
class CRC(PropertyChanger):
"""The crc (rotation direction of disc 5) parameter device of the
chopper.
"""
_prop = 'crc'
parameter_overrides = {
'unit': Override(mandatory=False, default=''),
'fmtstr': Override(default='%d'),
}
valuetype = oneof(0, 1)
def _chopper_params(self, target):
chwl, chspeed, chratio, _, chst = self._attached_chopper._getparams()
return chwl, chspeed, chratio, chst
class SlitType(PropertyChanger):
"""The slit type parameter device of the chopper."""
_prop = 'slittype'
parameter_overrides = {
'unit': Override(mandatory=False, default=''),
'fmtstr': Override(default='%d'),
}
valuetype = oneof(0, 1, 2)
def _chopper_params(self, target):
chwl, chspeed, chratio, _, _ = self._attached_chopper._getparams()
return chwl, chspeed, chratio, target
| 35.186441 | 79 | 0.594292 |
793fe551c7aa87a13f1e22df6b123b977f101d52 | 2,325 | py | Python | Simulated Annealing/simulated_annealing_subplots.py | armandwayoff/Graph-Optimization | 9a7472d023935a49e270f1d1d7191af26ca6ffe9 | [
"MIT"
] | 1 | 2021-12-07T18:03:27.000Z | 2021-12-07T18:03:27.000Z | Simulated Annealing/simulated_annealing_subplots.py | armandwayoff/Graph-Optimization | 9a7472d023935a49e270f1d1d7191af26ca6ffe9 | [
"MIT"
] | null | null | null | Simulated Annealing/simulated_annealing_subplots.py | armandwayoff/Graph-Optimization | 9a7472d023935a49e270f1d1d7191af26ca6ffe9 | [
"MIT"
] | null | null | null | # inspired by https://www.youtube.com/watch?v=NPE3zncXA5s
import matplotlib.pyplot as plt
import networkx as nx
from math import *
from random import *
import time
class Vertex:
def __init__(self, x, y):
self.x = x
self.y = y
def dist(x1, y1, x2, y2):
return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
def total_distance(lst):
d = 0
for j in range(len(lst) - 1):
d += dist(vertices[lst[j]].x, vertices[lst[j]].y, vertices[lst[j + 1]].x, vertices[lst[j + 1]].y)
return d
def reverse_sublist(lst, start, end):
lst[start:end + 1] = lst[start:end + 1][::-1]
return lst
NUMBER_VERTICES = 20
WIDTH = HEIGHT = 1 # dimension of the canvas
NUMBER_ITERATIONS = 10 ** 3
NUMBER_ITERATIONS_PER_CHAIN = 500
VERTEX_SIZE = 120
INITIAL_TEMP = 50
ALPHA = 0.99
T = INITIAL_TEMP
vertices = []
path = []
temperatures = []
distances = []
G = nx.Graph()
for i in range(NUMBER_VERTICES):
new_vertex = Vertex(uniform(0, WIDTH), uniform(0, HEIGHT))
vertices.append(new_vertex)
G.add_node(i, pos=(new_vertex.x, new_vertex.y))
path.append(i)
path.append(0)
start_time = time.time()
record_distance = dist(0, 0, WIDTH, HEIGHT) * NUMBER_VERTICES
for _ in range(NUMBER_ITERATIONS):
temperatures.append(T)
T *= ALPHA
for _ in range(NUMBER_ITERATIONS_PER_CHAIN):
selected_vertices = sample(range(1, NUMBER_VERTICES), 2)
test = path.copy()
test = reverse_sublist(test, selected_vertices[0], selected_vertices[1])
test_distance = total_distance(test)
if test_distance < record_distance:
record_distance = test_distance
path = test
else:
r = uniform(0, 1)
if r < exp((record_distance - test_distance) / T):
record_distance = test_distance
distances.append(record_distance)
print("--- %s seconds ---" % (time.time() - start_time))
for i in range(NUMBER_VERTICES):
G.add_edge(path[i], path[i + 1])
plt.subplot(212)
pos = nx.get_node_attributes(G, 'pos')
nx.draw(G, pos, node_size=VERTEX_SIZE, node_color='blue', edge_color='cyan', width=3)
plt.title("Path")
plt.subplot(221)
plt.plot(range(len(temperatures)), temperatures)
plt.title("Temperature")
plt.subplot(222)
plt.plot(range(len(distances)), distances)
plt.title("Total Length")
plt.show()
| 25 | 105 | 0.656344 |
793fe5a37b44a8bf92994abaef516b81f199a6e6 | 582 | py | Python | idm/my_signals/farm.py | sadfucking/IrCB-Duty | 67cce59900118412b1276c8c7f554500234cc899 | [
"MIT"
] | 18 | 2020-07-25T17:44:16.000Z | 2022-03-09T08:10:12.000Z | idm/my_signals/farm.py | sadfucking/IrCB-Duty | 67cce59900118412b1276c8c7f554500234cc899 | [
"MIT"
] | 6 | 2020-10-07T17:57:40.000Z | 2022-01-29T17:16:17.000Z | idm/my_signals/farm.py | sadfucking/IrCB-Duty | 67cce59900118412b1276c8c7f554500234cc899 | [
"MIT"
] | 36 | 2020-07-27T07:10:45.000Z | 2022-03-30T19:19:28.000Z | from idm.objects import dp, MySignalEvent
from time import sleep
farm_data = {
"owner_id": -174105461,
"post_id": 6713149
}
@dp.longpoll_event_register('ферма')
@dp.my_signal_event_register('ферма')
def farming(event: MySignalEvent) -> str:
comment_id = event.api('wall.createComment', message='ферма', **farm_data)['comment_id']
event.msg_op(2, '⏱ Комментарий оставлен')
sleep(2)
reply_text = event.api('wall.getComments', **farm_data,
comment_id=comment_id)['items'][0]['text']
event.msg_op(2, reply_text)
return "ok"
| 29.1 | 92 | 0.671821 |
793fe735000ac7111e1fde876fef76f0b3061cdb | 28,016 | py | Python | models/00_settings.py | apocsantos/eden | 8b71af6b4dc72e2c4d656dbe1bb3943426705422 | [
"MIT"
] | 1 | 2017-03-15T23:29:41.000Z | 2017-03-15T23:29:41.000Z | models/00_settings.py | apocsantos/eden | 8b71af6b4dc72e2c4d656dbe1bb3943426705422 | [
"MIT"
] | null | null | null | models/00_settings.py | apocsantos/eden | 8b71af6b4dc72e2c4d656dbe1bb3943426705422 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Global settings:
Those which are typically edited during a deployment are in
000_config.py & their results parsed into here. Deployers
shouldn't typically need to edit any settings here.
"""
# Keep all our configuration options off the main global variables
# Use response.s3 for one-off variables which are visible in views without explicit passing
s3.formats = Storage()
# Workaround for this Bug in Selenium with FF4:
# http://code.google.com/p/selenium/issues/detail?id=1604
s3.interactive = settings.get_ui_confirm()
# Use session for persistent per-user variables (beware of a user having multiple tabs open!)
if not session.s3:
session.s3 = Storage()
s3.base_url = "%s/%s" % (settings.get_base_public_url(),
appname)
s3.download_url = "%s/default/download" % s3.base_url
###############
# Client tests
###############
def s3_is_mobile_client(request):
"""
Simple UA Test whether client is a mobile device
"""
if request.env.http_x_wap_profile or request.env.http_profile:
return True
if request.env.http_accept and \
request.env.http_accept.find("text/vnd.wap.wml") > 0:
return True
keys = ["iphone", "ipod", "android", "opera mini", "blackberry", "palm",
"windows ce", "iemobile", "smartphone", "medi", "sk-0", "vk-v",
"aptu", "xda-", "mtv ", "v750", "p800", "opwv", "send", "xda2",
"sage", "t618", "qwap", "veri", "t610", "tcl-", "vx60", "vx61",
"lg-k", "lg-l", "lg-m", "lg-o", "lg-a", "lg-b", "lg-c", "xdag",
"lg-f", "lg-g", "sl45", "emul", "lg-p", "lg-s", "lg-t", "lg-u",
"lg-w", "6590", "t250", "qc21", "ig01", "port", "m1-w", "770s",
"n710", "ez60", "mt50", "g1 u", "vk40", "bird", "tagt", "pose",
"jemu", "beck", "go.w", "jata", "gene", "smar", "g-mo", "o2-x",
"htc_", "hei-", "fake", "qc-7", "smal", "htcp", "htcs", "craw",
"htct", "aste", "htca", "htcg", "teli", "telm", "kgt", "mwbp",
"kwc-", "owg1", "htc ", "kgt/", "htc-", "benq", "slid", "qc60",
"dmob", "blac", "smt5", "nec-", "sec-", "sec1", "sec0", "fetc",
"spv ", "mcca", "nem-", "spv-", "o2im", "m50/", "ts70", "arch",
"qtek", "opti", "devi", "winw", "rove", "winc", "talk", "pant",
"netf", "pana", "esl8", "pand", "vite", "v400", "whit", "scoo",
"good", "nzph", "mtp1", "doco", "raks", "wonu", "cmd-", "cell",
"mode", "im1k", "modo", "lg-d", "idea", "jigs", "bumb", "sany",
"vulc", "vx70", "psio", "fly_", "mate", "pock", "cdm-", "fly-",
"i230", "lge-", "lge/", "argo", "qc32", "n701", "n700", "mc21",
"n500", "midp", "t-mo", "airn", "bw-u", "iac", "bw-n", "lg g",
"erk0", "sony", "alav", "503i", "pt-g", "au-m", "treo", "ipaq",
"dang", "seri", "mywa", "eml2", "smb3", "brvw", "sgh-", "maxo",
"pg-c", "qci-", "vx85", "vx83", "vx80", "vx81", "pg-8", "pg-6",
"phil", "pg-1", "pg-2", "pg-3", "ds12", "scp-", "dc-s", "brew",
"hipt", "kddi", "qc07", "elai", "802s", "506i", "dica", "mo01",
"mo02", "avan", "kyoc", "ikom", "siem", "kyok", "dopo", "g560",
"i-ma", "6310", "sie-", "grad", "ibro", "sy01", "nok6", "el49",
"rim9", "upsi", "inno", "wap-", "sc01", "ds-d", "aur ", "comp",
"wapp", "wapr", "waps", "wapt", "wapu", "wapv", "wapy", "newg",
"wapa", "wapi", "wapj", "wapm", "hutc", "lg/u", "yas-", "hita",
"lg/l", "lg/k", "i-go", "4thp", "bell", "502i", "zeto", "ez40",
"java", "n300", "n302", "mmef", "pn-2", "newt", "1207", "sdk/",
"gf-5", "bilb", "zte-", "maui", "qc-3", "qc-2", "blaz", "r600",
"hp i", "qc-5", "moto", "cond", "motv", "virg", "ccwa", "audi",
"shar", "i-20", "samm", "sama", "sams", "sch-", "mot ", "http",
"505i", "mot-", "n502", "topl", "n505", "mobi", "3gso", "wmlb",
"ezwa", "qc12", "abac", "tdg-", "neon", "mio8", "sp01", "rozo",
"vx98", "dait", "t600", "anyw", "tx-9", "sava", "m-cr", "tsm-",
"mioa", "tsm5", "klon", "capi", "tsm3", "hcit", "libw", "lg50",
"mc01", "amoi", "lg54", "ez70", "se47", "n203", "vk52", "vk53",
"vk50", "webc", "haie", "semc", "grun", "play", "palm", "a wa",
"anny", "prox", "o2 x", "ezze", "symb", "hs-c", "pg13", "mits",
"kpt ", "qa-a", "501i", "pdxg", "iris", "pluc", "acoo", "soft",
"hpip", "iac/", "iac-", "aus ", "s55/", "vx53", "vx52", "chtm",
"meri", "merc", "your", "huaw", "cldc", "voda", "smit", "x700",
"mozz", "lexi", "up.b", "sph-", "keji", "jbro", "wig ", "attw",
"pire", "r380", "lynx", "anex", "vm40", "hd-m", "504i", "w3c ",
"c55/", "w3c-", "upg1", "t218", "tosh", "acer", "hd-t", "eric",
"hd-p", "noki", "acs-", "dbte", "n202", "tim-", "alco", "ezos",
"dall", "leno", "alca", "asus", "m3ga", "utst", "aiko", "n102",
"n101", "n100", "oran"]
ua = (request.env.http_user_agent or "").lower()
if [key for key in keys if key in ua]:
return True
return False
# Store in session
if session.s3.mobile is None:
session.s3.mobile = s3_is_mobile_client(request)
def s3_populate_browser_compatibility(request):
"""
Use WURFL for browser compatibility detection
@ToDo: define a list of features to store
"""
features = Storage(
#category = ["list","of","features","to","store"]
)
try:
from pywurfl.algorithms import TwoStepAnalysis
except ImportError:
s3_debug("pywurfl python module has not been installed, browser compatibility listing will not be populated. Download pywurfl from http://pypi.python.org/pypi/pywurfl/")
return False
import wurfl
device = wurfl.devices.select_ua(unicode(request.env.http_user_agent),
search=TwoStepAnalysis(wurfl.devices))
browser = Storage()
#for feature in device:
#if feature[0] not in category_list:
#category_list.append(feature[0])
#for category in features:
#if category in
#browser[category] = Storage()
for feature in device:
if feature[0] in features and \
feature[1] in features[feature[0]]:
browser[feature[0]][feature[1]] = feature[2]
return browser
# Store in session
# - commented-out until we make use of it
#if session.s3.browser is None:
# session.s3.browser = s3_populate_browser_compatibility(request)
##################
# Global variables
##################
# Interactive view formats
s3.interactive_view_formats = ("html", "popup", "iframe")
# Strings
messages["UNAUTHORISED"] = "Not authorised!"
messages["BADFORMAT"] = "Unsupported data format!"
messages["BADMETHOD"] = "Unsupported method!"
messages["BADRECORD"] = "Record not found!"
messages["INVALIDREQUEST"] = "Invalid request!"
messages["XLWT_ERROR"] = "xlwt module not available within the running Python - this needs installing for XLS output!"
messages["REPORTLAB_ERROR"] = "ReportLab module not available within the running Python - this needs installing for PDF output!"
# Common Labels
#messages["BREADCRUMB"] = ">> "
messages["UNKNOWN_OPT"] = "Unknown"
messages["NONE"] = "-"
messages["READ"] = settings.get_ui_read_label()
messages["UPDATE"] = settings.get_ui_update_label()
messages["DELETE"] = "Delete"
messages["COPY"] = "Copy"
messages["NOT_APPLICABLE"] = "N/A"
messages["ADD_PERSON"] = "Add Person"
messages["ADD_LOCATION"] = "Add Location"
messages["SELECT_LOCATION"] = "Select a location"
for u in messages:
if isinstance(messages[u], str):
globals()[u] = T(messages[u])
# Pass to CRUD
s3mgr.LABEL["READ"] = READ
s3mgr.LABEL["UPDATE"] = UPDATE
s3mgr.LABEL["DELETE"] = DELETE
s3mgr.LABEL["COPY"] = COPY
# Data Export Settings
ROWSPERPAGE = 20
PRETTY_PRINT = False
# To get included in <HEAD>
s3.stylesheets = []
s3.external_stylesheets = []
s3_script_dir = "/%s/static/scripts/S3" % appname
s3.script_dir = s3_script_dir
# To get included at the end of <BODY>
s3.scripts = []
s3.js_global = []
s3.jquery_ready = []
###########
# Languages
###########
s3.l10n_languages = settings.get_L10n_languages()
# Default strings are in US English
T.current_languages = ["en", "en-us"]
# Check if user has selected a specific language
if request.vars._language:
language = request.vars._language
session.s3.language = language
elif session.s3.language:
# Use the last-selected language
language = session.s3.language
elif auth.is_logged_in():
# Use user preference
language = auth.user.language
else:
# Use system default
language = settings.get_L10n_default_language()
#else:
# # Use what browser requests (default web2py behaviour)
# T.force(T.http_accept_language)
# IE doesn't set request.env.http_accept_language
#if language != "en":
T.force(language)
# Store for views (e.g. Ext)
if language.find("-") == -1:
# Ext peculiarities
if language == "vi":
s3.language = "vn"
elif language == "el":
s3.language = "el_GR"
else:
s3.language = language
else:
lang_parts = language.split("-")
s3.language = "%s_%s" % (lang_parts[0], lang_parts[1].upper())
# List of Languages which use a Right-to-Left script (Arabic, Hebrew, Farsi, Urdu)
s3_rtl_languages = ["ur", "ar"]
if T.accepted_language in s3_rtl_languages:
s3.rtl = True
else:
s3.rtl = False
s3_date_format = settings.get_L10n_date_format()
s3_datetime_format = settings.get_L10n_datetime_format()
######
# Mail
######
# These settings could be made configurable as part of the Messaging Module
# - however also need to be used by Auth (order issues), DB calls are overheads
# - as easy for admin to edit source here as to edit DB (although an admin panel can be nice)
mail.settings.server = settings.get_mail_server()
mail.settings.tls = settings.get_mail_server_tls()
mail_server_login = settings.get_mail_server_login()
if mail_server_login:
mail.settings.login = mail_server_login
mail.settings.sender = settings.get_mail_sender()
######
# Auth
######
_messages = auth.messages
_settings = auth.settings
_settings.lock_keys = False
_settings.password_min_length = 4
_settings.expiration = 28800 # seconds
#auth.settings.username_field = True
_settings.hmac_key = settings.get_auth_hmac_key()
auth.define_tables(migrate=migrate,
fake_migrate=fake_migrate)
_settings.facebook = settings.get_auth_facebook()
_settings.google = settings.get_auth_google()
if settings.get_auth_openid():
# Requires http://pypi.python.org/pypi/python-openid/
try:
from gluon.contrib.login_methods.openid_auth import OpenIDAuth
openid_login_form = OpenIDAuth(auth)
from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
extended_login_form = ExtendedLoginForm(auth, openid_login_form,
signals=["oid", "janrain_nonce"])
auth.settings.login_form = extended_login_form
except ImportError:
session.warning = T("Library support not available for OpenID")
# Require captcha verification for registration
#auth.settings.captcha = RECAPTCHA(request, public_key="PUBLIC_KEY", private_key="PRIVATE_KEY")
# Require Email Verification
_settings.registration_requires_verification = settings.get_auth_registration_requires_verification()
# Email settings for registration verification
_settings.mailer = mail
_messages.verify_email = "Click on the link %(url)s%(key)s to verify your email" % \
dict(url="%s/default/user/verify_email/" % s3.base_url,
key="%(key)s")
_settings.on_failed_authorization = URL(c="default", f="user",
args="not_authorized")
_messages.verify_email_subject = "%(system_name)s - Verify Email" % \
{"system_name" : settings.get_system_name()}
_settings.reset_password_requires_verification = True
_messages.reset_password = "%s %s/default/user/reset_password/%s %s" % \
(T("Click on the link"),
s3.base_url,
"%(key)s",
T("to reset your password"))
_messages.help_mobile_phone = T("Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.")
# Require Admin approval for self-registered users
_settings.registration_requires_approval = settings.get_auth_registration_requires_approval()
_messages.registration_pending = "Registration is still pending approval from Approver (%s) - please wait until confirmation received." % \
settings.get_mail_approver()
_messages.registration_pending_approval = "Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated." % \
settings.get_mail_approver()
_settings.verify_email_next = URL(c="default", f="index")
# Notify Approver of new pending user registration. Action may be required.
_settings.verify_email_onaccept = auth.s3_verify_email_onaccept
_messages["approve_user"] = \
"""Your action is required to approve a New User for %(system_name)s:
%(name_format)s
Please go to %(base_url)s/admin/user to approve this user.""" \
% dict(system_name = settings.get_system_name(),
name_format = \
"""%(first_name)s %(last_name)s
%(email)s""",
base_url = s3.base_url)
_messages["new_user"] = \
"""A New User has registered for %(system_name)s:
%(name_format)s
No action is required.""" \
% dict(system_name = settings.get_system_name(),
name_format = \
"""%(first_name)s %(last_name)s
%(email)s""")
# Allow use of LDAP accounts for login
# NB Currently this means that change password should be disabled:
#_settings.actions_disabled.append("change_password")
# (NB These are not automatically added to PR or to Authenticated role since they enter via the login() method not register())
#from gluon.contrib.login_methods.ldap_auth import ldap_auth
# Require even alternate login methods to register users 1st
#_settings.alternate_requires_registration = True
# Active Directory
#_settings.login_methods.append(ldap_auth(mode="ad", server="dc.domain.org", base_dn="ou=Users,dc=domain,dc=org"))
# or if not wanting local users at all (no passwords saved within DB):
#_settings.login_methods = [ldap_auth(mode="ad", server="dc.domain.org", base_dn="ou=Users,dc=domain,dc=org")]
# Domino
#_settings.login_methods.append(ldap_auth(mode="domino", server="domino.domain.org"))
# OpenLDAP
#_settings.login_methods.append(ldap_auth(server="directory.sahanafoundation.org", base_dn="ou=users,dc=sahanafoundation,dc=org"))
# Allow use of Email accounts for login
#_settings.login_methods.append(email_auth("smtp.gmail.com:587", "@gmail.com"))
# We don't wish to clutter the groups list with 1 per user.
_settings.create_user_groups = False
# We need to allow basic logins for Webservices
_settings.allow_basic_login = True
_settings.logout_onlogout = s3_auth_on_logout
_settings.login_onaccept = s3_auth_on_login
if settings.get_auth_registration_volunteer() and \
settings.has_module("vol"):
_settings.register_next = URL(c="vol", f="person")
# Default Language for authenticated users
_settings.table_user.language.default = settings.get_L10n_default_language()
# Languages available in User Profiles
field = _settings.table_user.language
if len(s3.l10n_languages) > 1:
field.requires = IS_IN_SET(s3.l10n_languages,
zero=None)
else:
field.default = s3.l10n_languages.keys()[0]
field.readable = False
field.writable = False
_settings.lock_keys = True
#########
# Session
#########
def s3_sessions():
"""
Extend session to support:
Multiple flash classes
Settings
Debug mode
Security mode
Audit modes
"""
response.error = session.error
response.confirmation = session.confirmation
response.information = session.information
response.warning = session.warning
session.error = []
session.confirmation = []
session.information = []
session.warning = []
# Are we running in debug mode?
session.s3.debug = s3.debug
# Should we use Content-Delivery Networks?
session.s3.cdn = settings.get_base_cdn()
# Security Policy
session.s3.security_policy = settings.get_security_policy()
# We Audit if either the Global or Module asks us to
# (ignore gracefully if module author hasn't implemented this)
try:
session.s3.audit_read = settings.get_security_audit_read() \
or settings.modules[request.controller].get("audit_read", False)
session.s3.audit_write = settings.get_security_audit_write() \
or settings.modules[request.controller].get("audit_write", False)
except:
# Controller doesn't link to a 'module' (e.g. appadmin)
session.s3.audit_read = False
session.s3.audit_write = False
return
# Extend the session
s3_sessions()
# Shortcuts for system role IDs, see modules/s3aaa.py/AuthS3
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
ANONYMOUS = system_roles.ANONYMOUS
EDITOR = system_roles.EDITOR
MAP_ADMIN = system_roles.MAP_ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
if session.s3.debug:
# Add the developer toolbar from modules/s3/s3utils.py
s3.toolbar = s3_dev_toolbar
######
# CRUD
######
def s3_formstyle(id, label, widget, comment, hidden=False):
"""
Provide the Sahana Eden Form Style
Label above the Inputs:
http://uxmovement.com/design-articles/faster-with-top-aligned-labels
"""
row = []
if hidden:
_class = "hidden"
else:
_class = ""
# Label on the 1st row
row.append(TR(TD(label, _class="w2p_fl"), TD(""), _id=id + "1", _class=_class))
# Widget & Comment on the 2nd Row
row.append(TR(widget, TD(comment, _class="w2p_fc"), _id=id, _class=_class))
return tuple(row)
s3_formstyle_mobile = s3_formstyle
s3.crud.formstyle = s3_formstyle
s3.crud.submit_button = T("Save")
# Optional class for Submit buttons
#s3.crud.submit_style = "submit-button"
s3.crud.confirm_delete = T("Do you really want to delete these records?")
s3.crud.archive_not_delete = settings.get_security_archive_not_delete()
s3.crud.navigate_away_confirm = settings.get_ui_navigate_away_confirm()
#s3.navigate_away_confirm = s3.crud.navigate_away_confirm
# Web2py Crud
# Breaks refresh of List after Create: http://groups.google.com/group/web2py/browse_thread/thread/d5083ed08c685e34
#crud.settings.keepvalues = True
crud.messages.submit_button = s3.crud.submit_button
crud.settings.formstyle = s3.crud.formstyle
##################
# XML/JSON Formats
##################
s3mgr.crud = s3base.S3CRUD
s3mgr.search = s3base.S3Search
# Content Type Headers, default is application/xml for XML formats
# and text/x-json for JSON formats, other content types must be
# specified here:
s3mgr.content_type = Storage(
tc = "application/atom+xml", # TableCast feeds
rss = "application/rss+xml", # RSS
georss = "application/rss+xml", # GeoRSS
kml = "application/vnd.google-earth.kml+xml", # KML
)
# JSON Formats
s3mgr.json_formats = ["geojson", "s3json"]
# CSV Formats
s3mgr.csv_formats = ["hrf", "s3csv"]
s3mgr.ROWSPERPAGE = 20
#######
# Menus
#######
# Import menus and layouts
from eden.layouts import *
import eden.menus as default_menus
S3MainMenu = default_menus.S3MainMenu
S3OptionsMenu = default_menus.S3OptionsMenu
current.menu = Storage(options=None, override={})
if auth.permission.format in ("html"):
menus = "applications.%s.private.templates.%s.menus" % \
(appname, settings.get_theme())
try:
exec("import %s as deployment_menus" % menus)
except ImportError:
pass
else:
if "S3MainMenu" in deployment_menus.__dict__:
S3MainMenu = deployment_menus.S3MainMenu
if "S3OptionsMenu" in deployment_menus.__dict__:
S3OptionsMenu = deployment_menus.S3OptionsMenu
main = S3MainMenu.menu()
else:
main = None
menu = current.menu
menu["main"] = main
# Override controller menus, @todo: replace by current.menu.override
s3_menu_dict = {}
##########
# Messages
##########
from gluon.storage import Messages
s3.messages = Messages(T)
system_name = settings.get_system_name_short()
s3.messages.confirmation_email_subject = "%s %s" % (system_name,
T("access granted"))
s3.messages.confirmation_email = "%s %s %s %s. %s." % (T("Welcome to the"),
system_name,
T("Portal at"),
s3.base_url,
T("Thanks for your assistance"))
# Valid Extensions for Image Upload fields
IMAGE_EXTENSIONS = ["png", "PNG", "jpg", "JPG", "jpeg", "JPEG", "gif", "GIF", "tif", "TIF", "tiff", "TIFF", "bmp", "BMP", "raw", "RAW"]
s3.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS
# -----------------------------------------------------------------------------
# List of Nations (ISO-3166-1 Country Codes)
# @ToDo: Pull this list from the list of L0s in the database
# - or the same list as that is pre-populated from
# - don't want to have to maintain 2 sets of lists
# @ToDo: Add Telephone codes (need to convert to Storage())
#
s3_list_of_nations = {
"AF": "Afghanistan",
"AX": "Åland Islands",
"AL": "Albania",
"DZ": "Algeria",
"AS": "American Samoa",
"AD": "Andorra",
"AO": "Angola",
"AI": "Anguilla",
"AQ": "Antarctica",
"AG": "Antigua and Barbuda",
"AR": "Argentina",
"AM": "Armenia",
"AW": "Aruba",
"AU": "Australia",
"AT": "Austria",
"AZ": "Azerbaijan",
"BS": "Bahamas",
"BH": "Bahrain",
"BD": "Bangladesh",
"BB": "Barbados",
"BY": "Belarus",
"BE": "Belgium",
"BZ": "Belize",
"BJ": "Benin",
"BM": "Bermuda",
"BT": "Bhutan",
"BO": "Bolivia, Plurinational State of",
"BA": "Bosnia and Herzegovina",
"BW": "Botswana",
"BV": "Bouvet Island",
"BR": "Brazil",
"IO": "British Indian Ocean Territory",
"BN": "Brunei Darussalam",
"BG": "Bulgaria",
"BF": "Burkina Faso",
"BI": "Burundi",
"KH": "Cambodia",
"CM": "Cameroon",
"CA": "Canada",
"CV": "Cape Verde",
"KY": "Cayman Islands",
"CF": "Central African Republic",
"TD": "Chad",
"CL": "Chile",
"CN": "China",
"CX": "Christmas Island",
"CC": "Cocos (Keeling) Islands",
"CO": "Colombia",
"KM": "Comoros",
"CG": "Congo",
"CD": "Congo, The Democratic Republic of the",
"CK": "Cook Islands",
"CR": "Costa Rica",
"CI": "Côte d'Ivoire",
"HR": "Croatia",
"CU": "Cuba",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DK": "Denmark",
"DJ": "Djibouti",
"DM": "Dominica",
"DO": "Dominican Republic",
"EC": "Ecuador",
"EG": "Egypt",
"SV": "El Salvador",
"GQ": "Equatorial Guinea",
"ER": "Eritrea",
"EE": "Estonia",
"ET": "Ethiopia",
"FK": "Falkland Islands (Malvinas)",
"FO": "Faroe Islands",
"FJ": "Fiji",
"FI": "Finland",
"FR": "France",
"GF": "French Guiana",
"PF": "French Polynesia",
"TF": "French Southern Territories",
"GA": "Gabon",
"GM": "Gambia",
"GE": "Georgia",
"DE": "Germany",
"GH": "Ghana",
"GI": "Gibraltar",
"GR": "Greece",
"GL": "Greenland",
"GD": "Grenada",
"GP": "Guadeloupe",
"GU": "Guam",
"GT": "Guatemala",
"GG": "Guernsey",
"GN": "Guinea",
"GW": "Guinea-Bissau",
"GY": "Guyana",
"HT": "Haiti",
"HM": "Heard Island and McDonald Islands",
"VA": "Holy See (Vatican City State)",
"HN": "Honduras",
"HK": "Hong Kong",
"HU": "Hungary",
"IS": "Iceland",
"IN": "India",
"ID": "Indonesia",
"IR": "Iran, Islamic Republic of",
"IQ": "Iraq",
"IE": "Ireland",
"IM": "Isle of man",
"IL": "Israel",
"IT": "Italy",
"JM": "Jamaica",
"JP": "Japan",
"JE": "Jersey",
"JO": "Jordan",
"KZ": "Kazakhstan",
"KE": "Kenya",
"KI": "Kiribati",
"KP": "Korea, Democratic People's Republic of",
"KR": "Korea, Republic of",
"KW": "Kuwait",
"KG": "Kyrgyzstan",
"LA": "Lao People's Democratic Republic",
"LV": "Latvia",
"LB": "Lebanon",
"LS": "Lesotho",
"LR": "Liberia",
"LY": "Libyan Arab Jamahiriya",
"LI": "Liechtenstein",
"LT": "Lithuania",
"LU": "Luxembourg",
"MO": "Macao",
"MK": "Macedonia, the former Yugoslav Republic of",
"MG": "Madagascar",
"MW": "Malawi",
"MY": "Malaysia",
"MV": "Maldives",
"ML": "Mali",
"MT": "Malta",
"MH": "Marshall Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MU": "Mauritius",
"YT": "Mayotte",
"MX": "Mexico",
"FM": "Micronesia, Federated States of",
"MD": "Moldova, Republic of",
"MC": "Monaco",
"MN": "Mongolia",
"ME": "Montenegro",
"MS": "Montserrat",
"MA": "Morocco",
"MZ": "Mozambique",
"MM": "Myanmar",
"NA": "Namibia",
"NR": "Nauru",
"NP": "Nepal",
"NL": "Netherlands",
"AN": "Netherlands Antilles",
"NC": "New Caledonia",
"NZ": "New Zealand",
"NI": "Nicaragua",
"NE": "Niger",
"NG": "Nigeria",
"NU": "Niue",
"NF": "Norfolk Island",
"MP": "Northern Mariana Islands",
"NO": "Norway",
"OM": "Oman",
"PK": "Pakistan",
"PW": "Palau",
"PS": "Palestinian Territory, occupied",
"PA": "Panama",
"PG": "Papua New Guinea",
"PY": "Paraguay",
"PE": "Peru",
"PH": "Philippines",
"PN": "Pitcairn",
"PL": "Poland",
"PT": "Portugal",
"PR": "Puerto Rico",
"QA": "Qatar",
"RE": "Réunion",
"RO": "Romania",
"RU": "Russian Federation",
"RW": "Rwanda",
"BL": "Saint Barthélemy",
"SH": "Saint Helena, Ascension and Tristan da Cunha",
"KN": "Saint Kitts and Nevis",
"LC": "Saint Lucia",
"MF": "Saint Martin",
"PM": "Saint Pierre and Miquelon",
"VC": "Saint Vincent and the Grenadines",
"WS": "Samoa",
"SM": "San Marino",
"ST": "Sao Tome and Principe",
"SA": "Saudi Arabia",
"SN": "Senegal",
"RS": "Serbia",
"SC": "Seychelles",
"SL": "Sierra Leone",
"SG": "Singapore",
"SK": "Slovakia",
"SI": "Slovenia",
"SB": "Solomon Islands",
"SO": "Somalia",
"ZA": "South Africa",
"GS": "South Georgia and the South Sandwich Islands",
"ES": "Spain",
"LK": "Sri Lanka",
"SD": "Sudan",
"SR": "Suriname",
"SJ": "Svalbard and Jan Mayen",
"SZ": "Swaziland",
"SE": "Sweden",
"CH": "Switzerland",
"SY": "Syrian Arab Republic",
"TW": "Taiwan, Province of China",
"TJ": "Tajikistan",
"TZ": "Tanzania, United Republic of",
"TH": "Thailand",
"TL": "Timor-Leste",
"TG": "Togo",
"TK": "Tokelau",
"TO": "Tonga",
"TT": "Trinidad and Tobago",
"TN": "Tunisia",
"TR": "Turkey",
"TM": "Turkmenistan",
"TC": "Turks and Caicos Islands",
"TV": "Tuvalu",
"UG": "Uganda",
"UA": "Ukraine",
"AE": "United Arab Emirates",
"GB": "United Kingdom",
"US": "United States",
"UM": "United States Minor Outlying Islands",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VU": "Vanuatu",
"VE": "Venezuela, Bolivarian Republic of",
"VN": "Vietnam",
"VG": "Virgin Islands, british",
"VI": "Virgin Islands, U.S.",
"WF": "Wallis and Futuna",
"EH": "Western Sahara",
"YE": "Yemen",
"ZM": "Zambia",
"ZW": "Zimbabwe",
"XX": "Unknown"
}
# END =========================================================================
| 33.794934 | 233 | 0.605725 |
793fe76a5263318e3964bd750cd5999b0e40bb93 | 34,556 | py | Python | src/datasets/download/streaming_download_manager.py | rpatil524/datasets | 6382607ee210d7cc3075e3006cbba1ad437858f0 | [
"Apache-2.0"
] | null | null | null | src/datasets/download/streaming_download_manager.py | rpatil524/datasets | 6382607ee210d7cc3075e3006cbba1ad437858f0 | [
"Apache-2.0"
] | null | null | null | src/datasets/download/streaming_download_manager.py | rpatil524/datasets | 6382607ee210d7cc3075e3006cbba1ad437858f0 | [
"Apache-2.0"
] | null | null | null | import glob
import io
import os
import posixpath
import re
import tarfile
import time
from asyncio import TimeoutError
from io import BytesIO
from itertools import chain
from pathlib import Path, PurePath, PurePosixPath
from typing import Callable, Generator, Iterable, List, Optional, Tuple, Union
from xml.etree import ElementTree as ET
import fsspec
from aiohttp.client_exceptions import ClientError
from .. import config
from ..filesystems import COMPRESSION_FILESYSTEMS
from ..utils.file_utils import (
get_authentication_headers_for_url,
http_head,
is_local_path,
is_relative_path,
is_remote_url,
url_or_path_join,
)
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .download_config import DownloadConfig
logger = get_logger(__name__)
BASE_KNOWN_EXTENSIONS = [
"txt",
"csv",
"json",
"jsonl",
"tsv",
"conll",
"conllu",
"orig",
"parquet",
"pkl",
"pickle",
"rel",
"xml",
]
COMPRESSION_EXTENSION_TO_PROTOCOL = {
# single file compression
**{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS},
# archive compression
"zip": "zip",
"tar": "tar",
"tgz": "tar",
}
SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}
SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/")
MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
bytes.fromhex("504B0304"): "zip",
bytes.fromhex("504B0506"): "zip", # empty archive
bytes.fromhex("504B0708"): "zip", # spanned archive
bytes.fromhex("425A68"): "bz2",
bytes.fromhex("1F8B"): "gzip",
bytes.fromhex("FD377A585A00"): "xz",
bytes.fromhex("04224D18"): "lz4",
bytes.fromhex("28B52FFD"): "zstd",
}
MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
b"Rar!": "rar",
}
MAGIC_NUMBER_MAX_LENGTH = max(
len(magic_number)
for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
)
class NonStreamableDatasetError(Exception):
pass
def xjoin(a, *p):
"""
This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xjoin function allows you to apply the join on the first path of the chain.
Example::
>>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
zip://folder1/file.txt::https://host.com/archive.zip
"""
a, *b = str(a).split("::")
if is_local_path(a):
a = Path(a, *p).as_posix()
else:
a = posixpath.join(a, *p)
return "::".join([a] + b)
def xdirname(a):
"""
This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xdirname function allows you to apply the dirname on the first path of the chain.
Example::
>>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip")
zip://folder1::https://host.com/archive.zip
"""
a, *b = str(a).split("::")
if is_local_path(a):
a = os.path.dirname(Path(a).as_posix())
else:
a = posixpath.dirname(a)
# if we end up at the root of the protocol, we get for example a = 'http:'
# so we have to fix it by adding the '//' that was removed:
if a.endswith(":"):
a += "//"
return "::".join([a] + b)
def xbasename(a):
"""
This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xbasename function allows you to apply the basename on the first path of the chain.
Example::
>>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip")
file.txt
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.basename(Path(a).as_posix())
else:
return posixpath.basename(a)
def xsplit(a):
"""
This function extends os.path.split to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xsplit function allows you to apply the xsplit on the first path of the chain.
Example::
>>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip")
('zip://folder1::https://host.com/archive.zip', 'file.txt')
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.split(Path(a).as_posix())
else:
a, tail = posixpath.split(a)
return "::".join([a + "//" if a.endswith(":") else a] + b), tail
def xsplitext(a):
"""
This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xsplitext function allows you to apply the splitext on the first path of the chain.
Example::
>>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip")
('zip://folder1/file::https://host.com/archive.zip', '.txt')
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.splitext(Path(a).as_posix())
else:
a, ext = posixpath.splitext(a)
return "::".join([a] + b), ext
def xisfile(path, use_auth_token: Optional[Union[str, bool]] = None) -> bool:
"""Extend `os.path.isfile` function to support remote files.
Args:
path (:obj:`str`): URL path.
Returns:
:obj:`bool`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.isfile(path)
else:
if rest_hops and fsspec.get_fs_token_paths(rest_hops[0])[0].protocol == "https":
storage_options = {
"https": {"headers": get_authentication_headers_for_url(rest_hops[0], use_auth_token=use_auth_token)}
}
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
return fs.isfile(main_hop)
def xgetsize(path, use_auth_token: Optional[Union[str, bool]] = None) -> int:
"""Extend `os.path.getsize` function to support remote files.
Args:
path (:obj:`str`): URL path.
Returns:
:obj:`int`, optional
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.getsize(path)
else:
if rest_hops and fsspec.get_fs_token_paths(rest_hops[0])[0].protocol == "https":
storage_options = {
"https": {"headers": get_authentication_headers_for_url(rest_hops[0], use_auth_token=use_auth_token)}
}
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
size = fs.size(main_hop)
if size is None:
# use xopen instead of fs.open to make data fetching more robust
with xopen(path, use_auth_token=use_auth_token) as f:
size = len(f.read())
return size
def xisdir(path, use_auth_token: Optional[Union[str, bool]] = None) -> bool:
"""Extend `os.path.isdir` function to support remote files.
Args:
path (:obj:`str`): URL path.
Returns:
:obj:`bool`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.isdir(path)
else:
if rest_hops and fsspec.get_fs_token_paths(rest_hops[0])[0].protocol == "https":
storage_options = {
"https": {"headers": get_authentication_headers_for_url(rest_hops[0], use_auth_token=use_auth_token)}
}
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
return fs.isdir(main_hop)
def xrelpath(path, start=None):
"""Extend `os.path.relpath` function to support remote files.
Args:
path (:obj:`str`): URL path.
start (:obj:`str`): Start URL directory path.
Returns:
:obj:`str`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop)
else:
return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop)
def _as_posix(path: Path):
"""Extend :meth:`pathlib.PurePath.as_posix` to fix missing slashes after protocol.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
Returns:
obj:`str`
"""
path_as_posix = path.as_posix()
path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix)
path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol
return path_as_posix
def xpathjoin(a: Path, *p: Tuple[str, ...]):
"""Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`.
Args:
a (:obj:`~pathlib.Path`): Calling Path instance.
*p (:obj:`tuple` of :obj:`str`): Other path components.
Returns:
obj:`str`
"""
return type(a)(xjoin(_as_posix(a), *p))
def _add_retries_to_file_obj_read_method(file_obj):
read = file_obj.read
max_retries = config.STREAMING_READ_MAX_RETRIES
def read_with_retries(*args, **kwargs):
for retry in range(1, max_retries + 1):
try:
out = read(*args, **kwargs)
break
except (ClientError, TimeoutError):
logger.warning(
f"Got disconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
)
time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
else:
raise ConnectionError("Server Disconnected")
return out
file_obj.read = read_with_retries
def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
"""read the magic number from a file-like object and return the compression protocol"""
magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
f.seek(0)
for i in range(MAGIC_NUMBER_MAX_LENGTH):
compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None: # TODO(QL): raise an error for .tar.gz files as in _get_extraction_protocol
return compression
compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
def _get_extraction_protocol(urlpath: str, use_auth_token: Optional[Union[str, bool]] = None) -> Optional[str]:
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
path = urlpath.split("::")[0]
# Get extension: https://foo.bar/train.json.gz -> gz
extension = path.split(".")[-1]
# Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
# Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
for symb in "?-_":
extension = extension.split(symb)[0]
if extension in BASE_KNOWN_EXTENSIONS:
return None
elif path.endswith(".tar.gz") or path.endswith(".tgz"):
raise NotImplementedError(
f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. Please use `dl_manager.iter_archive` instead."
)
elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL:
return COMPRESSION_EXTENSION_TO_PROTOCOL[extension]
if is_remote_url(urlpath):
# get headers and cookies for authentication on the HF Hub and for Google Drive
urlpath, kwargs = _prepare_http_url_kwargs(urlpath, use_auth_token=use_auth_token)
else:
urlpath, kwargs = urlpath, {}
with fsspec.open(urlpath, **kwargs) as f:
return _get_extraction_protocol_with_magic_number(f)
def _prepare_http_url_kwargs(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> Tuple[str, dict]:
"""
Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head
In particular it resolves google drive URLs and it adds the authentication headers for the Hugging Face Hub.
"""
kwargs = {"headers": get_authentication_headers_for_url(url, use_auth_token=use_auth_token)}
if "drive.google.com" in url:
response = http_head(url)
cookies = None
for k, v in response.cookies.items():
if k.startswith("download_warning"):
url += "&confirm=" + v
cookies = response.cookies
kwargs["cookies"] = cookies
# Fix Google Drive URL to avoid Virus scan warning
if "drive.google.com" in url and "confirm=" not in url:
url += "&confirm=t"
if url.startswith("https://raw.githubusercontent.com/"):
# Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389
kwargs["block_size"] = 0
return url, kwargs
def xopen(file: str, mode="r", *args, use_auth_token: Optional[Union[str, bool]] = None, **kwargs):
"""
This function extends the builtin `open` function to support remote files using fsspec.
It also has a retry mechanism in case connection fails.
The args and kwargs are passed to fsspec.open, except `use_auth_token` which is used for queries to private repos on huggingface.co
"""
# required for `xopen(str(Path(...)))` to work
file = _as_posix(PurePath(file))
main_hop, *rest_hops = file.split("::")
# add headers and cookies for authentication on the HF Hub and for Google Drive
if not rest_hops and (main_hop.startswith("http://") or main_hop.startswith("https://")):
file, new_kwargs = _prepare_http_url_kwargs(file, use_auth_token=use_auth_token)
elif rest_hops and (rest_hops[0].startswith("http://") or rest_hops[0].startswith("https://")):
url = rest_hops[0]
url, http_kwargs = _prepare_http_url_kwargs(url, use_auth_token=use_auth_token)
new_kwargs = {"https": http_kwargs}
file = "::".join([main_hop, url, *rest_hops[1:]])
else:
new_kwargs = {}
kwargs = {**kwargs, **new_kwargs}
try:
file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
except ValueError as e:
if str(e) == "Cannot seek streaming HTTP file":
raise NonStreamableDatasetError(
"Streaming is not possible for this dataset because data host server doesn't support HTTP range "
"requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)"
) from e
else:
raise
_add_retries_to_file_obj_read_method(file_obj)
return file_obj
def xlistdir(path: str, use_auth_token: Optional[Union[str, bool]] = None) -> List[str]:
"""Extend `os.listdir` function to support remote files.
Args:
path (:obj:`str`): URL path.
Returns:
:obj:`list` of :obj:`str`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.listdir(path)
else:
# globbing inside a zip in a private repo requires authentication
if rest_hops and fsspec.get_fs_token_paths(rest_hops[0])[0].protocol == "https":
storage_options = {
"https": {"headers": get_authentication_headers_for_url(rest_hops[0], use_auth_token=use_auth_token)}
}
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
objects = fs.listdir(main_hop.split("://")[1])
return [os.path.basename(obj["name"]) for obj in objects]
def xpathopen(path: Path, *args, **kwargs):
"""Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
**kwargs: Keyword arguments passed to :func:`fsspec.open`.
Returns:
:obj:`io.FileIO`: File-like object.
"""
return xopen(_as_posix(path), *args, **kwargs)
def xglob(urlpath, *, recursive=False, use_auth_token: Optional[Union[str, bool]] = None):
"""Extend `glob.glob` function to support remote files.
Args:
urlpath (:obj:`str`): URL path with shell-style wildcard patterns.
recursive (:obj:`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more
directories or subdirectories.
Returns:
:obj:`list` of :obj:`str`
"""
main_hop, *rest_hops = str(urlpath).split("::")
if is_local_path(main_hop):
return glob.glob(main_hop, recursive=recursive)
else:
# globbing inside a zip in a private repo requires authentication
if rest_hops and (rest_hops[0].startswith("http://") or rest_hops[0].startswith("https://")):
url = rest_hops[0]
url, kwargs = _prepare_http_url_kwargs(url, use_auth_token=use_auth_token)
storage_options = {"https": kwargs}
urlpath = "::".join([main_hop, url, *rest_hops[1:]])
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
# - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
# so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
# - If there is "**" in the pattern, `fs.glob` must be called anyway.
globbed_paths = fs.glob(main_hop)
return ["::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths]
def xpathglob(path, pattern, use_auth_token: Optional[Union[str, bool]] = None):
"""Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
pattern (:obj:`str`): Pattern that resulting paths must match.
Yields:
:obj:`~pathlib.Path`
"""
posix_path = _as_posix(path)
main_hop, *rest_hops = posix_path.split("::")
if is_local_path(main_hop):
yield from Path(main_hop).glob(pattern)
else:
# globbing inside a zip in a private repo requires authentication
if rest_hops and (rest_hops[0].startswith("http://") or rest_hops[0].startswith("https://")):
url = rest_hops[0]
url, kwargs = _prepare_http_url_kwargs(url, use_auth_token=use_auth_token)
storage_options = {"https": kwargs}
posix_path = "::".join([main_hop, url, *rest_hops[1:]])
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options)
# - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
# so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
# - If there is "**" in the pattern, `fs.glob` must be called anyway.
globbed_paths = fs.glob(xjoin(main_hop, pattern))
for globbed_path in globbed_paths:
yield type(path)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops))
def xpathrglob(path, pattern, **kwargs):
"""Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
pattern (:obj:`str`): Pattern that resulting paths must match.
Yields:
:obj:`~pathlib.Path`
"""
return xpathglob(path, "**/" + pattern, **kwargs)
def xpathparent(path: Path):
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
Returns:
:obj:`~pathlib.Path`
"""
return type(path)(xdirname(_as_posix(path)))
def xpathname(path: Path):
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
Returns:
:obj:`str`
"""
return PurePosixPath(_as_posix(path).split("::")[0]).name
def xpathstem(path: Path):
"""Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
Returns:
:obj:`str`
"""
return PurePosixPath(_as_posix(path).split("::")[0]).stem
def xpathsuffix(path: Path):
"""Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
path (:obj:`~pathlib.Path`): Calling Path instance.
Returns:
:obj:`str`
"""
return PurePosixPath(_as_posix(path).split("::")[0]).suffix
def xwalk(urlpath, use_auth_token: Optional[Union[str, bool]] = None):
"""Extend `os.walk` function to support remote files.
Args:
urlpath (:obj:`str`): URL root path.
use_auth_token (:obj:`bool` or :obj:`str`, optional): Whether to use token or token to authenticate on the
Hugging Face Hub for private remote files.
Yields:
:obj:`tuple`: 3-tuple (dirpath, dirnames, filenames).
"""
main_hop, *rest_hops = str(urlpath).split("::")
if is_local_path(main_hop):
return os.walk(main_hop)
else:
# walking inside a zip in a private repo requires authentication
if rest_hops and (rest_hops[0].startswith("http://") or rest_hops[0].startswith("https://")):
url = rest_hops[0]
url, kwargs = _prepare_http_url_kwargs(url, use_auth_token=use_auth_token)
storage_options = {"https": kwargs}
urlpath = "::".join([main_hop, url, *rest_hops[1:]])
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
for dirpath, dirnames, filenames in fs.walk(main_hop):
yield "::".join([f"{fs.protocol}://{dirpath}"] + rest_hops), dirnames, filenames
def xpandas_read_csv(filepath_or_buffer, use_auth_token: Optional[Union[str, bool]] = None, **kwargs):
import pandas as pd
if hasattr(filepath_or_buffer, "read"):
return pd.read_csv(filepath_or_buffer, **kwargs)
else:
return pd.read_csv(xopen(filepath_or_buffer, use_auth_token=use_auth_token), **kwargs)
def xpandas_read_excel(filepath_or_buffer, **kwargs):
import pandas as pd
return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs)
def xsio_loadmat(filepath_or_buffer, use_auth_token: Optional[Union[str, bool]] = None, **kwargs):
import scipy.io as sio
if hasattr(filepath_or_buffer, "read"):
return sio.loadmat(filepath_or_buffer, **kwargs)
else:
return sio.loadmat(xopen(filepath_or_buffer, "rb", use_auth_token=use_auth_token), **kwargs)
def xet_parse(source, parser=None, use_auth_token: Optional[Union[str, bool]] = None):
"""Extend `xml.etree.ElementTree.parse` function to support remote files.
Args:
source: File path or file object.
parser (optional, default `XMLParser`): Parser instance.
use_auth_token (:obj:`bool` or :obj:`str`, optional): Whether to use token or token to authenticate on the
Hugging Face Hub for private remote files.
Returns:
:obj:`xml.etree.ElementTree.Element`: Root element of the given source document.
"""
if hasattr(source, "read"):
return ET.parse(source, parser=parser)
else:
with xopen(source, "rb", use_auth_token=use_auth_token) as f:
return ET.parse(f, parser=parser)
class _IterableFromGenerator(Iterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator: Callable, *args, **kwargs):
self.generator = generator
self.args = args
self.kwargs = kwargs
def __iter__(self):
yield from self.generator(*self.args, **self.kwargs)
class ArchiveIterable(_IterableFromGenerator):
"""An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
@classmethod
def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
stream = tarfile.open(fileobj=f, mode="r|*")
for tarinfo in stream:
file_path = tarinfo.name
if not tarinfo.isreg():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith(".") or os.path.basename(file_path).startswith("__"):
# skipping hidden files
continue
file_obj = stream.extractfile(tarinfo)
yield file_path, file_obj
stream.members = []
del stream
@classmethod
def _iter_from_urlpath(
cls, urlpath: str, use_auth_token: Optional[Union[str, bool]] = None
) -> Generator[Tuple, None, None]:
with xopen(urlpath, "rb", use_auth_token=use_auth_token) as f:
yield from cls._iter_from_fileobj(f)
@classmethod
def from_buf(cls, fileobj) -> "ArchiveIterable":
return cls(cls._iter_from_fileobj, fileobj)
@classmethod
def from_urlpath(cls, urlpath_or_buf, use_auth_token: Optional[Union[str, bool]] = None) -> "ArchiveIterable":
return cls(cls._iter_from_urlpath, urlpath_or_buf, use_auth_token)
class FilesIterable(_IterableFromGenerator):
"""An iterable of paths from a list of directories or files"""
@classmethod
def _iter_from_urlpaths(
cls, urlpaths: Union[str, List[str]], use_auth_token: Optional[Union[str, bool]] = None
) -> Generator[str, None, None]:
if not isinstance(urlpaths, list):
urlpaths = [urlpaths]
for urlpath in urlpaths:
if xisfile(urlpath, use_auth_token=use_auth_token):
yield urlpath
else:
for dirpath, _, filenames in xwalk(urlpath, use_auth_token=use_auth_token):
for filename in filenames:
yield xjoin(dirpath, filename)
@classmethod
def from_urlpaths(cls, urlpaths, use_auth_token: Optional[Union[str, bool]] = None) -> "FilesIterable":
return cls(cls._iter_from_urlpaths, urlpaths, use_auth_token)
class StreamingDownloadManager:
"""
Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
Contrary to the regular DownloadManager, the `download` and `extract` methods don't actually download nor extract
data, but they rather return the path or url that could be opened using the `xopen` function which extends the
builtin `open` function to stream data from remote files.
"""
is_streaming = True
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
self.download_config = download_config or DownloadConfig()
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
"""Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url is a `str`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
return url_or_urls
def _download(self, urlpath: str) -> str:
urlpath = str(urlpath)
if is_relative_path(urlpath):
# append the relative path to the base_path
urlpath = url_or_path_join(self._base_path, urlpath)
return urlpath
def extract(self, path_or_paths):
"""Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path is a `str`.
num_proc: Use multi-processing if `num_proc` > 1 and the length of
`path_or_paths` is larger than `num_proc`
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
urlpaths = map_nested(self._extract, path_or_paths, map_tuple=True)
return urlpaths
def _extract(self, urlpath: str) -> str:
urlpath = str(urlpath)
protocol = _get_extraction_protocol(urlpath, use_auth_token=self.download_config.use_auth_token)
if protocol is None:
# no extraction
return urlpath
elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
# there is one single file which is the uncompressed file
inner_file = os.path.basename(urlpath.split("::")[0])
inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
# check for tar.gz, tar.bz2 etc.
if inner_file.endswith(".tar"):
return f"tar://::{protocol}://{inner_file}::{urlpath}"
else:
return f"{protocol}://{inner_file}::{urlpath}"
else:
return f"{protocol}://::{urlpath}"
def download_and_extract(self, url_or_urls):
"""Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url is a `str`.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
return self.extract(self.download(url_or_urls))
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
"""Iterate over files within an archive.
Args:
urlpath_or_buf (:obj:`str` or :obj:`io.BufferedReader`): Archive path or archive binary file object.
Yields:
:obj:`tuple`[:obj:`str`, :obj:`io.BufferedReader`]: 2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(urlpath_or_buf, "read"):
return ArchiveIterable.from_buf(urlpath_or_buf)
else:
return ArchiveIterable.from_urlpath(urlpath_or_buf, use_auth_token=self.download_config.use_auth_token)
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
"""Iterate over files.
Args:
urlpaths (:obj:`str` or :obj:`list` of :obj:`str`): Root paths.
Yields:
str: File URL path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(urlpaths, use_auth_token=self.download_config.use_auth_token)
| 37.601741 | 152 | 0.646545 |
793fe8daae05d403567e1534f4bbf7f432800eb9 | 1,464 | py | Python | main.py | cto-ai/query-mysql | 830e86f2b20d08842cae6285e4d77e446838a0e3 | [
"MIT"
] | 2 | 2020-05-25T18:47:45.000Z | 2020-05-25T22:23:00.000Z | main.py | cto-ai/query-mysql | 830e86f2b20d08842cae6285e4d77e446838a0e3 | [
"MIT"
] | null | null | null | main.py | cto-ai/query-mysql | 830e86f2b20d08842cae6285e4d77e446838a0e3 | [
"MIT"
] | 1 | 2020-03-24T21:11:14.000Z | 2020-03-24T21:11:14.000Z | import mysql.connector
from cto_ai import ux, prompt
import src.login as login
from src.logos import logo_print
from src.op_cursor import op_cursor
def main():
"""
Prompts user for a MySQL database and allows them to print any data they query for
"""
logo_print()
db_creds = login.Db_credentials()
# Prompts
try:
db_creds.get_credentials()
except KeyboardInterrupt:
print("💤 Exiting Op")
return
# Connection
try:
connection = make_connection(db_creds)
except:
ux.print("❗ Could not connect to " + db_creds.host)
return
ux.print("Successfully connected to " + db_creds.host)
# Select
try:
op_cursor(connection)
except KeyboardInterrupt:
print("💤 Exiting Op")
def make_connection(creds):
"""
Tries connecting to a given MySQL database. Factors in potential Slack formating on the host url
"""
try:
return mysql.connector.connect(
host=creds.host,
user=creds.username,
passwd=creds.password,
database=creds.db,
port=creds.port
)
except:
return mysql.connector.connect(
host=creds.host.replace("http://", ""),
user=creds.username,
passwd=creds.password,
database=creds.db,
port=creds.port
)
if __name__ == "__main__":
main()
| 22.875 | 101 | 0.590847 |
793fea85413be1ae1b3eecee0375c5623e7a141c | 395 | py | Python | nicos_jcns/jcnsse/setups/notifiers.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_jcns/jcnsse/setups/notifiers.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_jcns/jcnsse/setups/notifiers.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'Email and SMS notifiers'
group = 'lowlevel'
devices = dict(
emailer = device('nicos.devices.notifiers.Mailer',
mailserver = 'mailhost.frm2.tum.de',
sender = '[email protected]',
copies = [],
subject = 'SE',
),
smser = device('nicos.devices.notifiers.SMSer',
server = 'triton.admin.frm2',
receivers = [],
),
)
| 23.235294 | 54 | 0.577215 |
793fecbec44626a24adb3f3c80cc7fc6361a6dc5 | 431 | py | Python | test/conftest.py | eggplants/rdflib | 1fdf40cd957d68753348d1430cbb37e80dbce999 | [
"BSD-3-Clause"
] | 1 | 2022-03-02T10:25:44.000Z | 2022-03-02T10:25:44.000Z | test/conftest.py | eggplants/rdflib | 1fdf40cd957d68753348d1430cbb37e80dbce999 | [
"BSD-3-Clause"
] | 6 | 2021-11-01T20:13:25.000Z | 2022-03-24T02:01:58.000Z | test/conftest.py | ashleysommer/rdflib | 03c5e79298b35f29087ff82002becaaba0e3a656 | [
"BSD-3-Clause"
] | 1 | 2022-03-15T22:05:24.000Z | 2022-03-15T22:05:24.000Z | import pytest
from rdflib import Graph
from .data import CONSISTENT_DATA_DIR
from .earl import EarlReporter
pytest_plugins = [EarlReporter.__module__]
# This is here so that asserts from these modules are formatted for human
# readibility.
pytest.register_assert_rewrite("test.testutils")
@pytest.fixture(scope="session")
def rdfs_graph() -> Graph:
return Graph().parse(CONSISTENT_DATA_DIR / "rdfs.ttl", format="turtle")
| 23.944444 | 75 | 0.781903 |
793fed3cc8f8beb073ae88ae85b933cf5ef6c7af | 2,960 | py | Python | tests/device/conftest.py | maxthomas/yubikey-manager | 79bf111093401dbbe18ef7627d45e8c472ba17dd | [
"BSD-2-Clause"
] | null | null | null | tests/device/conftest.py | maxthomas/yubikey-manager | 79bf111093401dbbe18ef7627d45e8c472ba17dd | [
"BSD-2-Clause"
] | null | null | null | tests/device/conftest.py | maxthomas/yubikey-manager | 79bf111093401dbbe18ef7627d45e8c472ba17dd | [
"BSD-2-Clause"
] | null | null | null | from ykman.device import connect_to_device, list_all_devices, read_info
from ykman.pcsc import list_devices
from yubikit.core import TRANSPORT
from yubikit.core.otp import OtpConnection
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import SmartCardConnection
from yubikit.management import USB_INTERFACE
from functools import partial
from . import condition
import pytest
import time
import os
@pytest.fixture(scope="session")
def _device(pytestconfig):
serial = pytestconfig.getoption("device")
no_serial = pytestconfig.getoption("no_serial")
if not serial:
if no_serial:
serial = None
else:
pytest.skip("No serial specified for device tests")
reader = pytestconfig.getoption("reader")
if reader:
readers = list_devices(reader)
if len(readers) != 1:
pytest.exit("No/Multiple readers matched")
dev = readers[0]
with dev.open_connection(SmartCardConnection) as conn:
info = read_info(None, conn)
else:
devices = list_all_devices()
if len(devices) != 1:
pytest.exit("Device tests require a single YubiKey")
dev, info = devices[0]
if info.serial != serial:
pytest.exit("Device serial does not match: %d != %d" % (serial, info.serial))
return dev, info
@pytest.fixture(scope="session")
def device(_device):
return _device[0]
@pytest.fixture(scope="session")
def info(_device):
return _device[1]
@pytest.fixture(scope="session")
def version(info):
return info.version
@pytest.fixture(scope="session")
def transport(device):
return device.transport
@pytest.fixture(scope="session")
def pid(device):
return device.pid
@pytest.fixture(scope="session")
def await_reboot(transport):
delay = float(os.environ.get("REBOOT_TIME", "2.0"))
return partial(time.sleep, delay) if transport == TRANSPORT.USB else lambda: None
connection_scope = os.environ.get("CONNECTION_SCOPE", "function")
@pytest.fixture(scope=connection_scope)
@condition.transport(TRANSPORT.USB)
def otp_connection(device, info):
if USB_INTERFACE.OTP in device.pid.get_interfaces():
with connect_to_device(info.serial, [OtpConnection])[0] as c:
yield c
@pytest.fixture(scope=connection_scope)
@condition.transport(TRANSPORT.USB)
def fido_connection(device, info):
if USB_INTERFACE.FIDO in device.pid.get_interfaces():
with connect_to_device(info.serial, [FidoConnection])[0] as c:
yield c
@pytest.fixture(scope=connection_scope)
def ccid_connection(device, info):
if device.transport == TRANSPORT.NFC:
with device.open_connection(SmartCardConnection) as c:
yield c
elif USB_INTERFACE.CCID in device.pid.get_interfaces():
with connect_to_device(info.serial, [SmartCardConnection])[0] as c:
yield c
else:
pytest.skip("CCID connection not available")
| 28.461538 | 85 | 0.703041 |
793fed5955ad2d04aa031d1420875c0ab969dca8 | 1,101 | py | Python | markdown_thirdparty.py | FuzzyWuzzie/markdown_thirdparty | 4ba6ab0ee525bd633a35038b4ca36d04c84c8cef | [
"MIT"
] | 2 | 2015-06-01T22:15:38.000Z | 2015-08-24T19:41:35.000Z | markdown_thirdparty.py | hamaluik/markdown_thirdparty | 4ba6ab0ee525bd633a35038b4ca36d04c84c8cef | [
"MIT"
] | 1 | 2015-08-24T19:43:26.000Z | 2015-08-24T19:43:26.000Z | markdown_thirdparty.py | hamaluik/markdown_thirdparty | 4ba6ab0ee525bd633a35038b4ca36d04c84c8cef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Markdown Third Party
============================
This plugin allows you to use various third-party
Markdown extensions to make writing posts in Markdown
easier and better.
"""
from pelican import signals, readers
import os, sys, inspect
class MarkdownThirdParty(readers.MarkdownReader):
def __init__(self, *args, **kwargs):
super(MarkdownThirdParty, self).__init__(*args, **kwargs)
self.extensions = list(self.settings['MD_EXTENSIONS'])
# always make sure we have the 'meta' extension
if 'meta' not in self.extensions:
self.extensions.append('meta')
# add our current folder to the path so Markdown can find the extension
cmdFolder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmdFolder not in sys.path:
sys.path.insert(0, cmdFolder)
def addReader(readers):
# re-route all markdown extensions to be our reader
extensions = ['md', 'markdown', 'mkd', 'mdown']
for ext in extensions:
readers.reader_classes[ext] = MarkdownThirdParty
def register():
signals.readers_init.connect(addReader) | 31.457143 | 106 | 0.725704 |
793fed6c9a826fa48f09488242425e8119e3bb71 | 814 | py | Python | src/MapReduce3/reducer.py | jonaac/Hadoop-MapReduce | d3dc443210298da218e055f4f37280ede7e63a0d | [
"MIT"
] | 1 | 2021-03-11T22:19:12.000Z | 2021-03-11T22:19:12.000Z | src/MapReduce3/reducer.py | jonaac/MapReduce | d3dc443210298da218e055f4f37280ede7e63a0d | [
"MIT"
] | null | null | null | src/MapReduce3/reducer.py | jonaac/MapReduce | d3dc443210298da218e055f4f37280ede7e63a0d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Rami Abou-Nassar
# Jonathan Azpur
# Third Reducer:
# Input from third mapper:
# (word,filename,count dw)
#
# Outputs:
# (word,filename, tfidf)
import sys
import math
currWord = None
currCount = 0
word = None
# input comes from STDIN
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
#split line into different components
wF, wcC = line.split()
#split into respective components
word, fileName = wF.split(',',1 )
wordCount, count = wcC.split(',', 1)
#calculate and print tf-idf(W,D) = tf(W,D) X idf(W)
idf = math.log(float(20) / float(count))
tf = (float(wordCount))
tfidf = (tf * idf)
print('%s\t%f' % (word + ',' + fileName, tfidf)) | 23.257143 | 56 | 0.587224 |
793fed92e28617d52b7e53147347270a3271ad96 | 2,083 | py | Python | RecoEcal/EgammaClusterProducers/python/particleFlowSuperClusteringSequence_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 13 | 2015-11-30T15:49:45.000Z | 2022-02-08T16:11:30.000Z | RecoEcal/EgammaClusterProducers/python/particleFlowSuperClusteringSequence_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 640 | 2015-02-11T18:55:47.000Z | 2022-03-31T14:12:23.000Z | RecoEcal/EgammaClusterProducers/python/particleFlowSuperClusteringSequence_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 51 | 2015-08-11T21:01:40.000Z | 2022-03-30T07:31:34.000Z | import FWCore.ParameterSet.Config as cms
#------------------
#Hybrid clustering:
#------------------
# Producer for Box Particle Flow Super Clusters
from RecoEcal.EgammaClusterProducers.particleFlowSuperClusterECAL_cff import *
# Producer for energy corrections
#from RecoEcal.EgammaClusterProducers.correctedDynamicHybridSuperClusters_cfi import *
# PFECAL super clusters, either hybrid-clustering clone (Box) or mustache.
particleFlowSuperClusteringTask = cms.Task(particleFlowSuperClusterECAL)
particleFlowSuperClusteringSequence = cms.Sequence(particleFlowSuperClusteringTask)
particleFlowSuperClusterHGCal = particleFlowSuperClusterECAL.clone()
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toModify(
particleFlowSuperClusterHGCal,
PFClusters = 'particleFlowClusterHGCal',
useRegression = False, #no HGCal regression yet
use_preshower = False,
PFBasicClusterCollectionEndcap = "",
PFSuperClusterCollectionEndcap = "",
PFSuperClusterCollectionEndcapWithPreshower = "",
thresh_PFClusterEndcap = 1.5e-1, # 150 MeV threshold
dropUnseedable = True,
)
particleFlowSuperClusterHGCalFromMultiCl = particleFlowSuperClusterHGCal.clone()
phase2_hgcal.toModify(
particleFlowSuperClusterHGCalFromMultiCl,
PFClusters = 'particleFlowClusterHGCalFromMultiCl',
useRegression = True,
)
phase2_hgcal.toModify( particleFlowSuperClusterHGCalFromMultiCl.regressionConfig,
regressionKeyEE = "superclus_hgcal_mean_offline",
uncertaintyKeyEE = "superclus_hgcal_sigma_offline",
isPhaseII = True,
hgcalRecHits = "particleFlowRecHitHGC"
)
_phase2_hgcal_particleFlowSuperClusteringTask = particleFlowSuperClusteringTask.copy()
_phase2_hgcal_particleFlowSuperClusteringTask.add(particleFlowSuperClusterHGCal)
_phase2_hgcal_particleFlowSuperClusteringTask.add(particleFlowSuperClusterHGCalFromMultiCl)
phase2_hgcal.toReplaceWith( particleFlowSuperClusteringTask, _phase2_hgcal_particleFlowSuperClusteringTask )
| 44.319149 | 108 | 0.792127 |
793fedb1871b4f3114f9adb7015b86d005ec73e4 | 166 | py | Python | sillysort/__init__.py | renan-eccel/sillysort | 98531bb011f220d0877ba2abfd202d52026a2695 | [
"MIT"
] | null | null | null | sillysort/__init__.py | renan-eccel/sillysort | 98531bb011f220d0877ba2abfd202d52026a2695 | [
"MIT"
] | 310 | 2018-05-14T01:32:25.000Z | 2022-03-28T06:34:20.000Z | sillysort/__init__.py | renan-eccel/sillysort | 98531bb011f220d0877ba2abfd202d52026a2695 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for SillySort."""
__author__ = """Renan Artur Lopes Eccel"""
__email__ = '[email protected]'
__version__ = '0.2.0'
| 20.75 | 42 | 0.650602 |
793fee08f9ee650dc243bc11482e6588d81b7e32 | 21,630 | py | Python | pyaff4/block_hasher.py | aff4/python-aff4 | 94a3583475c07ad92147f70ff8a19e9e36f12aa9 | [
"Apache-2.0"
] | 34 | 2017-10-21T16:12:58.000Z | 2022-02-18T00:37:08.000Z | pyaff4/block_hasher.py | aff4/python-aff4 | 94a3583475c07ad92147f70ff8a19e9e36f12aa9 | [
"Apache-2.0"
] | 23 | 2017-11-06T17:01:04.000Z | 2021-12-26T14:09:38.000Z | pyaff4/block_hasher.py | aff4/python-aff4 | 94a3583475c07ad92147f70ff8a19e9e36f12aa9 | [
"Apache-2.0"
] | 17 | 2019-02-11T00:47:02.000Z | 2022-03-14T02:52:04.000Z | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
# Copyright 2016,2017 Schatz Forensic Pty Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from builtins import next
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import binascii
import collections
import hashlib
import six
from pyaff4 import container
from pyaff4 import data_store
from pyaff4 import hashes
from pyaff4 import lexicon
from pyaff4 import rdfvalue
from pyaff4 import zip
class InvalidBlockHashComparison(Exception):
pass
class InvalidHashComparison(Exception):
pass
class InconsistentHashMethod(Exception):
pass
# the following is for ordering hashes when calculating
hashOrderingMap = { lexicon.HASH_MD5 : 1,
lexicon.HASH_SHA1: 2,
lexicon.HASH_SHA256 : 3,
lexicon.HASH_SHA512 : 4,
lexicon.HASH_BLAKE2B: 5}
class ValidationListener(object):
def __init__(self):
pass
def onValidBlockHash(self, a):
pass
def onInvalidBlockHash(self, a, b, imageStreamURI, offset):
raise InvalidBlockHashComparison(
"Invalid block hash comarison for stream %s at offset %d" % (imageStreamURI, offset))
def onValidHash(self, typ, hash, imageStreamURI):
print("Validation of %s %s succeeded. Hash = %s" % (imageStreamURI, typ, hash))
def onInvalidHash(self, typ, a, b, streamURI):
raise InvalidHashComparison("Invalid %s comarison for stream %s" % (typ, streamURI))
class BlockHashesHash(object):
def __init__(self, blockHashAlgo, hash, hashDataType):
self.blockHashAlgo = blockHashAlgo
self.hash = hash
self.hashDataType = hashDataType
def __eq__(self, other):
if self.blockHashAlgo == other.blockHashAlgo and self.hash == other.hash and self.hashDataType == other.hashDataType:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def digest(self):
return binascii.unhexlify(self.hash)
class Validator(object):
def __init__(self, listener=None):
if listener == None:
self.listener = ValidationListener()
else:
self.listener = listener
self.delegate = None
def validateContainer(self, urn):
(version, lex) = container.Container.identifyURN(urn)
resolver = data_store.MemoryDataStore(lex)
with zip.ZipFile.NewZipFile(resolver, version, urn) as zip_file:
if lex == lexicon.standard:
self.delegate = InterimStdValidator(resolver, lex, self.listener)
elif lex == lexicon.legacy:
self.delegate = PreStdValidator(resolver, lex, self.listener)
else:
raise ValueError
self.delegate.volume_arn = zip_file.urn
self.delegate.doValidateContainer()
def validateContainerMultiPart(self, urn_a, urn_b):
# in this simple example, we assume that both files passed are
# members of the Container
(version, lex) = container.Container.identifyURN(urn_a)
resolver = data_store.MemoryDataStore(lex)
with zip.ZipFile.NewZipFile(resolver, version, urn_a) as zip_filea:
with zip.ZipFile.NewZipFile(resolver, version, urn_b) as zip_fileb:
if lex == lexicon.standard:
self.delegate = InterimStdValidator(resolver, lex, self.listener)
elif lex == lexicon.legacy:
self.delegate = PreStdValidator(resolver, lex, self.listener)
else:
raise ValueError
self.delegate.volume_arn = zip_filea.urn
self.delegate.doValidateContainer()
def validateBlockMapHash(self, mapStreamURI, imageStreamURI):
storedHash = next(self.resolver.QuerySubjectPredicate(
self.volume_arn, mapStreamURI, self.lexicon.blockMapHash))
calculalatedHash = self.calculateBlockMapHash(mapStreamURI, imageStreamURI, storedHash.datatype)
if storedHash != calculalatedHash:
self.listener.onInvalidHash("BlockMapHash", storedHash, calculalatedHash, mapStreamURI)
else:
self.listener.onValidHash("BlockMapHash", storedHash, mapStreamURI)
return calculalatedHash
def findLocalImageStreamOfMap(self, mapStreamURI):
mapContainer = self.resolver.findContainerOfStream(mapStreamURI)
for dependentStream in self.resolver.QuerySubjectPredicate(mapStreamURI,
self.lexicon.dependentStream):
container = self.resolver.findContainerOfStream(dependentStream)
if container == mapContainer:
return dependentStream
raise Exception
def calculateBlockMapHash(self, mapStreamURI, imageStreamURI, storedHashDataType):
storedBlockHashesHash = sorted(
self.getStoredBlockHashes(str(imageStreamURI)),
key=lambda x: hashOrderingMap[x.blockHashAlgo])
calculatedHash = hashes.new(storedHashDataType)
for hash in storedBlockHashesHash:
bytes = hash.digest()
calculatedHash.update(bytes)
for hash in self.resolver.QuerySubjectPredicate(self.volume_arn, mapStreamURI, self.lexicon.mapPointHash):
calculatedHash.update(hash.digest())
for hash in self.resolver.QuerySubjectPredicate(self.volume_arn, mapStreamURI, self.lexicon.mapIdxHash):
calculatedHash.update(hash.digest())
for hash in self.resolver.QuerySubjectPredicate(self.volume_arn, mapStreamURI, self.lexicon.mapPathHash):
calculatedHash.update(hash.digest())
return hashes.newImmutableHash(calculatedHash.hexdigest(), storedHashDataType)
def calculateBlockHashesHash(self, imageStreamURI):
hash = self.getStoredBlockHashes(imageStreamURI)
with self.resolver.AFF4FactoryOpen(imageStreamURI) as imageStream:
calculatedBlockHashes = []
for h in hash:
calculatedBlockHashes.append(hashes.new(h.hashDataType))
offset = 0
while offset < imageStream.size:
imageStream.seek(offset)
block = imageStream.Read(imageStream.chunk_size)
for i in range(len(hash)):
calculatedBlockHashesHash = calculatedBlockHashes[i]
hashDataType = hash[i].blockHashAlgo
# verify the block hash
h = hashes.new(hashDataType)
h.update(block)
calculatedBlockHash = h.hexdigest()
chunkIdx = old_div(offset, imageStream.chunk_size)
storedBlockHash = imageStream.readBlockHash(chunkIdx, hashDataType)
if calculatedBlockHash != storedBlockHash:
self.listener.onInvalidBlockHash(
calculatedBlockHash,
storedBlockHash.value,
imageStreamURI, offset)
else:
self.listener.onValidBlockHash(calculatedBlockHash)
calculatedBlockHashesHash.update(h.digest())
offset = offset + imageStream.chunk_size
# we now have the block hashes hash calculated
res = []
for i in range(len(hash)):
a = hash[i].blockHashAlgo
b = calculatedBlockHashes[i].hexdigest()
c = hash[i].hashDataType
blockHashesHash = BlockHashesHash(a, b, c)
res.append(blockHashesHash)
return res
def getStoredBlockHashes(self, imageStreamURI):
hashes = []
for hash in self.resolver.QuerySubjectPredicate(self.volume_arn, imageStreamURI, self.lexicon.blockHashesHash):
blockHashAlgo = hash.datatype
digest = hash.value
digestDataType = hash.datatype
hashes.append(BlockHashesHash(blockHashAlgo, digest, digestDataType))
return hashes
def validateBlockHashesHash(self, imageStreamURI):
storedHashes = self.getStoredBlockHashes(imageStreamURI)
calculatedHashes = self.calculateBlockHashesHash(imageStreamURI)
for i in range(len(storedHashes)):
a = storedHashes[i]
b = calculatedHashes[i]
if a != b:
self.listener.onInvalidHash("BlockHashesHash", a, b, imageStreamURI)
else:
self.listener.onValidHash("BlockHashesHash", a, imageStreamURI)
def validateMapIdxHash(self, map_uri):
for stored_hash in self.resolver.QuerySubjectPredicate(
self.volume_arn, map_uri, self.lexicon.mapIdxHash):
return self.validateSegmentHash(
map_uri, "mapIdxHash", self.calculateMapIdxHash(
map_uri, stored_hash.datatype))
def calculateMapIdxHash(self, mapURI, hashDataType):
return self.calculateSegmentHash(mapURI, "idx", hashDataType)
def validateMapPointHash(self, map_uri):
for stored_hash in self.resolver.QuerySubjectPredicate(
self.volume_arn, map_uri, self.lexicon.mapPointHash):
return self.validateSegmentHash(
map_uri, "mapPointHash", self.calculateMapPointHash(
map_uri, stored_hash.datatype))
def calculateMapPointHash(self, mapURI, storedHashDataType):
return self.calculateSegmentHash(mapURI, "map", storedHashDataType)
def validateMapPathHash(self, map_uri):
for stored_hash in self.resolver.QuerySubjectPredicate(
self.volume_arn, map_uri, self.lexicon.mapPathHash):
return self.validateSegmentHash(
map_uri, "mapPathHash", self.calculateMapPathHash(
map_uri, stored_hash.datatype))
def calculateMapPathHash(self, mapURI, storedHashDataType):
return self.calculateSegmentHash(mapURI, "mapPath", storedHashDataType)
def validateMapHash(self, map_uri):
for stored_hash in self.resolver.QuerySubjectPredicate(
self.volume_arn, map_uri, self.lexicon.mapHash):
return self.validateSegmentHash(
map_uri, "mapHash", self.calculateMapHash(
map_uri, stored_hash.datatype))
def calculateMapHash(self, mapURI, storedHashDataType):
calculatedHash = hashes.new(storedHashDataType)
calculatedHash.update(self.readSegment(mapURI, "map"))
calculatedHash.update(self.readSegment(mapURI, "idx"))
try:
calculatedHash.update(self.readSegment(mapURI, "mapPath"))
except:
pass
return hashes.newImmutableHash(calculatedHash.hexdigest(), storedHashDataType)
def validateSegmentHash(self, mapURI, hashType, calculatedHash):
storedHash = next(self.resolver.QuerySubjectPredicate(self.volume_arn, mapURI, self.lexicon.base + hashType))
if storedHash != calculatedHash:
self.listener.onInvalidHash(hashType, storedHash, calculatedHash, mapURI)
else:
self.listener.onValidHash(hashType, storedHash, mapURI)
def readSegment(self, parentURI, subSegment):
parentURI = rdfvalue.URN(parentURI)
segment_uri = parentURI.Append(subSegment)
with self.resolver.AFF4FactoryOpen(segment_uri) as segment:
data = segment.Read(segment.Size())
return data
def calculateSegmentHash(self, parentURI, subSegment, hashDataType):
calculatedHash = hashes.new(hashDataType)
data = self.readSegment(parentURI, subSegment)
if data != None:
calculatedHash.update(data)
b = calculatedHash.hexdigest()
return hashes.newImmutableHash(b, hashDataType)
else:
raise Exception
def checkSame(self, a, b):
if a != b:
raise InconsistentHashMethod()
# A block hash validator for AFF4 Pre-Standard images produced by Evimetry 1.x-2.1
class PreStdValidator(Validator):
def __init__(self, resolver, lex, listener=None):
Validator.__init__(self, listener)
self.resolver = resolver
self.lexicon = lex
def validateContainer(self, urn):
with zip.ZipFile.NewZipFile(self.resolver, urn) as zip_file:
self.volume_arn = zip_file.urn
self.doValidateContainer()
# pre AFF4 standard Evimetry uses the contains relationship to locate the local
# image stream of a Map
def findLocalImageStreamOfMap(self, mapStreamURI):
imageStreamURI = next(self.resolver.QuerySubjectPredicate(mapStreamURI,
self.lexicon.contains))
return imageStreamURI
def doValidateContainer(self):
types = list(self.resolver.QueryPredicateObject(self.volume_arn,
lexicon.AFF4_TYPE, self.lexicon.Image))
if not types:
return
imageURI = types[0]
# For block based hashing our starting point is the map
if self.isMap(imageURI):
with self.resolver.AFF4FactoryOpen(imageURI) as mapStream:
for target in mapStream.targets:
if self.resolver.isImageStream(target):
self.validateBlockHashesHash(target)
self.validateMapIdxHash(imageURI)
self.validateMapPointHash(imageURI)
self.validateMapPathHash(imageURI)
self.validateMapHash(imageURI)
self.validateBlockMapHash(imageURI, target)
# in AFF4 pre-standard Evimetry stores what we now call the blockMapHash in the Map, with the
# name blockHashesHash
def validateBlockMapHash(self, mapStreamURI, imageStreamURI):
storedHash = next(self.resolver.QuerySubjectPredicate(self.volume_arn, mapStreamURI,
self.lexicon.blockHashesHash))
calculalatedHash = self.calculateBlockMapHash(mapStreamURI, imageStreamURI, storedHash.datatype)
if storedHash != calculalatedHash:
self.listener.onInvalidHash("BlockMapHash", storedHash, calculalatedHash, mapStreamURI)
else:
self.listener.onValidHash("BlockMapHash", storedHash, mapStreamURI)
def isMap(self, stream):
types = self.resolver.QuerySubjectPredicate(self.volume_arn, stream, lexicon.AFF4_TYPE)
if self.lexicon.map in types:
return True
return False
# A block hash validator for AFF4 Interim Standard images produced by Evimetry 3.0
class InterimStdValidator(Validator):
def __init__(self, resolver, lex, listener=None):
Validator.__init__(self, listener)
self.resolver = resolver
self.lexicon = lex
def validateContainer(self, urn):
with zip.ZipFile.NewZipFile(self.resolver, urn) as zip_file:
self.delegate.volume_arn = zip_file.urn
self.doValidateContainer()
def getParentMap(self, imageStreamURI):
imageStreamVolume = next(self.resolver.QuerySubjectPredicate(self.volume_arn, imageStreamURI, self.lexicon.stored))
for map in self.resolver.QuerySubjectPredicate(self.volume_arn, imageStreamURI, self.lexicon.target):
mapVolume = next(self.resolver.QuerySubjectPredicate(self.volume_arn, map, self.lexicon.stored))
if mapVolume == imageStreamVolume:
return map
raise Exception("Illegal State")
def doValidateContainer(self):
# FIXME: This should further restrict by container URN since
# the same data store may be used for multiple containers with
# many images.
for image in self.resolver.QueryPredicateObject(
self.volume_arn, lexicon.AFF4_TYPE, self.lexicon.Image):
datastreams = list(self.resolver.QuerySubjectPredicate(
self.volume_arn, image, self.lexicon.dataStream))
calculated_hashes = collections.OrderedDict()
hash_datatype = None
for stream in datastreams:
if self.isMap(stream):
for image_stream_uri in self.resolver.QuerySubjectPredicate(
self.volume_arn, stream, self.lexicon.dependentStream):
parent_map = self.getParentMap(image_stream_uri)
if parent_map == stream:
# only validate the map and stream pair in the same container
self.validateBlockHashesHash(image_stream_uri)
self.validateMapIdxHash(parent_map)
self.validateMapPointHash(parent_map)
self.validateMapPathHash(parent_map)
self.validateMapHash(parent_map)
calculated_hash = self.validateBlockMapHash(
parent_map, image_stream_uri)
calculated_hashes[parent_map] = calculated_hash
# Assume all block hashes are the same type.
if (hash_datatype is not None and
hash_datatype != calculated_hash.datatype):
raise AttributeError(
"Block hashes are not all the same type.")
else:
hash_datatype = calculated_hash.datatype
for stored_hash in self.resolver.QuerySubjectPredicate(
self.volume_arn, image, self.lexicon.hash):
hasha = ""
hashb = ""
parent_map = None
# TODO: handle more cleanly the sematic difference between datatypes
if len(calculated_hashes) == 1:
# This is a single part image
# The single AFF4 hash is just the blockMapHash
parent_map, calculated_hash = calculated_hashes.popitem()
hasha = stored_hash
hashb = calculated_hash
else:
# This is a multiple part image The single AFF4
# hash is one layer up in the Merkel tree again,
# with the subordinate nodes being the
# blockMapHashes for the map stored in each
# container volume
# The hash algorithm we use for the single AFF4
# hash is the same algorithm we use for all of the
# Merkel tree inner nodes
current_hash = hashes.new(hash_datatype)
# The canonical striped images and Evimetry rely on the natural ordering
# (string comparison) of the map URN's
# as they are stored in the map to order the
# blockMapHashes in the Merkel tree.
#
# For example for a striped image composed of two containers, we would have one map per
# container. c1 -- > aff4://363ac10c-8d8d-4905-ac25-a14aaddd8a41
# c2 --> aff4://2dd04819-73c8-40e3-a32b-fdddb0317eac
# At this level of the merkel tree, we order the concatenated hashes based on
# the map URI, so we would calculate the hash from c2 then c1
# TODO: update the specification to reflect this rule
for parent_map, calculated_hash in sorted(calculated_hashes.items()):
current_hash.update(calculated_hash.digest())
hasha = stored_hash.value
hashb = current_hash.hexdigest()
if hasha != hashb:
self.listener.onInvalidHash("AFF4Hash", hasha, hashb, parent_map)
else:
self.listener.onValidHash("AFF4Hash", hasha, parent_map)
def getStoredBlockHashes(self, image_stream_uri):
res = []
for block_hash_uri in self.resolver.SelectSubjectsByPrefix(self.volume_arn, str(image_stream_uri) + "/blockhash."):
for hash in self.resolver.QuerySubjectPredicate(self.volume_arn, block_hash_uri, self.lexicon.hash):
extension = block_hash_uri.Parse().path.split(".")[-1]
block_hash_algo_type = hashes.fromShortName(extension)
hash = BlockHashesHash(block_hash_algo_type, hash.value, hash.datatype)
res.append(hash)
return res
def isMap(self, stream):
types = self.resolver.QuerySubjectPredicate(self.volume_arn, stream, lexicon.AFF4_TYPE)
if self.lexicon.map in types:
return True
return False
| 42 | 125 | 0.632455 |
793fee7dfa01a83005f96c2cf3d7808deb9b6bf0 | 7,196 | py | Python | src/morphforgecontrib/simulation/channels/hh_style/neuron/hocmodbuilders/mmwriter_alphabetabeta.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | 1 | 2021-01-21T11:31:59.000Z | 2021-01-21T11:31:59.000Z | src/morphforgecontrib/simulation/channels/hh_style/neuron/hocmodbuilders/mmwriter_alphabetabeta.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | src/morphforgecontrib/simulation/channels/hh_style/neuron/hocmodbuilders/mmwriter_alphabetabeta.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from Cheetah.Template import Template
from morphforge.simulation.neuron import ModFile
from morphforge.simulation.neuron.simulationdatacontainers import MHocFileData
from morphforge.simulation.neuron.simulationdatacontainers import MHOCSections
from morphforge.simulation.neuron.hocmodbuilders import MM_ModFileWriterBase
class NEURONChlWriterAlphaBetaBeta(object):
chlHoc = """
$(cell_name).internalsections [$section_index] {
// AlphaBetaBeta Channels
insert $neuron_suffix
#for variable_name, variable_value_nounit, variable_value_with_unit, variable_unit in $variables:
$(variable_name)_$(neuron_suffix) = $variable_value_nounit //(in $variable_unit, converted from $variable_value_with_unit)
#end for
}
"""
Units = {'gBar': 'S/cm2', 'e_rev': 'mV', 'gScale': ''}
@classmethod
def build_hoc_section(self, cell, section, hocfile_obj, mta):
cell_hoc = hocfile_obj[MHocFileData.Cells][cell]
cell_name = cell_hoc['cell_name']
section_index = cell_hoc['section_indexer'][section]
neuron_suffix = mta.channel.get_neuron_suffix()
# Calculate the values of the variables for the section:
variables = []
for variable_name in mta.channel.get_variables():
variable_value_with_unit = mta.applicator.get_variable_value_for_section(variable_name=variable_name, section=section)
variable_unit = NEURONChlWriterAlphaBetaBeta.Units[variable_name]
variable_value_nounit = variable_value_with_unit.rescale(variable_unit).magnitude
variables.append([variable_name, variable_value_nounit, variable_value_with_unit, variable_unit])
tmpl_dict = {
'cell_name': cell_name,
'section_index': section_index,
'neuron_suffix': neuron_suffix,
'variables': variables,
}
# Add the data to the HOC file
hocfile_obj.add_to_section(MHOCSections.InitCellMembranes, Template(NEURONChlWriterAlphaBetaBeta.chlHoc, tmpl_dict).respond())
@classmethod
def build_mod(cls, alphabeta_beta_chl, modfile_set):
gbar_name = 'gBar'
e_rev_name = 'eLk'
g_scale_name = 'gScale'
base_writer = MM_ModFileWriterBase(suffix=alphabeta_beta_chl.get_neuron_suffix())
# Naming Conventions:
state_tau = lambda s: '%stau' % s
state_inf = lambda s: '%sinf' % s
state_alpha = lambda s: '%s_alpha' % s
state_beta = lambda s: '%s_beta' % s
# State Equations and initial values:
for s in alphabeta_beta_chl.statevars:
base_writer.internalstates[s] = "%s" % state_inf(s) , "%s'=(%s-%s)/%s" % (s, state_inf(s), s, state_tau(s))
# Parameters:
# {name: (value, unit, range)}
base_writer.parameters = {
#gbar_name: (alphabeta_beta_chl.conductance.toval(ounit="S/cm2"), ("S/cm2"), None),
#e_rev_name: (alphabeta_beta_chl.reversalpotential.toval("mV"), ("mV"), None)
gbar_name: (alphabeta_beta_chl.conductance.rescale("S/cm2").magnitude, ("S/cm2"), None),
e_rev_name: (alphabeta_beta_chl.reversalpotential.rescale("mV").magnitude, ("mV"), None),
g_scale_name: (1.0, None, None)
}
# Rates:
# name : (locals, code), unit
for s in alphabeta_beta_chl.statevars:
base_writer.rates[state_alpha(s)] = (("", state_alpha(s) + "= StdAlphaBeta(%f, %f, %f, %f, %f, v)" % tuple(alphabeta_beta_chl.statevars[s][0]))), None
#base_writer.rates[state_beta(s)] = (("", state_beta(s) + "= StdBetaBeta(%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, v)" % tuple(alphabeta_beta_chl.statevars[s][1] + alphabeta_beta_chl.statevars[s][2] + [alphabeta_beta_chl.beta2threshold.toval(ounit="mV")]))), None
base_writer.rates[state_beta(s)] = (("", state_beta(s) + "= StdBetaBeta(%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, v)" % tuple(alphabeta_beta_chl.statevars[s][1] + alphabeta_beta_chl.statevars[s][2] + [alphabeta_beta_chl.beta2threshold.rescale("mV").magnitude]))), None
base_writer.rates[state_inf(s)] = (("", state_inf(s) + "= %s/(%s+%s)" % (state_alpha(s), state_alpha(s), state_beta(s))), None)
base_writer.rates[state_tau(s)] = (("", state_tau(s) + "= 1.0/(%s+%s)" % (state_alpha(s), state_beta(s))), "ms")
base_writer.ratecalcorder.extend([state_alpha(s), state_beta(s), state_inf(s), state_tau(s)])
base_writer.currentequation = "(v-%s) * %s * %s * %s" % (e_rev_name, gbar_name, alphabeta_beta_chl.eqn, g_scale_name)
base_writer.conductanceequation = "%s * %s * %s" % (gbar_name, alphabeta_beta_chl.eqn, g_scale_name)
#base_writer.currentequation = "(v-%s) * %s * %s" % (e_rev_name, gbar_name, alphabeta_beta_chl.eqn)
base_writer.functions = """
FUNCTION StdAlphaBeta(A, B, C, D, E, V){ StdAlphaBeta = (A + B*V) / (C + exp((D+V)/E)) }
FUNCTION StdBetaBeta(A, B, C, D, E, A2, B2, C2, D2, E2, beta2Threshold, V)
{
if(V < beta2Threshold)
{
StdBetaBeta = (A + B*V) / (C + exp((D+V)/E))
}
else
{
StdBetaBeta = (A2 + B2*V) / (C2 + exp((D2+V)/E2))
}
}
"""
txt = base_writer.generate_modfile()
mod_file = ModFile(name=alphabeta_beta_chl.name, modtxt=txt)
modfile_set.append(mod_file)
| 48.952381 | 285 | 0.629655 |
793ff00f9884b31c7311a6c25c98f72d4dc26bfd | 2,944 | py | Python | glashammer/bundles/deprecated/auth/__init__.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | [
"MIT"
] | 1 | 2016-07-04T15:23:59.000Z | 2016-07-04T15:23:59.000Z | glashammer/bundles/deprecated/auth/__init__.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | [
"MIT"
] | null | null | null | glashammer/bundles/deprecated/auth/__init__.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
glashammer.bundles.auth
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2008 by Glashammer Developers
:license: MIT
"""
import os
from werkzeug.exceptions import NotFound
from wtforms import Form, TextField, PasswordField
from glashammer.utils import render_response, sibpath, emit_event, redirect, \
get_app, url_for
from glashammer.bundles.sessions import setup_sessions, get_session
def get_username():
session = get_session()
app = get_app()
return session.get(app.conf['auth/token_key'])
def get_user():
pass
def check_username_password(username, password):
tokens = emit_event('password-check', username, password)
if any(tokens):
return username
def check_role(token, role_key):
roles = emit_event('role-check', token, role_key)
if any(roles):
return True
def login(token):
session = get_session()
app = get_app()
session[app.conf['auth/token_key']] = token
def logout():
session = get_session()
app = get_app()
del session[app.conf['auth/token_key']]
def set_user_password(username, password):
emit_event('password-change', username, gen_pwhash(password))
def auth_protected_view(f):
"""
Decorator to only allow authorized users to access the view
"""
def wrapped(*args, **kw):
if get_app().conf['auth/token_key'] in get_session():
return f(*args, **kw)
else:
return redirect(url_for('auth/login'))
return wrapped
# Basic logging in view and forms
class UserForm(Form):
username = TextField()
password = PasswordField()
def view_login(request):
"""Show a login page."""
error = None
form = UserForm(request.form)
if request.method == 'POST':
if form.validate():
username = form.username.data
password = form.password.data
if username and check_username_password(username, password):
login(username)
return redirect('/')
error = ('Incorrect password.')
return render_response('auth_login.jinja', error=error,
auth_form=form)
def view_logout(request):
"""Just logout and redirect to the login screen."""
logout()
return redirect(url_for('auth/login'))
def setup_auth(app, add_auth_views=True):
"""
Setup the application to use auth.
`add_auth_views`
If True, the views for /login and /logout are created for you. This is
the reference implementation that you may or may not want to replace.
"""
app.add_setup(setup_sessions)
app.add_config_var('auth/token_key', str, 'auth_session_key')
app.add_template_searchpath(sibpath(__file__, 'templates'))
if add_auth_views:
app.add_url('/login', endpoint='auth/login', view=view_login)
app.add_url('/logout', endpoint='auth/logout', view=view_logout)
setup_app = setup_auth
| 24.739496 | 78 | 0.654891 |
793ff0519df21a13305f3e162866650ebba443b5 | 49,513 | py | Python | sdks/python/apache_beam/examples/snippets/snippets_test.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | [
"Apache-2.0"
] | 3 | 2020-08-28T17:47:26.000Z | 2021-08-17T06:38:58.000Z | sdks/python/apache_beam/examples/snippets/snippets_test.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | [
"Apache-2.0"
] | 9 | 2020-06-03T12:34:25.000Z | 2020-08-11T12:18:22.000Z | sdks/python/apache_beam/examples/snippets/snippets_test.py | shitanshu-google/beam | 9cd959f61d377874ee1839c2de4bb8f65a948ecc | [
"Apache-2.0"
] | 1 | 2021-10-05T20:53:52.000Z | 2021-10-05T20:53:52.000Z | # coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for all code snippets used in public docs."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import glob
import gzip
import logging
import math
import os
import sys
import tempfile
import time
import unittest
import uuid
from builtins import map
from builtins import object
from builtins import range
from builtins import zip
import mock
import apache_beam as beam
import apache_beam.transforms.combiners as combiners
from apache_beam import WindowInto
from apache_beam import coders
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.coders.coders import ToBytesCoder
from apache_beam.examples.snippets import snippets
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.trigger import AccumulationMode
from apache_beam.transforms.trigger import AfterAny
from apache_beam.transforms.trigger import AfterCount
from apache_beam.transforms.trigger import AfterProcessingTime
from apache_beam.transforms.trigger import AfterWatermark
from apache_beam.transforms.trigger import Repeatedly
from apache_beam.transforms.window import FixedWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils.windowed_value import WindowedValue
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py import base_api
except ImportError:
base_api = None
# pylint: enable=wrong-import-order, wrong-import-position
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.datastore import client as datastore_client
except ImportError:
datastore_client = None
# pylint: enable=wrong-import-order, wrong-import-position
# Protect against environments where the PubSub library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
# pylint: enable=wrong-import-order, wrong-import-position
class ParDoTest(unittest.TestCase):
"""Tests for model/par-do."""
def test_pardo(self):
# Note: "words" and "ComputeWordLengthFn" are referenced by name in
# the text of the doc.
words = ['aa', 'bbb', 'c']
# [START model_pardo_pardo]
class ComputeWordLengthFn(beam.DoFn):
def process(self, element):
return [len(element)]
# [END model_pardo_pardo]
# [START model_pardo_apply]
# Apply a ParDo to the PCollection "words" to compute lengths for each word.
word_lengths = words | beam.ParDo(ComputeWordLengthFn())
# [END model_pardo_apply]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_yield(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_yield]
class ComputeWordLengthFn(beam.DoFn):
def process(self, element):
yield len(element)
# [END model_pardo_yield]
word_lengths = words | beam.ParDo(ComputeWordLengthFn())
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_map(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_using_map]
word_lengths = words | beam.Map(len)
# [END model_pardo_using_map]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_flatmap(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_using_flatmap]
word_lengths = words | beam.FlatMap(lambda word: [len(word)])
# [END model_pardo_using_flatmap]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_flatmap_yield(self):
words = ['aA', 'bbb', 'C']
# [START model_pardo_using_flatmap_yield]
def capitals(word):
for letter in word:
if 'A' <= letter <= 'Z':
yield letter
all_capitals = words | beam.FlatMap(capitals)
# [END model_pardo_using_flatmap_yield]
self.assertEqual({'A', 'C'}, set(all_capitals))
def test_pardo_with_label(self):
words = ['aa', 'bbc', 'defg']
# [START model_pardo_with_label]
result = words | 'CountUniqueLetters' >> beam.Map(
lambda word: len(set(word)))
# [END model_pardo_with_label]
self.assertEqual({1, 2, 4}, set(result))
def test_pardo_side_input(self):
# pylint: disable=line-too-long
with TestPipeline() as p:
words = p | 'start' >> beam.Create(['a', 'bb', 'ccc', 'dddd'])
# [START model_pardo_side_input]
# Callable takes additional arguments.
def filter_using_length(word, lower_bound, upper_bound=float('inf')):
if lower_bound <= len(word) <= upper_bound:
yield word
# Construct a deferred side input.
avg_word_len = (
words
| beam.Map(len)
| beam.CombineGlobally(beam.combiners.MeanCombineFn()))
# Call with explicit side inputs.
small_words = words | 'small' >> beam.FlatMap(filter_using_length, 0, 3)
# A single deferred side input.
larger_than_average = (
words | 'large' >> beam.FlatMap(
filter_using_length, lower_bound=pvalue.AsSingleton(avg_word_len))
)
# Mix and match.
small_but_nontrivial = words | beam.FlatMap(
filter_using_length,
lower_bound=2,
upper_bound=pvalue.AsSingleton(avg_word_len))
# [END model_pardo_side_input]
assert_that(small_words, equal_to(['a', 'bb', 'ccc']))
assert_that(
larger_than_average,
equal_to(['ccc', 'dddd']),
label='larger_than_average')
assert_that(
small_but_nontrivial, equal_to(['bb']), label='small_but_not_trivial')
def test_pardo_side_input_dofn(self):
words = ['a', 'bb', 'ccc', 'dddd']
# [START model_pardo_side_input_dofn]
class FilterUsingLength(beam.DoFn):
def process(self, element, lower_bound, upper_bound=float('inf')):
if lower_bound <= len(element) <= upper_bound:
yield element
small_words = words | beam.ParDo(FilterUsingLength(), 0, 3)
# [END model_pardo_side_input_dofn]
self.assertEqual({'a', 'bb', 'ccc'}, set(small_words))
def test_pardo_with_tagged_outputs(self):
# [START model_pardo_emitting_values_on_tagged_outputs]
class ProcessWords(beam.DoFn):
def process(self, element, cutoff_length, marker):
if len(element) <= cutoff_length:
# Emit this short word to the main output.
yield element
else:
# Emit this word's long length to the 'above_cutoff_lengths' output.
yield pvalue.TaggedOutput('above_cutoff_lengths', len(element))
if element.startswith(marker):
# Emit this word to a different output with the 'marked strings' tag.
yield pvalue.TaggedOutput('marked strings', element)
# [END model_pardo_emitting_values_on_tagged_outputs]
words = ['a', 'an', 'the', 'music', 'xyz']
# [START model_pardo_with_tagged_outputs]
results = (
words
| beam.ParDo(ProcessWords(), cutoff_length=2, marker='x').with_outputs(
'above_cutoff_lengths',
'marked strings',
main='below_cutoff_strings'))
below = results.below_cutoff_strings
above = results.above_cutoff_lengths
marked = results['marked strings'] # indexing works as well
# [END model_pardo_with_tagged_outputs]
self.assertEqual({'a', 'an'}, set(below))
self.assertEqual({3, 5}, set(above))
self.assertEqual({'xyz'}, set(marked))
# [START model_pardo_with_tagged_outputs_iter]
below, above, marked = (words
| beam.ParDo(
ProcessWords(), cutoff_length=2, marker='x')
.with_outputs('above_cutoff_lengths',
'marked strings',
main='below_cutoff_strings'))
# [END model_pardo_with_tagged_outputs_iter]
self.assertEqual({'a', 'an'}, set(below))
self.assertEqual({3, 5}, set(above))
self.assertEqual({'xyz'}, set(marked))
def test_pardo_with_undeclared_outputs(self):
# Note: the use of undeclared outputs is currently not supported in eager
# execution mode.
with TestPipeline() as p:
numbers = p | beam.Create([1, 2, 3, 4, 5, 10, 20])
# [START model_pardo_with_undeclared_outputs]
def even_odd(x):
yield pvalue.TaggedOutput('odd' if x % 2 else 'even', x)
if x % 10 == 0:
yield x
results = numbers | beam.FlatMap(even_odd).with_outputs()
evens = results.even
odds = results.odd
tens = results[None] # the undeclared main output
# [END model_pardo_with_undeclared_outputs]
assert_that(evens, equal_to([2, 4, 10, 20]), label='assert_even')
assert_that(odds, equal_to([1, 3, 5]), label='assert_odds')
assert_that(tens, equal_to([10, 20]), label='assert_tens')
class TypeHintsTest(unittest.TestCase):
def test_bad_types(self):
# [START type_hints_missing_define_numbers]
p = TestPipeline()
numbers = p | beam.Create(['1', '2', '3'])
# [END type_hints_missing_define_numbers]
# Consider the following code.
# pylint: disable=expression-not-assigned
# pylint: disable=unused-variable
# [START type_hints_missing_apply]
evens = numbers | beam.Filter(lambda x: x % 2 == 0)
# [END type_hints_missing_apply]
# Now suppose numbers was defined as [snippet above].
# When running this pipeline, you'd get a runtime error,
# possibly on a remote machine, possibly very late.
with self.assertRaises(TypeError):
p.run()
# To catch this early, we can assert what types we expect.
with self.assertRaises(typehints.TypeCheckError):
# [START type_hints_takes]
evens = numbers | beam.Filter(lambda x: x % 2 == 0).with_input_types(int)
# [END type_hints_takes]
# Type hints can be declared on DoFns and callables as well, rather
# than where they're used, to be more self contained.
with self.assertRaises(typehints.TypeCheckError):
# [START type_hints_do_fn]
@beam.typehints.with_input_types(int)
class FilterEvensDoFn(beam.DoFn):
def process(self, element):
if element % 2 == 0:
yield element
evens = numbers | beam.ParDo(FilterEvensDoFn())
# [END type_hints_do_fn]
words = p | 'words' >> beam.Create(['a', 'bb', 'c'])
# One can assert outputs and apply them to transforms as well.
# Helps document the contract and checks it at pipeline construction time.
# [START type_hints_transform]
from typing import Tuple, TypeVar
T = TypeVar('T')
@beam.typehints.with_input_types(T)
@beam.typehints.with_output_types(Tuple[int, T])
class MyTransform(beam.PTransform):
def expand(self, pcoll):
return pcoll | beam.Map(lambda x: (len(x), x))
words_with_lens = words | MyTransform()
# [END type_hints_transform]
# Given an input of str, the inferred output type would be Tuple[int, str].
self.assertEqual(typehints.Tuple[int, str], words_with_lens.element_type)
# pylint: disable=expression-not-assigned
with self.assertRaises(typehints.TypeCheckError):
words_with_lens | beam.Map(lambda x: x).with_input_types(Tuple[int, int])
def test_runtime_checks_off(self):
# We do not run the following pipeline, as it has incorrect type
# information, and may fail with obscure errors, depending on the runner
# implementation.
# pylint: disable=expression-not-assigned
# [START type_hints_runtime_off]
p = TestPipeline()
p | beam.Create(['a']) | beam.Map(lambda x: 3).with_output_types(str)
# [END type_hints_runtime_off]
def test_runtime_checks_on(self):
# pylint: disable=expression-not-assigned
with self.assertRaises(typehints.TypeCheckError):
# [START type_hints_runtime_on]
p = TestPipeline(options=PipelineOptions(runtime_type_check=True))
p | beam.Create(['a']) | beam.Map(lambda x: 3).with_output_types(str)
p.run()
# [END type_hints_runtime_on]
def test_deterministic_key(self):
with TestPipeline() as p:
lines = (
p | beam.Create([
'banana,fruit,3',
'kiwi,fruit,2',
'kiwi,fruit,2',
'zucchini,veg,3'
]))
# For pickling.
global Player # pylint: disable=global-variable-not-assigned
# [START type_hints_deterministic_key]
from typing import Tuple
class Player(object):
def __init__(self, team, name):
self.team = team
self.name = name
class PlayerCoder(beam.coders.Coder):
def encode(self, player):
return ('%s:%s' % (player.team, player.name)).encode('utf-8')
def decode(self, s):
return Player(*s.decode('utf-8').split(':'))
def is_deterministic(self):
return True
beam.coders.registry.register_coder(Player, PlayerCoder)
def parse_player_and_score(csv):
name, team, score = csv.split(',')
return Player(team, name), int(score)
totals = (
lines
| beam.Map(parse_player_and_score)
| beam.CombinePerKey(sum).with_input_types(Tuple[Player, int]))
# [END type_hints_deterministic_key]
assert_that(
totals | beam.Map(lambda k_v: (k_v[0].name, k_v[1])),
equal_to([('banana', 3), ('kiwi', 4), ('zucchini', 3)]))
class SnippetsTest(unittest.TestCase):
# Replacing text read/write transforms with dummy transforms for testing.
class DummyReadTransform(beam.PTransform):
"""A transform that will replace iobase.ReadFromText.
To be used for testing.
"""
def __init__(self, file_to_read=None, compression_type=None):
self.file_to_read = file_to_read
self.compression_type = compression_type
class ReadDoFn(beam.DoFn):
def __init__(self, file_to_read, compression_type):
self.file_to_read = file_to_read
self.compression_type = compression_type
self.coder = coders.StrUtf8Coder()
def process(self, element):
pass
def finish_bundle(self):
from apache_beam.transforms import window
assert self.file_to_read
for file_name in glob.glob(self.file_to_read):
if self.compression_type is None:
with open(file_name, 'rb') as file:
for record in file:
value = self.coder.decode(record.rstrip(b'\n'))
yield WindowedValue(value, -1, [window.GlobalWindow()])
else:
with gzip.open(file_name, 'rb') as file:
for record in file:
value = self.coder.decode(record.rstrip(b'\n'))
yield WindowedValue(value, -1, [window.GlobalWindow()])
def expand(self, pcoll):
return pcoll | beam.Create([None]) | 'DummyReadForTesting' >> beam.ParDo(
SnippetsTest.DummyReadTransform.ReadDoFn(
self.file_to_read, self.compression_type))
class DummyWriteTransform(beam.PTransform):
"""A transform that will replace iobase.WriteToText.
To be used for testing.
"""
def __init__(self, file_to_write=None, file_name_suffix=''):
self.file_to_write = file_to_write
class WriteDoFn(beam.DoFn):
def __init__(self, file_to_write):
self.file_to_write = file_to_write
self.file_obj = None
self.coder = ToBytesCoder()
def start_bundle(self):
assert self.file_to_write
# Appending a UUID to create a unique file object per invocation.
self.file_obj = open(self.file_to_write + str(uuid.uuid4()), 'wb')
def process(self, element):
assert self.file_obj
self.file_obj.write(self.coder.encode(element) + b'\n')
def finish_bundle(self):
assert self.file_obj
self.file_obj.close()
def expand(self, pcoll):
return pcoll | 'DummyWriteForTesting' >> beam.ParDo(
SnippetsTest.DummyWriteTransform.WriteDoFn(self.file_to_write))
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def setUp(self):
self.old_read_from_text = beam.io.ReadFromText
self.old_write_to_text = beam.io.WriteToText
# Monkey patching to allow testing pipelines defined in snippets.py using
# real data.
beam.io.ReadFromText = SnippetsTest.DummyReadTransform
beam.io.WriteToText = SnippetsTest.DummyWriteTransform
self.temp_files = []
def tearDown(self):
beam.io.ReadFromText = self.old_read_from_text
beam.io.WriteToText = self.old_write_to_text
# Cleanup all the temporary files created in the test.
map(os.remove, self.temp_files)
def create_temp_file(self, contents=''):
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(contents.encode('utf-8'))
self.temp_files.append(f.name)
return f.name
def get_output(self, path, sorted_output=True, suffix=''):
all_lines = []
for file_name in glob.glob(path + '*'):
with open(file_name) as f:
lines = f.readlines()
all_lines.extend([s.rstrip('\n') for s in lines])
if sorted_output:
return sorted(s.rstrip('\n') for s in all_lines)
return all_lines
def test_model_pipelines(self):
temp_path = self.create_temp_file('aa bb cc\n bb cc\n cc')
result_path = temp_path + '.result'
snippets.model_pipelines(
['--input=%s*' % temp_path, '--output=%s' % result_path])
self.assertEqual(
self.get_output(result_path),
[str(s) for s in [(u'aa', 1), (u'bb', 2), (u'cc', 3)]])
def test_model_pcollection(self):
temp_path = self.create_temp_file()
snippets.model_pcollection(['--output=%s' % temp_path])
self.assertEqual(
self.get_output(temp_path),
[
'Or to take arms against a sea of troubles, ',
'The slings and arrows of outrageous fortune, ',
'To be, or not to be: that is the question: ',
'Whether \'tis nobler in the mind to suffer ',
])
def test_construct_pipeline(self):
temp_path = self.create_temp_file('abc def ghi\n jkl mno pqr\n stu vwx yz')
result_path = self.create_temp_file()
snippets.construct_pipeline({'read': temp_path, 'write': result_path})
self.assertEqual(
self.get_output(result_path),
['cba', 'fed', 'ihg', 'lkj', 'onm', 'rqp', 'uts', 'xwv', 'zy'])
def test_model_custom_source(self):
snippets.model_custom_source(100)
def test_model_custom_sink(self):
tempdir_name = tempfile.mkdtemp()
class SimpleKV(object):
def __init__(self, tmp_dir):
self._dummy_token = 'dummy_token'
self._tmp_dir = tmp_dir
def connect(self, url):
return self._dummy_token
def open_table(self, access_token, table_name):
assert access_token == self._dummy_token
file_name = self._tmp_dir + os.sep + table_name
assert not os.path.exists(file_name)
open(file_name, 'wb').close()
return table_name
def write_to_table(self, access_token, table_name, key, value):
assert access_token == self._dummy_token
file_name = self._tmp_dir + os.sep + table_name
assert os.path.exists(file_name)
with open(file_name, 'ab') as f:
content = (key + ':' + value + os.linesep).encode('utf-8')
f.write(content)
def rename_table(self, access_token, old_name, new_name):
assert access_token == self._dummy_token
old_file_name = self._tmp_dir + os.sep + old_name
new_file_name = self._tmp_dir + os.sep + new_name
assert os.path.isfile(old_file_name)
assert not os.path.exists(new_file_name)
os.rename(old_file_name, new_file_name)
snippets.model_custom_sink(
SimpleKV(tempdir_name),
[('key' + str(i), 'value' + str(i)) for i in range(100)],
'final_table_no_ptransform',
'final_table_with_ptransform')
expected_output = [
'key' + str(i) + ':' + 'value' + str(i) for i in range(100)
]
glob_pattern = tempdir_name + os.sep + 'final_table_no_ptransform*'
output_files = glob.glob(glob_pattern)
assert output_files
received_output = []
for file_name in output_files:
with open(file_name) as f:
for line in f:
received_output.append(line.rstrip(os.linesep))
self.assertCountEqual(expected_output, received_output)
glob_pattern = tempdir_name + os.sep + 'final_table_with_ptransform*'
output_files = glob.glob(glob_pattern)
assert output_files
received_output = []
for file_name in output_files:
with open(file_name) as f:
for line in f:
received_output.append(line.rstrip(os.linesep))
self.assertCountEqual(expected_output, received_output)
def test_model_textio(self):
temp_path = self.create_temp_file('aa bb cc\n bb cc\n cc')
result_path = temp_path + '.result'
snippets.model_textio({'read': temp_path, 'write': result_path})
self.assertEqual(['aa', 'bb', 'bb', 'cc', 'cc', 'cc'],
self.get_output(result_path, suffix='.csv'))
def test_model_textio_compressed(self):
temp_path = self.create_temp_file('aa\nbb\ncc')
gzip_file_name = temp_path + '.gz'
with open(temp_path, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
dst.writelines(src)
# Add the temporary gzip file to be cleaned up as well.
self.temp_files.append(gzip_file_name)
snippets.model_textio_compressed({'read': gzip_file_name},
['aa', 'bb', 'cc'])
@unittest.skipIf(
datastore_client is None, 'GCP dependencies are not installed')
def test_model_datastoreio(self):
# We cannot test DatastoreIO functionality in unit tests, therefore we limit
# ourselves to making sure the pipeline containing Datastore read and write
# transforms can be built.
# TODO(vikasrk): Expore using Datastore Emulator.
snippets.model_datastoreio()
@unittest.skipIf(base_api is None, 'GCP dependencies are not installed')
def test_model_bigqueryio(self):
# We cannot test BigQueryIO functionality in unit tests, therefore we limit
# ourselves to making sure the pipeline containing BigQuery sources and
# sinks can be built.
#
# To run locally, set `run_locally` to `True`. You will also have to set
# `project`, `dataset` and `table` to the BigQuery table the test will write
# to.
run_locally = False
if run_locally:
project = 'my-project'
dataset = 'samples' # this must already exist
table = 'model_bigqueryio' # this will be created if needed
options = PipelineOptions().view_as(GoogleCloudOptions)
options.project = project
with beam.Pipeline(options=options) as p:
snippets.model_bigqueryio(p, project, dataset, table)
else:
p = TestPipeline()
snippets.model_bigqueryio(p)
def _run_test_pipeline_for_options(self, fn):
temp_path = self.create_temp_file('aa\nbb\ncc')
result_path = temp_path + '.result'
fn(['--input=%s*' % temp_path, '--output=%s' % result_path])
self.assertEqual(['aa', 'bb', 'cc'], self.get_output(result_path))
def test_pipeline_options_local(self):
self._run_test_pipeline_for_options(snippets.pipeline_options_local)
def test_pipeline_options_remote(self):
self._run_test_pipeline_for_options(snippets.pipeline_options_remote)
def test_pipeline_options_command_line(self):
self._run_test_pipeline_for_options(snippets.pipeline_options_command_line)
def test_pipeline_logging(self):
result_path = self.create_temp_file()
lines = [
'we found love right where we are',
'we found love right from the start',
'we found love in a hopeless place'
]
snippets.pipeline_logging(lines, result_path)
self.assertEqual(
sorted(' '.join(lines).split(' ')), self.get_output(result_path))
def test_examples_wordcount(self):
pipelines = [
snippets.examples_wordcount_minimal,
snippets.examples_wordcount_wordcount,
snippets.pipeline_monitoring,
snippets.examples_wordcount_templated
]
for pipeline in pipelines:
temp_path = self.create_temp_file('abc def ghi\n abc jkl')
result_path = self.create_temp_file()
pipeline({'read': temp_path, 'write': result_path})
self.assertEqual(
self.get_output(result_path),
['abc: 2', 'def: 1', 'ghi: 1', 'jkl: 1'])
def test_examples_ptransforms_templated(self):
pipelines = [snippets.examples_ptransforms_templated]
for pipeline in pipelines:
temp_path = self.create_temp_file('1\n 2\n 3')
result_path = self.create_temp_file()
pipeline({'read': temp_path, 'write': result_path})
self.assertEqual(self.get_output(result_path), ['11', '12', '13'])
def test_examples_wordcount_debugging(self):
temp_path = self.create_temp_file(
'Flourish Flourish Flourish stomach abc def')
result_path = self.create_temp_file()
snippets.examples_wordcount_debugging({
'read': temp_path, 'write': result_path
})
self.assertEqual(
self.get_output(result_path), ['Flourish: 3', 'stomach: 1'])
@unittest.skipIf(pubsub is None, 'GCP dependencies are not installed')
@mock.patch('apache_beam.io.ReadFromPubSub')
@mock.patch('apache_beam.io.WriteStringsToPubSub')
def test_examples_wordcount_streaming(self, *unused_mocks):
def FakeReadFromPubSub(topic=None, subscription=None, values=None):
expected_topic = topic
expected_subscription = subscription
def _inner(topic=None, subscription=None):
assert topic == expected_topic
assert subscription == expected_subscription
return TestStream().add_elements(values)
return _inner
class AssertTransform(beam.PTransform):
def __init__(self, matcher):
self.matcher = matcher
def expand(self, pcoll):
assert_that(pcoll, self.matcher)
def FakeWriteStringsToPubSub(topic=None, values=None):
expected_topic = topic
def _inner(topic=None, subscription=None):
assert topic == expected_topic
return AssertTransform(equal_to(values))
return _inner
# Test basic execution.
input_topic = 'projects/fake-beam-test-project/topic/intopic'
input_values = [
TimestampedValue(b'a a b', 1),
TimestampedValue(u'🤷 ¯\\_(ツ)_/¯ b b '.encode('utf-8'), 12),
TimestampedValue(b'a b c c c', 20)
]
output_topic = 'projects/fake-beam-test-project/topic/outtopic'
output_values = ['a: 1', 'a: 2', 'b: 1', 'b: 3', 'c: 3']
beam.io.ReadFromPubSub = (
FakeReadFromPubSub(topic=input_topic, values=input_values))
beam.io.WriteStringsToPubSub = (
FakeWriteStringsToPubSub(topic=output_topic, values=output_values))
snippets.examples_wordcount_streaming([
'--input_topic',
'projects/fake-beam-test-project/topic/intopic',
'--output_topic',
'projects/fake-beam-test-project/topic/outtopic'
])
# Test with custom subscription.
input_sub = 'projects/fake-beam-test-project/subscriptions/insub'
beam.io.ReadFromPubSub = FakeReadFromPubSub(
subscription=input_sub, values=input_values)
snippets.examples_wordcount_streaming([
'--input_subscription',
'projects/fake-beam-test-project/subscriptions/insub',
'--output_topic',
'projects/fake-beam-test-project/topic/outtopic'
])
def test_model_composite_transform_example(self):
contents = ['aa bb cc', 'bb cc', 'cc']
result_path = self.create_temp_file()
snippets.model_composite_transform_example(contents, result_path)
self.assertEqual(['aa: 1', 'bb: 2', 'cc: 3'], self.get_output(result_path))
def test_model_multiple_pcollections_flatten(self):
contents = ['a', 'b', 'c', 'd', 'e', 'f']
result_path = self.create_temp_file()
snippets.model_multiple_pcollections_flatten(contents, result_path)
self.assertEqual(contents, self.get_output(result_path))
def test_model_multiple_pcollections_partition(self):
contents = [17, 42, 64, 32, 0, 99, 53, 89]
result_path = self.create_temp_file()
snippets.model_multiple_pcollections_partition(contents, result_path)
self.assertEqual(['0', '17', '32', '42', '53', '64', '89', '99'],
self.get_output(result_path))
def test_model_group_by_key(self):
contents = ['a bb ccc bb bb a']
result_path = self.create_temp_file()
snippets.model_group_by_key(contents, result_path)
expected = [('a', 2), ('bb', 3), ('ccc', 1)]
self.assertEqual([str(s) for s in expected], self.get_output(result_path))
def test_model_co_group_by_key_tuple(self):
with TestPipeline() as p:
# [START model_group_by_key_cogroupbykey_tuple_inputs]
emails_list = [
('amy', '[email protected]'),
('carl', '[email protected]'),
('julia', '[email protected]'),
('carl', '[email protected]'),
]
phones_list = [
('amy', '111-222-3333'),
('james', '222-333-4444'),
('amy', '333-444-5555'),
('carl', '444-555-6666'),
]
emails = p | 'CreateEmails' >> beam.Create(emails_list)
phones = p | 'CreatePhones' >> beam.Create(phones_list)
# [END model_group_by_key_cogroupbykey_tuple_inputs]
result_path = self.create_temp_file()
snippets.model_co_group_by_key_tuple(emails, phones, result_path)
# [START model_group_by_key_cogroupbykey_tuple_outputs]
results = [
(
'amy',
{
'emails': ['[email protected]'],
'phones': ['111-222-3333', '333-444-5555']
}),
(
'carl',
{
'emails': ['[email protected]', '[email protected]'],
'phones': ['444-555-6666']
}),
('james', {
'emails': [], 'phones': ['222-333-4444']
}),
('julia', {
'emails': ['[email protected]'], 'phones': []
}),
]
# [END model_group_by_key_cogroupbykey_tuple_outputs]
# [START model_group_by_key_cogroupbykey_tuple_formatted_outputs]
formatted_results = [
"amy; ['[email protected]']; ['111-222-3333', '333-444-5555']",
"carl; ['[email protected]', '[email protected]']; ['444-555-6666']",
"james; []; ['222-333-4444']",
"julia; ['[email protected]']; []",
]
# [END model_group_by_key_cogroupbykey_tuple_formatted_outputs]
expected_results = [
'%s; %s; %s' % (name, info['emails'], info['phones']) for name,
info in results
]
self.assertEqual(expected_results, formatted_results)
self.assertEqual(formatted_results, self.get_output(result_path))
def test_model_use_and_query_metrics(self):
"""DebuggingWordCount example snippets."""
import re
p = TestPipeline() # Use TestPipeline for testing.
words = p | beam.Create(
['albert', 'sam', 'mark', 'sarah', 'swati', 'daniel', 'andrea'])
# pylint: disable=unused-variable
# [START metrics_usage_example]
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regex."""
def __init__(self, pattern):
self.pattern = pattern
# A custom metric can track values in your pipeline as it runs. Create
# custom metrics to count unmatched words, and know the distribution of
# word lengths in the input PCollection.
self.word_len_dist = Metrics.distribution(
self.__class__, 'word_len_dist')
self.unmatched_words = Metrics.counter(
self.__class__, 'unmatched_words')
def process(self, element):
word = element
self.word_len_dist.update(len(word))
if re.match(self.pattern, word):
yield element
else:
self.unmatched_words.inc()
filtered_words = (words | 'FilterText' >> beam.ParDo(FilterTextFn('s.*')))
# [END metrics_usage_example]
# pylint: enable=unused-variable
# [START metrics_check_values_example]
result = p.run()
result.wait_until_finish()
custom_distribution = result.metrics().query(
MetricsFilter().with_name('word_len_dist'))['distributions']
custom_counter = result.metrics().query(
MetricsFilter().with_name('unmatched_words'))['counters']
if custom_distribution:
logging.info(
'The average word length was %d',
custom_distribution[0].committed.mean)
if custom_counter:
logging.info(
'There were %d words that did not match the filter.',
custom_counter[0].committed)
# [END metrics_check_values_example]
# There should be 4 words that did not match
self.assertEqual(custom_counter[0].committed, 4)
# The shortest word is 3 characters, the longest is 6
self.assertEqual(custom_distribution[0].committed.min, 3)
self.assertEqual(custom_distribution[0].committed.max, 6)
def test_model_join_using_side_inputs(self):
name_list = ['a', 'b']
email_list = [['a', '[email protected]'], ['b', '[email protected]']]
phone_list = [['a', 'x4312'], ['b', 'x8452']]
result_path = self.create_temp_file()
snippets.model_join_using_side_inputs(
name_list, email_list, phone_list, result_path)
expect = ['a; [email protected]; x4312', 'b; [email protected]; x8452']
self.assertEqual(expect, self.get_output(result_path))
def test_model_early_late_triggers(self):
pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
with TestPipeline(options=pipeline_options) as p:
test_stream = (
TestStream().advance_watermark_to(10).add_elements([
'a', 'a', 'a', 'b', 'b'
]).add_elements([
TimestampedValue('a', 10)
]).advance_watermark_to(20).advance_processing_time(60).add_elements(
[TimestampedValue('a', 10)]))
trigger = (
# [START model_early_late_triggers]
AfterWatermark(
early=AfterProcessingTime(delay=1 * 60), late=AfterCount(1))
# [END model_early_late_triggers]
)
counts = (
p
| test_stream
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| WindowInto(
FixedWindows(15),
trigger=trigger,
allowed_lateness=20,
accumulation_mode=AccumulationMode.DISCARDING)
| 'group' >> beam.GroupByKey()
| 'count' >>
beam.Map(lambda word_ones: (word_ones[0], sum(word_ones[1]))))
assert_that(counts, equal_to([('a', 4), ('b', 2), ('a', 1)]))
def test_model_setting_trigger(self):
pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
with TestPipeline(options=pipeline_options) as p:
test_stream = (
TestStream().advance_watermark_to(10).add_elements(
['a', 'a', 'a', 'b',
'b']).advance_watermark_to(70).advance_processing_time(600))
pcollection = (
p
| test_stream
| 'pair_with_one' >> beam.Map(lambda x: (x, 1)))
counts = (
# [START model_setting_trigger]
pcollection | WindowInto(
FixedWindows(1 * 60),
trigger=AfterProcessingTime(10 * 60),
accumulation_mode=AccumulationMode.DISCARDING)
# [END model_setting_trigger]
| 'group' >> beam.GroupByKey()
| 'count' >>
beam.Map(lambda word_ones: (word_ones[0], sum(word_ones[1]))))
assert_that(counts, equal_to([('a', 3), ('b', 2)]))
def test_model_composite_triggers(self):
pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
with TestPipeline(options=pipeline_options) as p:
test_stream = (
TestStream().advance_watermark_to(10).add_elements(
['a', 'a', 'a', 'b', 'b']).advance_watermark_to(70).add_elements([
TimestampedValue('a', 10),
TimestampedValue('a', 10),
TimestampedValue('c', 10),
TimestampedValue('c', 10)
]).advance_processing_time(600))
pcollection = (
p
| test_stream
| 'pair_with_one' >> beam.Map(lambda x: (x, 1)))
counts = (
# [START model_composite_triggers]
pcollection | WindowInto(
FixedWindows(1 * 60),
trigger=AfterWatermark(late=AfterProcessingTime(10 * 60)),
allowed_lateness=10,
accumulation_mode=AccumulationMode.DISCARDING)
# [END model_composite_triggers]
| 'group' >> beam.GroupByKey()
| 'count' >>
beam.Map(lambda word_ones: (word_ones[0], sum(word_ones[1]))))
assert_that(counts, equal_to([('a', 3), ('b', 2), ('a', 2), ('c', 2)]))
def test_model_other_composite_triggers(self):
pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
with TestPipeline(options=pipeline_options) as p:
test_stream = (
TestStream().advance_watermark_to(10).add_elements(
['a', 'a']).add_elements(
['a', 'b',
'b']).advance_processing_time(60).add_elements(['a'] * 100))
pcollection = (
p
| test_stream
| 'pair_with_one' >> beam.Map(lambda x: (x, 1)))
counts = (
# [START model_other_composite_triggers]
pcollection | WindowInto(
FixedWindows(1 * 60),
trigger=Repeatedly(
AfterAny(AfterCount(100), AfterProcessingTime(1 * 60))),
accumulation_mode=AccumulationMode.DISCARDING)
# [END model_other_composite_triggers]
| 'group' >> beam.GroupByKey()
| 'count' >>
beam.Map(lambda word_ones: (word_ones[0], sum(word_ones[1]))))
assert_that(counts, equal_to([('a', 3), ('b', 2), ('a', 100)]))
class CombineTest(unittest.TestCase):
"""Tests for model/combine."""
def test_global_sum(self):
pc = [1, 2, 3]
# [START global_sum]
result = pc | beam.CombineGlobally(sum)
# [END global_sum]
self.assertEqual([6], result)
def test_combine_values(self):
occurences = [('cat', 1), ('cat', 5), ('cat', 9), ('dog', 5), ('dog', 2)]
# [START combine_values]
first_occurences = occurences | beam.GroupByKey() | beam.CombineValues(min)
# [END combine_values]
self.assertEqual({('cat', 1), ('dog', 2)}, set(first_occurences))
def test_combine_per_key(self):
player_accuracies = [('cat', 1), ('cat', 5), ('cat', 9), ('cat', 1),
('dog', 5), ('dog', 2)]
# [START combine_per_key]
avg_accuracy_per_player = (
player_accuracies
| beam.CombinePerKey(beam.combiners.MeanCombineFn()))
# [END combine_per_key]
self.assertEqual({('cat', 4.0), ('dog', 3.5)}, set(avg_accuracy_per_player))
def test_combine_concat(self):
pc = ['a', 'b']
# [START combine_concat]
def concat(values, separator=', '):
return separator.join(values)
with_commas = pc | beam.CombineGlobally(concat)
with_dashes = pc | beam.CombineGlobally(concat, separator='-')
# [END combine_concat]
self.assertEqual(1, len(with_commas))
self.assertTrue(with_commas[0] in {'a, b', 'b, a'})
self.assertEqual(1, len(with_dashes))
self.assertTrue(with_dashes[0] in {'a-b', 'b-a'})
def test_bounded_sum(self):
# [START combine_bounded_sum]
pc = [1, 10, 100, 1000]
def bounded_sum(values, bound=500):
return min(sum(values), bound)
small_sum = pc | beam.CombineGlobally(bounded_sum) # [500]
large_sum = pc | beam.CombineGlobally(bounded_sum, bound=5000) # [1111]
# [END combine_bounded_sum]
self.assertEqual([500], small_sum)
self.assertEqual([1111], large_sum)
def test_combine_reduce(self):
factors = [2, 3, 5, 7]
# [START combine_reduce]
import functools
import operator
product = factors | beam.CombineGlobally(
functools.partial(functools.reduce, operator.mul), 1)
# [END combine_reduce]
self.assertEqual([210], product)
def test_custom_average(self):
pc = [2, 3, 5, 7]
# [START combine_custom_average_define]
class AverageFn(beam.CombineFn):
def create_accumulator(self):
return (0.0, 0)
def add_input(self, sum_count, input):
(sum, count) = sum_count
return sum + input, count + 1
def merge_accumulators(self, accumulators):
sums, counts = zip(*accumulators)
return sum(sums), sum(counts)
def extract_output(self, sum_count):
(sum, count) = sum_count
return sum / count if count else float('NaN')
# [END combine_custom_average_define]
# [START combine_custom_average_execute]
average = pc | beam.CombineGlobally(AverageFn())
# [END combine_custom_average_execute]
self.assertEqual([4.25], average)
def test_keys(self):
occurrences = [('cat', 1), ('cat', 5), ('dog', 5), ('cat', 9), ('dog', 2)]
unique_keys = occurrences | snippets.Keys()
self.assertEqual({'cat', 'dog'}, set(unique_keys))
def test_count(self):
occurrences = ['cat', 'dog', 'cat', 'cat', 'dog']
perkey_counts = occurrences | snippets.Count()
self.assertEqual({('cat', 3), ('dog', 2)}, set(perkey_counts))
def test_setting_fixed_windows(self):
with TestPipeline() as p:
unkeyed_items = p | beam.Create([22, 33, 55, 100, 115, 120])
items = (
unkeyed_items
| 'key' >>
beam.Map(lambda x: beam.window.TimestampedValue(('k', x), x)))
# [START setting_fixed_windows]
from apache_beam import window
fixed_windowed_items = (
items | 'window' >> beam.WindowInto(window.FixedWindows(60)))
# [END setting_fixed_windows]
summed = (
fixed_windowed_items
| 'group' >> beam.GroupByKey()
| 'combine' >> beam.CombineValues(sum))
unkeyed = summed | 'unkey' >> beam.Map(lambda x: x[1])
assert_that(unkeyed, equal_to([110, 215, 120]))
def test_setting_sliding_windows(self):
with TestPipeline() as p:
unkeyed_items = p | beam.Create([2, 16, 23])
items = (
unkeyed_items
| 'key' >>
beam.Map(lambda x: beam.window.TimestampedValue(('k', x), x)))
# [START setting_sliding_windows]
from apache_beam import window
sliding_windowed_items = (
items | 'window' >> beam.WindowInto(window.SlidingWindows(30, 5)))
# [END setting_sliding_windows]
summed = (
sliding_windowed_items
| 'group' >> beam.GroupByKey()
| 'combine' >> beam.CombineValues(sum))
unkeyed = summed | 'unkey' >> beam.Map(lambda x: x[1])
assert_that(unkeyed, equal_to([2, 2, 2, 18, 23, 39, 39, 39, 41, 41]))
def test_setting_session_windows(self):
with TestPipeline() as p:
unkeyed_items = p | beam.Create([2, 11, 16, 27])
items = (
unkeyed_items
| 'key' >>
beam.Map(lambda x: beam.window.TimestampedValue(('k', x), x * 60)))
# [START setting_session_windows]
from apache_beam import window
session_windowed_items = (
items | 'window' >> beam.WindowInto(window.Sessions(10 * 60)))
# [END setting_session_windows]
summed = (
session_windowed_items
| 'group' >> beam.GroupByKey()
| 'combine' >> beam.CombineValues(sum))
unkeyed = summed | 'unkey' >> beam.Map(lambda x: x[1])
assert_that(unkeyed, equal_to([29, 27]))
def test_setting_global_window(self):
with TestPipeline() as p:
unkeyed_items = p | beam.Create([2, 11, 16, 27])
items = (
unkeyed_items
| 'key' >>
beam.Map(lambda x: beam.window.TimestampedValue(('k', x), x)))
# [START setting_global_window]
from apache_beam import window
session_windowed_items = (
items | 'window' >> beam.WindowInto(window.GlobalWindows()))
# [END setting_global_window]
summed = (
session_windowed_items
| 'group' >> beam.GroupByKey()
| 'combine' >> beam.CombineValues(sum))
unkeyed = summed | 'unkey' >> beam.Map(lambda x: x[1])
assert_that(unkeyed, equal_to([56]))
def test_setting_timestamp(self):
with TestPipeline() as p:
unkeyed_items = p | beam.Create([12, 30, 60, 61, 66])
items = (unkeyed_items | 'key' >> beam.Map(lambda x: ('k', x)))
def extract_timestamp_from_log_entry(entry):
return entry[1]
# [START setting_timestamp]
class AddTimestampDoFn(beam.DoFn):
def process(self, element):
# Extract the numeric Unix seconds-since-epoch timestamp to be
# associated with the current log entry.
unix_timestamp = extract_timestamp_from_log_entry(element)
# Wrap and emit the current entry and new timestamp in a
# TimestampedValue.
yield beam.window.TimestampedValue(element, unix_timestamp)
timestamped_items = items | 'timestamp' >> beam.ParDo(AddTimestampDoFn())
# [END setting_timestamp]
fixed_windowed_items = (
timestamped_items
| 'window' >> beam.WindowInto(beam.window.FixedWindows(60)))
summed = (
fixed_windowed_items
| 'group' >> beam.GroupByKey()
| 'combine' >> beam.CombineValues(sum))
unkeyed = summed | 'unkey' >> beam.Map(lambda x: x[1])
assert_that(unkeyed, equal_to([42, 187]))
class PTransformTest(unittest.TestCase):
"""Tests for PTransform."""
def test_composite(self):
# [START model_composite_transform]
class ComputeWordLengths(beam.PTransform):
def expand(self, pcoll):
# Transform logic goes here.
return pcoll | beam.Map(lambda x: len(x))
# [END model_composite_transform]
with TestPipeline() as p:
lengths = p | beam.Create(["a", "ab", "abc"]) | ComputeWordLengths()
assert_that(lengths, equal_to([1, 2, 3]))
class SlowlyChangingSideInputsTest(unittest.TestCase):
"""Tests for PTransform."""
def test_side_input_slow_update(self):
temp_file = tempfile.NamedTemporaryFile(delete=True)
src_file_pattern = temp_file.name
temp_file.close()
first_ts = math.floor(time.time()) - 30
interval = 5
main_input_windowing_interval = 7
# aligning timestamp to get persistent results
first_ts = first_ts - (
first_ts % (interval * main_input_windowing_interval))
last_ts = first_ts + 45
for i in range(-1, 10, 1):
count = i + 2
idstr = str(first_ts + interval * i)
with open(src_file_pattern + idstr, "w") as f:
for j in range(count):
f.write('f' + idstr + 'a' + str(j) + '\n')
sample_main_input_elements = ([first_ts - 2, # no output due to no SI
first_ts + 1, # First window
first_ts + 8, # Second window
first_ts + 15, # Third window
first_ts + 22, # Fourth window
])
pipeline, pipeline_result = snippets.side_input_slow_update(
src_file_pattern, first_ts, last_ts, interval,
sample_main_input_elements, main_input_windowing_interval)
try:
with pipeline:
pipeline_result = (
pipeline_result
| 'AddKey' >> beam.Map(lambda v: ('key', v))
| combiners.Count.PerKey())
assert_that(
pipeline_result,
equal_to([('key', 3), ('key', 4), ('key', 6), ('key', 7)]))
finally:
for i in range(-1, 10, 1):
os.unlink(src_file_pattern + str(first_ts + interval * i))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 36.406618 | 80 | 0.648153 |
793ff198e885845b12d8fcaa050e70842be2bf21 | 2,079 | py | Python | steps/step48.py | tokupooh/DeZero | 8405b72c0ffcf49a946ef6b3ac6fecae97da7c2c | [
"MIT"
] | null | null | null | steps/step48.py | tokupooh/DeZero | 8405b72c0ffcf49a946ef6b3ac6fecae97da7c2c | [
"MIT"
] | 3 | 2021-06-08T22:13:02.000Z | 2022-01-13T03:12:07.000Z | steps/step48.py | tokupooh/DeZero | 8405b72c0ffcf49a946ef6b3ac6fecae97da7c2c | [
"MIT"
] | null | null | null | if '__file__' in globals():
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import math
import matplotlib.pyplot as plt
import numpy as np
import dezero
import dezero.functions as F
from dezero import optimizers
from dezero.models import MLP
if __name__ == '__main__':
# Hyperparameters
max_epoch = 300
batch_size = 30
hidden_size = 10
lr = 1.0
x, t = dezero.datasets.get_spiral(train=True)
model = MLP((hidden_size, 3))
optimizer = optimizers.SGD(lr).setup(model)
data_size = len(x)
max_iter = math.ceil(data_size / batch_size)
for epoch in range(max_epoch):
# Shuffle index for data
index = np.random.permutation(data_size)
sum_loss = 0
for i in range(max_iter):
batch_index = index[i * batch_size:(i + 1) * batch_size]
batch_x = x[batch_index]
batch_t = t[batch_index]
y = model(batch_x)
loss = F.softmax_cross_entropy(y, batch_t)
model.cleargrads()
loss.backward()
optimizer.update()
sum_loss += float(loss.data) * len(batch_t)
# Print loss every epoch
avg_loss = sum_loss / data_size
print('epoch %d, loss %.2f' % (epoch + 1, avg_loss))
# Plot boundary area the model predict
h = 0.001
x_min, x_max = x[:, 0].min() - .1, x[:, 0].max() + .1
y_min, y_max = x[:, 1].min() - .1, x[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
X = np.c_[xx.ravel(), yy.ravel()]
with dezero.no_grad():
score = model(X)
predict_cls = np.argmax(score.data, axis=1)
Z = predict_cls.reshape(xx.shape)
plt.contourf(xx, yy, Z)
# Plot data points of the dataset
N, CLS_NUM = 100, 3
markers = ['o', 'x', '^']
colors = ['orange', 'blue', 'green']
for i in range(len(x)):
c = t[i]
plt.scatter(x[i][0], x[i][1], s=40, marker=markers[c], c=colors[c])
plt.savefig('step48.png')
| 28.094595 | 75 | 0.580087 |
793ff1a5ee34cf8a33e90b4d63437baf1fa7e47d | 540 | py | Python | backend/app/alembic/versions/3161ae076013_.py | lianjy357/vue-element-admin-fastapi | 70f697af33ca747a154d0de129f4cbd7f9d03f7b | [
"MIT"
] | 10 | 2020-12-16T07:31:29.000Z | 2022-01-27T08:01:22.000Z | backend/app/alembic/versions/3161ae076013_.py | lianjy357/vue-element-admin-fastapi | 70f697af33ca747a154d0de129f4cbd7f9d03f7b | [
"MIT"
] | null | null | null | backend/app/alembic/versions/3161ae076013_.py | lianjy357/vue-element-admin-fastapi | 70f697af33ca747a154d0de129f4cbd7f9d03f7b | [
"MIT"
] | 3 | 2021-03-18T11:38:21.000Z | 2021-09-02T06:23:15.000Z | """
Revision ID: 3161ae076013
Revises: bb6f7651ffa8
Create Date: 2020-07-08 19:34:22.355324
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3161ae076013'
down_revision = 'bb6f7651ffa8'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 18.62069 | 65 | 0.681481 |
793ff2f2dfc226595f91cf53a39cd57f306e4936 | 5,958 | py | Python | docs/conf.py | vindex10/AmpliGraph | 1aa91a4b32081bcaad4e7386032b9ac85deb99d6 | [
"Apache-2.0"
] | 2 | 2019-06-24T23:21:28.000Z | 2020-10-28T02:57:59.000Z | docs/conf.py | rezacsedu/AmpliGraph | 94a5c31187f1267782e95a8d3380ccd1a53221bd | [
"Apache-2.0"
] | null | null | null | docs/conf.py | rezacsedu/AmpliGraph | 94a5c31187f1267782e95a8d3380ccd1a53221bd | [
"Apache-2.0"
] | 1 | 2020-05-09T20:03:02.000Z | 2020-05-09T20:03:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ampligraph documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 17 14:10:15 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
import sphinx_rtd_theme
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import ampligraph
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinxcontrib.bibtex',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary'
]
autodoc_mock_imports = ["tensorflow"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_parsers = {'.md': CommonMarkParser}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AmpliGraph'
copyright = 'AmpliGraph is licensed under the Apache 2.0 License'
author = 'Accenture Labs Dublin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ampligraph.__version__
# The full version, including alpha/beta/rc tags.
release = ampligraph.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['ampligraph.']
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
html_show_sourcelink = False
html_show_sphinx = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'xai_lpdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ampligraph.tex', 'AmpliGraph',
'Luca Costabello - Accenture Labs Dublin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ampligraph', 'AmpliGraph Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ampligraph', 'ampligraph Documentation',
author, 'Accenture Dublin Labs', 'Open source Python library that predicts links between concepts in a knowledge graph.',
''),
]
| 30.397959 | 126 | 0.694025 |
793ff3afcca927ec190ae0fd72aa4ca9cb49f0ee | 649 | py | Python | openprocurement/auctions/insider/views/award_complaint.py | oleksiyVeretiuk/openprocurement.auctions.insider | f380caa9b63dadbfe7bd1f9512e21c486a138457 | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/insider/views/award_complaint.py | oleksiyVeretiuk/openprocurement.auctions.insider | f380caa9b63dadbfe7bd1f9512e21c486a138457 | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/insider/views/award_complaint.py | oleksiyVeretiuk/openprocurement.auctions.insider | f380caa9b63dadbfe7bd1f9512e21c486a138457 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
opresource,
)
from openprocurement.auctions.dgf.views.financial.award_complaint import (
FinancialAuctionAwardComplaintResource,
)
@opresource(name='dgfInsider:Auction Award Complaints',
collection_path='/auctions/{auction_id}/awards/{award_id}/complaints',
path='/auctions/{auction_id}/awards/{award_id}/complaints/{complaint_id}',
auctionsprocurementMethodType="dgfInsider",
description="Insider auction award complaints")
class InsiderAuctionAwardComplaintResource(FinancialAuctionAwardComplaintResource):
pass | 40.5625 | 86 | 0.751926 |
793ff40e5abbbf730168f203a02195c3564fc386 | 439 | py | Python | timevortex/wsgi.py | timevortexproject/timevortex | 2bc1a50b255524af8582e6624dee280d64d3c9f3 | [
"MIT"
] | null | null | null | timevortex/wsgi.py | timevortexproject/timevortex | 2bc1a50b255524af8582e6624dee280d64d3c9f3 | [
"MIT"
] | null | null | null | timevortex/wsgi.py | timevortexproject/timevortex | 2bc1a50b255524af8582e6624dee280d64d3c9f3 | [
"MIT"
] | null | null | null | """
WSGI config for timevortex project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timevortex.settings.production")
application = get_wsgi_application() # pylint: disable=I0011,C0103
| 25.823529 | 81 | 0.788155 |
793ff44127513a9b0b4c6104c31b17da1b705f47 | 643 | py | Python | tests/integration/test_tcp.py | ozonru/mbtest | 90013cdc2cec07c310a910c037f10f7ba9de9a9f | [
"MIT"
] | 26 | 2018-10-10T16:29:59.000Z | 2021-12-28T00:07:57.000Z | tests/integration/test_tcp.py | ozonru/mbtest | 90013cdc2cec07c310a910c037f10f7ba9de9a9f | [
"MIT"
] | 54 | 2019-02-02T08:16:04.000Z | 2022-03-03T07:16:59.000Z | tests/integration/test_tcp.py | ozonru/mbtest | 90013cdc2cec07c310a910c037f10f7ba9de9a9f | [
"MIT"
] | 14 | 2019-02-01T23:08:07.000Z | 2021-12-16T11:28:30.000Z | # encoding=utf-8
import logging
import socket
from hamcrest import assert_that, is_
from mbtest.imposters import Imposter, Stub, TcpPredicate, TcpResponse
logger = logging.getLogger(__name__)
def test_tcp(mock_server):
imposter = Imposter(
Stub(TcpPredicate(data="request"), TcpResponse(data="*" * 1024)),
protocol=Imposter.Protocol.TCP,
)
with mock_server(imposter):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((imposter.host, imposter.port))
client.send(b"request")
response = client.recv(1024)
assert_that(response, is_(b"*" * 1024))
| 24.730769 | 73 | 0.692068 |
793ff512cea13db13d5459641b66dd4c18dbe98c | 4,011 | py | Python | fabtools/require/system.py | gisce/fabtools | 6324b1da285083b2545c8d441ecb572d00ee8781 | [
"BSD-2-Clause"
] | null | null | null | fabtools/require/system.py | gisce/fabtools | 6324b1da285083b2545c8d441ecb572d00ee8781 | [
"BSD-2-Clause"
] | null | null | null | fabtools/require/system.py | gisce/fabtools | 6324b1da285083b2545c8d441ecb572d00ee8781 | [
"BSD-2-Clause"
] | null | null | null | """
System settings
===============
"""
from re import escape
from fabric.api import settings, warn
from fabric.contrib.files import append, uncomment
from fabtools.files import is_file, watch
from fabtools.system import (
UnsupportedFamily,
distrib_family, distrib_id,
get_hostname, set_hostname,
get_sysctl, set_sysctl,
supported_locales,
)
from fabtools.utils import run_as_root
class UnsupportedLocales(Exception):
def __init__(self, locales):
self.locales = sorted(locales)
msg = "Unsupported locales: %s" % ', '.join(self.locales)
super(UnsupportedLocales, self).__init__(msg)
def sysctl(key, value, persist=True):
"""
Require a kernel parameter to have a specific value.
"""
if get_sysctl(key) != value:
set_sysctl(key, value)
if persist:
from fabtools.require import file as require_file
filename = '/etc/sysctl.d/60-%s.conf' % key
with watch(filename, use_sudo=True) as config:
require_file(filename,
contents='%(key)s = %(value)s\n' % locals(),
use_sudo=True)
if config.changed:
if distrib_family() == 'debian':
with settings(warn_only=True):
run_as_root('service procps start')
def hostname(name):
"""
Require the hostname to have a specific value.
"""
if get_hostname() != name:
set_hostname(name)
def locales(names):
"""
Require the list of locales to be available.
Raises UnsupportedLocales if some of the required locales
are not supported.
"""
from fabtools.system import distrib_release
family = distrib_family()
if family == 'debian':
command = 'dpkg-reconfigure --frontend=noninteractive locales'
#if distrib_id() == 'Ubuntu':
# config_file = '/var/lib/locales/supported.d/local'
# if not is_file(config_file):
# run_as_root('touch %s' % config_file)
#else:
config_file = '/etc/locale.gen'
_locales_generic(names, config_file=config_file, command=command)
elif family in ['arch', 'gentoo']:
_locales_generic(names, config_file='/etc/locale.gen', command='locale-gen')
elif distrib_family() == 'redhat':
_locales_redhat(names)
else:
raise UnsupportedFamily(supported=['debian', 'arch', 'gentoo', 'redhat'])
def _locales_generic(names, config_file, command):
supported = supported_locales()
_check_for_unsupported_locales(names, supported)
# Regenerate locales if config file changes
with watch(config_file, use_sudo=True) as config:
# Add valid locale names to the config file
charset_from_name = dict(supported)
for name in names:
charset = charset_from_name[name]
locale = "%s %s" % (name, charset)
uncomment(config_file, escape(locale), use_sudo=True, shell=True)
append(config_file, locale, use_sudo=True, partial=True, shell=True)
if config.changed:
run_as_root(command)
def _locales_redhat(names):
supported = supported_locales()
_check_for_unsupported_locales(names, supported)
def _check_for_unsupported_locales(names, supported):
missing = set(names) - set([name for name, _ in supported])
if missing:
raise UnsupportedLocales(missing)
def locale(name):
"""
Require the locale to be available.
Raises UnsupportedLocales if the required locale is not supported.
"""
locales([name])
def default_locale(name):
"""
Require the locale to be the default.
"""
from fabtools.require import file as require_file
# Ensure the locale is available
locale(name)
# Make it the default
contents = 'LANG="%s"\n' % name
if distrib_family() == 'arch':
config_file = '/etc/locale.conf'
else:
config_file = '/etc/default/locale'
require_file(config_file, contents, use_sudo=True)
| 28.246479 | 84 | 0.64722 |
793ff748e60fc15c0539f9863fea2d4c3a56d155 | 7,118 | py | Python | pytorch_lightning/loops/base.py | Flash-321/pytorch-lightning | cdb6f979a062a639a6d709a0e1915a07d5ed50f6 | [
"Apache-2.0"
] | 2 | 2021-06-25T08:42:32.000Z | 2021-06-25T08:49:33.000Z | pytorch_lightning/loops/base.py | Flash-321/pytorch-lightning | cdb6f979a062a639a6d709a0e1915a07d5ed50f6 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/loops/base.py | Flash-321/pytorch-lightning | cdb6f979a062a639a6d709a0e1915a07d5ed50f6 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional
from deprecate import void
import pytorch_lightning as pl
from pytorch_lightning.trainer.progress import BaseProgress, Progress
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class Loop(ABC):
"""
Basic Loops interface. All classes derived from this must implement the following properties and methods:
* :attr:`done` (property): Condition to break the loop
* :attr:`reset` (method): Resets the internal state between multiple calls of :attr:`run`
* :attr:`advance` (method): Implements one step of the loop
This class implements the following loop structure:
.. codeblock:: python
on_run_start()
while not done:
on_advance_start()
advance()
on_advance_end()
on_run_end()
"""
def __init__(self) -> None:
# TODO: replace by progress tracking
self.iteration_count: int = 0
self.restarting = False
self._trainer: Optional["pl.Trainer"] = None
@property
def trainer(self) -> Optional["pl.Trainer"]:
return self._trainer
@trainer.setter
def trainer(self, trainer: "pl.Trainer"):
"""Connects this loop's trainer and its children"""
if not isinstance(trainer, pl.Trainer):
raise MisconfigurationException(
f"Loop {self.__class__.__name__} should be connected to a `Trainer`, found: {trainer}."
)
self._trainer = trainer
for v in self.__dict__.values():
if isinstance(v, Loop):
v.trainer = trainer
@property
@abstractmethod
def done(self) -> bool:
"""Property indicating when loop is finished"""
@property
def skip(self) -> bool:
"""Determine whether to return immediately from the call to :meth:`run`."""
return False
def connect(self, **kwargs: "Loop") -> None:
"""Optionally connect one or multiple loops to this one. Linked loops should form a tree."""
def on_skip(self) -> Optional[Any]:
"""
The function to run when :meth:`run` should be skipped, determined by the condition in :attr:`skip`.
Returns:
the default output value of :meth:`on_run_end`
"""
def run(self, *args: Any, **kwargs: Any) -> Optional[Any]:
"""
The main entry point to the loop.
Will frequently check the :attr:`done` condition and calls :attr:`advance`
until :attr:`done` evaluates to ``True``.
Returns:
the output of :attr:`on_run_end` (often outputs collected from each step of the loop)
"""
if self.skip:
return self.on_skip()
self.reset()
self.on_run_start(*args, **kwargs)
while not self.done:
try:
self.on_advance_start(*args, **kwargs)
self.advance(*args, **kwargs)
self.on_advance_end()
self.iteration_count += 1
self.restarting = False
except StopIteration:
break
output = self.on_run_end()
return output
@abstractmethod
def reset(self) -> None:
"""Resets the internal state of the loop at the beginning of each call to :attr:`run`."""
def on_run_start(self, *args: Any, **kwargs: Any) -> None:
"""
Hook to be called as the first thing after entering :attr:`run` (except the state reset).
Accepts all arguments passed to :attr:`run`.
"""
void(*args, **kwargs)
def on_advance_start(self, *args: Any, **kwargs: Any) -> None:
"""
Hook to be called each time before :attr:`advance` is called. Accepts all arguments passed to :attr`run`.
"""
void(*args, **kwargs)
@abstractmethod
def advance(self, *args: Any, **kwargs: Any) -> None:
"""Performs a single step. Accepts all arguments passed to :attr:`run`."""
def on_advance_end(self) -> None:
"""Hook to be called each time after :attr:`advance` is called."""
def on_run_end(self) -> Any:
"""Hook to be called at the end of the run. Its return argument is returned from :attr:`run`."""
def teardown(self) -> None:
"""Use to release memory etc."""
def on_save_checkpoint(self) -> Dict:
"""
Called when saving a model checkpoint, use to persist loop state.
Returns:
The current loop state.
"""
return {}
def on_load_checkpoint(self, state_dict: Dict) -> None:
"""Called when loading a model checkpoint, use to reload loop state."""
def state_dict(self, destination: Optional[Dict] = None, prefix: Optional[str] = "") -> Dict:
"""
The state dict is determined by the state and progress of this loop and all its children.
Args:
destination: An existing dictionary to update with this loop's state. By default a new dictionary
is returned.
prefix: A prefix for each key in the state dictionary
"""
if destination is None:
destination = {}
destination[prefix + "state_dict"] = self.on_save_checkpoint()
for k, v in self.__dict__.items():
if isinstance(v, BaseProgress):
destination[prefix + k] = v.state_dict()
elif isinstance(v, Loop):
v.state_dict(destination, prefix + k + ".")
return destination
def load_state_dict(self, state_dict: Dict, prefix: str = "", restart_progress: bool = True) -> None:
"""Loads the state of this loop and all its children."""
self._load_from_state_dict(state_dict.copy(), prefix, restart_progress)
for k, v in self.__dict__.items():
if isinstance(v, Loop):
v.load_state_dict(state_dict.copy(), prefix + k + ".", restart_progress)
def _load_from_state_dict(self, state_dict: Dict, prefix: str, restart_progress: bool) -> None:
for k, v in self.__dict__.items():
if isinstance(v, BaseProgress):
v.load_state_dict(state_dict[prefix + k])
if restart_progress:
apply_to_collection(v, Progress, lambda p: p.current.reset_on_restart())
self.on_load_checkpoint(state_dict[prefix + "state_dict"])
self.restarting = True
| 35.412935 | 113 | 0.622647 |
793ff792b3e2b58612a572170b55e70a5da740fd | 224 | py | Python | BOOK/MAIN/05-file-handling/chapter-5-examples/23-b-display-records.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | BOOK/MAIN/05-file-handling/chapter-5-examples/23-b-display-records.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | BOOK/MAIN/05-file-handling/chapter-5-examples/23-b-display-records.py | kabirsrivastava3/python-practice | f56a4a0764031d3723b0ba4cd1418a1a83b1e4f5 | [
"MIT"
] | null | null | null | #read and display the contents of Employee.csv in 23-a
import csv
with open('Employee.csv',"r") as fhObj:
eReader = csv.reader(fhObj)
print("File Employee.csv contains :")
for rec in eReader:
print(rec)
| 24.888889 | 54 | 0.674107 |
793ff793b83179653791c4848ce694322b8c258d | 21,355 | py | Python | src/aiortc/rtcrtpreceiver.py | vibhaa/aiortc | 764d5f63a1eef0367e4c2159e515e7bb87a31f80 | [
"BSD-3-Clause"
] | null | null | null | src/aiortc/rtcrtpreceiver.py | vibhaa/aiortc | 764d5f63a1eef0367e4c2159e515e7bb87a31f80 | [
"BSD-3-Clause"
] | 2 | 2021-11-17T15:12:48.000Z | 2021-12-10T01:12:24.000Z | src/aiortc/rtcrtpreceiver.py | vibhaa/aiortc | 764d5f63a1eef0367e4c2159e515e7bb87a31f80 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import datetime
import logging
import queue
import random
import threading
import time
from dataclasses import dataclass
from typing import Dict, List, Optional, Set
from av.frame import Frame
from . import clock
from .codecs import depayload, get_capabilities, get_decoder, is_rtx
from .exceptions import InvalidStateError
from .jitterbuffer import JitterBuffer
from .mediastreams import MediaStreamError, MediaStreamTrack
from .rate import RemoteBitrateEstimator
from .rtcdtlstransport import RTCDtlsTransport
from .rtcrtpparameters import (
RTCRtpCapabilities,
RTCRtpCodecParameters,
RTCRtpReceiveParameters,
)
from .rtp import (
RTCP_PSFB_APP,
RTCP_PSFB_PLI,
RTCP_RTPFB_NACK,
AnyRtcpPacket,
RtcpByePacket,
RtcpPsfbPacket,
RtcpReceiverInfo,
RtcpRrPacket,
RtcpRtpfbPacket,
RtcpSrPacket,
RtpPacket,
clamp_packets_lost,
pack_remb_fci,
unwrap_rtx,
)
from .stats import (
RTCInboundRtpStreamStats,
RTCRemoteOutboundRtpStreamStats,
RTCStatsReport,
)
from .utils import uint16_add, uint16_gt
logger = logging.getLogger(__name__)
def decoder_worker(loop, input_q, output_q):
codec_name = None
decoder = None
while True:
task = input_q.get()
if task is None:
# inform the track that is has ended
asyncio.run_coroutine_threadsafe(output_q.put(None), loop)
break
codec, encoded_frame = task
if codec.name != codec_name:
decoder = get_decoder(codec)
codec_name = codec.name
logger.debug(f"RTCRtpReceiver(%s) retrieved the decoder", codec_name)
decoded_frames = decoder.decode(encoded_frame)
logger.debug(f"RTCRtpReceiver(%s) decoding timestamp %s, got %d frames", codec_name, encoded_frame.timestamp, len(decoded_frames))
for frame in decoded_frames:
# pass the decoded frame to the track
asyncio.run_coroutine_threadsafe(output_q.put(frame), loop)
if decoder is not None:
del decoder
class NackGenerator:
def __init__(self) -> None:
self.max_seq: Optional[int] = None
self.missing: Set[int] = set()
def add(self, packet: RtpPacket) -> bool:
missed = False
if self.max_seq is None:
self.max_seq = packet.sequence_number
return missed
# mark missing packets
if uint16_gt(packet.sequence_number, self.max_seq):
seq = uint16_add(self.max_seq, 1)
while uint16_gt(packet.sequence_number, seq):
self.missing.add(seq)
missed = True
seq = uint16_add(seq, 1)
self.max_seq = packet.sequence_number
if missed:
logger.debug(f"RTCRtpReceiver(%s) missed packets", self.missing)
else:
self.missing.discard(packet.sequence_number)
return missed
class StreamStatistics:
def __init__(self, clockrate: int) -> None:
self.base_seq: Optional[int] = None
self.max_seq: Optional[int] = None
self.cycles = 0
self.packets_received = 0
# jitter
self._clockrate = clockrate
self._jitter_q4 = 0
self._last_arrival: Optional[int] = None
self._last_timestamp: Optional[int] = None
# fraction lost
self._expected_prior = 0
self._received_prior = 0
def add(self, packet: RtpPacket) -> None:
in_order = self.max_seq is None or uint16_gt(
packet.sequence_number, self.max_seq
)
self.packets_received += 1
if self.base_seq is None:
self.base_seq = packet.sequence_number
if in_order:
arrival = int(time.time() * self._clockrate)
if self.max_seq is not None and packet.sequence_number < self.max_seq:
self.cycles += 1 << 16
self.max_seq = packet.sequence_number
if packet.timestamp != self._last_timestamp and self.packets_received > 1:
diff = abs(
(arrival - self._last_arrival)
- (packet.timestamp - self._last_timestamp)
)
self._jitter_q4 += diff - ((self._jitter_q4 + 8) >> 4)
self._last_arrival = arrival
self._last_timestamp = packet.timestamp
@property
def fraction_lost(self) -> int:
expected_interval = self.packets_expected - self._expected_prior
self._expected_prior = self.packets_expected
received_interval = self.packets_received - self._received_prior
self._received_prior = self.packets_received
lost_interval = expected_interval - received_interval
if expected_interval == 0 or lost_interval <= 0:
return 0
else:
return (lost_interval << 8) // expected_interval
@property
def jitter(self) -> int:
return self._jitter_q4 >> 4
@property
def packets_expected(self) -> int:
return self.cycles + self.max_seq - self.base_seq + 1
@property
def packets_lost(self) -> int:
return clamp_packets_lost(self.packets_expected - self.packets_received)
class RemoteStreamTrack(MediaStreamTrack):
def __init__(self, kind: str, id: Optional[str] = None) -> None:
super().__init__()
self.kind = kind
if id is not None:
self._id = id
self._queue: asyncio.Queue = asyncio.Queue()
async def recv(self) -> Frame:
"""
Receive the next frame.
"""
if self.readyState != "live":
raise MediaStreamError
frame = await self._queue.get()
if frame is None:
self.stop()
logger.debug(f"RTCRtpReceiver(%s) received None frame", self.kind)
raise MediaStreamError
logger.debug(f"RTCRtpReceiver(%s) received the next frame", self.kind)
return frame
class TimestampMapper:
def __init__(self) -> None:
self._last: Optional[int] = None
self._origin: Optional[int] = None
def map(self, timestamp: int) -> int:
if self._origin is None:
# first timestamp
self._origin = timestamp
elif timestamp < self._last:
# RTP timestamp wrapped
self._origin -= 1 << 32
self._last = timestamp
return timestamp - self._origin
@dataclass
class RTCRtpContributingSource:
"""
The :class:`RTCRtpContributingSource` dictionary contains information about
a contributing source (CSRC).
"""
timestamp: datetime.datetime
"The timestamp associated with this source."
source: int
"The CSRC identifier associated with this source."
@dataclass
class RTCRtpSynchronizationSource:
"""
The :class:`RTCRtpSynchronizationSource` dictionary contains information about
a synchronization source (SSRC).
"""
timestamp: datetime.datetime
"The timestamp associated with this source."
source: int
"The SSRC identifier associated with this source."
class RTCRtpReceiver:
"""
The :class:`RTCRtpReceiver` interface manages the reception and decoding
of data for a :class:`MediaStreamTrack`.
:param kind: The kind of media (`'audio'` or `'video'` or `'keypoints'`).
:param transport: An :class:`RTCDtlsTransport`.
"""
def __init__(self, kind: str, transport: RTCDtlsTransport) -> None:
if transport.state == "closed":
raise InvalidStateError
self.__active_ssrc: Dict[int, datetime.datetime] = {}
self.__codecs: Dict[int, RTCRtpCodecParameters] = {}
self.__decoder_queue: queue.Queue = queue.Queue()
self.__decoder_thread: Optional[threading.Thread] = None
self.__kind = kind
if kind == "audio":
self.__jitter_buffer = JitterBuffer(capacity=16, prefetch=4)
self.__nack_generator = None
self.__remote_bitrate_estimator = None
elif kind == "keypoints":
self.__jitter_buffer = JitterBuffer(capacity=128)
self.__nack_generator = NackGenerator()
self.__remote_bitrate_estimator = None
else:
self.__jitter_buffer = JitterBuffer(capacity=128, is_video=True)
self.__nack_generator = NackGenerator()
self.__remote_bitrate_estimator = RemoteBitrateEstimator()
self._track: Optional[RemoteStreamTrack] = None
self.__rtcp_exited = asyncio.Event()
self.__rtcp_task: Optional[asyncio.Future[None]] = None
self.__rtx_ssrc: Dict[int, int] = {}
self.__started = False
self.__stats = RTCStatsReport()
self.__timestamp_mapper = TimestampMapper()
self.__transport = transport
# RTCP
self.__lsr: Dict[int, int] = {}
self.__lsr_time: Dict[int, float] = {}
self.__remote_streams: Dict[int, StreamStatistics] = {}
self.__rtcp_ssrc: Optional[int] = None
# dropping
self.__dropped_packet_time = -10000
@property
def track(self) -> MediaStreamTrack:
"""
The :class:`MediaStreamTrack` which is being handled by the receiver.
"""
return self._track
@property
def transport(self) -> RTCDtlsTransport:
"""
The :class:`RTCDtlsTransport` over which the media for the receiver's
track is received.
"""
return self.__transport
@classmethod
def getCapabilities(self, kind) -> Optional[RTCRtpCapabilities]:
"""
Returns the most optimistic view of the system's capabilities for
receiving media of the given `kind`.
:rtype: :class:`RTCRtpCapabilities`
"""
return get_capabilities(kind)
async def getStats(self) -> RTCStatsReport:
"""
Returns statistics about the RTP receiver.
:rtype: :class:`RTCStatsReport`
"""
for ssrc, stream in self.__remote_streams.items():
self.__stats.add(
RTCInboundRtpStreamStats(
# RTCStats
timestamp=clock.current_datetime(),
type="inbound-rtp",
id="inbound-rtp_" + str(id(self)),
# RTCStreamStats
ssrc=ssrc,
kind=self.__kind,
transportId=self.transport._stats_id,
# RTCReceivedRtpStreamStats
packetsReceived=stream.packets_received,
packetsLost=stream.packets_lost,
jitter=stream.jitter,
# RTPInboundRtpStreamStats
)
)
self.__stats.update(self.transport._get_stats())
return self.__stats
def getSynchronizationSources(self) -> List[RTCRtpSynchronizationSource]:
"""
Returns a :class:`RTCRtpSynchronizationSource` for each unique SSRC identifier
received in the last 10 seconds.
"""
cutoff = clock.current_datetime() - datetime.timedelta(seconds=10)
sources = []
for source, timestamp in self.__active_ssrc.items():
if timestamp >= cutoff:
sources.append(
RTCRtpSynchronizationSource(source=source, timestamp=timestamp)
)
return sources
async def receive(self, parameters: RTCRtpReceiveParameters) -> None:
"""
Attempt to set the parameters controlling the receiving of media.
:param parameters: The :class:`RTCRtpParameters` for the receiver.
"""
if not self.__started:
for codec in parameters.codecs:
self.__codecs[codec.payloadType] = codec
for encoding in parameters.encodings:
if encoding.rtx:
self.__rtx_ssrc[encoding.rtx.ssrc] = encoding.ssrc
# start decoder thread
self.__decoder_thread = threading.Thread(
target=decoder_worker,
name=self.__kind + "-decoder",
args=(
asyncio.get_event_loop(),
self.__decoder_queue,
self._track._queue,
),
)
self.__decoder_thread.start()
self.__transport._register_rtp_receiver(self, parameters)
self.__rtcp_task = asyncio.ensure_future(self._run_rtcp())
self.__started = True
def setTransport(self, transport: RTCDtlsTransport) -> None:
self.__transport = transport
async def stop(self) -> None:
"""
Irreversibly stop the receiver.
"""
if self.__started:
self.__transport._unregister_rtp_receiver(self)
self.__stop_decoder()
self.__rtcp_task.cancel()
await self.__rtcp_exited.wait()
def _handle_disconnect(self) -> None:
self.__stop_decoder()
async def _handle_rtcp_packet(self, packet: AnyRtcpPacket) -> None:
self.__log_debug("< RTCP %s", packet)
if isinstance(packet, RtcpSrPacket):
self.__stats.add(
RTCRemoteOutboundRtpStreamStats(
# RTCStats
timestamp=clock.current_datetime(),
type="remote-outbound-rtp",
id=f"remote-outbound-rtp_{id(self)}",
# RTCStreamStats
ssrc=packet.ssrc,
kind=self.__kind,
transportId=self.transport._stats_id,
# RTCSentRtpStreamStats
packetsSent=packet.sender_info.packet_count,
bytesSent=packet.sender_info.octet_count,
# RTCRemoteOutboundRtpStreamStats
remoteTimestamp=clock.datetime_from_ntp(
packet.sender_info.ntp_timestamp
),
)
)
self.__lsr[packet.ssrc] = (
(packet.sender_info.ntp_timestamp) >> 16
) & 0xFFFFFFFF
self.__lsr_time[packet.ssrc] = time.time()
elif isinstance(packet, RtcpByePacket):
self.__stop_decoder()
async def _handle_rtp_packet(self, packet: RtpPacket, arrival_time_ms: int) -> None:
"""
Handle an incoming RTP packet.
"""
self.__log_debug("< RTP %s arrival time:%d", packet, arrival_time_ms)
"""
if (packet.sequence_number == 3000):
self.__dropped_packet_time = arrival_time_ms
self.__log_debug("dropping packet %s", packet.sequence_number)
return
if arrival_time_ms - self.__dropped_packet_time < 10000: # 10seconds
self.__log_debug("dropping more packets %s", packet.sequence_number)
return
"""
# feed bitrate estimator
if self.__remote_bitrate_estimator is not None:
if packet.extensions.abs_send_time is not None:
remb = self.__remote_bitrate_estimator.add(
abs_send_time=packet.extensions.abs_send_time,
arrival_time_ms=arrival_time_ms,
payload_size=len(packet.payload) + packet.padding_size,
ssrc=packet.ssrc,
)
if self.__rtcp_ssrc is not None and remb is not None:
# send Receiver Estimated Maximum Bitrate feedback
rtcp_packet = RtcpPsfbPacket(
fmt=RTCP_PSFB_APP,
ssrc=self.__rtcp_ssrc,
media_ssrc=0,
fci=pack_remb_fci(*remb),
)
await self._send_rtcp(rtcp_packet)
# keep track of sources
self.__active_ssrc[packet.ssrc] = clock.current_datetime()
# check the codec is known
codec = self.__codecs.get(packet.payload_type)
if codec is None:
self.__log_debug(
"x RTP packet with unknown payload type %d", packet.payload_type
)
return
# feed RTCP statistics
if packet.ssrc not in self.__remote_streams:
self.__remote_streams[packet.ssrc] = StreamStatistics(codec.clockRate)
self.__remote_streams[packet.ssrc].add(packet)
# unwrap retransmission packet
if is_rtx(codec):
original_ssrc = self.__rtx_ssrc.get(packet.ssrc)
if original_ssrc is None:
self.__log_debug("x RTX packet from unknown SSRC %d", packet.ssrc)
return
if len(packet.payload) < 2:
return
codec = self.__codecs[codec.parameters["apt"]]
packet = unwrap_rtx(
packet, payload_type=codec.payloadType, ssrc=original_ssrc
)
# send NACKs for any missing any packets
if self.__nack_generator is not None and self.__nack_generator.add(packet):
await self._send_rtcp_nack(
packet.ssrc, sorted(self.__nack_generator.missing)
)
# parse codec-specific information
try:
if packet.payload:
packet._data = depayload(codec, packet.payload) # type: ignore
else:
packet._data = b"" # type: ignore
except ValueError as exc:
self.__log_debug("x RTP payload parsing failed: %s", exc)
return
# try to re-assemble encoded frame
pli_flag, encoded_frame = self.__jitter_buffer.add(packet)
# check if the PLI should be sent
if pli_flag:
if encoded_frame is not None:
self.__log_debug("Generating a PLI for %s", encoded_frame.timestamp)
else:
self.__log_debug("Generating a PLI for None")
await self._send_rtcp_pli(packet.ssrc)
# if we have a complete encoded frame, decode it
if encoded_frame is not None and self.__decoder_thread:
encoded_frame.timestamp = self.__timestamp_mapper.map(
encoded_frame.timestamp
)
self.__decoder_queue.put((codec, encoded_frame))
self.__log_debug("Put frame timestamp %s into decoder queue", encoded_frame.timestamp)
async def _run_rtcp(self) -> None:
self.__log_debug("- RTCP started")
try:
while True:
# The interval between RTCP packets is varied randomly over the
# range [0.5, 1.5] times the calculated interval.
await asyncio.sleep(0.5 + random.random())
# RTCP RR
reports = []
for ssrc, stream in self.__remote_streams.items():
lsr = 0
dlsr = 0
if ssrc in self.__lsr:
lsr = self.__lsr[ssrc]
delay = time.time() - self.__lsr_time[ssrc]
if delay > 0 and delay < 65536:
dlsr = int(delay * 65536)
reports.append(
RtcpReceiverInfo(
ssrc=ssrc,
fraction_lost=stream.fraction_lost,
packets_lost=stream.packets_lost,
highest_sequence=stream.max_seq,
jitter=stream.jitter,
lsr=lsr,
dlsr=dlsr,
)
)
if self.__rtcp_ssrc is not None and reports:
packet = RtcpRrPacket(ssrc=self.__rtcp_ssrc, reports=reports)
await self._send_rtcp(packet)
except asyncio.CancelledError:
pass
self.__log_debug("- RTCP finished")
self.__rtcp_exited.set()
async def _send_rtcp(self, packet) -> None:
self.__log_debug("> %s", packet)
try:
await self.transport._send_rtp(bytes(packet))
except ConnectionError:
pass
async def _send_rtcp_nack(self, media_ssrc: int, lost) -> None:
"""
Send an RTCP packet to report missing RTP packets.
"""
if self.__rtcp_ssrc is not None:
packet = RtcpRtpfbPacket(
fmt=RTCP_RTPFB_NACK, ssrc=self.__rtcp_ssrc, media_ssrc=media_ssrc
)
packet.lost = lost
await self._send_rtcp(packet)
async def _send_rtcp_pli(self, media_ssrc: int) -> None:
"""
Send an RTCP packet to report picture loss.
"""
if self.__rtcp_ssrc is not None:
packet = RtcpPsfbPacket(
fmt=RTCP_PSFB_PLI, ssrc=self.__rtcp_ssrc, media_ssrc=media_ssrc
)
await self._send_rtcp(packet)
def _set_rtcp_ssrc(self, ssrc: int) -> None:
self.__rtcp_ssrc = ssrc
def __stop_decoder(self) -> None:
"""
Stop the decoder thread, which will in turn stop the track.
"""
if self.__decoder_thread:
self.__decoder_queue.put(None)
self.__decoder_thread.join()
self.__decoder_thread = None
def __log_debug(self, msg: str, *args) -> None:
logger.debug(f"RTCRtpReceiver(%s) {msg}", self.__kind, *args)
| 34.499192 | 138 | 0.590822 |
793ff796fa9dae652c4388a416e762ce9ec71f5c | 734 | py | Python | dnd5e/migrations/0005_auto_20210106_1318.py | VlasovVitaly/gmfriend | 4efacee2a09061d4a6ccb916438bd09db56a1837 | [
"MIT"
] | null | null | null | dnd5e/migrations/0005_auto_20210106_1318.py | VlasovVitaly/gmfriend | 4efacee2a09061d4a6ccb916438bd09db56a1837 | [
"MIT"
] | 2 | 2021-12-13T15:47:16.000Z | 2021-12-15T17:48:15.000Z | dnd5e/migrations/0005_auto_20210106_1318.py | VlasovVitaly/gmfriend | 4efacee2a09061d4a6ccb916438bd09db56a1837 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-06 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dnd5e', '0004_auto_20210106_1258'),
]
operations = [
migrations.AddField(
model_name='character',
name='languages',
field=models.ManyToManyField(editable=False, related_name='_character_languages_+', to='dnd5e.Language', verbose_name='Владение языками'),
),
migrations.AlterField(
model_name='character',
name='features',
field=models.ManyToManyField(editable=False, related_name='_character_features_+', to='dnd5e.Feature', verbose_name='Умения'),
),
]
| 30.583333 | 150 | 0.638965 |
793ff809f13638f77d4cde1d359150017bed1877 | 12,216 | py | Python | tests/db.py | zlucifer/DIRBS-Core | 702e93dcefdf0fb5787cb42c2a6bc2574e483057 | [
"Unlicense"
] | 1 | 2019-12-10T10:53:00.000Z | 2019-12-10T10:53:00.000Z | tests/db.py | zlucifer/DIRBS-Core | 702e93dcefdf0fb5787cb42c2a6bc2574e483057 | [
"Unlicense"
] | null | null | null | tests/db.py | zlucifer/DIRBS-Core | 702e93dcefdf0fb5787cb42c2a6bc2574e483057 | [
"Unlicense"
] | null | null | null | """
dirbs-db unit tests.
SPDX-License-Identifier: BSD-4-Clause-Clear
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
- All advertising materials mentioning features or use of this software, or any deployment of this software,
or documentation accompanying any distribution of this software, must display the trademark/logo as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
SPDX-License-Identifier: ZLIB-ACKNOWLEDGEMENT
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable
for any damages arising from the use of this software. Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following
restrictions:
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/logo as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original
software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from click.testing import CliRunner
import testing.postgresql
import pytest
from psycopg2 import sql
from dirbs.cli.listgen import cli as dirbs_listgen_cli
from dirbs.cli.classify import cli as dirbs_classify_cli
from dirbs.cli.db import cli as dirbs_db_cli
from dirbs.importer.operator_data_importer import OperatorDataImporter
from _fixtures import * # noqa: F403, F401
from _helpers import import_data, get_importer, expect_success
from _importer_params import OperatorDataParams, GSMADataParams, StolenListParams, PairListParams, \
RegistrationListParams
def test_basic_cli_check(postgres, mocked_config, monkeypatch):
"""Test that the dirbs-db check script runs without an error."""
runner = CliRunner()
# Now use non-empty, installed PostgreSQL
# Run dirbs-db check using db args from the temp postgres instance
result = runner.invoke(dirbs_db_cli, ['check'], obj={'APP_CONFIG': mocked_config})
# Test whether dirbs-db check passes after schema install
assert result.exit_code == 0
# Create temp empty postgres instance
empty_postgresql = testing.postgresql.Postgresql()
dsn = empty_postgresql.dsn()
for setting in ['database', 'host', 'port', 'user', 'password']:
monkeypatch.setattr(mocked_config.db_config, setting, dsn.get(setting, None))
result = runner.invoke(dirbs_db_cli, ['check'], obj={'APP_CONFIG': mocked_config})
# Test whether check fails on an empty db
assert result.exit_code == 1
def test_basic_cli_upgrade(postgres, mocked_config):
"""Test that the dirbs-db upgrade script runs without an error."""
runner = CliRunner()
result = runner.invoke(dirbs_db_cli, ['upgrade'], obj={'APP_CONFIG': mocked_config})
assert result.exit_code == 0
@pytest.mark.parametrize('operator_data_importer, stolen_list_importer, pairing_list_importer, '
'gsma_tac_db_importer, registration_list_importer',
[(OperatorDataParams(
filename='testData1-operator-operator1-anonymized_20161101_20161130.csv',
operator='operator1',
perform_unclean_checks=False,
extract=False),
StolenListParams(
filename='testData1-sample_stolen_list-anonymized.csv'),
PairListParams(
filename='testData1-sample_pairinglist-anonymized.csv'),
GSMADataParams(
filename='testData1-gsmatac_operator4_operator1_anonymized.txt'),
RegistrationListParams(
filename='sample_registration_list.csv'))],
indirect=True)
def test_cli_repartition(postgres, mocked_config, db_conn, operator_data_importer, registration_list_importer,
pairing_list_importer, stolen_list_importer, gsma_tac_db_importer, tmpdir, logger,
metadata_db_conn, mocked_statsd):
"""Test that the dirbs-db partition script runs without an error."""
import_data(operator_data_importer, 'operator_data', 17, db_conn, logger)
import_data(gsma_tac_db_importer, 'gsma_data', 13, db_conn, logger)
import_data(stolen_list_importer, 'stolen_list', 21, db_conn, logger)
import_data(registration_list_importer, 'registration_list', 20, db_conn, logger)
import_data(pairing_list_importer, 'pairing_list', 7, db_conn, logger)
# Import second month of operator data to ensure that we have 2 months worth for the same operator
with get_importer(OperatorDataImporter,
db_conn,
metadata_db_conn,
mocked_config.db_config,
tmpdir,
logger,
mocked_statsd,
OperatorDataParams(
content='date,imei,imsi,msisdn\n'
'20161201,64220496727231,123456789012345,123456789012345\n'
'20161201,64220496727232,123456789012345,123456789012345',
operator='operator1',
extract=False,
perform_leading_zero_check=False,
perform_unclean_checks=False,
perform_home_network_check=False,
perform_region_checks=False,
perform_historic_checks=False,
mcc_mnc_pairs=[{'mcc': '111', 'mnc': '04'}],
perform_file_daterange_check=False)) as new_imp:
expect_success(new_imp, 19, db_conn, logger)
# Run dirbs-classify and dirbs-listgen to populate some tables prior to re-partition
runner = CliRunner()
output_dir = str(tmpdir)
result = runner.invoke(dirbs_classify_cli, ['--no-safety-check'], obj={'APP_CONFIG': mocked_config})
assert result.exit_code == 0
result = runner.invoke(dirbs_listgen_cli, [output_dir], obj={'APP_CONFIG': mocked_config})
assert result.exit_code == 0
# Make sure that if we run with some invalid phyical shards, that it fails
for num_shards in [-1, 0, 101]:
result = runner.invoke(dirbs_db_cli, ['repartition', '--num-physical-shards={0:d}'.format(num_shards)],
obj={'APP_CONFIG': mocked_config})
assert result.exit_code != 0
partitioned_tables = ['classification_state', 'historic_pairing_list', 'historic_registration_list',
'network_imeis', 'monthly_network_triplets_per_mno_operator1_2016_11',
'monthly_network_triplets_country_2016_11', 'blacklist', 'exceptions_lists_operator1',
'notifications_lists_operator1', 'historic_stolen_list']
with db_conn, db_conn.cursor() as cursor:
# Manually add one record into the notifications_lists for operator_1 so that the repartitioned table
# is not empty
cursor.execute("""INSERT INTO notifications_lists_operator1 (operator_id, imei_norm, imsi, msisdn, block_date,
reasons, start_run_id, end_run_id, delta_reason,
virt_imei_shard)
VALUES ('operator1', '12345678901234', '12345678901234', '1', '20170110',
ARRAY['condition1'], 1125, NULL, 'new', calc_virt_imei_shard('12345678901234'))
""")
# Run dirbs-db repartition to 8 partitions and check that it works
result = runner.invoke(dirbs_db_cli, ['repartition', '--num-physical-shards=8'], obj={'APP_CONFIG': mocked_config})
assert result.exit_code == 0
with db_conn, db_conn.cursor() as cursor:
for base_table in partitioned_tables:
cursor.execute(sql.SQL('SELECT COUNT(*) FROM {0}').format(sql.Identifier(base_table)))
tbl_count = cursor.fetchone()[0]
assert tbl_count > 0
cursor.execute("""SELECT TABLE_NAME
FROM information_schema.tables
WHERE TABLE_NAME LIKE %s
ORDER BY TABLE_NAME""",
['{0}%'.format(base_table)]),
res = [x.table_name for x in cursor]
assert res == ['{0}'.format(base_table),
'{0}_0_12'.format(base_table),
'{0}_13_25'.format(base_table),
'{0}_26_38'.format(base_table),
'{0}_39_51'.format(base_table),
'{0}_52_63'.format(base_table),
'{0}_64_75'.format(base_table),
'{0}_76_87'.format(base_table),
'{0}_88_99'.format(base_table)]
# Re-partition back to the default 4 shards so that we do not change state for other tests
result = runner.invoke(dirbs_db_cli, ['repartition', '--num-physical-shards=4'], obj={'APP_CONFIG': mocked_config})
assert result.exit_code == 0
with db_conn, db_conn.cursor() as cursor:
for base_table in partitioned_tables:
cursor.execute(sql.SQL('SELECT COUNT(*) FROM {0}').format(sql.Identifier(base_table)))
tbl_count = cursor.fetchone()[0]
assert tbl_count > 0
cursor.execute("""SELECT TABLE_NAME
FROM information_schema.tables
WHERE TABLE_NAME LIKE %s
ORDER BY TABLE_NAME""",
['{0}%'.format(base_table)]),
res = [x.table_name for x in cursor]
assert res == ['{0}'.format(base_table),
'{0}_0_24'.format(base_table),
'{0}_25_49'.format(base_table),
'{0}_50_74'.format(base_table),
'{0}_75_99'.format(base_table)]
| 56.294931 | 119 | 0.64612 |
793ff8cef0c118791728620d93c10e919cff3413 | 1,094 | py | Python | tfmtcnn/utils/__init__.py | look4pritam/mtcnn | a8324450e02334e3b4d0f1654c65c772a4bdb953 | [
"MIT"
] | null | null | null | tfmtcnn/utils/__init__.py | look4pritam/mtcnn | a8324450e02334e3b4d0f1654c65c772a4bdb953 | [
"MIT"
] | null | null | null | tfmtcnn/utils/__init__.py | look4pritam/mtcnn | a8324450e02334e3b4d0f1654c65c772a4bdb953 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 49.727273 | 80 | 0.776965 |
Subsets and Splits