max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M) |
---|---|---|---|---|
tests/functional/basic/test_basic.py | tomasfarias/dbt-core | 799 | 12752529 | import pytest
from dbt.tests.util import run_dbt, get_manifest
my_model_sql = """
select 1 as fun
"""
@pytest.fixture(scope="class")
def models():
return {"my_model.sql": my_model_sql}
def test_basic(project):
# Tests that a project with a single model works
results = run_dbt(["run"])
assert len(results) == 1
manifest = get_manifest(project.project_root)
assert "model.test.my_model" in manifest.nodes
|
test/nn/test_reshape.py | mrmotallebi/pytorch_geometric | 12,651 | 12752532 | import torch
from torch_geometric.nn.reshape import Reshape
def test_reshape():
x = torch.randn(10, 4)
op = Reshape(5, 2, 4)
assert op.__repr__() == 'Reshape(5, 2, 4)'
assert op(x).size() == (5, 2, 4)
assert op(x).view(10, 4).tolist() == x.tolist()
|
src/genie/libs/parser/nxos/show_system.py | balmasea/genieparser | 204 | 12752559 |
"""show_system.py
NXOS parsers for the following show commands:
* 'show system internal sysmgr service name <WORD>'
* 'show system internal l2fwder Mac'
    * 'show system internal processes memory'
    * 'show system internal kernel meminfo'
    * 'show system resources'
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional, Or, And,\
Default, Use
# ====================================================================
# Schema for 'show system internal sysmgr service name <process>'
# ====================================================================
class ShowSystemInternalSysmgrServiceNameSchema(MetaParser):
"""Schema for show system internal sysmgr service name <process>"""
schema = {'instance':
{Any():
{'tag':
{Any():
{'process_name': str,
'internal_id': int,
'uuid': str,
'state': str,
'plugin_id': str,
'state_start_date': str,
Optional('last_restart_date'): str,
Optional('pid'): int,
Optional('previous_pid'): int,
Optional('sap'): int,
Optional('restart_count'): int,
Optional('reboot_state'): str,
Optional('last_terminate_reason'): str}
},
}
},
}
class ShowSystemInternalSysmgrServiceName(
ShowSystemInternalSysmgrServiceNameSchema):
"""Parser for show system internal sysmgr service name <process>"""
cli_command = 'show system internal sysmgr service name {process}'
    def cli(self, process, output=None):
if process:
cmd = self.cli_command.format(process=process)
else:
cmd = ""
if output is None:
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# Service "bfdc" ("bfdc", 3):
# Service "__inst_012__isis" ("isis", 61):
# Service "feature-mgr" ("feature-mgr", 135):
            p1 = re.compile(r'^Service +\"(?P<inst>[\w\-]+)\" *'
                            r'\(\"(?P<process_name>[\w\-]+)\", *'
                            r'(?P<internal_id>\d+)\):$')
m = p1.match(line)
if m:
if 'instance' not in ret_dict:
ret_dict['instance'] = {}
inst = m.groupdict()['inst']
if inst not in ret_dict['instance']:
ret_dict['instance'][inst] = {}
process_name = m.groupdict()['process_name']
internal_id = int(m.groupdict()['internal_id'])
# initial for each process
pid = sap = restart_count = previous_pid = None
last_restart_date = reboot_state = last_terminate_reason = None
continue
# UUID = 0x2C7, PID = 6547, SAP = 1008
# UUID = 0x42000118, -- Currently not running --
            p2 = re.compile(r'^UUID *= *(?P<uuid>\w+), *'
                            r'((PID *= *(?P<pid>\d+), *'
                            r'SAP *= *(?P<sap>\d+))'
                            r'|(-- Currently not running --))$')
m = p2.match(line)
if m:
uuid = m.groupdict()['uuid']
if m.groupdict()['pid']:
pid = int(m.groupdict()['pid'])
else:
pid = None
if m.groupdict()['sap']:
sap = int(m.groupdict()['sap'])
else:
sap = None
continue
# State: SRV_STATE_WAIT_SPAWN_CONDITION (entered at time Tue Mar 26 17:31:06 2013).
# State: SRV_STATE_HAP_FAILED [unstable] (entered at time Thu Oct 26 13:46:32 2017).
            p3 = re.compile(r'^State: *(?P<state>[\w\s\[\]]+) *'
                            r'\(entered +at +time +'
                            r'(?P<state_start_date>[\w\s\:]+)\).$')
m = p3.match(line)
if m:
state = m.groupdict()['state'].strip()
state_start_date = m.groupdict()['state_start_date']
continue
# Restart count: 1
p4 = re.compile(r'^Restart +count: +(?P<restart_count>\d+)$')
m = p4.match(line)
if m:
if m.groupdict()['restart_count']:
restart_count = int(m.groupdict()['restart_count'])
else:
restart_count = None
continue
# Time of last restart: Sat Jul 1 14:49:10 2017.
            p5 = re.compile(r'^Time +of +last +restart: +'
                            r'(?P<last_restart_date>[\w\s\:]+).$')
m = p5.match(line)
if m:
last_restart_date = m.groupdict()['last_restart_date']
continue
# The service never crashed since the last reboot.
# The service has never been started since the last reboot.
p6 = re.compile(r'The service never crashed since the last reboot.')
m = p6.match(line)
if m:
reboot_state = 'never_crashed'
continue
p6_1 = re.compile(r'The service has never been started since the last reboot.')
m = p6_1.match(line)
if m:
reboot_state = 'never_started'
continue
# Previous PID: 2176
p7 = re.compile(r'^Previous +PID: +(?P<previous_pid>\d+)$')
m = p7.match(line)
if m:
previous_pid = int(m.groupdict()['previous_pid'])
continue
# Reason of last termination: SYSMGR_DEATH_REASON_FAILURE_SIGNAL
            p8 = re.compile(r'^Reason +of +last +termination: +'
                            r'(?P<last_terminate_reason>\w+)$')
m = p8.match(line)
if m:
last_terminate_reason = m.groupdict()['last_terminate_reason']
continue
# Plugin ID: 0
p9 = re.compile(r'^Plugin +ID: +(?P<plugin_id>\d+)$')
m = p9.match(line)
if m:
plugin_id = m.groupdict()['plugin_id']
ret_dict['instance'][inst]['tag'][tag]['plugin_id'] = plugin_id
continue
# Tag = N/A
# Tag = 100
# Tag = l3vpn
p10 = re.compile(r'^Tag *= *(?P<tag>(N\/A)|(\S+))$')
m = p10.match(line)
if m:
tag = m.groupdict()['tag']
if 'tag' not in ret_dict['instance'][inst]:
ret_dict['instance'][inst]['tag'] = {}
if tag not in ret_dict['instance'][inst]['tag']:
ret_dict['instance'][inst]['tag'][tag] = {}
                if process_name:
                    ret_dict['instance'][inst]['tag'][tag]['process_name'] = process_name
                if internal_id:
                    ret_dict['instance'][inst]['tag'][tag]['internal_id'] = internal_id
                if uuid:
                    ret_dict['instance'][inst]['tag'][tag]['uuid'] = uuid
                if state:
                    ret_dict['instance'][inst]['tag'][tag]['state'] = state
                if state_start_date:
                    ret_dict['instance'][inst]['tag'][tag]['state_start_date'] = state_start_date
if last_restart_date:
ret_dict['instance'][inst]['tag'][tag]\
['last_restart_date'] = last_restart_date
if pid:
ret_dict['instance'][inst]['tag'][tag]['pid'] = pid
if previous_pid:
ret_dict['instance'][inst]['tag'][tag]\
['previous_pid'] = previous_pid
if sap:
ret_dict['instance'][inst]['tag'][tag]['sap'] = sap
if restart_count:
ret_dict['instance'][inst]['tag'][tag]\
['restart_count'] = restart_count
if reboot_state:
ret_dict['instance'][inst]['tag'][tag]\
['reboot_state'] = reboot_state
if last_terminate_reason:
ret_dict['instance'][inst]['tag'][tag]\
['last_terminate_reason'] = last_terminate_reason
continue
return ret_dict
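# Offline usage sketch (editorial addition, not part of the original module):
# genieparser classes normally drive a connected device, but passing canned text
# through the `output=` argument with a Mock device exercises the regex logic on
# its own. The sample below is a hand-written approximation of the CLI output,
# not a capture from a real switch, and the helper name is hypothetical.
def _demo_sysmgr_service_name_parser():
    from unittest.mock import Mock

    sample_output = '''
    Service "bfdc" ("bfdc", 3):
            UUID = 0x2C7, PID = 6547, SAP = 1008
            State: SRV_STATE_HANDSHAKED (entered at time Tue Mar 26 17:31:06 2013).
            Restart count: 1
            Time of last restart: Sat Jul 1 14:49:10 2017.
            The service never crashed since the last reboot.
            Tag = N/A
            Plugin ID: 0
    '''
    parser = ShowSystemInternalSysmgrServiceName(device=Mock())
    # Returns a dict shaped like the schema above, keyed by instance and tag.
    return parser.cli(process='bfdc', output=sample_output)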
# ====================================================================
# Schema for 'show system internal l2fwder Mac'
# ====================================================================
class ShowSystemInternalL2fwderMacSchema(MetaParser):
"""Schema for show system internal l2fwder Mac"""
schema = {'vlans':
{Any():
{'mac_addresses':
{Any():
{'mac_type': str,
'mac_aging_time': str,
'entry': str,
'secure': str,
'ntfy': str,
'ports': str,
}
},
}
},
}
# ====================================================================
# Parser for 'show system internal l2fwder Mac'
# ====================================================================
class ShowSystemInternalL2fwderMac(ShowSystemInternalL2fwderMacSchema):
"""Parser for show system internal l2fwder Mac"""
cli_command = 'show system internal l2fwder Mac'
    def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# G 1008 5e01.80ff.0007 static - F F sup-eth1(R)
            p1 = re.compile(r'^\s*(?P<entry>[A-Z\*\(\+\)]+) +(?P<vlan>[0-9]+) '
                            r'+(?P<mac_address>[0-9a-z\.]+) +(?P<mac_type>[a-z]+) '
                            r'+(?P<age>[0-9\-\:]+) +(?P<secure>[A-Z]+) +(?P<ntfy>[A-Z]+) '
                            r'+(?P<ports>[a-zA-Z0-9\-\(\)\s\.\/]+)$')
m = p1.match(line)
if m:
vlan = str(m.groupdict()['vlan'])
mac_address = str(m.groupdict()['mac_address'])
if 'vlans' not in ret_dict:
ret_dict['vlans'] = {}
if vlan not in ret_dict['vlans']:
ret_dict['vlans'][vlan] = {}
if 'mac_addresses' not in ret_dict['vlans'][vlan]:
ret_dict['vlans'][vlan]['mac_addresses'] = {}
ret_dict['vlans'][vlan]['mac_addresses'][mac_address] = {}
ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['mac_type'] = \
str(m.groupdict()['mac_type'])
ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['mac_aging_time'] = \
str(m.groupdict()['age'])
ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['entry'] = \
str(m.groupdict()['entry'])
ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['secure'] = \
str(m.groupdict()['secure'])
ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['ntfy'] = \
str(m.groupdict()['ntfy'])
ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['ports'] = \
str(m.groupdict()['ports'])
continue
return ret_dict
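# Offline check in the same spirit (editorial addition, hypothetical helper): the
# sample line from the comment above is enough to exercise the MAC-table regex,
# and a Mock device plus `output=` keeps it independent of a live switch.
def _demo_l2fwder_mac_parser():
    from unittest.mock import Mock

    sample_output = 'G     1008    5e01.80ff.0007    static   -          F    F  sup-eth1(R)\n'
    parser = ShowSystemInternalL2fwderMac(device=Mock())
    # Expect ret_dict['vlans']['1008']['mac_addresses']['5e01.80ff.0007'] keys.
    return parser.cli(output=sample_output)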
class ShowSystemInternalKernelMeminfoSchema(MetaParser):
"""
Schema for show system internal kernel meminfo
"""
schema = {
'mem': {
'memtotal_kb': int,
'memfree_kb': int,
'memavailable_kb': int,
},
'buffers_kb': int,
'cached_kb': int,
'active': {
'active_kb': int,
'inactive_kb': int,
'active(anon)_kb': int,
'inactive(anon)_kb': int,
'active(file)_kb': int,
'inactive(file)_kb': int,
},
'unevictable_kb': int,
'mlocked_kb': int,
'swap': {
'swapcached_kb': int,
'swaptotal_kb': int,
'swapfree_kb': int,
},
'dirty_kb': int,
'writeback_kb': int,
'anonpages_kb': int,
'mapped_kb': int,
'shmem_kb': int,
'slab_kb': int,
'sreclaimable_kb': int,
'sunreclaim_kb': int,
'kernelstack_kb': int,
'pagetables_kb': int,
'nfs_unstable_kb': int,
'bounce_kb': int,
'writebacktmp_kb': int,
'commitlimit_kb': int,
'committed_as_kb': int,
'vmalloc': {
'vmalloctotal_kb': int,
'vmallocused_kb': int,
'vmallocchunk_kb': int,
},
'hardwarecorrupted_kb': int,
'hugepages': {
'hugepages_total': int,
'hugepages_free': int,
'hugepages_rsvd': int,
'hugepages_surp': int,
'hugepagesize_kb': int,
},
'directmap4k_kb': int,
'directmap2m_kb': int,
}
class ShowSystemInternalKernelMeminfo(ShowSystemInternalKernelMeminfoSchema):
"""
Parser for show system internal kernel meminfo
"""
    cli_command = 'show system internal kernel meminfo'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
# MemTotal: 5873172 kB
p1 = re.compile(r'(?P<mem_type>Mem.+):\s+(?P<amount>\d+)\skB$')
# Active(file): 236740 kB
p2 = re.compile(r'(?i)(?P<active_state>[in]*active.*):\s+(?P<amount>\d+)\skB$')
# SwapTotal: 0 kB
p3 = re.compile(r'(?P<swap_type>Swap.+):\s+(?P<amount>\d+)\skB$')
# VmallocChunk: 34359477316 kB
p4 = re.compile(r'(?P<vmalloc_type>Vmalloc.+):\s+(?P<amount>\d+)\skB$')
# HugePages_Surp: 0
p5 = re.compile(r'(?P<hugepages_type>Huge.+):\s+(?P<amount>\d+)$')
# Hugepagesize: 2048 kB
p6 = re.compile(r'(?P<hugepages_type>Huge.+):\s+(?P<amount>\d+)\s+kB$')
# Buffers: 38212 kB
p7 = re.compile(r'(?P<key>.+):\s+(?P<amount>\d+)(\skB)?$')
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# MemTotal: 5873172 kB
m = p1.match(line)
if m:
group = m.groupdict()
mem_dict = ret_dict.setdefault('mem', {})
key = group['mem_type'].lower() + '_kb'
mem_dict[key] = int(group['amount'])
continue
# Active(file): 236740 kB
m = p2.match(line)
if m:
group = m.groupdict()
active_dict = ret_dict.setdefault('active', {})
key = group['active_state'].lower() + '_kb'
active_dict[key] = int(group['amount'])
continue
# SwapTotal: 0 kB
m = p3.match(line)
if m:
group = m.groupdict()
swap_dict = ret_dict.setdefault('swap', {})
key = group['swap_type'].lower() + '_kb'
swap_dict[key] = int(group['amount'])
continue
# VmallocChunk: 34359477316 kB
m = p4.match(line)
if m:
group = m.groupdict()
vmalloc_dict = ret_dict.setdefault('vmalloc', {})
key = group['vmalloc_type'].lower() + '_kb'
vmalloc_dict[key] = int(group['amount'])
continue
# HugePages_Surp: 0
m = p5.match(line)
if m:
group = m.groupdict()
hugepages_dict = ret_dict.setdefault('hugepages', {})
key = group['hugepages_type'].lower()
hugepages_dict[key] = int(group['amount'])
continue
# Hugepagesize: 2048 kB
m = p6.match(line)
if m:
group = m.groupdict()
hugepages_dict = ret_dict.setdefault('hugepages', {})
key = group['hugepages_type'].lower() + '_kb'
hugepages_dict[key] = int(group['amount'])
continue
# Buffers: 38212 kB
m = p7.match(line)
if m:
group = m.groupdict()
key = group['key'].lower() + '_kb'
ret_dict[key] = int(group['amount'])
continue
return ret_dict
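# Offline sanity sketch (editorial addition, hypothetical helper with made-up
# values): the meminfo output mirrors Linux /proc/meminfo, so a few hand-written
# lines are enough to exercise the pattern groups above.
def _demo_kernel_meminfo_parser():
    from unittest.mock import Mock

    sample_output = '''
    MemTotal:        5873172 kB
    MemFree:          583700 kB
    MemAvailable:    1921208 kB
    Buffers:           38212 kB
    SwapTotal:             0 kB
    HugePages_Total:       0
    '''
    parser = ShowSystemInternalKernelMeminfo(device=Mock())
    # Expect e.g. result['mem']['memtotal_kb'] == 5873172 and result['buffers_kb'] == 38212.
    return parser.cli(output=sample_output)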
class ShowSystemResourcesSchema(MetaParser):
"""
Schema for show system resources
"""
schema = {
'load_avg': {
'load_avg_1min': float,
'load_avg_5min': float,
'load_avg_15min': float,
},
'processes': {
'processes_total': int,
'processes_running': int,
},
'cpu_state': {
'cpu_state_user': float,
'cpu_state_kernel': float,
'cpu_state_idle': float,
'cpus': {
Any(): {
'cpu_state_user': float,
'cpu_state_kernel': float,
'cpu_state_idle': float,
}
}
},
'memory_usage': {
'memory_usage_total_kb': int,
'memory_usage_used_kb': int,
'memory_usage_free_kb': int,
},
'kernel': {
'kernel_vmalloc_total_kb': int,
'kernel_vmalloc_free_kb': int,
'kernel_buffers_kb': int,
'kernel_cached_kb': int,
},
'current_memory_status': str,
}
class ShowSystemResources(ShowSystemResourcesSchema):
"""
Parser for show system resources
"""
cli_command = 'show system resources'
def cli(self, output=None):
# execute command to get output
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Load average: 1 minute: 0.34 5 minutes: 0.40 15 minutes: 0.66
p1 = re.compile(r'^Load average\s*:\s+1 minute:\s+(?P<minute_one>[\d\.]+)\s+5 minutes:\s+'
r'(?P<minute_five>\d+\.\d+)\s+15 minutes:\s+(?P<minute_fifteen>\d+\.\d+)$')
# Processes : 901 total, 2 running
p2 = re.compile(r'^Processes\s*:\s+(?P<processes_total>\d+)\s+total,\s+'
r'(?P<processes_running>\d+)\s+running$')
# CPU states : 2.11% user, 11.64% kernel, 86.24% idle
# CPU0 states : 3.33% user, 12.22% kernel, 84.44% idle
p3 = re.compile(r'^CPU(?P<cpu_num>\d*)\s+states\s+:\s+(?P<user>\d+\.\d+)%\s+user,\s+'
r'(?P<kernel>[\d+\.]+)%\s+kernel,\s+(?P<idle>\d+\.\d+)%\s+idle$')
# Memory usage: 5873172K total, 4189652K used, 1683520K free
p4 = re.compile(r'^Memory usage\s*:\s+(?P<total>\d+)K total,\s+'
r'(?P<used>\d+)K used,\s+(?P<free>\d+)K free$')
# Kernel vmalloc: 0K total, 0K free
p5 = re.compile(r'^Kernel vmalloc\s*:\s+(?P<total>\d+)'
r'K total,\s+(?P<free>\d+)K free$')
# Kernel buffers: 144876K Used
p6 = re.compile(r'^Kernel buffers\s*:\s+(?P<buffers>\d+)K Used$')
# Kernel cached : 2296916K Used
p7 = re.compile(r'^Kernel cached\s*:\s+(?P<cached>\d+)K Used$')
# Current memory status: OK
p8 = re.compile(r'^Current memory status\s*:\s+(?P<status>\w+)$')
ret_dict = {}
for line in out.splitlines():
if line:
line = line.strip()
else:
continue
# Load average: 1 minute: 0.34 5 minutes: 0.40 15 minutes: 0.66
m = p1.match(line)
if m:
group = m.groupdict()
load_avg_dict = ret_dict.setdefault('load_avg', {})
load_avg_dict["load_avg_1min"] = float(group['minute_one'])
load_avg_dict["load_avg_5min"] = float(group['minute_five'])
load_avg_dict["load_avg_15min"] = float(group['minute_five'])
continue
# Processes : 901 total, 2 running
m = p2.match(line)
if m:
group = m.groupdict()
processes_dict = ret_dict.setdefault('processes', {})
processes_dict["processes_total"] = int(group['processes_total'])
processes_dict["processes_running"] = int(group['processes_running'])
continue
# CPU states : 2.11% user, 11.64% kernel, 86.24% idle
# CPU0 states : 3.33% user, 12.22% kernel, 84.44% idle
m = p3.match(line)
if m:
group = m.groupdict()
cpu_state_dict = ret_dict.setdefault('cpu_state', {})
if group['cpu_num']:
cpu_id_dict = cpu_state_dict.setdefault(
'cpus', {}).setdefault(int(group['cpu_num']), {})
cpu_id_dict['cpu_state_user'] = float(group['user'])
cpu_id_dict['cpu_state_kernel'] = float(group['kernel'])
cpu_id_dict['cpu_state_idle'] = float(group['idle'])
continue
cpu_state_dict['cpu_state_user'] = float(group['user'])
cpu_state_dict['cpu_state_kernel'] = float(group['kernel'])
cpu_state_dict['cpu_state_idle'] = float(group['idle'])
continue
# Memory usage: 5873172K total, 4189652K used, 1683520K free
m = p4.match(line)
if m:
group = m.groupdict()
memory_usage_dict = ret_dict.setdefault('memory_usage', {})
memory_usage_dict['memory_usage_total_kb'] = int(group['total'])
memory_usage_dict['memory_usage_used_kb'] = int(group['used'])
memory_usage_dict['memory_usage_free_kb'] = int(group['free'])
continue
# Kernel vmalloc: 0K total, 0K free
m = p5.match(line)
if m:
group = m.groupdict()
kernel_dict = ret_dict.setdefault('kernel', {})
kernel_dict['kernel_vmalloc_total_kb'] = int(group['total'])
kernel_dict['kernel_vmalloc_free_kb'] = int(group['free'])
continue
# Kernel buffers: 144876K Used
m = p6.match(line)
if m:
group = m.groupdict()
kernel_dict = ret_dict.setdefault('kernel', {})
kernel_dict['kernel_buffers_kb'] = int(group['buffers'])
continue
# Kernel cached : 2296916K Used
m = p7.match(line)
if m:
group = m.groupdict()
kernel_dict = ret_dict.setdefault('kernel', {})
kernel_dict['kernel_cached_kb'] = int(group['cached'])
continue
# Current memory status: OK
m = p8.match(line)
if m:
ret_dict["current_memory_status"] = m.groupdict()['status']
continue
return ret_dict
class ShowSystemInternalProcessesMemorySchema(MetaParser):
"""
Schema for show system internal processes memory
"""
schema = {
'pid':
{
Any():
{
'stat': str,
'time': str,
'majflt': int,
'trs': int,
'rss': int,
'vsz': int,
'mem_percent': float,
'command': str,
'tty': str
}
}
}
class ShowSystemInternalProcessesMemory(ShowSystemInternalProcessesMemorySchema):
"""
Parser for show system internal processes memory
"""
cli_command = "show system internal processes memory"
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
# 7482 ? Ssl 00:05:05 158 0 219576 1053628 3.7 /opt/mtx/bin/grpc -i 2626 -I
# 27344 pts/0 Sl+ 00:00:20 0 63 117180 709928 1.9 /isan/bin/vsh.bin
p1 = re.compile(
r'^(?P<pid>\d+)\s+(?P<tty>\S+)\s+(?P<stat>\S+)\s+(?P<time>[\d:]+)\s+(?P<majflt>\d+)\s+(?P<trs>\d+)\s+'
r'(?P<rss>\d+)\s+(?P<vsz>\d+)\s+(?P<mem_percent>[\d.]+)\s+(?P<command>.+$)')
ret_dict = {}
for line in out.splitlines():
stripped_line = line.strip()
# 27344 pts/0 Sl+ 00:00:20 0 63 117180 709928 1.9 /isan/bin/vsh.bin
# 7482 ? Ssl 00:05:05 158 0 219576 1053628 3.7 /opt/mtx/bin/grpc -i 2626 -I
m = p1.match(stripped_line)
if m:
group = m.groupdict()
pid = int(group['pid'])
pid_dict = ret_dict.setdefault('pid', {}).setdefault(pid, {})
pid_dict['stat'] = group['stat']
pid_dict['majflt'] = int(group['majflt'])
pid_dict['trs'] = int(group['trs'])
pid_dict['rss'] = int(group['rss'])
pid_dict['vsz'] = int(group['vsz'])
pid_dict['mem_percent'] = float(group['mem_percent'])
pid_dict['command'] = group['command']
pid_dict['tty'] = group['tty']
pid_dict['time'] = group['time']
return ret_dict
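# Offline run sketch (editorial addition, hypothetical helper): rows are keyed by
# PID, so feeding the two sample lines from the comments above through `output=`
# should yield entries for PIDs 7482 and 27344; header text never matches the
# PID-leading regex and is simply skipped.
def _demo_processes_memory_parser():
    from unittest.mock import Mock

    sample_output = (
        'PID TTY   STAT TIME     MAJFLT TRS RSS    VSZ     %MEM COMMAND\n'
        '7482 ?     Ssl  00:05:05 158    0   219576 1053628 3.7  /opt/mtx/bin/grpc -i 2626 -I\n'
        '27344 pts/0 Sl+ 00:00:20 0      63  117180 709928  1.9  /isan/bin/vsh.bin\n'
    )
    parser = ShowSystemInternalProcessesMemory(device=Mock())
    return parser.cli(output=sample_output)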
|
ib/ext/EWrapperMsgGenerator.py | LewisW/IbPy | 1,260 | 12752574 |
#!/usr/bin/env python
""" generated source for module EWrapperMsgGenerator """
#
# Original file copyright original author(s).
# This file copyright <NAME>, <EMAIL>.
#
# WARNING: all changes to this file will be lost.
from ib.ext.AnyWrapperMsgGenerator import AnyWrapperMsgGenerator
from ib.ext.EClientSocket import EClientSocket
from ib.ext.MarketDataType import MarketDataType
from ib.ext.TickType import TickType
from ib.ext.Util import Util
from ib.lib import Double
# package: com.ib.client
class EWrapperMsgGenerator(AnyWrapperMsgGenerator):
""" generated source for class EWrapperMsgGenerator """
SCANNER_PARAMETERS = "SCANNER PARAMETERS:"
FINANCIAL_ADVISOR = "FA:"
@classmethod
def tickPrice(cls, tickerId, field, price, canAutoExecute):
""" generated source for method tickPrice """
return "id=" + str(tickerId) + " " + TickType.getField(field) + "=" + str(price) + " " + (" canAutoExecute" if (canAutoExecute != 0) else " noAutoExecute")
@classmethod
def tickSize(cls, tickerId, field, size):
""" generated source for method tickSize """
return "id=" + str(tickerId) + " " + TickType.getField(field) + "=" + str(size)
@classmethod
def tickOptionComputation(cls, tickerId, field, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice):
""" generated source for method tickOptionComputation """
toAdd = "id=" + str(tickerId) + " " + TickType.getField(field) \
+ ": vol = " + (str(impliedVol) if (impliedVol >= 0 and impliedVol != Double.MAX_VALUE) else "N/A") \
+ " delta = " + (str(delta) if (abs(delta) <= 1) else "N/A") \
+ " gamma = " + (str(gamma) if (abs(gamma) <= 1) else "N/A") \
+ " vega = " + (str(vega) if (abs(vega) <= 1) else "N/A") \
+ " theta = " + (str(theta) if (abs(theta) <= 1) else "N/A") \
+ " optPrice = " + (str(optPrice) if (optPrice >= 0 and optPrice != Double.MAX_VALUE) else "N/A") \
+ " pvDividend = " + (str(pvDividend) if (pvDividend >= 0 and pvDividend != Double.MAX_VALUE) else "N/A") \
+ " undPrice = " + (str(undPrice) if (undPrice >= 0 and undPrice != Double.MAX_VALUE) else "N/A")
return toAdd
@classmethod
def tickGeneric(cls, tickerId, tickType, value):
""" generated source for method tickGeneric """
return "id=" + str(tickerId) + " " + TickType.getField(tickType) + "=" + str(value)
@classmethod
def tickString(cls, tickerId, tickType, value):
""" generated source for method tickString """
return "id=" + str(tickerId) + " " + TickType.getField(tickType) + "=" + str(value)
@classmethod
def tickEFP(cls, tickerId, tickType, basisPoints, formattedBasisPoints, impliedFuture, holdDays, futureExpiry, dividendImpact, dividendsToExpiry):
""" generated source for method tickEFP """
return "id=" + str(tickerId) + " " + TickType.getField(tickType) \
+ ": basisPoints = " + str(basisPoints) + "/" + formattedBasisPoints \
+ " impliedFuture = " + str(impliedFuture) + " holdDays = " + str(holdDays) \
+ " futureExpiry = " + futureExpiry + " dividendImpact = " + str(dividendImpact) \
+ " dividends to expiry = " + str(dividendsToExpiry)
@classmethod
def orderStatus(cls, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld):
""" generated source for method orderStatus """
return "order status: orderId=" + str(orderId) + " clientId=" + str(clientId) \
+ " permId=" + str(permId) + " status=" + status + " filled=" + str(filled) \
+ " remaining=" + str(remaining) + " avgFillPrice=" + str(avgFillPrice) \
+ " lastFillPrice=" + str(lastFillPrice) + " parent Id=" + str(parentId) \
+ " whyHeld=" + whyHeld
@classmethod
def openOrder(cls, orderId, contract, order, orderState):
""" generated source for method openOrder """
msg = "open order: orderId=" + str(orderId) \
+ " action=" + str(order.m_action) \
+ " quantity=" + str(order.m_totalQuantity) \
+ " conid=" + str(contract.m_conId) \
+ " symbol=" + str(contract.m_symbol) \
+ " secType=" + str(contract.m_secType) \
+ " expiry=" + str(contract.m_expiry) \
+ " strike=" + str(contract.m_strike) \
+ " right=" + str(contract.m_right) \
+ " multiplier=" + str(contract.m_multiplier) \
+ " exchange=" + str(contract.m_exchange) \
+ " primaryExch=" + str(contract.m_primaryExch) \
+ " currency=" + str(contract.m_currency) \
+ " localSymbol=" + str(contract.m_localSymbol) \
+ " tradingClass=" + str(contract.m_tradingClass) \
+ " type=" + str(order.m_orderType) \
+ " lmtPrice=" + Util.DoubleMaxString(order.m_lmtPrice) \
+ " auxPrice=" + Util.DoubleMaxString(order.m_auxPrice) \
+ " TIF=" + str(order.m_tif) \
+ " localSymbol=" + str(contract.m_localSymbol) \
+ " client Id=" + str(order.m_clientId) \
+ " parent Id=" + str(order.m_parentId) \
+ " permId=" + str(order.m_permId) \
+ " outsideRth=" + str(order.m_outsideRth) \
+ " hidden=" + str(order.m_hidden) \
+ " discretionaryAmt=" + str(order.m_discretionaryAmt) \
+ " displaySize=" + str(order.m_displaySize) \
+ " triggerMethod=" + str(order.m_triggerMethod) \
+ " goodAfterTime=" + str(order.m_goodAfterTime) \
+ " goodTillDate=" + str(order.m_goodTillDate) \
+ " faGroup=" + str(order.m_faGroup) \
+ " faMethod=" + str(order.m_faMethod) \
+ " faPercentage=" + str(order.m_faPercentage) \
+ " faProfile=" + str(order.m_faProfile) \
+ " shortSaleSlot=" + str(order.m_shortSaleSlot) \
+ " designatedLocation=" + str(order.m_designatedLocation) \
+ " exemptCode=" + str(order.m_exemptCode) \
+ " ocaGroup=" + str(order.m_ocaGroup) \
+ " ocaType=" + str(order.m_ocaType) \
+ " rule80A=" + str(order.m_rule80A) \
+ " allOrNone=" + str(order.m_allOrNone) \
+ " minQty=" + Util.IntMaxString(order.m_minQty) \
+ " percentOffset=" + Util.DoubleMaxString(order.m_percentOffset) \
+ " eTradeOnly=" + order.m_eTradeOnly \
+ " firmQuoteOnly=" + str(order.m_firmQuoteOnly) \
+ " nbboPriceCap=" + Util.DoubleMaxString(order.m_nbboPriceCap) \
+ " optOutSmartRouting=" + str(order.m_optOutSmartRouting) \
+ " auctionStrategy=" + str(order.m_auctionStrategy) \
+ " startingPrice=" + Util.DoubleMaxString(order.m_startingPrice) \
+ " stockRefPrice=" + Util.DoubleMaxString(order.m_stockRefPrice) \
+ " delta=" + Util.DoubleMaxString(order.m_delta) \
+ " stockRangeLower=" + Util.DoubleMaxString(order.m_stockRangeLower) \
+ " stockRangeUpper=" + Util.DoubleMaxString(order.m_stockRangeUpper) \
+ " volatility=" + Util.DoubleMaxString(order.m_volatility) \
+ " volatilityType=" + str(order.m_volatilityType) \
+ " deltaNeutralOrderType=" + str(order.m_deltaNeutralOrderType) \
+ " deltaNeutralAuxPrice=" + Util.DoubleMaxString(order.m_deltaNeutralAuxPrice) \
+ " deltaNeutralConId=" + str(order.m_deltaNeutralConId) \
+ " deltaNeutralSettlingFirm=" + str(order.m_deltaNeutralSettlingFirm) \
+ " deltaNeutralClearingAccount=" + str(order.m_deltaNeutralClearingAccount) \
+ " deltaNeutralClearingIntent=" + str(order.m_deltaNeutralClearingIntent) \
+ " deltaNeutralOpenClose=" + str(order.m_deltaNeutralOpenClose) \
+ " deltaNeutralShortSale=" + str(order.m_deltaNeutralShortSale) \
+ " deltaNeutralShortSaleSlot=" + str(order.m_deltaNeutralShortSaleSlot) \
+ " deltaNeutralDesignatedLocation=" + str(order.m_deltaNeutralDesignatedLocation) \
+ " continuousUpdate=" + str(order.m_continuousUpdate) \
+ " referencePriceType=" + str(order.m_referencePriceType) \
+ " trailStopPrice=" + Util.DoubleMaxString(order.m_trailStopPrice) \
+ " trailingPercent=" + Util.DoubleMaxString(order.m_trailingPercent) \
+ " scaleInitLevelSize=" + Util.IntMaxString(order.m_scaleInitLevelSize) \
+ " scaleSubsLevelSize=" + Util.IntMaxString(order.m_scaleSubsLevelSize) \
+ " scalePriceIncrement=" + Util.DoubleMaxString(order.m_scalePriceIncrement) \
+ " scalePriceAdjustValue=" + Util.DoubleMaxString(order.m_scalePriceAdjustValue) \
+ " scalePriceAdjustInterval=" + Util.IntMaxString(order.m_scalePriceAdjustInterval) \
+ " scaleProfitOffset=" + Util.DoubleMaxString(order.m_scaleProfitOffset) \
+ " scaleAutoReset=" + str(order.m_scaleAutoReset) \
+ " scaleInitPosition=" + Util.IntMaxString(order.m_scaleInitPosition) \
+ " scaleInitFillQty=" + Util.IntMaxString(order.m_scaleInitFillQty) \
+ " scaleRandomPercent=" + str(order.m_scaleRandomPercent) \
+ " hedgeType=" + str(order.m_hedgeType) \
+ " hedgeParam=" + str(order.m_hedgeParam) \
+ " account=" + str(order.m_account) \
+ " settlingFirm=" + str(order.m_settlingFirm) \
+ " clearingAccount=" + str(order.m_clearingAccount) \
+ " clearingIntent=" + str(order.m_clearingIntent) \
+ " notHeld=" + str(order.m_notHeld) \
+ " whatIf=" + str(order.m_whatIf)
if "BAG" == contract.m_secType:
if contract.m_comboLegsDescrip is not None:
msg += " comboLegsDescrip=" + str(contract.m_comboLegsDescrip)
msg += " comboLegs={"
if contract.m_comboLegs is not None:
i = 0
while i < len(contract.m_comboLegs):
comboLeg = contract.m_comboLegs[i]
msg += " leg " + str(i + 1) + ": "
msg += "conId=" + str(comboLeg.m_conId)
msg += " ratio=" + str(comboLeg.m_ratio)
msg += " action=" + str(comboLeg.m_action)
msg += " exchange=" + str(comboLeg.m_exchange)
msg += " openClose=" + str(comboLeg.m_openClose)
msg += " shortSaleSlot=" + str(comboLeg.m_shortSaleSlot)
msg += " designatedLocation=" + str(comboLeg.m_designatedLocation)
msg += " exemptCode=" + str(comboLeg.m_exemptCode)
if order.m_orderComboLegs is not None and len(contract.m_comboLegs) == len(order.m_orderComboLegs):
orderComboLeg = order.m_orderComboLegs[i]
msg += " price=" + Util.DoubleMaxString(orderComboLeg.m_price)
msg += ";"
i += 1
msg += "}"
if order.m_basisPoints != Double.MAX_VALUE:
msg += " basisPoints=" + Util.DoubleMaxString(order.m_basisPoints)
msg += " basisPointsType=" + Util.IntMaxString(order.m_basisPointsType)
if contract.m_underComp is not None:
underComp = contract.m_underComp
msg += " underComp.conId =" + str(underComp.m_conId) + " underComp.delta =" + str(underComp.m_delta) + " underComp.price =" + str(underComp.m_price)
if not Util.StringIsEmpty(order.m_algoStrategy):
msg += " algoStrategy=" + str(order.m_algoStrategy)
msg += " algoParams={"
if order.m_algoParams is not None:
algoParams = order.m_algoParams
i = 0
while i < len(algoParams):
param = algoParams[i]
if i > 0:
msg += ","
msg += str(param.m_tag) + "=" + str(param.m_value)
i += 1
msg += "}"
if "BAG" == contract.m_secType:
msg += " smartComboRoutingParams={"
if order.m_smartComboRoutingParams is not None:
smartComboRoutingParams = order.m_smartComboRoutingParams
i = 0
while i < len(smartComboRoutingParams):
param = smartComboRoutingParams[i]
if i > 0:
msg += ","
msg += str(param.m_tag) + "=" + str(param.m_value)
i += 1
msg += "}"
orderStateMsg = " status=" + str(orderState.m_status) \
+ " initMargin=" + str(orderState.m_initMargin) \
+ " maintMargin=" + str(orderState.m_maintMargin) \
+ " equityWithLoan=" + str(orderState.m_equityWithLoan) \
+ " commission=" + Util.DoubleMaxString(orderState.m_commission) \
+ " minCommission=" + Util.DoubleMaxString(orderState.m_minCommission) \
+ " maxCommission=" + Util.DoubleMaxString(orderState.m_maxCommission) \
+ " commissionCurrency=" + str(orderState.m_commissionCurrency) \
+ " warningText=" + str(orderState.m_warningText)
return msg + orderStateMsg
@classmethod
def openOrderEnd(cls):
""" generated source for method openOrderEnd """
return " =============== end ==============="
@classmethod
def updateAccountValue(cls, key, value, currency, accountName):
""" generated source for method updateAccountValue """
return "updateAccountValue: " + key + " " + value + " " + currency + " " + accountName
@classmethod
def updatePortfolio(cls, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName):
""" generated source for method updatePortfolio """
msg = "updatePortfolio: " + cls.contractMsg(contract) + \
str(position) + " " + str(marketPrice) + " " + str(marketValue) + \
" " + str(averageCost) + " " + str(unrealizedPNL) + " " + \
str(realizedPNL) + " " + accountName
return msg
@classmethod
def updateAccountTime(cls, timeStamp):
""" generated source for method updateAccountTime """
return "updateAccountTime: " + timeStamp
@classmethod
def accountDownloadEnd(cls, accountName):
""" generated source for method accountDownloadEnd """
return "accountDownloadEnd: " + accountName
@classmethod
def nextValidId(cls, orderId):
""" generated source for method nextValidId """
return "Next Valid Order ID: " + orderId
@classmethod
def contractDetails(cls, reqId, contractDetails):
""" generated source for method contractDetails """
contract = contractDetails.m_summary
msg = "reqId = " + reqId + " ===================================\n" + \
" ---- Contract Details begin ----\n" + \
cls.contractMsg(contract) + cls.contractDetailsMsg(contractDetails) + \
" ---- Contract Details End ----\n"
return msg
@classmethod
def contractDetailsMsg(cls, contractDetails):
""" generated source for method contractDetailsMsg """
msg = "marketName = " + str(contractDetails.m_marketName) + "\n" \
+ "minTick = " + str(contractDetails.m_minTick) + "\n" \
+ "price magnifier = " + str(contractDetails.m_priceMagnifier) + "\n" \
+ "orderTypes = " + str(contractDetails.m_orderTypes) + "\n" \
+ "validExchanges = " + str(contractDetails.m_validExchanges) + "\n" \
+ "underConId = " + str(contractDetails.m_underConId) + "\n" \
+ "longName = " + str(contractDetails.m_longName) + "\n" \
+ "contractMonth = " + str(contractDetails.m_contractMonth) + "\n" \
+ "industry = " + str(contractDetails.m_industry) + "\n" \
+ "category = " + str(contractDetails.m_category) + "\n" \
+ "subcategory = " + str(contractDetails.m_subcategory) + "\n" \
+ "timeZoneId = " + str(contractDetails.m_timeZoneId) + "\n" \
+ "tradingHours = " + str(contractDetails.m_tradingHours) + "\n" \
+ "liquidHours = " + str(contractDetails.m_liquidHours) + "\n" \
+ "evRule = " + str(contractDetails.m_evRule) + "\n" \
+ "evMultiplier = " + str(contractDetails.m_evMultiplier) + "\n" \
+ cls.contractDetailsSecIdList(contractDetails)
return msg
@classmethod
def contractMsg(cls, contract):
""" generated source for method contractMsg """
msg = "conid = " + str(contract.m_conId) + "\n" \
+ "symbol = " + str(contract.m_symbol) + "\n" \
+ "secType = " + str(contract.m_secType) + "\n" \
+ "expiry = " + str(contract.m_expiry) + "\n" \
+ "strike = " + str(contract.m_strike) + "\n" \
+ "right = " + str(contract.m_right) + "\n" \
+ "multiplier = " + str(contract.m_multiplier) + "\n" \
+ "exchange = " + str(contract.m_exchange) + "\n" \
+ "primaryExch = " + str(contract.m_primaryExch) + "\n" \
+ "currency = " + str(contract.m_currency) + "\n" \
+ "localSymbol = " + str(contract.m_localSymbol) + "\n" \
+ "tradingClass = " + str(contract.m_tradingClass) + "\n"
return msg
@classmethod
def bondContractDetails(cls, reqId, contractDetails):
""" generated source for method bondContractDetails """
contract = contractDetails.m_summary
msg = "reqId = " + str(reqId) + " ===================================\n" \
+ " ---- Bond Contract Details begin ----\n" \
+ "symbol = " + str(contract.m_symbol) + "\n" \
+ "secType = " + str(contract.m_secType) + "\n" \
+ "cusip = " + str(contractDetails.m_cusip) + "\n" \
+ "coupon = " + str(contractDetails.m_coupon) + "\n" \
+ "maturity = " + str(contractDetails.m_maturity) + "\n" \
+ "issueDate = " + str(contractDetails.m_issueDate) + "\n" \
+ "ratings = " + str(contractDetails.m_ratings) + "\n" \
+ "bondType = " + str(contractDetails.m_bondType) + "\n" \
+ "couponType = " + str(contractDetails.m_couponType) + "\n" \
+ "convertible = " + str(contractDetails.m_convertible) + "\n" \
+ "callable = " + str(contractDetails.m_callable) + "\n" \
+ "putable = " + str(contractDetails.m_putable) + "\n" \
+ "descAppend = " + str(contractDetails.m_descAppend) + "\n" \
+ "exchange = " + str(contract.m_exchange) + "\n" \
+ "currency = " + str(contract.m_currency) + "\n" \
+ "marketName = " + str(contractDetails.m_marketName) + "\n" \
+ "tradingClass = " + str(contract.m_tradingClass) + "\n" \
+ "conid = " + str(contract.m_conId) + "\n" \
+ "minTick = " + str(contractDetails.m_minTick) + "\n" \
+ "orderTypes = " + str(contractDetails.m_orderTypes) + "\n" \
+ "validExchanges = " + str(contractDetails.m_validExchanges) + "\n" \
+ "nextOptionDate = " + str(contractDetails.m_nextOptionDate) + "\n" \
+ "nextOptionType = " + str(contractDetails.m_nextOptionType) + "\n" \
+ "nextOptionPartial = " + str(contractDetails.m_nextOptionPartial) + "\n" \
+ "notes = " + str(contractDetails.m_notes) + "\n" \
+ "longName = " + str(contractDetails.m_longName) + "\n" \
+ "evRule = " + str(contractDetails.m_evRule) + "\n" \
+ "evMultiplier = " + str(contractDetails.m_evMultiplier) + "\n" \
+ cls.contractDetailsSecIdList(contractDetails) \
+ " ---- Bond Contract Details End ----\n"
return msg
@classmethod
def contractDetailsSecIdList(cls, contractDetails):
""" generated source for method contractDetailsSecIdList """
msg = "secIdList={"
if contractDetails.m_secIdList is not None:
secIdList = contractDetails.m_secIdList
i = 0
while i < len(secIdList):
param = secIdList[i]
if i > 0:
msg += ","
msg += str(param.m_tag) + "=" + str(param.m_value)
i += 1
msg += "}\n"
return msg
@classmethod
def contractDetailsEnd(cls, reqId):
""" generated source for method contractDetailsEnd """
return "reqId = " + str(reqId) + " =============== end ==============="
@classmethod
def execDetails(cls, reqId, contract, execution):
""" generated source for method execDetails """
msg = " ---- Execution Details begin ----\n" \
+ "reqId = " + str(reqId) + "\n" \
+ "orderId = " + str(execution.m_orderId) + "\n" \
+ "clientId = " + str(execution.m_clientId) + "\n" \
+ cls.contractMsg(contract) \
+ "execId = " + str(execution.m_execId) + "\n" \
+ "time = " + str(execution.m_time) + "\n" \
+ "acctNumber = " + str(execution.m_acctNumber) + "\n" \
+ "executionExchange = " + str(execution.m_exchange) + "\n" \
+ "side = " + str(execution.m_side) + "\n" \
+ "shares = " + str(execution.m_shares) + "\n" \
+ "price = " + str(execution.m_price) + "\n" \
+ "permId = " + str(execution.m_permId) + "\n" \
+ "liquidation = " + str(execution.m_liquidation) + "\n" \
+ "cumQty = " + str(execution.m_cumQty) + "\n" \
+ "avgPrice = " + str(execution.m_avgPrice) + "\n" \
+ "orderRef = " + str(execution.m_orderRef) + "\n" \
+ "evRule = " + str(execution.m_evRule) + "\n" \
+ "evMultiplier = " + str(execution.m_evMultiplier) + "\n" \
" ---- Execution Details end ----\n"
return msg
@classmethod
def execDetailsEnd(cls, reqId):
""" generated source for method execDetailsEnd """
return "reqId = " + str(reqId) + " =============== end ==============="
@classmethod
def updateMktDepth(cls, tickerId, position, operation, side, price, size):
""" generated source for method updateMktDepth """
return "updateMktDepth: " + str(tickerId) + " " + str(position) + " " + str(operation) + " " + str(side) + " " + str(price) + " " + str(size)
@classmethod
def updateMktDepthL2(cls, tickerId, position, marketMaker, operation, side, price, size):
""" generated source for method updateMktDepthL2 """
return "updateMktDepth: " + str(tickerId) + " " + str(position) + " " + marketMaker + " " + str(operation) + " " + str(side) + " " + str(price) + " " + str(size)
@classmethod
def updateNewsBulletin(cls, msgId, msgType, message, origExchange):
""" generated source for method updateNewsBulletin """
return "MsgId=" + str(msgId) + " :: MsgType=" + str(msgType) + " :: Origin=" + origExchange + " :: Message=" + message
@classmethod
def managedAccounts(cls, accountsList):
""" generated source for method managedAccounts """
return "Connected : The list of managed accounts are : [" + accountsList + "]"
@classmethod
def receiveFA(cls, faDataType, xml):
""" generated source for method receiveFA """
return cls.FINANCIAL_ADVISOR + " " + EClientSocket.faMsgTypeName(faDataType) + " " + xml
@classmethod
def historicalData(cls, reqId, date, open, high, low, close, volume, count, WAP, hasGaps):
""" generated source for method historicalData """
return "id=" + str(reqId) \
+ " date = " + date \
+ " open=" + str(open) \
+ " high=" + str(high) \
+ " low=" + str(low) \
+ " close=" + str(close) \
+ " volume=" + str(volume) \
+ " count=" + str(count) \
+ " WAP=" + str(WAP) \
+ " hasGaps=" + str(hasGaps)
@classmethod
def realtimeBar(cls, reqId, time, open, high, low, close, volume, wap, count):
""" generated source for method realtimeBar """
return "id=" + str(reqId) \
+ " time = " + str(time) \
+ " open=" + str(open) \
+ " high=" + str(high) \
+ " low=" + str(low) \
+ " close=" + str(close) \
+ " volume=" + str(volume) \
+ " count=" + str(count) \
+ " WAP=" + str(wap)
@classmethod
def scannerParameters(cls, xml):
""" generated source for method scannerParameters """
return cls.SCANNER_PARAMETERS + "\n" + xml
@classmethod
def scannerData(cls, reqId, rank, contractDetails, distance, benchmark, projection, legsStr):
""" generated source for method scannerData """
contract = contractDetails.m_summary
return "id = " + str(reqId) \
+ " rank=" + str(rank) \
+ " symbol=" + str(contract.m_symbol) \
+ " secType=" + str(contract.m_secType) \
+ " expiry=" + str(contract.m_expiry) \
+ " strike=" + str(contract.m_strike) \
+ " right=" + str(contract.m_right) \
+ " exchange=" + str(contract.m_exchange) \
+ " currency=" + str(contract.m_currency) \
+ " localSymbol=" + str(contract.m_localSymbol) \
+ " marketName=" + str(contractDetails.m_marketName) \
+ " tradingClass=" + str(contract.m_tradingClass) \
+ " distance=" + distance \
+ " benchmark=" + benchmark \
+ " projection=" + projection \
+ " legsStr=" + legsStr
@classmethod
def scannerDataEnd(cls, reqId):
""" generated source for method scannerDataEnd """
return "id = " + str(reqId) + " =============== end ==============="
@classmethod
def currentTime(cls, time):
""" generated source for method currentTime """
return "current time = " + str(time)
@classmethod
def fundamentalData(cls, reqId, data):
""" generated source for method fundamentalData """
return "id = " + str(reqId) + " len = " + str(len(data)) + '\n' + data
@classmethod
def deltaNeutralValidation(cls, reqId, underComp):
""" generated source for method deltaNeutralValidation """
return "id = " + str(reqId) + " underComp.conId =" + str(underComp.m_conId) + " underComp.delta =" + str(underComp.m_delta) + " underComp.price =" + str(underComp.m_price)
@classmethod
def tickSnapshotEnd(cls, tickerId):
""" generated source for method tickSnapshotEnd """
return "id=" + str(tickerId) + " =============== end ==============="
@classmethod
def marketDataType(cls, reqId, marketDataType):
""" generated source for method marketDataType """
return "id=" + str(reqId) + " marketDataType = " + MarketDataType.getField(marketDataType)
@classmethod
def commissionReport(cls, commissionReport):
""" generated source for method commissionReport """
msg = "commission report:" \
+ " execId=" + str(commissionReport.m_execId) \
+ " commission=" + Util.DoubleMaxString(commissionReport.m_commission) \
+ " currency=" + str(commissionReport.m_currency) \
+ " realizedPNL=" + Util.DoubleMaxString(commissionReport.m_realizedPNL) \
+ " yield=" + Util.DoubleMaxString(commissionReport.m_yield) \
+ " yieldRedemptionDate=" \
+ Util.IntMaxString(commissionReport.m_yieldRedemptionDate)
return msg
@classmethod
def position(cls, account, contract, position, avgCost):
""" generated source for method position """
msg = " ---- Position begin ----\n" \
+ "account = " + str(account) + "\n" \
+ cls.contractMsg(contract) \
+ "position = " + Util.IntMaxString(position) + "\n" \
+ "avgCost = " + Util.DoubleMaxString(avgCost) + "\n" + \
" ---- Position end ----\n"
return msg
@classmethod
def positionEnd(cls):
""" generated source for method positionEnd """
return " =============== end ==============="
@classmethod
def accountSummary(cls, reqId, account, tag, value, currency):
""" generated source for method accountSummary """
msg = " ---- Account Summary begin ----\n" \
+ "reqId = " + str(reqId) + "\n" \
+ "account = " + str(account) + "\n" \
+ "tag = " + str(tag) + "\n" \
+ "value = " + str(value) + "\n" \
+ "currency = " + str(currency) + "\n" \
+ " ---- Account Summary end ----\n"
return msg
@classmethod
def accountSummaryEnd(cls, reqId):
""" generated source for method accountSummaryEnd """
return "id=" + str(reqId) + " =============== end ==============="
|
model-optimizer/extensions/front/onnx/cast_ext.py | monroid/openvino | 2,406 | 12752579 |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.ops.Cast import Cast
from mo.front.extractor import FrontExtractorOp
from mo.front.onnx.extractors.utils import get_onnx_datatype_as_numpy, onnx_attr
class CastFrontExtractor(FrontExtractorOp):
op = 'Cast'
enabled = True
@classmethod
def extract(cls, node):
to = onnx_attr(node, 'to', 'i', default=None)
Cast.update_node_stat(node, {'dst_type': get_onnx_datatype_as_numpy(to)})
return cls.enabled
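# Editorial aside, not OpenVINO code: the ONNX 'to' attribute is an integer from
# the onnx.TensorProto data-type enumeration (FLOAT is 1, INT64 is 7, and so on),
# which get_onnx_datatype_as_numpy maps to the matching numpy dtype. Assuming the
# standalone `onnx` package is installed, the enum values can be inspected directly
# with this hypothetical helper:
def _show_onnx_cast_target_codes():
    from onnx import TensorProto
    # FLOAT == 1 and INT64 == 7 in TensorProto.DataType.
    return TensorProto.FLOAT, TensorProto.INT64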
|
src/genie/libs/parser/ios/show_prefix_list.py | nujo/genieparser | 204 | 12752581 | """show_prefix_list.py
IOS parsers for the following show commands:
* show ip prefix-list detail
* show ipv6 prefix-list detail
"""
from genie.libs.parser.iosxe.show_prefix_list import ShowIpPrefixListDetail as ShowIpPrefixListDetail_iosxe,\
ShowIpv6PrefixListDetail as ShowIpv6PrefixListDetail_iosxe
class ShowIpPrefixListDetail(ShowIpPrefixListDetail_iosxe):
"""Parser for:
show ip prefix-list detail
show ipv6 prefix-list detail"""
pass
class ShowIpv6PrefixListDetail(ShowIpv6PrefixListDetail_iosxe):
"""Parser for show ipv6 prefix-list detail"""
pass
|
samples/keras/mnist.py | elgalu/labml | 463 | 12752589 |
# This example is based on the following examples
# https://www.tensorflow.org/tutorials/quickstart/beginner
import tensorflow as tf
from labml import experiment
from labml.utils.keras import LabMLKerasCallback
def main():
experiment.create(name='MNIST Keras')
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
loss=loss_fn,
metrics=['accuracy'])
with experiment.start():
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test),
callbacks=[LabMLKerasCallback()], verbose=None)
if __name__ == '__main__':
main()
|
src/azure-cli/azure/cli/command_modules/eventgrid/commands.py | YuanyuanNi/azure-cli | 3,287 | 12752594 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
from azure.cli.core.commands import CliCommandType
from ._client_factory import (
topics_factory,
domains_factory,
domain_topics_factory,
system_topics_factory,
system_topic_event_subscriptions_factory,
event_subscriptions_factory,
topic_types_factory,
extension_topics_factory,
partner_registrations_factory,
partner_namespaces_factory,
event_channels_factory,
partner_topics_factory,
partner_topic_event_subscriptions_factory
)
def load_command_table(self, _):
topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#TopicsOperations.{}',
client_factory=topics_factory,
client_arg_name='self'
)
extension_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#ExtensionTopicsOperations.{}',
client_factory=extension_topics_factory,
client_arg_name='self'
)
domains_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#DomainsOperations.{}',
client_factory=domains_factory,
client_arg_name='self'
)
domain_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#DomainTopicsOperations.{}',
client_factory=domain_topics_factory,
client_arg_name='self'
)
system_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#SystemTopicsOperations.{}',
client_factory=system_topics_factory,
client_arg_name='self'
)
system_topic_event_subscriptions_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#SystemTopicEventSubscriptionsOperations.{}',
client_factory=system_topic_event_subscriptions_factory,
client_arg_name='self'
)
partner_registrations_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerRegistrationsOperations.{}',
client_factory=partner_registrations_factory,
client_arg_name='self'
)
partner_namespaces_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerNamespacesOperations.{}',
client_factory=partner_namespaces_factory,
client_arg_name='self'
)
event_channels_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#EventChannelsOperations.{}',
client_factory=event_channels_factory,
client_arg_name='self'
)
partner_topics_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerTopicsOperations.{}',
client_factory=partner_topics_factory,
client_arg_name='self'
)
partner_topic_event_subscriptions_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#PartnerTopicEventSubscriptionsOperations.{}',
client_factory=partner_topic_event_subscriptions_factory,
client_arg_name='self'
)
topic_type_mgmt_util = CliCommandType(
operations_tmpl='azure.mgmt.eventgrid.operations#TopicTypesOperations.{}',
client_factory=topic_types_factory,
client_arg_name='self'
)
with self.command_group('eventgrid topic', topics_mgmt_util, client_factory=topics_factory) as g:
g.show_command('show', 'get')
g.command('key list', 'list_shared_access_keys')
g.command('delete', 'begin_delete')
g.custom_command('key regenerate', 'cli_topic_regenerate_key')
g.custom_command('list', 'cli_topic_list')
g.custom_command('create', 'cli_topic_create_or_update')
g.custom_command('update', 'cli_topic_update')
with self.command_group('eventgrid extension-topic', extension_topics_mgmt_util, client_factory=extension_topics_factory) as g:
g.show_command('show', 'get')
with self.command_group('eventgrid domain topic', domain_topics_mgmt_util, client_factory=domain_topics_factory) as g:
g.show_command('show', 'get')
g.custom_command('list', 'cli_domain_topic_list')
g.custom_command('delete', 'cli_domain_topic_delete')
g.custom_command('create', 'cli_domain_topic_create_or_update')
with self.command_group('eventgrid domain', domains_mgmt_util, client_factory=domains_factory) as g:
g.show_command('show', 'get')
g.command('key list', 'list_shared_access_keys')
g.custom_command('key regenerate', 'cli_domain_regenerate_key')
g.custom_command('list', 'cli_domain_list')
g.custom_command('create', 'cli_domain_create_or_update')
g.command('delete', 'begin_delete')
g.custom_command('update', 'cli_domain_update')
with self.command_group('eventgrid system-topic', system_topics_mgmt_util, client_factory=system_topics_factory) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_system_topic_list')
g.custom_command('create', 'cli_system_topic_create_or_update')
g.custom_command('update', 'cli_system_topic_update')
with self.command_group('eventgrid system-topic event-subscription', system_topic_event_subscriptions_mgmt_util, client_factory=system_topic_event_subscriptions_factory) as g:
g.custom_show_command('show', 'cli_system_topic_event_subscription_get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_system_topic_event_subscription_list')
g.custom_command('create', 'cli_system_topic_event_subscription_create_or_update')
g.custom_command('update', 'cli_system_topic_event_subscription_update')
with self.command_group('eventgrid partner registration', partner_registrations_mgmt_util, client_factory=partner_registrations_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'delete', confirmation=True)
g.custom_command('list', 'cli_partner_registration_list')
g.custom_command('create', 'cli_partner_registration_create_or_update')
# g.custom_command('update', 'cli_partner_registration_update')
with self.command_group('eventgrid partner namespace', partner_namespaces_mgmt_util, client_factory=partner_namespaces_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_partner_namespace_list')
g.custom_command('create', 'cli_partner_namespace_create_or_update')
g.command('key list', 'list_shared_access_keys')
g.custom_command('key regenerate', 'cli_partner_namespace_regenerate_key')
# g.custom_command('update', 'cli_partner_namespace_update')
with self.command_group('eventgrid partner namespace event-channel', event_channels_mgmt_util, client_factory=event_channels_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_event_channel_list')
# g.custom_command('update', 'cli_event_channel_update')
g.custom_command('create', 'cli_event_channel_create_or_update')
with self.command_group('eventgrid partner topic', partner_topics_mgmt_util, client_factory=partner_topics_factory, is_preview=True) as g:
g.show_command('show', 'get')
g.command('delete', 'begin_delete', confirmation=True)
g.command('activate', 'activate')
g.command('deactivate', 'deactivate')
g.custom_command('list', 'cli_partner_topic_list')
# g.custom_command('create', 'cli_partner_topic_create_or_update')
# g.custom_command('update', 'cli_partner_topic_update')
with self.command_group('eventgrid partner topic event-subscription', partner_topic_event_subscriptions_mgmt_util, client_factory=partner_topic_event_subscriptions_factory, is_preview=True) as g:
g.custom_show_command('show', 'cli_partner_topic_event_subscription_get')
g.command('delete', 'begin_delete', confirmation=True)
g.custom_command('list', 'cli_partner_topic_event_subscription_list')
g.custom_command('create', 'cli_partner_topic_event_subscription_create_or_update')
g.custom_command('update', 'cli_partner_topic_event_subscription_update')
custom_tmpl = 'azure.cli.command_modules.eventgrid.custom#{}'
eventgrid_custom = CliCommandType(operations_tmpl=custom_tmpl)
with self.command_group('eventgrid event-subscription', client_factory=event_subscriptions_factory) as g:
g.custom_command('create', 'cli_eventgrid_event_subscription_create')
g.custom_show_command('show', 'cli_eventgrid_event_subscription_get')
g.custom_command('delete', 'cli_eventgrid_event_subscription_delete')
g.custom_command('list', 'cli_event_subscription_list')
g.generic_update_command('update',
getter_type=eventgrid_custom,
setter_type=eventgrid_custom,
getter_name='event_subscription_getter',
setter_name='event_subscription_setter',
custom_func_name='update_event_subscription')
with self.command_group('eventgrid topic-type', topic_type_mgmt_util) as g:
g.command('list', 'list')
g.show_command('show', 'get')
g.command('list-event-types', 'list_event_types')
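# Editorial note in sketch form (not part of azure-cli): each CliCommandType above
# binds a CLI verb to an SDK method by filling the '{}' placeholder in its
# operations_tmpl with the operation name passed to g.command(); the resolution is
# plain string formatting, as this hypothetical helper shows.
def _demo_operations_tmpl_resolution():
    tmpl = 'azure.mgmt.eventgrid.operations#TopicsOperations.{}'
    # 'eventgrid topic key list' above maps to 'list_shared_access_keys', so the
    # loader ultimately imports the dotted path returned here.
    return tmpl.format('list_shared_access_keys')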
|
tests/metrics/test_token_classification.py | techthiyanes/rubrix | 888 | 12752609 |
import httpx
import pytest
import rubrix
import rubrix as rb
from rubrix.metrics.token_classification import (
Annotations,
entity_capitalness,
entity_consistency,
entity_density,
entity_labels,
f1,
mention_length,
tokens_length,
token_length,
token_frequency,
token_capitalness,
)
from tests.server.test_helpers import client
def mocking_client(monkeypatch):
monkeypatch.setattr(httpx, "post", client.post)
monkeypatch.setattr(httpx, "get", client.get)
monkeypatch.setattr(httpx, "delete", client.delete)
monkeypatch.setattr(httpx, "put", client.put)
monkeypatch.setattr(httpx, "stream", client.stream)
def log_some_data(dataset: str):
rubrix.delete(dataset)
text = "My first rubrix example"
tokens = text.split(" ")
rb.log(
[
rb.TokenClassificationRecord(
id=1,
text=text,
tokens=tokens,
prediction=[("CARDINAL", 3, 8)],
annotation=[("CARDINAL", 3, 8)],
),
rb.TokenClassificationRecord(
id=2,
text=text,
tokens=tokens,
prediction=[("CARDINAL", 3, 8)],
annotation=[("CARDINAL", 3, 8)],
),
rb.TokenClassificationRecord(
id=3,
text=text,
tokens=tokens,
prediction=[("NUMBER", 3, 8)],
annotation=[("NUMBER", 3, 8)],
),
rb.TokenClassificationRecord(
id=4,
text=text,
tokens=tokens,
prediction=[("PERSON", 3, 8)],
annotation=[("PERSON", 3, 8)],
),
],
name=dataset,
)
def test_search_by_nested_metric(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_search_by_nested_metric"
rb.delete(dataset)
log_some_data(dataset)
df = rb.load(dataset, query="metrics.predicted.mentions.capitalness: LOWER")
assert len(df) > 0
def test_tokens_length(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_tokens_length"
log_some_data(dataset)
results = tokens_length(dataset)
assert results
assert results.data == {"4.0": 4}
results.visualize()
def test_token_length(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_token_length"
log_some_data(dataset)
results = token_length(dataset)
assert results
assert results.data == {"2.0": 4, "3.0": 0, "4.0": 0, "5.0": 4, "6.0": 4, "7.0": 4}
results.visualize()
def test_token_frequency(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_token_frequency"
log_some_data(dataset)
results = token_frequency(dataset)
assert results
assert results.data == {"My": 4, "example": 4, "first": 4, "rubrix": 4}
results.visualize()
def test_token_capitalness(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_token_capitalness"
log_some_data(dataset)
results = token_capitalness(dataset)
assert results
assert results.data == {"LOWER": 12, "FIRST": 4}
results.visualize()
def test_mentions_length(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_mentions_length"
log_some_data(dataset)
results = mention_length(dataset)
assert results
assert results.data == {"1.0": 4}
results.visualize()
results = mention_length(dataset, level="char")
assert results
assert results.data == {"5.0": 4}
results.visualize()
results = mention_length(dataset, compute_for=Annotations)
assert results
assert results.data == {"1.0": 4}
results.visualize()
results = mention_length(dataset, compute_for=Annotations, level="char")
assert results
assert results.data == {"5.0": 4}
results.visualize()
def test_compute_for_as_string(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_compute_for_as_string"
log_some_data(dataset)
results = entity_density(dataset, compute_for="Predictions")
assert results
assert results.data == {"0.25": 4}
results.visualize()
with pytest.raises(
ValueError,
match="not-found is not a valid ComputeFor, please select one of \['annotations', 'predictions'\]",
):
entity_density(dataset, compute_for="not-found")
def test_entity_density(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_entity_density"
log_some_data(dataset)
results = entity_density(dataset)
assert results
assert results.data == {"0.25": 4}
results.visualize()
results = entity_density(dataset, compute_for=Annotations)
assert results
assert results.data == {"0.25": 4}
results.visualize()
def test_entity_labels(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_entity_labels"
log_some_data(dataset)
results = entity_labels(dataset)
assert results
assert results.data == {"CARDINAL": 2, "NUMBER": 1, "PERSON": 1}
results.visualize()
results = entity_labels(dataset, compute_for=Annotations)
assert results
assert results.data == {"CARDINAL": 2, "NUMBER": 1, "PERSON": 1}
results.visualize()
def test_entity_capitalness(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_entity_capitalness"
rubrix.delete(dataset)
log_some_data(dataset)
results = entity_capitalness(dataset)
assert results
assert results.data == {"LOWER": 4}
results.visualize()
results = entity_capitalness(dataset, compute_for=Annotations)
assert results
assert results.data == {"LOWER": 4}
results.visualize()
def test_entity_consistency(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_entity_consistency"
rubrix.delete(dataset)
log_some_data(dataset)
results = entity_consistency(dataset, threshold=2)
assert results
assert results.data == {
"mentions": [
{
"mention": "first",
"entities": [
{"count": 2, "label": "CARDINAL"},
{"count": 1, "label": "NUMBER"},
{"count": 1, "label": "PERSON"},
],
}
]
}
results.visualize()
results = entity_consistency(dataset, compute_for=Annotations, threshold=2)
assert results
assert results.data == {
"mentions": [
{
"mention": "first",
"entities": [
{"count": 2, "label": "CARDINAL"},
{"count": 1, "label": "NUMBER"},
{"count": 1, "label": "PERSON"},
],
}
]
}
results.visualize()
@pytest.mark.parametrize(
("metric", "expected_results"),
[
(entity_consistency, {"mentions": []}),
(mention_length, {}),
(entity_density, {}),
(entity_capitalness, {}),
(entity_labels, {}),
],
)
def test_metrics_without_data(metric, expected_results, monkeypatch):
mocking_client(monkeypatch)
dataset = "test_metrics_without_data"
rb.delete(dataset)
text = "M"
tokens = text.split(" ")
rb.log(
rb.TokenClassificationRecord(
id=1,
text=text,
tokens=tokens,
),
name=dataset,
)
results = metric(dataset)
assert results
assert results.data == expected_results
results.visualize()
def test_metrics_for_text_classification(monkeypatch):
mocking_client(monkeypatch)
dataset = "test_metrics_for_token_classification"
text = "test the f1 metric of the token classification task"
rb.log(
rb.TokenClassificationRecord(
id=1,
text=text,
tokens=text.split(),
prediction=[("a", 0, 4), ("b", 5, 8), ("b", 9, 11)],
annotation=[("a", 0, 4), ("b", 5, 8), ("a", 9, 11)],
),
name=dataset,
)
results = f1(dataset)
assert results
assert results.data == {
"f1_macro": pytest.approx(0.75),
"f1_micro": pytest.approx(0.6666666666666666),
"a_f1": pytest.approx(0.6666666666666666),
"a_precision": pytest.approx(1.0),
"a_recall": pytest.approx(0.5),
"b_f1": pytest.approx(0.6666666666666666),
"b_precision": pytest.approx(0.5),
"b_recall": pytest.approx(1.0),
"precision_macro": pytest.approx(0.75),
"precision_micro": pytest.approx(0.6666666666666666),
"recall_macro": pytest.approx(0.75),
"recall_micro": pytest.approx(0.6666666666666666),
}
results.visualize()
|
tests/test_termcounts_asscs.py | flying-sheep/goatools | 477 | 12752622 | #!/usr/bin/env python
"""Test TermCounts object used in Resnik and Lin similarity calculations."""
from __future__ import print_function
import os
import sys
import timeit
import datetime
from goatools.base import get_godag
from goatools.semantic import TermCounts
from goatools.semantic import get_info_content
from goatools.test_data.gafs import ASSOCIATIONS
from goatools.associations import dnld_annotation
from goatools.anno.gaf_reader import GafReader
from goatools.godag.consts import NS2NAMESPACE
TIC = timeit.default_timer()
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_semantic_similarity(usr_assc=None):
"""Computing basic semantic similarities between GO terms."""
not_these = {'goa_uniprot_all.gaf', 'goa_uniprot_all_noiea.gaf'}
associations = sorted(ASSOCIATIONS.difference(not_these))
go2obj = get_go2obj()
# goids = go2obj.keys()
# http://current.geneontology.org/annotations/
if usr_assc is not None:
associations = [usr_assc]
not_found = set()
errs = []
for assc_name in associations: # Limit test numbers for speed
tic = timeit.default_timer()
# Get all the annotations from arabidopsis.
fin_gaf = os.path.join(REPO, assc_name)
if not os.path.exists(fin_gaf):
dnld_annotation(fin_gaf)
annoobj = GafReader(fin_gaf)
#### for nspc in ['BP', 'MF', 'CC']:
assc_gene2gos = annoobj.get_id2gos('all')
if not assc_gene2gos:
not_found.add(assc_name)
continue
# Calculate the information content of the single term, GO:0048364
# "Information content (GO:0048364) = 7.75481392334
# Initialize the counts of each GO term.
tcntobj = TermCounts(go2obj, assc_gene2gos)
go_cnt = tcntobj.gocnts.most_common()
#print tcntobj.gocnts.most_common()
if go_cnt:
print("{ASSC}".format(ASSC=assc_name))
print(tcntobj.aspect_counts)
gocnt_max = go_cnt[0][1]
prt_info(tcntobj, go_cnt, None)
prt_info(tcntobj, go_cnt, gocnt_max/2.0)
prt_info(tcntobj, go_cnt, gocnt_max/10.0)
print("{HMS} {hms} {ASSC}\n".format(ASSC=assc_name, HMS=_hms(TIC), hms=_hms(tic)))
print('{HMS} {N} Associations'.format(HMS=_hms(TIC), N=len(associations)))
if not_found:
_prt_not_found(not_found)
if errs:
fout_err = 'namespace_errors.txt'
with open(fout_err, 'w') as prt:
for err in errs:
prt.write(err)
print(' {N} ERRORS WROTE: {TXT}'.format(N=len(errs), TXT=fout_err))
def _prt_not_found(not_found):
print('**WARNING: {N} EMPTY ASSOCIATIONS:'.format(N=len(not_found)))
for idx, assc in enumerate(not_found):
print(' {I}) {ASSC}'.format(I=idx, ASSC=assc))
def _hms(tic):
"""Get Timing."""
return '{HMS}'.format(HMS=str(datetime.timedelta(seconds=(timeit.default_timer()-tic))))
def prt_info(tcntobj, go_cnt, max_val):
"""Print the information content of a frequently used GO ID."""
go_id, cnt = get_goid(go_cnt, max_val)
infocontent = get_info_content(go_id, tcntobj)
msg = 'Information content ({GO} {CNT:7,}) = {INFO:8.6f} {NAME}'
print(msg.format(GO=go_id, CNT=cnt, INFO=infocontent, NAME=tcntobj.go2obj[go_id].name))
def get_goid(go_cnt, max_val):
"""Get frequently used GO ID."""
if max_val is not None:
for goid, cnt in go_cnt:
if cnt < max_val:
return goid, cnt
return go_cnt[-1][0], go_cnt[-1][1]
return go_cnt[0][0], go_cnt[0][1]
def get_go2obj():
"""Read GODag and return go2obj."""
godag = get_godag(os.path.join(REPO, "go-basic.obo"), loading_bar=None)
return {go:o for go, o in godag.items() if not o.is_obsolete}
if __name__ == '__main__':
ASSC_NAME = None if len(sys.argv) == 1 else sys.argv[1]
test_semantic_similarity(ASSC_NAME)
|
tests/test_level4/test_playing.py | kianmeng/soupsieve | 130 | 12752633 | """Test playing selectors."""
from .. import util
class TestPlaying(util.TestCase):
"""Test playing selectors."""
MARKUP = """
<!DOCTYPE html>
<html>
<body>
<video id="vid" width="320" height="240" controls>
<source src="movie.mp4" type="video/mp4">
<source src="movie.ogg" type="video/ogg">
Your browser does not support the video tag.
</video>
</body>
</html>
"""
def test_playing(self):
"""Test playing (matches nothing)."""
# Not actually sure how this is used, but it won't match anything anyways
self.assert_selector(
self.MARKUP,
"video:playing",
[],
flags=util.HTML
)
def test_not_playing(self):
"""Test not playing."""
self.assert_selector(
self.MARKUP,
"video:not(:playing)",
["vid"],
flags=util.HTML
)
|
redbot/message/headers/tcn.py | kinow/redbot | 167 | 12752634 | <reponame>kinow/redbot
#!/usr/bin/env python
from redbot.message import headers
class tcn(headers.HttpHeader):
canonical_name = "TCN"
description = """\
The `TCN` header field is part of an experimental transparent content negotiation scheme. It
is not widely supported in clients.
"""
reference = "https://tools.ietf.org/html/rfc2295"
list_header = True
deprecated = False
valid_in_requests = False
valid_in_responses = True
no_coverage = True
|
skater/core/local_interpretation/lime/lime_tabular.py | RPUTHUMA/Skater | 718 | 12752653 | """
Making LimeTabularExplainer Accessible
"""
from lime.lime_tabular import LimeTabularExplainer
|
sktime/forecasting/croston.py | marcio55afr/sktime | 5,349 | 12752664 | <reponame>marcio55afr/sktime
# -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Croston's Forecasting Method."""
import numpy as np
import pandas as pd
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
class Croston(BaseForecaster):
r"""Croston's method for forecasting intermittent time series.
Implements the method proposed by Croston in [1]_ and described in [2]_.
Croston's method is a modification of (vanilla) exponential smoothing to handle
intermittent time series. A time series is considered intermittent if many
of its values are zero and the gaps between non-zero entries are not periodic.
Croston's method will predict a constant value for all future times, so
Croston's method essentially provides another notion for the average value
of a time series.
The method is (equivalent to) the following:
- Let :math:`v_0,\ldots,v_n` be the non-zero values of the time series
- Let :math:`v` be the exponentially smoothed average of :math:`v_0,\ldots,v_n`
- Let :math:`z_0,\ldots,z_n` be the number of consecutive zeros plus 1 between
the :math:`v_i` in the original time series.
- Let :math:`z` be the exponentially smoothed average of :math:`z_0,\ldots,z_n`
- Then the forecast is :math:`\frac{v}{z}`
The intuition is that :math:`v` is a weighted average of the non-zero time
series values and :math:`\frac{1}{z}` estimates the probability of getting a
non-zero value.
Example to illustrate the :math:`v` and :math:`z` notation.
- If the original time series is :math:`0,0,2,7,0,0,0,-5` then:
- The :math:`v`'s are :math:`2,7,-5`
- The :math:`z`'s are :math:`3,1,4`
Parameters
----------
smoothing : float, default = 0.1
Smoothing parameter in exponential smoothing
Examples
--------
>>> from sktime.forecasting.croston import Croston
>>> from sktime.datasets import load_PBS_dataset
>>> y = load_PBS_dataset()
>>> forecaster = Croston(smoothing=0.1)
>>> forecaster.fit(y)
Croston(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
See Also
--------
ExponentialSmoothing
References
----------
.. [1] <NAME>. Forecasting and stock control for intermittent demands.
Operational Research Quarterly (1970-1977), 23(3):pp. 289–303, 1972.
.. [2] <NAME>. Forecasting Intermittent Demand with the Croston Model.
https://towardsdatascience.com/croston-forecast-model-for-intermittent-demand-360287a17f5f
"""
_tags = {
"requires-fh-in-fit": False, # is forecasting horizon already required in fit?
}
def __init__(self, smoothing=0.1):
# hyperparameter
self.smoothing = smoothing
self._f = None
super(Croston, self).__init__()
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
Returns
-------
self : returns an instance of self.
"""
n_timepoints = len(y) # Historical period: i.e the input array's length
smoothing = self.smoothing
y = y.to_numpy() # Transform the input into a numpy array
# Fit the parameters: level(q), periodicity(a) and forecast(f)
q, a, f = np.full((3, n_timepoints + 1), np.nan)
p = 1 # periods since last demand observation
# Initialization:
first_occurrence = np.argmax(y[:n_timepoints] > 0)
q[0] = y[first_occurrence]
a[0] = 1 + first_occurrence
f[0] = q[0] / a[0]
# Create t+1 forecasts:
for t in range(0, n_timepoints):
if y[t] > 0:
q[t + 1] = smoothing * y[t] + (1 - smoothing) * q[t]
a[t + 1] = smoothing * p + (1 - smoothing) * a[t]
f[t + 1] = q[t + 1] / a[t + 1]
p = 1
else:
q[t + 1] = q[t]
a[t + 1] = a[t]
f[t + 1] = f[t]
p += 1
self._f = f
return self
def _predict(
self,
fh=None,
X=None,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Predict forecast.
Parameters
----------
fh : int, list or np.array, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
Returns
-------
        forecast : pd.Series
Predicted forecasts.
"""
len_fh = len(self.fh)
f = self._f
        # Predict future forecasts: a constant value equal to the last fitted forecast
y_pred = np.full(len_fh, f[-1])
index = self.fh.to_absolute(self.cutoff)
return pd.Series(y_pred, index=index)
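# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of sktime's public API):
# the hypothetical helper below recovers the v (non-zero values) and z
# (inter-demand intervals) sequences used in the class docstring, so the
# notation can be checked by hand.
def _croston_decompose(y):
    """Return (non-zero values v, inter-demand intervals z) of a 1-D array."""
    y = np.asarray(y, dtype=float)
    nonzero_idx = np.flatnonzero(y != 0)
    v = y[nonzero_idx]
    # interval = gap to the previous non-zero index ("consecutive zeros plus 1"),
    # with the first interval counted from the start of the series
    previous_idx = np.concatenate(([-1], nonzero_idx[:-1]))
    z = nonzero_idx - previous_idx
    return v, z
# Example from the docstring: _croston_decompose([0, 0, 2, 7, 0, 0, 0, -5])
# returns (array([2., 7., -5.]), array([3, 1, 4])).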
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_shellutil_cfg.py | CiscoDevNet/ydk-py | 177 | 12752669 | """ Cisco_IOS_XR_shellutil_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR shellutil package configuration.
This module contains definitions
for the following management objects\:
host\-names\: Container Schema for hostname configuration
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class HostNames(_Entity_):
"""
Container Schema for hostname configuration
.. attribute:: host_name
Configure system's hostname
**type**\: str
"""
_prefix = 'shellutil-cfg'
_revision = '2015-10-12'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(HostNames, self).__init__()
self._top_entity = None
self.yang_name = "host-names"
self.yang_parent_name = "Cisco-IOS-XR-shellutil-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('host_name', (YLeaf(YType.str, 'host-name'), ['str'])),
])
self.host_name = None
self._segment_path = lambda: "Cisco-IOS-XR-shellutil-cfg:host-names"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(HostNames, ['host_name'], name, value)
def clone_ptr(self):
self._top_entity = HostNames()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_shellutil_cfg as meta
return meta._meta_table['HostNames']['meta_info']
|
rtools/install_package.py | scw/r-tools-install | 193 | 12752691 | # Py3 compat layer
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import arcpy
import glob
import os
import shutil
import sys
# create a handle to the windows kernel; want to make Win API calls
try:
import ctypes
from ctypes import wintypes
# pass str() to avoid bpo29082 in Python 2.7.13
kdll = ctypes.windll.LoadLibrary(str("kernel32.dll"))
except (ImportError, TypeError):
kdll = None
from .bootstrap_r import execute_r
from .github_release import save_url, release_info
from .rpath import (
r_lib_path,
r_path,
r_pkg_path,
r_pkg_version,
r_user_lib_path,
r_version,
arcmap_exists,
arcmap_path,
fnf_exception,
handle_fnf,
)
from .utils import mkdtemp, set_env_tmpdir
from .fs import getvolumeinfo, hardlinks_supported, junctions_supported
try:
import winreg
except ImportError:
# py 2
import _winreg as winreg
PACKAGE_NAME = 'arcgisbinding'
PACKAGE_VERSION = r_pkg_version()
def bridge_running(product):
""" Check if the R ArcGIS bridge is running. Installation wil fail
if the DLL is currently loaded."""
running = False
# check for the correct DLL
if product == 'Pro':
proxy_name = "rarcproxy_pro.dll"
else:
proxy_name = "rarcproxy.dll"
kdll.GetModuleHandleW.restype = wintypes.HMODULE
kdll.GetModuleHandleW.argtypes = [wintypes.LPCWSTR]
dll_handle = kdll.GetModuleHandleW(proxy_name) # memory address of DLL
if dll_handle is not None:
running = True
return running
def arcgis_platform():
""" ArcGIS platform details used internally."""
info = arcpy.GetInstallInfo()
install_dir = info['InstallDir']
arc_version = info['Version']
if info['ProductName'] == 'ArcGISPro':
product = 'Pro'
else:
# there are other levels, but this is a PYT run from toolbox,
# so unlikely to be a non-ArcMap context
product = 'ArcMap'
return (install_dir, arc_version, product)
def validate_environment(overwrite=None):
"""Make sure we have a version of the product that works, and that
the library isn't already loaded."""
(install_dir, arc_version, product) = arcgis_platform()
# earlier versions excluded by virtue of not having Python toolbox support
no_hook_versions = ('10.1', '10.2', '10.2.1', '10.2.2', '10.3')
valid_env = True
msg = []
    if arc_version in no_hook_versions and product != 'Pro':
msg.append("The ArcGIS R bridge requires ArcGIS 10.3.1 or later.")
valid_env = False
if arc_version in ('1.0', '1.0.2') and product == 'Pro':
msg.append("The ArcGIS R bridge requires ArcGIS Pro 1.1 or later.")
valid_env = False
if not overwrite and PACKAGE_VERSION:
msg.append("The ArcGIS R bridge is already installed, and "
"overwrite is disabled.")
valid_env = False
if kdll is None:
msg.append("Unable to connect to your Windows configuration, "
"this is likely due to an incorrect Python installation. "
"Try repairing your ArcGIS installation.")
valid_env = False
# check the library isn't loaded
if kdll is not None and bridge_running(product):
msg.append("The ArcGIS R bridge is currently in-use, restart the "
"application and try again.")
valid_env = False
if r_version() is None:
msg.append("It doesn't look like R is installed. Install R prior "
"to running this tool.")
valid_env = False
if not valid_env:
arcpy.AddError("\n\n".join(msg))
sys.exit()
def create_registry_entry(product, arc_version):
"""Create a registry link back to the arcgisbinding package."""
root_key = winreg.HKEY_CURRENT_USER
if product == 'Pro':
product_name = "ArcGISPro"
else:
product_name = "Desktop{}".format(arc_version)
reg_path = "SOFTWARE\\Esri\\{}".format(product_name)
package_key = 'RintegrationProPackagePath'
link_key = None
try:
full_access = (winreg.KEY_WOW64_64KEY + winreg.KEY_ALL_ACCESS)
# find the key, 64- or 32-bit we want it all
link_key = winreg.OpenKey(root_key, reg_path, 0, full_access)
except fnf_exception as error:
handle_fnf(error)
if link_key:
try:
arcpy.AddMessage("Using registry key to link install.")
binding_path = "{}\\{}".format(r_lib_path(), "arcgisbinding")
winreg.SetValueEx(link_key, package_key, 0,
winreg.REG_SZ, binding_path)
except fnf_exception as error:
handle_fnf(error)
def install_package(overwrite=False, r_library_path=r_lib_path()):
"""Install ArcGIS R bindings onto this machine."""
    overwrite = (overwrite is True)
(install_dir, arc_version, product) = arcgis_platform()
arcmap_needs_link = False
# check that we're in a sane installation environment
validate_environment(overwrite)
# detect if we we have a 10.3.1 install that needs linking
if product == 'Pro' and arcmap_exists("10.3"):
arcmap_needs_link = True
msg_base = "Pro side by side with 10.3 detected,"
if arcmap_path() is not None:
msg = "{} installing bridge for both environments.".format(msg_base)
arcpy.AddMessage(msg)
else:
msg = "{} but unable to find install path.".format(msg_base) + \
"ArcGIS bridge must be manually installed in ArcGIS 10.3."
arcpy.AddWarning(msg)
# if we're going to install the bridge in 10.3.1, create the appropriate
# directory before trying to install.
if arc_version == '10.3.1' and product == 'ArcMap' or arcmap_needs_link:
r_integration_dir = os.path.join(arcmap_path(), "Rintegration")
# TODO escalate privs here? test on non-admin user
if not os.path.exists(r_integration_dir):
try:
write_test = os.path.join(install_dir, 'test.txt')
with open(write_test, 'w') as f:
f.write('test')
os.remove(write_test)
os.makedirs(r_integration_dir)
except IOError:
arcpy.AddError(
"Insufficient privileges to create 10.3.1 bridge directory."
" Please start {} as an administrator, by right clicking"
" the icon, selecting \"Run as Administrator\", then run this"
" script again.".format(product))
return
# set an R-compatible temporary folder, if needed.
orig_tmpdir = os.getenv("TMPDIR")
if not orig_tmpdir:
set_env_tmpdir()
download_url = release_info()[0]
if download_url is None:
arcpy.AddWarning(
"Unable to get current release information."
" Trying offline installation.")
local_install = False
base_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
zip_glob = glob.glob(os.path.join(base_path, "arcgisbinding*.zip"))
# see if we have a local copy of the binding
if zip_glob and os.path.exists(zip_glob[0]):
local_install = True
zip_path = zip_glob[0]
zip_name = os.path.basename(zip_path)
elif not download_url and not local_install:
arcpy.AddError(
"Unable to access online package, and no "
"local copy of package found.")
return
else:
local_install = False
zip_name = os.path.basename(download_url)
# check for a network-based R installation
if r_path() and r_path()[0:2] == r'\\':
arcpy.AddMessage(
"R installed on a network path, using fallback installation method.")
r_local_install = False
else:
r_local_install = True
# we have a release, write it to disk for installation
with mkdtemp() as temp_dir:
# For R 4.0+, check version from GitHub but install via repo
if r_version() and r_version().split(".")[0] == '4':
cmd = "install.packages(\"arcgisbinding\", repos=\"https://r.esri.com\", type=\"win.binary\")"
install_script = os.path.join(temp_dir, 'install.R')
with open(install_script, 'w') as f:
f.write(cmd)
rcmd_return = execute_r("Rscript", install_script)
if rcmd_return != 0:
arcpy.AddError("Failed to install bridge with `install.packages`, try manualy running the command `{cmd}` from an R session or RStudio.")
else:
package_path = os.path.join(temp_dir, zip_name)
if local_install:
arcpy.AddMessage("Found local copy of binding, installing from zip")
shutil.copyfile(zip_path, package_path)
else:
save_url(download_url, package_path)
if os.path.exists(package_path):
# TODO -- need to do UAC escalation here?
# call the R installation script
rcmd_return = 0
if r_local_install:
rcmd_return = execute_r('Rcmd', 'INSTALL', package_path)
if not r_local_install or rcmd_return != 0:
# if we don't have a per-user library, create one
r_user_lib = r_user_lib_path()
if not os.path.exists(r_user_lib):
try:
arcpy.AddMessage("Creating per-user library directory")
os.makedirs(r_user_lib)
except OSError:
arcpy.AddWarning("Failed to create per-user library.")
# Can't execute Rcmd in this context, write out a temporary
# script and run install.packages() from within an R session.
install_script = os.path.join(temp_dir, 'install.R')
with open(install_script, 'w') as f:
f.write("install.packages(\"{}\", repos=NULL)".format(
package_path.replace("\\", "/")))
rcmd_return = execute_r("Rscript", install_script)
if rcmd_return != 0:
arcpy.AddWarning("Fallback installation method failed.")
else:
arcpy.AddError("No package found at {}".format(package_path))
return
# return TMPDIR to its original value; only need it for Rcmd INSTALL
set_env_tmpdir(orig_tmpdir)
# at 10.4 and Pro <=1.2, if the user has installed a version with a non-
# numeric patch level (e.g. 3.2.4revised), and the bridge is installed
# into Program Files, the link will fail. In this case, set the
# appropriate registry key so that the bridge will still work. Note that
# this isn't ideal, because it will persist after updates, but it is
# better than the bridge failing to work at all.
if (arc_version == '10.4' and product == 'Desktop') or \
(arc_version in ('1.1', '1.1.1', '1.2')
and product == 'Pro'):
if r_version():
(r_major, r_minor, r_patchlevel) = r_version().split(".")
# if we have a patchlevel like '4revised' or '3alpha', and
# the global library path is used, then use the registry key.
if len(r_patchlevel) > 1 and 'Program Files' in r_library_path:
# create_registry_entry(product, arc_version)
msg = ("Currently, the bridge doesn't support patched releases"
" (e.g. 3.2.4 Revised) in a global install. Please use"
" another version of R.")
arcpy.AddError(msg)
return
# at 10.3.1, we _must_ have the bridge installed at the correct location.
# create a symlink that connects back to the correct location on disk.
if arc_version == '10.3.1' and product == 'ArcMap' or arcmap_needs_link:
link_dir = os.path.join(r_integration_dir, PACKAGE_NAME)
if os.path.exists(link_dir):
if junctions_supported(link_dir) or hardlinks_supported(link_dir):
# os.rmdir uses RemoveDirectoryW, and can delete a junction
os.rmdir(link_dir)
else:
shutil.rmtree(link_dir)
# set up the link
r_package_path = r_pkg_path()
if r_package_path:
arcpy.AddMessage("R package path: {}.".format(r_package_path))
else:
arcpy.AddError("Unable to locate R package library. Link failed.")
return
detect_msg = "ArcGIS 10.3.1 detected."
if junctions_supported(link_dir) or hardlinks_supported(link_dir):
arcpy.AddMessage("{} Creating link to package.".format(detect_msg))
kdll.CreateSymbolicLinkW(link_dir, r_package_path, 1)
else:
# working on a non-NTFS volume, copy instead
vol_info = getvolumeinfo(link_dir)
arcpy.AddMessage("{} Drive type: {}. Copying package files.".format(
detect_msg, vol_info[0]))
# NOTE: this will need to be resynced when the package is updated,
# if installed from the R side.
shutil.copytree(r_package_path, link_dir)
# execute as standalone script, get parameters from sys.argv
if __name__ == '__main__':
if len(sys.argv) == 2:
overwrite = sys.argv[1]
else:
overwrite = None
print("library path: {}".format(r_lib_path()))
install_package(overwrite=overwrite, r_library_path=r_lib_path())
|
tests/capi2_cores/misc/generate/testgen.py | idex-biometrics/fusesoc | 829 | 12752697 | <reponame>idex-biometrics/fusesoc
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import sys
import yaml
template = """CAPI=2:
name : {}
targets:
default:
parameters: [p]
parameters:
p:
datatype : str
paramtype : vlogparam
"""
with open(sys.argv[1]) as fin:
data = yaml.safe_load(fin)
config = data.get("parameters")
files_root = data.get("files_root")
vlnv = data.get("vlnv")
with open("generated.core", "w") as fout:
fout.write(template.format(vlnv))
|
demo/demo_bbox_detector.py | realblack0/eft | 241 | 12752751 | <filename>demo/demo_bbox_detector.py
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
import numpy as np
import cv2
import torch
import torchvision.transforms as transforms
from PIL import Image
##Yolo related
yolo_path = './PyTorch-YOLOv3'
sys.path.append(yolo_path)
try:
from models import Darknet
from utils.utils import non_max_suppression, rescale_boxes
from utils.datasets import pad_to_square,resize
except ImportError:
print("Cannot find PyTorch-YOLOv3")
##lightweight human pose
# pose2d_estimator_path = '/home/hjoo/codes_test/lightweight-human-pose-estimation.pytorch/'
# pose2d_checkpoint = "/home/hjoo/codes_test/lightweight-human-pose-estimation.pytorch/pretrain/checkpoint_iter_370000.pth"
pose2d_checkpoint = "./lightweight-human-pose-estimation.pytorch/checkpoint_iter_370000.pth"
pose2d_estimator_path = './lightweight-human-pose-estimation.pytorch/'
sys.path.append(pose2d_estimator_path)
try:
from pose2d_models.with_mobilenet import PoseEstimationWithMobileNet
from modules.load_state import load_state
from val import normalize, pad_width
from modules.pose import Pose, track_poses
from modules.keypoints import extract_keypoints, group_keypoints
except ImportError:
print("Cannot find lightweight-human-pose-estimation.pytorch")
def Load_Yolo(device):
#Load Darknet
yolo_model_def= os.path.join(yolo_path, 'config/yolov3-tiny.cfg')
yolo_img_size = 416
yolo_weights_path = os.path.join(yolo_path, 'weights/yolov3-tiny.weights')
model = Darknet(yolo_model_def, img_size=yolo_img_size).to(device)
if yolo_weights_path.endswith(".weights"):
# Load darknet weights
model.load_darknet_weights(yolo_weights_path)
else:
# Load checkpoint weights
model.load_state_dict(torch.load(yolo_weights_path))
model.eval() # Set in evaluation mode
return model
def Yolo_detect(model, camInputFrame, img_size = 416, conf_thres = 0.8, nms_thres = 0.4):
img = transforms.ToTensor()(Image.fromarray(camInputFrame))
# Pad to square resolution
img, _ = pad_to_square(img, 0)
# Resize
img = resize(img, img_size)
img = img.unsqueeze(0) #(1,3,416.419)
input_imgs = img.cuda()
with torch.no_grad():
detections = model(input_imgs)
detections = non_max_suppression(detections, conf_thres, nms_thres)
if detections is not None:
detections = detections[0]
if detections is not None:
detections = rescale_boxes(detections, img_size, camInputFrame.shape[:2])
return detections
def Yolo_detectHuman(model, camInputFrame):
detections = Yolo_detect(model,camInputFrame, conf_thres = 0.1, nms_thres = 0.3) #Modified to be better with yolo tiny
bbr_list=[] #minX, minY, width, height
if detections is not None:
for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
if cls_pred!=0:
continue
box_w = x2 - x1
box_h = y2 - y1
# camInputFrame = viewer2D.Vis_Bbox_minmaxPt(camInputFrame,[x1,y1], [x2,y2])
bbr_list.append( np.array([x1,y1,box_w,box_h]))
return bbr_list
#Code from https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/demo.py
def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,
pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256):
height, width, _ = img.shape
scale = net_input_height_size / height
scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
scaled_img = normalize(scaled_img, img_mean, img_scale)
min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
if not cpu:
tensor_img = tensor_img.cuda()
stages_output = net(tensor_img)
stage2_heatmaps = stages_output[-2]
heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
stage2_pafs = stages_output[-1]
pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
return heatmaps, pafs, scale, pad
#Code from https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/demo.py
def pose2d_detectHuman(net, img, height_size =256, track = 1, smooth=1, bVis =True):
stride = 8
upsample_ratio = 4
num_keypoints = Pose.num_kpts
previous_poses = []
delay = 33
if True:
# for img in image_provider:
orig_img = img.copy()
heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu=not torch.cuda.is_available())
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
for kpt_id in range(num_keypoints):
if pose_entries[n][kpt_id] != -1.0: # keypoint was found
pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
pose = Pose(pose_keypoints, pose_entries[n][18])
current_poses.append(pose)
if bVis:
if track:
track_poses(previous_poses, current_poses, smooth=smooth)
previous_poses = current_poses
for pose in current_poses:
pose.draw(img)
img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
for pose in current_poses:
cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
(pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
if track:
cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
key = cv2.waitKey(delay)
if key == 27: # esc
return
elif key == 112: # 'p'
if delay == 33:
delay = 0
else:
delay = 33
return current_poses
def Load_pose2d(device):
"""
    Loads the lightweight 2D pose model; the checkpoint is read on the CPU and the network is then moved to the given device.
"""
net = PoseEstimationWithMobileNet()
checkpoint = torch.load(pose2d_checkpoint, map_location='cpu')
load_state(net, checkpoint)
net = net.eval()
net = net.to(device)
return net
class BodyBboxDetector:
def __init__(self, method="2dpose", device = torch.device('cuda')):
"""
args:
method: "yolo" or "2dpose"
"""
self.method = method
if method =="yolo":
print("Loading Yolo Model...")
self.model = Load_Yolo(device)
print("Done")
elif method=="2dpose":
print("Loading Pose Estimation Model...")
self.model = Load_pose2d(device)
print("Done")
else :
print("invalid method")
assert False
self.bboxXYWH_list = None
def detectBbox(self, img_bgr):
"""
args:
img_bgr: Raw image with BGR order (cv2 default). Currently assumes BGR #TODO: make sure the input type of each method
output:
bboxXYWH_list: list of bboxes. Each bbox has XYWH form (minX,minY,width,height)
"""
if self.method=="yolo":
bboxXYWH_list = Yolo_detectHuman(self.model, img_bgr)
elif self.method=="2dpose":
poses_from2dPoseEst = pose2d_detectHuman(self.model, img_bgr, bVis=False)
bboxXYWH_list =[]
for poseEst in poses_from2dPoseEst:
bboxXYWH_list.append(np.array (poseEst.bbox))
else:
print("Unknown bbox extimation method")
assert False
self.bboxXYWH_list = bboxXYWH_list #Save this as member function
return bboxXYWH_list
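# ---------------------------------------------------------------------------
# Minimal usage sketch (editorial addition, not part of the original demo).
# "input.jpg" and "output_bbox.jpg" are placeholder paths, and the default
# "2dpose" method assumes the lightweight-pose checkpoint referenced at the
# top of this file has been downloaded.
if __name__ == '__main__':
    detector = BodyBboxDetector(method="2dpose")
    img_bgr = cv2.imread("input.jpg")  # hypothetical input image
    if img_bgr is None:
        print("Could not read input.jpg")
    else:
        for bbox in detector.detectBbox(img_bgr):
            x, y, w, h = [int(v) for v in bbox]
            cv2.rectangle(img_bgr, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imwrite("output_bbox.jpg", img_bgr)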
|
dmb/data/loaders/__init__.py | jiaw-z/DenseMatchingBenchmark | 160 | 12752797 | <reponame>jiaw-z/DenseMatchingBenchmark
from .builder import build_data_loader
|
reinforcement_learning/dqn/atari_utils.py | AaratiAkkapeddi/nnabla-examples | 228 | 12752800 | <filename>reinforcement_learning/dqn/atari_utils.py
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import numpy as np
class Squeeze(gym.ObservationWrapper):
'''Assume wrap_deepmind with scale=True'''
def __init__(self, env):
from gym import spaces
gym.ObservationWrapper.__init__(self, env)
self.observation_space = spaces.Box(
low=0, high=1.0,
shape=(84, 84), dtype=np.float32)
def observation(self, observation):
return np.squeeze(observation)
def make_atari_deepmind(rom_name, valid=False):
from external.atari_wrappers import make_atari, wrap_deepmind
env = make_atari(rom_name)
# framestack is handled by sampler.py
env = wrap_deepmind(env, episode_life=not valid,
frame_stack=False, scale=True)
env = Squeeze(env)
return env
|
django_covid19/apps.py | zhangguoyuanshuai/Python-Covid19API | 103 | 12752814 | <filename>django_covid19/apps.py
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class DjangoCovid19Config(AppConfig):
name = 'django_covid19'
verbose_name = _('django_covid19')
def ready(self):
import django_covid19.signals |
urduhack/tokenization/wtk.py | cinfotech94/urduhackk | 252 | 12752838 | <reponame>cinfotech94/urduhackk
"""SentencePiece based word tokenizer module"""
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
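# Illustrative example (editorial addition): given SentencePiece pieces such as
# ["▁My", "first", "▁example"], the piece "first" has no "▁" prefix but is
# longer than 3 characters, so _is_token() re-prefixes it and the decoded list
# becomes ["▁My", "▁first", "▁example"]. Real input would be Urdu word pieces;
# the Latin strings here are only a toy illustration.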
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
"""
Loads pre_trained keras model and vocab file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
def _is_model_available(model_path: str) -> None:
"""
Check if the models file exist.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
|
interactive_control_train.py | mohamedSabry0/new_autoRCCar | 333 | 12752870 | #!/usr/bin/env python
"""Interactive control for the car"""
import time
import io
import pygame
import pygame.font
import picamera
import configuration
import helpers.motor_driver as motor_driver_helper
import helpers.image as image_helper
UP = LEFT = DOWN = RIGHT = ACCELERATE = DECELERATE = False
def get_keys():
"""Returns a tuple of (UP, DOWN, LEFT, RIGHT, change, ACCELERATE,
DECELERATE, stop) representing which keys are UP or DOWN and
whether or not the key states changed.
"""
change = False
stop = False
key_to_global_name = {
pygame.K_LEFT: 'LEFT',
pygame.K_RIGHT: 'RIGHT',
pygame.K_UP: 'UP',
pygame.K_DOWN: 'DOWN',
pygame.K_ESCAPE: 'QUIT',
pygame.K_q: 'QUIT',
pygame.K_w: 'ACCELERATE',
pygame.K_s: 'DECELERATE'
}
for event in pygame.event.get():
        if event.type == pygame.QUIT or (
                event.type == pygame.KEYDOWN and event.key in {pygame.K_q, pygame.K_ESCAPE}):
            stop = True
elif event.type in {pygame.KEYDOWN, pygame.KEYUP}:
down = (event.type == pygame.KEYDOWN)
change = (event.key in key_to_global_name)
if event.key in key_to_global_name:
globals()[key_to_global_name[event.key]] = down
return (UP, DOWN, LEFT, RIGHT, change, ACCELERATE, DECELERATE, stop)
def interactive_control():
"""Runs the interactive control"""
setup_interactive_control()
clock = pygame.time.Clock()
with picamera.PiCamera() as camera:
camera.resolution = configuration.PICAMERA_RESOLUTION
camera.framerate = configuration.PICAMERA_FRAMERATE
time.sleep(configuration.PICAMERA_WARM_UP_TIME)
# GPIO.output(BACK_MOTOR_ENABLE_PIN, True)
pwm = motor_driver_helper.get_pwm_imstance()
motor_driver_helper.start_pwm(pwm)
command = 'idle'
duty_cycle = configuration.INITIAL_PWM_DUTY_CYCLE
while True:
up_key, down, left, right, change, accelerate, decelerate, stop = get_keys()
if stop:
break
if accelerate:
duty_cycle = duty_cycle + 3 if (duty_cycle + 3) <= 100 else duty_cycle
motor_driver_helper.change_pwm_duty_cycle(pwm, duty_cycle)
print("speed: " + str(duty_cycle))
if decelerate:
duty_cycle = duty_cycle - 3 if (duty_cycle - 3) >= 0 else duty_cycle
motor_driver_helper.change_pwm_duty_cycle(pwm, duty_cycle)
print("speed: " + str(duty_cycle))
if change:
command = 'idle'
motor_driver_helper.set_idle_mode()
if up_key:
command = 'forward'
print(duty_cycle)
motor_driver_helper.set_forward_mode()
elif down:
command = 'reverse'
motor_driver_helper.set_reverse_mode()
append = lambda x: command + '_' + x if command != 'idle' else x
if left:
command = append('left')
motor_driver_helper.set_left_mode()
elif right:
command = append('right')
motor_driver_helper.set_right_mode()
print(command)
stream = io.BytesIO()
camera.capture(stream, format='jpeg', use_video_port=True)
image_helper.save_image_with_direction(stream, command)
stream.flush()
clock.tick(30)
pygame.quit()
def setup_interactive_control():
"""Setup the Pygame Interactive Control Screen"""
pygame.init()
display_size = (300, 400)
screen = pygame.display.set_mode(display_size)
background = pygame.Surface(screen.get_size())
color_white = (255, 255, 255)
display_font = pygame.font.Font(None, 40)
pygame.display.set_caption('RC Car Interactive Control')
text = display_font.render('Use arrows to move', 1, color_white)
text_position = text.get_rect(centerx=display_size[0] / 2)
background.blit(text, text_position)
screen.blit(background, (0, 0))
pygame.display.flip()
def main():
"""Main function"""
motor_driver_helper.set_gpio_pins()
interactive_control()
if __name__ == '__main__':
main()
|
Trakttv.bundle/Contents/Libraries/Shared/plex_database/models/directory.py | disrupted/Trakttv.bundle | 1,346 | 12752883 | <gh_stars>1000+
from plex_database.core import db
from plex_database.models.library_section import LibrarySection
from peewee import *
class Directory(Model):
class Meta:
database = db
db_table = 'directories'
library_section = ForeignKeyField(LibrarySection, null=True, related_name='directories')
parent_directory = ForeignKeyField('self', null=True, related_name='children')
path = CharField(null=True)
created_at = DateTimeField(null=True)
updated_at = DateTimeField(null=True)
deleted_at = DateTimeField(null=True)
|
commands/uninstall/__init__.py | tamarindmonkey/.oh-my-comma | 102 | 12752884 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from commands.base import CommandBase
from py_utils.emu_utils import run, error, input_with_options, UNINSTALL_PATH
class Uninstall(CommandBase):
def __init__(self):
super().__init__()
self.name = 'uninstall'
self.description = '👋 Uninstalls emu'
@staticmethod
def _uninstall():
print('Are you sure you want to uninstall emu?')
if input_with_options(['Y', 'n'], 'n')[0] == 0:
run(['sh', UNINSTALL_PATH])
else:
error('Not uninstalling!')
|
tests/test_crc32c.py | shirui-japina/tensorboardX | 5,378 | 12752886 | import unittest
from tensorboardX.crc32c import _crc32c, _crc32c_native, crc32c
class CRC32CTest(unittest.TestCase):
def test_crc32c(self):
data = b'abcd'
assert crc32c(data) == 0x92c80a31
def test_crc32c_python(self):
data = b'abcd'
assert _crc32c(data) == 0x92c80a31
def test_crc32c_native(self):
if _crc32c_native is None:
return
data = b'abcd'
assert _crc32c_native(data) == 0x92c80a31
|
util/security/test/test_jwtutil.py | giuseppe/quay | 2,027 | 12752921 | <gh_stars>1000+
import time
import pytest
import jwt
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from authlib.jose import jwk
from util.security.jwtutil import (
decode,
exp_max_s_option,
jwk_dict_to_public_key,
InvalidTokenError,
InvalidAlgorithmError,
)
@pytest.fixture(scope="session")
def private_key():
return rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
@pytest.fixture(scope="session")
def private_key_pem(private_key):
return private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
@pytest.fixture(scope="session")
def public_key(private_key):
return private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
def _token_data(audience, subject, iss, iat=None, exp=None, nbf=None):
return {
"iss": iss,
"aud": audience,
"nbf": nbf() if nbf is not None else int(time.time()),
"iat": iat() if iat is not None else int(time.time()),
"exp": exp() if exp is not None else int(time.time() + 3600),
"sub": subject,
}
@pytest.mark.parametrize(
"aud, iss, nbf, iat, exp, expected_exception",
[
pytest.param(
"invalidaudience",
"someissuer",
None,
None,
None,
"Invalid audience",
id="invalid audience",
),
pytest.param(
"someaudience", "invalidissuer", None, None, None, "Invalid issuer", id="invalid issuer"
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 120,
None,
None,
"The token is not yet valid",
id="invalid not before",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 120,
None,
"Issued At claim",
id="issued at in future",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 100,
"Signature has expired",
id="already expired",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() + 10000,
"Token was signed for more than",
id="expiration too far in future",
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 10,
None,
None,
None,
id="not before in future by within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 10,
None,
None,
id="issued at in future but within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 10,
None,
id="expiration in past but within leeway",
),
],
)
def test_decode_jwt_validation(
aud, iss, nbf, iat, exp, expected_exception, private_key_pem, public_key
):
token = jwt.encode(_token_data(aud, "subject", iss, iat, exp, nbf), private_key_pem, "RS256")
if expected_exception is not None:
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match(expected_exception)
else:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
def test_decode_jwt_invalid_key(private_key_pem):
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Try to decode with a different public key.
another_public_key = (
rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
another_public_key,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("Signature verification failed")
def test_decode_jwt_invalid_algorithm(private_key_pem, public_key):
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Attempt to decode but only with a different algorithm than that used.
with pytest.raises(InvalidAlgorithmError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["ES256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("are not whitelisted")
def test_jwk_dict_to_public_key(private_key, private_key_pem):
public_key = private_key.public_key()
key_dict = jwk.dumps(
public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
converted = jwk_dict_to_public_key(key_dict)
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Decode with the converted key.
max_exp = exp_max_s_option(3600)
decode(
token,
converted,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
|
setup.py | sliderSun/scrapy-djangoitem | 509 | 12752933 | <gh_stars>100-1000
from setuptools import setup, find_packages
setup(
name='scrapy-djangoitem',
version='1.1.1',
url='https://github.com/scrapy-plugins/scrapy-djangoitem',
description='Scrapy extension to write scraped items using Django models',
long_description=open('README.rst').read(),
author='Scrapy developers',
license='BSD',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
classifiers=[
'Framework :: Scrapy',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'Framework :: Django',
'Framework :: Scrapy',
],
install_requires=['six'],
requires=['scrapy (>=0.24.5)', 'django'],
)
|
game/sdl/nacl/generate_nmf.py | spiffcode/hostile-takeover | 113 | 12752936 | <filename>game/sdl/nacl/generate_nmf.py<gh_stars>100-1000
#!/usr/bin/python
#
# Copyright (c) 2011, The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import optparse
# This script generates a JSON .nmf file, which provides the mapping to indicate
# which .nexe file to load and execute for a particular architecture.
# The script must have -nmf <filename> as an option, which designates the name
# of the .nmf file to be generated.
# One or more nexes must be specified on the command line. Each
# nexe file is preceded by an argument that specifies the architecture
# that the nexe is associated with: --x86-64, --x86-32, --arm.
#
# For example:
# generate_nmf.py --nmf test.nmf --x86-64 hello_world_x86-64.nexe \
# --x86-32 hello32.nexe
# will create test.nmf that contains 2 entries, while
#
# generate_nmf.py --nmf hello.nmf --arm arm.nexe
#
# will create hello.nmf with a single entry.
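# For the first example above, the generated test.nmf contains JSON along the
# lines of (illustrative; exact whitespace aside):
# {
#   "nexes": {
#     "x86-64": "hello_world_x86-64.nexe",
#     "x86-32": "hello32.nexe"
#   }
# }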
# Note: argv has been passed in without the program name in argv[0]
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--nmf', dest='nmf_file', help='nmf file to generate')
parser.add_option('--x86-64', dest='x86_64', help='x86_64 nexe')
parser.add_option('--x86-32', dest='x86_32', help='x86_32 nexe')
parser.add_option('--arm', dest='arm', help='arm nexe')
(options, args) = parser.parse_args(argv)
if options.nmf_file == None:
parser.error("nmf file not specified. Use --nmf")
# Make sure that not all nexes are None -- i.e. at least one was specified.
if options.x86_64 == None and options.x86_32 == None and options.arm == None:
parser.error("No nexe files were specified")
nmf_file = open(options.nmf_file, 'w')
nmf_file.write('{\n')
nmf_file.write(' "nexes": {\n')
  # Collect an entry for each specified architecture, then join the entries
  # with commas so the generated manifest is valid JSON (no trailing comma).
  entries = []
  if options.x86_64:
    entries.append('  "x86-64": "%s"' % options.x86_64)
  if options.x86_32:
    entries.append('  "x86-32": "%s"' % options.x86_32)
  if options.arm:
    entries.append('  "arm": "%s"' % options.arm)
  nmf_file.write(',\n'.join(entries) + '\n')
  nmf_file.write(' }\n')
  nmf_file.write('}\n')
nmf_file.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
testing/scripts/test_benchmark.py | MichaelXcc/seldon-core | 3,049 | 12752981 | <reponame>MichaelXcc/seldon-core<filename>testing/scripts/test_benchmark.py
import json
import numpy as np
import pytest
import tensorflow as tf
from google.protobuf import json_format
from seldon_e2e_utils import post_comment_in_pr, run_benchmark_and_capture_results
@pytest.mark.benchmark
@pytest.mark.usefixtures("argo_worfklows")
def test_service_orchestrator():
sort_by = ["apiType", "disableOrchestrator"]
data_size = 1_000
data = [100.0] * data_size
data_tensor = {"data": {"tensor": {"values": data, "shape": [1, data_size]}}}
df = run_benchmark_and_capture_results(
api_type_list=["rest", "grpc"],
disable_orchestrator_list=["false", "true"],
image_list=["seldonio/seldontest_predict:1.10.0-dev"],
benchmark_data=data_tensor,
)
df = df.sort_values(sort_by)
result_body = "# Benchmark results - Testing Service Orchestrator\n\n"
orch_mean = all(
(
df[df["disableOrchestrator"] == "false"]["mean"].values
- df[df["disableOrchestrator"] == "true"]["mean"].values
)
< 3
)
result_body += f"* Orch added mean latency under 4ms: {orch_mean}\n"
orch_nth = all(
(
df[df["disableOrchestrator"] == "false"]["95th"].values
- df[df["disableOrchestrator"] == "true"]["95th"].values
)
< 5
)
result_body += f"* Orch added 95th latency under 5ms: {orch_nth}\n"
orch_nth = all(
(
df[df["disableOrchestrator"] == "false"]["99th"].values
- df[df["disableOrchestrator"] == "true"]["99th"].values
)
< 10
)
result_body += f"* Orch added 99th latency under 10ms: {orch_nth}\n"
    # Treat up to 1 reported "error" as none, since the benchmarking tool uses 1 as its baseline count
no_err = all(df["errors"] <= 1)
result_body += f"* No errors: {no_err}\n"
result_body += "\n### Results table\n\n"
result_body += str(df.to_markdown())
post_comment_in_pr(result_body)
assert orch_mean
assert orch_nth
@pytest.mark.benchmark
@pytest.mark.usefixtures("argo_worfklows")
def test_workers_performance():
sort_by = ["apiType", "serverWorkers"]
data_size = 10
data = [100.0] * data_size
data_tensor = {"data": {"tensor": {"values": data, "shape": [1, data_size]}}}
df = run_benchmark_and_capture_results(
api_type_list=["grpc", "rest"],
server_workers_list=["1", "5", "10"],
benchmark_concurrency_list=["10", "100", "1000"],
parallelism="1",
requests_cpu_list=["4000Mi"],
limits_cpu_list=["4000Mi"],
image_list=["seldonio/seldontest_predict:1.10.0-dev"],
benchmark_data=data_tensor,
)
df = df.sort_values(sort_by)
result_body = "# Benchmark results - Testing Workers Performance\n\n"
result_body += "\n### Results table\n\n"
result_body += str(df.to_markdown())
post_comment_in_pr(result_body)
@pytest.mark.benchmark
@pytest.mark.usefixtures("argo_worfklows")
def test_python_wrapper_v1_vs_v2_iris():
sort_by = ["concurrency", "apiType"]
benchmark_concurrency_list = ["1", "50", "150"]
result_body = ""
result_body += "\n# Benchmark Results - Python Wrapper V1 vs V2\n\n"
# Using single worker as fastapi also uses single worker
df_pywrapper = run_benchmark_and_capture_results(
api_type_list=["rest", "grpc"],
protocol="seldon",
server_list=["SKLEARN_SERVER"],
benchmark_concurrency_list=benchmark_concurrency_list,
model_uri_list=["gs://seldon-models/v1.12.0-dev/sklearn/iris"],
benchmark_data={"data": {"ndarray": [[1, 2, 3, 4]]}},
)
df_pywrapper = df_pywrapper.sort_values(sort_by)
conc_idx = df_pywrapper["concurrency"] == 1
# Python V1 Wrapper Validations
# Ensure base mean performance latency below 10 ms
v1_latency_mean = all((df_pywrapper[conc_idx]["mean"] < 10))
result_body += f"* V1 base mean performance latency under 10ms: {v1_latency_mean}\n"
    # Ensure 99th percentiles are not spiking above 10ms
    v1_latency_nth = all(df_pywrapper[conc_idx]["99th"] < 10)
    result_body += f"* V1 base 99th performance latency under 10ms: {v1_latency_nth}\n"
# Ensure throughput is above 180 rps for REST
v1_rps_rest = all(
df_pywrapper[(df_pywrapper["apiType"] == "rest") & conc_idx][
"throughputAchieved"
]
> 180
)
result_body += f"* V1 base throughput above 180rps: {v1_rps_rest}\n"
# Ensure throughput is above 250 rps for GRPC
v1_rps_grpc = all(
df_pywrapper[(df_pywrapper["apiType"] == "grpc") & conc_idx][
"throughputAchieved"
]
> 250
)
result_body += f"* V1 base throughput above 250rps: {v1_rps_grpc}\n"
    # Validate latency added by adding service orchestrator is lower than 4ms
    # TODO: Validate equivalent of parallel workers in MLServer
df_mlserver = run_benchmark_and_capture_results(
api_type_list=["rest", "grpc"],
model_name="classifier",
protocol="kfserving",
server_list=["SKLEARN_SERVER"],
model_uri_list=["gs://seldon-models/sklearn/iris-0.23.2/lr_model"],
benchmark_concurrency_list=benchmark_concurrency_list,
benchmark_data={
"inputs": [
{
"name": "predict",
"datatype": "FP32",
"shape": [1, 4],
"data": [[1, 2, 3, 4]],
}
]
},
benchmark_grpc_data_override={
"model_name": "classifier",
"inputs": [
{
"name": "predict",
"datatype": "FP32",
"shape": [1, 4],
"contents": {"fp32_contents": [1, 2, 3, 4]},
}
],
},
)
# First we sort the dataframes to ensure they are compared correctly
df_mlserver = df_mlserver.sort_values(sort_by)
# Python V1 Wrapper Validations
conc_idx = df_mlserver["concurrency"] == 1
# Ensure all mean performance latency below 5 ms
v2_latency_mean = all(df_mlserver[conc_idx]["mean"] < 5)
result_body += f"* V2 mean performance latency under 5ms: {v2_latency_mean}\n"
    # Ensure 99th percentiles are not spiking above 10ms
    v2_latency_nth = all(df_mlserver[conc_idx]["99th"] < 10)
    result_body += f"* V2 99th performance latency under 10ms: {v2_latency_nth}\n"
    # Ensure throughput is above 250 rps for REST
v2_rps_rest = all(
df_mlserver[(df_mlserver["apiType"] == "rest") & conc_idx]["throughputAchieved"]
> 250
)
result_body += f"* V2 REST throughput above 250rps: {v2_rps_rest}\n"
# Ensure throughput is above 250 rps for GRPC
v2_rps_grpc = all(
df_mlserver[(df_mlserver["apiType"] == "grpc") & conc_idx]["throughputAchieved"]
> 250
)
result_body += f"* V2 throughput above 300rps: {v2_rps_grpc}\n"
result_body += "\n### Python V1 Wrapper Results table\n\n"
result_body += str(df_pywrapper.to_markdown())
result_body += "\n\n\n### Python V2 MLServer Results table\n\n"
result_body += str(df_mlserver.to_markdown())
post_comment_in_pr(result_body)
assert v1_latency_mean
assert v1_latency_nth
assert v1_rps_rest
assert v1_rps_grpc
assert v2_latency_mean
assert v2_latency_nth
assert v2_rps_rest
assert v2_rps_grpc
@pytest.mark.benchmark
@pytest.mark.usefixtures("argo_worfklows")
def test_v1_seldon_data_types():
sort_by = ["concurrency", "apiType"]
# 10000 element array
data_size = 10_000
data = [100.0] * data_size
benchmark_concurrency_list = ["1", "50", "150"]
image_list = ["seldonio/seldontest_predict:1.10.0-dev"]
data_ndarray = {"data": {"ndarray": data}}
data_tensor = {"data": {"tensor": {"values": data, "shape": [1, data_size]}}}
array = np.array(data)
tftensor_proto = tf.make_tensor_proto(array)
tftensor_json_str = json_format.MessageToJson(tftensor_proto)
tftensor_dict = json.loads(tftensor_json_str)
data_tftensor = {"data": {"tftensor": tftensor_dict}}
df_ndarray = run_benchmark_and_capture_results(
api_type_list=["rest", "grpc"],
image_list=image_list,
benchmark_concurrency_list=benchmark_concurrency_list,
benchmark_data=data_ndarray,
)
df_ndarray = df_ndarray.sort_values(sort_by)
df_tensor = run_benchmark_and_capture_results(
api_type_list=["rest", "grpc"],
image_list=image_list,
benchmark_concurrency_list=benchmark_concurrency_list,
benchmark_data=data_tensor,
)
df_tensor = df_tensor.sort_values(sort_by)
df_tftensor = run_benchmark_and_capture_results(
api_type_list=["rest", "grpc"],
image_list=image_list,
benchmark_concurrency_list=benchmark_concurrency_list,
benchmark_data=data_tftensor,
)
df_tftensor = df_tftensor.sort_values(sort_by)
result_body = "# Benchmark results - Testing Seldon V1 Data Types\n\n"
result_body += "\n### Results for NDArray\n\n"
result_body += str(df_ndarray.to_markdown())
result_body += "\n### Results for Tensor\n\n"
result_body += str(df_tensor.to_markdown())
result_body += "\n### Results for TFTensor\n\n"
result_body += str(df_tftensor.to_markdown())
post_comment_in_pr(result_body)
|
python/dlbs/result_processor.py | joehandzik/dlcookbook-dlbs | 123 | 12752991 | # (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
* Validate that every benchmark in ``input-file`` has mandatory parameters
defined in ``params``
$ python result_processor.py validate --input-file= --params=
* Filter benchmarks in ``input-file`` by throwing away those not containing
specific parameters defined in ``params``. The filtered subset of benchmarks
is written to ``output-file``.
$ python result_processor.py filter --input-file= --params= --output-file=
* Update every benchmark in ``input-file`` by overriding values of specific
parameters which value are defined in ``params``. The updated subset of
benchmarks is written to ``output-file``.
$ python result_processor.py update --input-file= --params= --output-file=
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
from collections import defaultdict
from dlbs.utils import Six
from dlbs.utils import DictUtils
from dlbs.processor import Processor
def load_json_file(file_name):
""" Loads a json object from a file.
:param str file_name: A file name to load JSON object from.
:return: A loaded JSON object.
"""
with open(file_name) as file_obj:
return json.load(file_obj)
def get_params(params):
"""Loads parameters specified by params.
:param str params: A JSON parsable string that defines how parameters
need to be loaded. See function comments on how it is
done.
:return: A dictionary with keys being parameters and values being their
values. Null value means no value - that's perfectly valid case.
:rtype: dict
    ``params`` is a JSON-parsable string treated differently depending
    on its type:
    * ``string`` The value is a file name that contains a JSON object
    * ``list`` The list of parameters
    * ``dict`` The dictionary that maps parameters to their values.
    If the type is a list, or the loaded JSON object is a list, it is
    converted to a dictionary with null values.
"""
parsed_params = json.loads(params)
if isinstance(parsed_params, Six.string_types):
parsed_params = load_json_file(parsed_params)
if isinstance(parsed_params, list):
parsed_params = dict.fromkeys(parsed_params, None)
if not isinstance(parsed_params, dict):
raise ValueError("Invalid type of object that holds parameters (%s)" % type(parsed_params))
return parsed_params
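# A minimal sketch of the three accepted ``params`` forms (values below are
# hypothetical and shown only as commented examples, not executed here):
#
#   get_params('["exp.framework", "exp.model"]')
#   # -> {'exp.framework': None, 'exp.model': None}
#   get_params('{"exp.framework": "tensorflow"}')
#   # -> {'exp.framework': 'tensorflow'}
#   get_params('"my_params.json"')  # a file name whose JSON content is loaded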
def validate_benchmarks(args):
"""Validates benchmarks ensuring every benchmark contains mandatory parameters.
Also make sure `exp.id`s are unique.
:param argparse args: Command line arguments.
The following command line arguments are used:
* ``args.input_file`` A file with benchmark results.
* ``args.params`` Specification of mandatory parameters. For format,
read comments of ``get_params`` function
"""
# Load benchmarks and parameters.
benchmarks = load_json_file(args.input_file)['data']
params = get_params(args.params)
# Figure out missing parameters.
missing_params = defaultdict(lambda: 0)
exp_ids = set() # All identifiers of experiments
duplicates = False # If two or more experiments have the same ID
for benchmark in benchmarks:
keys = [key for key in params if key not in benchmark]
for key in keys:
missing_params[key] += 1
if 'exp.id' in benchmark:
if benchmark['exp.id'] not in exp_ids:
exp_ids.add(benchmark['exp.id'])
else:
duplicates = True
# Report validation results.
print("Number of benchmarks: %d" % len(benchmarks))
if not missing_params and not duplicates:
print("Benchmark validation result: SUCCESS")
else:
print("Benchmark validation result: FAILURE")
if len(missing_params) > 0:
print("missing parameters:")
for missing_param in missing_params:
print("\t%s: %d" % (missing_param, missing_params[missing_param]))
if duplicates:
print("Several benchmarks have same identifier (exp.id)")
def filter_benchmarks(args):
"""Filter benchmarks by removing those that do not contain provided parameters.
:param argparse args: Command line arguments.
The following command line arguments are used:
* ``args.input_file`` A file with benchmark results.
* ``args.params`` Specification of mandatory parameters. For format,
read comments of ``get_params`` function
* ``args.output_file`` An output file with updated benchmark results.
"""
# Load benchmarks and parameters
input_benchmarks = load_json_file(args.input_file)['data']
params = get_params(args.params)
# Filter benchmarks
output_benchmarks = []
for input_benchmark in input_benchmarks:
keep = True
for key in params:
if key not in input_benchmark or not input_benchmark[key]:
keep = False
break
if keep:
output_benchmarks.append(input_benchmark)
# Report results and serialize
print("Number of input benchmarks: %d" % len(input_benchmarks))
print("Number of output benchmarks: %d" % len(output_benchmarks))
DictUtils.dump_json_to_file({"data": output_benchmarks}, args.output_file)
def update_benchmarks(args):
"""Update benchmarks by overriding parameters provided by a user.
:param argparse args: Command line arguments.
The following command line arguments are used:
* ``args.input_file`` A file with benchmark results.
* ``args.params`` Specification of mandatory parameters. For format,
read comments of ``get_params`` function
* ``args.output_file`` An output file with updated benchmark results.
"""
# Load benchmarks and parameters.
benchmarks = load_json_file(args.input_file)['data']
prefix = '__'
params = {prefix + k: v for k, v in get_params(args.params).items()}
# Add prefixed parameters to all benchmarks.
for benchmark in benchmarks:
benchmark.update(params)
# Process and compute variables
Processor().compute_variables(benchmarks)
    # Strip the prefix, overwriting existing variables in case of a conflict
prefixed_keys = params.keys()
prefix_len = len(prefix)
output_benchmarks = []
for benchmark in benchmarks:
for k in prefixed_keys:
benchmark[k[prefix_len:]] = benchmark[k]
del benchmark[k]
if benchmark['exp.model'] != '':
output_benchmarks.append(benchmark)
benchmarks = output_benchmarks
# Serialize updated benchmarks.
DictUtils.dump_json_to_file({"data": benchmarks}, args.output_file)
def main():
"""Main function - parses command line args and processes benchmarks."""
parser = argparse.ArgumentParser()
parser.add_argument(
'action', type=str,
help="Action to perform ('validate', 'filter', 'update')"
)
parser.add_argument(
'--input_file', '--input-file', type=str, required=True, default=None,
help='An input JSON file. This file is never modified.'
)
parser.add_argument(
'--params', type=str, required=False, default=None,
help="JSON array or object OR string. If string it's considered as a file name."
)
parser.add_argument(
'--output_file', '--output-file', required=False, default=False,
help="Output JSON file, possible, modified version of an input JSON file."
)
args = parser.parse_args()
if args.action == 'validate':
validate_benchmarks(args)
elif args.action == 'filter':
filter_benchmarks(args)
elif args.action == 'update':
update_benchmarks(args)
else:
raise ValueError("Action parameter has invalid value (%s). "
"Must be one of ['validate', 'filter', 'update']" % args.action)
if __name__ == '__main__':
main()
|
yotta/version.py | microbit-foundation/yotta | 176 | 12753014 |
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import argparse
import logging
import os
# version, , represent versions and specifications, internal
from yotta.lib import version
# Component, , represents an installed component, internal
from yotta.lib import component
# Target, , represents an installed target, internal
from yotta.lib import target
# vcs, , represent version controlled directories, internal
from yotta.lib import vcs
def addOptions(parser):
def patchType(s):
if s.lower() in ('major', 'minor', 'patch'):
return s.lower()
try:
return version.Version(s)
except:
raise argparse.ArgumentTypeError(
'"%s" is not a valid version (expected patch, major, minor, or something like 1.2.3)' % s
)
parser.add_argument('action', type=patchType, nargs='?', help='[patch | minor | major | <version>]')
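# A small sketch of how patchType interprets its argument (illustrative only):
#
#   patchType('minor')  # -> 'minor'
#   patchType('1.2.3')  # -> version.Version('1.2.3')
#   patchType('oops')   # -> raises argparse.ArgumentTypeError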
def execCommand(args, following_args):
wd = os.getcwd()
c = component.Component(wd)
# skip testing for target if we already found a component
t = None if c else target.Target(wd)
if not (c or t):
logging.debug(str(c.getError()))
if t:
logging.debug(str(t.getError()))
logging.error('The current directory does not contain a valid module or target.')
return 1
else:
# only needed separate objects in order to display errors
p = (c or t)
if args.action:
try:
if not p.vcsIsClean():
logging.error('The working directory is not clean')
return 1
v = p.getVersion()
pre_script_env = {
'YOTTA_OLD_VERSION':str(v)
}
if args.action in ('major', 'minor', 'patch'):
v.bump(args.action)
else:
v = args.action
pre_script_env['YOTTA_NEW_VERSION'] =str(v)
errcode = p.runScript('preVersion', pre_script_env)
if errcode:
return errcode
logging.info('@%s' % v)
p.setVersion(v)
p.writeDescription()
errcode = p.runScript('postVersion')
if errcode:
return errcode
p.commitVCS(tag='v'+str(v))
except vcs.VCSError as e:
logging.error(e)
else:
logging.info(str(p.getVersion()))
|
test/programytest/utils/logging/test_snapshot.py | cdoebler1/AIML2 | 345 | 12753028 |
import unittest
from programy.utils.logging.ylogger import YLoggerSnapshot
class YLoggerSnapshotTests(unittest.TestCase):
def test_snapshot_with_defaults(self):
snapshot = YLoggerSnapshot()
self.assertIsNotNone(snapshot)
self.assertEquals("Critical(0) Fatal(0) Error(0) Exception(0) Warning(0) Info(0), Debug(0)", str(snapshot))
self.assertEqual({'criticals': 0,
'debugs': 0,
'errors': 0,
'exceptions': 0,
'fatals': 0,
'infos': 0,
'warnings': 0}, snapshot.to_json())
def test_snapshot_without_defaults(self):
snapshot = YLoggerSnapshot(criticals=1, fatals=2, errors=3, exceptions=4, warnings=5, infos=6, debugs=7)
self.assertIsNotNone(snapshot)
self.assertEquals("Critical(1) Fatal(2) Error(3) Exception(4) Warning(5) Info(6), Debug(7)", str(snapshot))
self.assertEqual({'criticals': 1,
'debugs': 7,
'errors': 3,
'exceptions': 4,
'fatals': 2,
'infos': 6,
'warnings': 5}, snapshot.to_json())
|
src/version.py | copyit/picbed | 115 | 12753039 | __version__ = "1.13.3"
|
homeassistant/components/oem/__init__.py | domwillcode/home-assistant | 30,023 | 12753057 | """The oem component."""
|
utils_cv/action_recognition/dataset.py | muminkoykiran/computervision-recipes | 7,899 | 12753070 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import copy
import math
from pathlib import Path
import warnings
from typing import Callable, Tuple, Union, List
import decord
from einops.layers.torch import Rearrange
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randint
import torch
from torch.utils.data import Dataset, Subset, DataLoader
from torchvision.transforms import Compose
from .references import transforms_video as transforms
from .references.functional_video import denormalize
from ..common.misc import Config
from ..common.gpu import num_devices, db_num_workers
Trans = Callable[[object, dict], Tuple[object, dict]]
DEFAULT_MEAN = (0.43216, 0.394666, 0.37645)
DEFAULT_STD = (0.22803, 0.22145, 0.216989)
class VideoRecord(object):
"""
This class is used for parsing split-files where each row contains a path
and a label:
Ex:
```
path/to/my/clip_1 3
path/to/another/clip_2 32
```
"""
def __init__(self, data: List[str]):
""" Initialized a VideoRecord
Ex.
data = ["path/to/video.mp4", 2, "cooking"]
Args:
            data: a list where the first element is the path and the second element is
                the label, and the third element (optional) is the label name
"""
assert len(data) >= 2 and len(data) <= 3
assert isinstance(data[0], str)
assert isinstance(int(data[1]), int)
if len(data) == 3:
assert isinstance(data[2], str)
self._data = data
self._num_frames = None
@property
def path(self) -> str:
return self._data[0]
@property
def num_frames(self) -> int:
if self._num_frames is None:
self._num_frames = int(
len([x for x in Path(self._data[0]).glob("img_*")]) - 1
)
return self._num_frames
@property
def label(self) -> int:
return int(self._data[1])
@property
def label_name(self) -> str:
return None if len(self._data) <= 2 else self._data[2]
def get_transforms(train: bool = True, tfms_config: Config = None) -> Trans:
""" Get default transformations to apply depending on whether we're applying it to the training or the validation set. If no tfms configurations are passed in, use the defaults.
Args:
train: whether or not this is for training
tfms_config: Config object with tranforms-related configs
Returns:
A list of transforms to apply
"""
if tfms_config is None:
tfms_config = get_default_tfms_config(train=train)
# 1. resize
tfms = [
transforms.ToTensorVideo(),
transforms.ResizeVideo(
tfms_config.im_scale, tfms_config.resize_keep_ratio
),
]
# 2. crop
if tfms_config.random_crop:
if tfms_config.random_crop_scales:
crop = transforms.RandomResizedCropVideo(
tfms_config.input_size, tfms_config.random_crop_scales
)
else:
crop = transforms.RandomCropVideo(tfms_config.input_size)
else:
crop = transforms.CenterCropVideo(tfms_config.input_size)
tfms.append(crop)
# 3. flip
tfms.append(transforms.RandomHorizontalFlipVideo(tfms_config.flip_ratio))
# 4. normalize
tfms.append(transforms.NormalizeVideo(tfms_config.mean, tfms_config.std))
return Compose(tfms)
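# A minimal usage sketch (assumes a decoded uint8 clip of shape [T, H, W, C];
# values are hypothetical and shown only as commented examples):
#
#   train_tfms = get_transforms(train=True)
#   clip = torch.randint(0, 255, (8, 128, 171, 3), dtype=torch.uint8)
#   out = train_tfms(clip)  # float tensor of shape [C, T, 112, 112]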
def get_default_tfms_config(train: bool) -> Config:
"""
Args:
train: whether or not this is for training
Settings:
input_size (int or tuple): Model input image size.
im_scale (int or tuple): Resize target size.
resize_keep_ratio (bool): If True, keep the original ratio when resizing.
mean (tuple): Normalization mean.
        std (tuple): Normalization std.
flip_ratio (float): Horizontal flip ratio.
random_crop (bool): If False, do center-crop.
random_crop_scales (tuple): Range of size of the origin size random cropped.
"""
flip_ratio = 0.5 if train else 0.0
random_crop = True if train else False
random_crop_scales = (0.6, 1.0) if train else None
return Config(
dict(
input_size=112,
im_scale=128,
resize_keep_ratio=True,
mean=DEFAULT_MEAN,
std=DEFAULT_STD,
flip_ratio=flip_ratio,
random_crop=random_crop,
random_crop_scales=random_crop_scales,
)
)
class VideoDataset:
""" A video recognition dataset. """
def __init__(
self,
root: str,
seed: int = None,
train_pct: float = 0.75,
num_samples: int = 1,
sample_length: int = 8,
sample_step: int = 1,
temporal_jitter: bool = True,
temporal_jitter_step: int = 2,
random_shift: bool = True,
batch_size: int = 8,
video_ext: str = "mp4",
warning: bool = False,
train_split_file: str = None,
test_split_file: str = None,
train_transforms: Trans = get_transforms(train=True),
test_transforms: Trans = get_transforms(train=False),
) -> None:
""" initialize dataset
Arg:
root: Videos directory.
seed: random seed
train_pct: percentage of dataset to use for training
num_samples: Number of clips to sample from each video.
sample_length: Number of consecutive frames to sample from a video (i.e. clip length).
sample_step: Sampling step.
temporal_jitter: Randomly skip frames when sampling each frames.
temporal_jitter_step: temporal jitter in frames
random_shift: Random temporal shift when sample a clip.
video_ext: Video file extension.
warning: On or off warning.
train_split_file: Annotation file containing video filenames and labels.
test_split_file: Annotation file containing video filenames and labels.
train_transforms: transforms for training
test_transforms: transforms for testing
"""
assert sample_step > 0
assert num_samples > 0
if temporal_jitter:
assert temporal_jitter_step > 0
if train_split_file:
assert Path(train_split_file).exists()
assert (
test_split_file is not None and Path(test_split_file).exists()
)
if test_split_file:
assert Path(test_split_file).exists()
assert (
train_split_file is not None
and Path(train_split_file).exists()
)
self.root = root
self.seed = seed
self.num_samples = num_samples
self.sample_length = sample_length
self.sample_step = sample_step
self.presample_length = sample_length * sample_step
self.temporal_jitter_step = temporal_jitter_step
self.train_transforms = train_transforms
self.test_transforms = test_transforms
self.random_shift = random_shift
self.temporal_jitter = temporal_jitter
self.batch_size = batch_size
self.video_ext = video_ext
self.warning = warning
# create training and validation datasets
self.train_ds, self.test_ds = (
self.split_with_file(
train_split_file=train_split_file,
test_split_file=test_split_file,
)
if train_split_file
else self.split_by_folder(train_pct=train_pct)
)
# initialize dataloaders
self.init_data_loaders()
def split_by_folder(
self, train_pct: float = 0.8
) -> Tuple[Dataset, Dataset]:
""" Split this dataset into a training and testing set based on the
folders that the videos are in.
```
/data
+-- action_class_1
| +-- video_01.mp4
| +-- video_02.mp4
| +-- ...
+-- action_class_2
| +-- video_11.mp4
| +-- video_12.mp4
| +-- ...
+-- ...
```
Args:
            train_pct: the ratio of videos to use for training vs
                testing
Return
A training and testing dataset in that order
"""
self.video_records = []
# get all dirs in root (and make sure they are dirs)
dirs = []
for entry in os.listdir(self.root):
if os.path.isdir(os.path.join(self.root, entry)):
dirs.append(os.path.join(self.root, entry))
# add each video in each dir as a video record
label = 0
self.classes = []
for action in dirs:
action = os.path.basename(os.path.normpath(action))
self.video_records.extend(
[
VideoRecord(
[
os.path.join(self.root, action, vid.split(".")[0]),
label,
action,
]
)
for vid in os.listdir(os.path.join(self.root, action))
]
)
label += 1
self.classes.append(action)
# random split
test_num = math.floor(len(self) * (1 - train_pct))
if self.seed:
torch.manual_seed(self.seed)
# set indices
indices = torch.randperm(len(self)).tolist()
train_range = indices[test_num:]
test_range = indices[:test_num]
return self.split_train_test(train_range, test_range)
def split_with_file(
self,
train_split_file: Union[Path, str],
test_split_file: Union[Path, str],
) -> Tuple[Dataset, Dataset]:
""" Split this dataset into a training and testing set using a split file.
Each line in the split file must use the form:
```
path/to/jumping/video_name_1 3
path/to/swimming/video_name_2 5
path/to/another/jumping/video_name_3 3
```
Args:
            train_split_file: annotation file listing the training videos and labels
            test_split_file: annotation file listing the testing videos and labels
Return:
A training and testing dataset in that order
"""
self.video_records = []
# add train records
self.video_records.extend(
[
VideoRecord(row.strip().split(" "))
for row in open(train_split_file)
]
)
train_len = len(self.video_records)
# add validation records
self.video_records.extend(
[
VideoRecord(row.strip().split(" "))
for row in open(test_split_file)
]
)
# create indices
indices = torch.arange(0, len(self.video_records))
train_range = indices[:train_len]
test_range = indices[train_len:]
return self.split_train_test(train_range, test_range)
def split_train_test(
self, train_range: torch.Tensor, test_range: torch.Tensor,
) -> Tuple[Dataset, Dataset]:
""" Split this dataset into a training and testing set
Args:
train_range: range of indices for training set
test_range: range of indices for testing set
Return
A training and testing dataset in that order
"""
# create train subset
train = copy.deepcopy(Subset(self, train_range))
train.dataset.transforms = self.train_transforms
train.dataset.sample_step = (
self.temporal_jitter_step
if self.temporal_jitter
else self.sample_step
)
train.dataset.presample_length = self.sample_length * self.sample_step
# create test subset
test = copy.deepcopy(Subset(self, test_range))
test.dataset.transforms = self.test_transforms
test.dataset.random_shift = False
test.dataset.temporal_jitter = False
return train, test
def init_data_loaders(self) -> None:
""" Create training and validation data loaders. """
devices = num_devices()
self.train_dl = DataLoader(
self.train_ds,
batch_size=self.batch_size * devices,
shuffle=True,
num_workers=db_num_workers(),
pin_memory=True,
)
self.test_dl = DataLoader(
self.test_ds,
batch_size=self.batch_size * devices,
shuffle=False,
num_workers=db_num_workers(),
pin_memory=True,
)
def __len__(self) -> int:
return len(self.video_records)
def _sample_indices(self, record: VideoRecord) -> List[int]:
"""
Create a list of frame-wise offsets into a video record. Depending on
whether or not 'random shift' is used, perform a uniform sample or a
random sample.
Args:
record (VideoRecord): A video record.
Return:
list: Segment offsets (start indices)
"""
if record.num_frames > self.presample_length:
if self.random_shift:
# Random sample
offsets = np.sort(
randint(
record.num_frames - self.presample_length + 1,
size=self.num_samples,
)
)
else:
# Uniform sample
distance = (
record.num_frames - self.presample_length + 1
) / self.num_samples
offsets = np.array(
[
int(distance / 2.0 + distance * x)
for x in range(self.num_samples)
]
)
else:
if self.warning:
warnings.warn(
f"num_samples and/or sample_length > num_frames in {record.path}"
)
offsets = np.zeros((self.num_samples,), dtype=int)
return offsets
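    # A worked sketch of the sampling above (hypothetical numbers): with
    # num_frames=100, sample_length=8, sample_step=2 (presample_length=16) and
    # num_samples=4, uniform sampling yields offsets [10, 31, 53, 74]
    # (int(distance/2 + distance*x) with distance=21.25), while random
    # sampling draws 4 sorted offsets from the range [0, 85).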
def _get_frames(
self, video_reader: decord.VideoReader, offset: int,
) -> List[np.ndarray]:
""" Get frames at sample length.
Args:
video_reader: the decord tool for parsing videos
offset: where to start the reader from
Returns
Frames at sample length in a List
"""
clip = list()
# decord.seek() seems to have a bug. use seek_accurate().
video_reader.seek_accurate(offset)
# first frame
clip.append(video_reader.next().asnumpy())
# remaining frames
try:
for i in range(self.sample_length - 1):
step = (
randint(self.sample_step + 1)
if self.temporal_jitter
else self.sample_step
)
if step == 0 and self.temporal_jitter:
clip.append(clip[-1].copy())
else:
if step > 1:
video_reader.skip_frames(step - 1)
cur_frame = video_reader.next().asnumpy()
clip.append(cur_frame)
except StopIteration:
# pass when video has ended
pass
# if clip needs more frames, simply duplicate the last frame in the clip.
while len(clip) < self.sample_length:
clip.append(clip[-1].copy())
return clip
def __getitem__(self, idx: int) -> Tuple[torch.tensor, int]:
"""
Return:
(clips (torch.tensor), label (int))
"""
record = self.video_records[idx]
video_reader = decord.VideoReader(
"{}.{}".format(
os.path.join(self.root, record.path), self.video_ext
),
# TODO try to add `ctx=decord.ndarray.gpu(0) or .cuda(0)`
)
record._num_frames = len(video_reader)
offsets = self._sample_indices(record)
clips = np.array([self._get_frames(video_reader, o) for o in offsets])
if self.num_samples == 1:
return (
# [T, H, W, C] -> [C, T, H, W]
self.transforms(torch.from_numpy(clips[0])),
record.label,
)
else:
return (
# [S, T, H, W, C] -> [S, C, T, H, W]
torch.stack(
[self.transforms(torch.from_numpy(c)) for c in clips]
),
record.label,
)
def _show_batch(
self,
images: List[torch.tensor],
labels: List[int],
sample_length: int,
mean: Tuple[int, int, int] = DEFAULT_MEAN,
std: Tuple[int, int, int] = DEFAULT_STD,
) -> None:
"""
Display a batch of images.
Args:
images: List of sample (clip) tensors
labels: List of labels
sample_length: Number of frames to show for each sample
mean: Normalization mean
std: Normalization std-dev
"""
batch_size = len(images)
plt.tight_layout()
fig, axs = plt.subplots(
batch_size,
sample_length,
figsize=(4 * sample_length, 3 * batch_size),
)
for i, ax in enumerate(axs):
if batch_size == 1:
clip = images[0]
else:
clip = images[i]
clip = Rearrange("c t h w -> t c h w")(clip)
if not isinstance(ax, np.ndarray):
ax = [ax]
for j, a in enumerate(ax):
a.axis("off")
a.imshow(
np.moveaxis(denormalize(clip[j], mean, std).numpy(), 0, -1)
)
# display label/label_name on the first image
if j == 0:
a.text(
x=3,
y=15,
s=f"{labels[i]}",
fontsize=20,
bbox=dict(facecolor="white", alpha=0.80),
)
def show_batch(self, train_or_test: str = "train", rows: int = 2) -> None:
"""Plot first few samples in the datasets"""
if train_or_test == "train":
batch = [self.train_ds[i] for i in range(rows)]
elif train_or_test == "test":
batch = [self.test_ds[i] for i in range(rows)]
else:
raise ValueError("Unknown data type {}".format(which_data))
images = [im[0] for im in batch]
labels = [im[1] for im in batch]
self._show_batch(images, labels, self.sample_length)
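# A minimal end-to-end sketch (hypothetical path; assumes the folder-per-class
# layout documented in split_by_folder; shown only as commented examples):
#
#   data = VideoDataset("/data/videos", batch_size=8, sample_length=8)
#   clips, labels = next(iter(data.train_dl))   # clips: [B, C, T, 112, 112]
#   data.show_batch(train_or_test="train", rows=2)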
|
hwt/hdl/types/string.py | ufo2011/hwt | 134 | 12753097 | from hwt.doc_markers import internal
from hwt.hdl.types.hdlType import HdlType
class HString(HdlType):
def all_mask(self):
return 1
@internal
@classmethod
def getValueCls(cls):
try:
return cls._valCls
except AttributeError:
from hwt.hdl.types.stringVal import HStringVal
cls._valCls = HStringVal
return cls._valCls
|
contrib/packs/tests/test_action_aliases.py | muyouming/st2 | 4,920 | 12753098 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.base import BaseActionAliasTestCase
class PackGet(BaseActionAliasTestCase):
action_alias_name = "pack_get"
def test_alias_pack_get(self):
format_string = self.action_alias_db.formats[0]["representation"][0]
format_strings = self.action_alias_db.get_format_strings()
command = "pack get st2"
expected_parameters = {"pack": "st2"}
self.assertExtractedParametersMatch(
format_string=format_string, command=command, parameters=expected_parameters
)
self.assertCommandMatchesExactlyOneFormatString(
format_strings=format_strings, command=command
)
class PackInstall(BaseActionAliasTestCase):
action_alias_name = "pack_install"
def test_alias_pack_install(self):
format_string = self.action_alias_db.formats[0]["representation"][0]
command = "pack install st2"
expected_parameters = {"packs": "st2"}
self.assertExtractedParametersMatch(
format_string=format_string, command=command, parameters=expected_parameters
)
class PackSearch(BaseActionAliasTestCase):
action_alias_name = "pack_search"
def test_alias_pack_search(self):
format_string = self.action_alias_db.formats[0]["representation"][0]
format_strings = self.action_alias_db.get_format_strings()
command = "pack search st2"
expected_parameters = {"query": "st2"}
self.assertExtractedParametersMatch(
format_string=format_string, command=command, parameters=expected_parameters
)
self.assertCommandMatchesExactlyOneFormatString(
format_strings=format_strings, command=command
)
class PackShow(BaseActionAliasTestCase):
action_alias_name = "pack_show"
def test_alias_pack_show(self):
format_string = self.action_alias_db.formats[0]["representation"][0]
format_strings = self.action_alias_db.get_format_strings()
command = "pack show st2"
expected_parameters = {"pack": "st2"}
self.assertExtractedParametersMatch(
format_string=format_string, command=command, parameters=expected_parameters
)
self.assertCommandMatchesExactlyOneFormatString(
format_strings=format_strings, command=command
)
|
docs/source/internals/includes/snippets/api.py | azadoks/aiida-core | 180 | 12753110 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_restful import Resource
from aiida.restapi.api import AiidaApi, App
from aiida.restapi.run_api import run_api
class NewResource(Resource):
"""
resource containing GET and POST methods. Description of each method
follows:
GET: returns id, ctime, and attributes of the latest created Dict.
POST: creates a Dict object, stores it in the database,
and returns its newly assigned id.
"""
def get(self):
from aiida.orm import Dict, QueryBuilder
qb = QueryBuilder()
qb.append(Dict,
project=['id', 'ctime', 'attributes'],
tag='pdata')
qb.order_by({'pdata': {'ctime': 'desc'}})
result = qb.first()
        # Results are returned as a dictionary; datetime objects are
        # serialized as ISO 8601
return dict(id=result[0],
ctime=result[1].isoformat(),
attributes=result[2])
def post(self):
from aiida.orm import Dict
params = dict(property1='spam', property2='egg')
paramsData = Dict(dict=params).store()
return {'id': paramsData.pk}
class NewApi(AiidaApi):
def __init__(self, app=None, **kwargs):
"""
This init serves to add new endpoints to the basic AiiDA Api
"""
super().__init__(app=app, **kwargs)
self.add_resource(NewResource, '/new-endpoint/', strict_slashes=False)
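# A hypothetical way to exercise the new endpoint once the app is running
# (the /api/v4 prefix is the usual AiiDA REST default and may differ in your
# configuration):
#
#   curl http://127.0.0.1:5000/api/v4/new-endpoint/           # GET latest Dict
#   curl -X POST http://127.0.0.1:5000/api/v4/new-endpoint/   # store a new Dict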
# processing the options and running the app
from aiida import load_profile
import aiida.restapi.common as common
CONFIG_DIR = common.__path__[0]
import click
@click.command()
@click.option('-P', '--port', type=click.INT, default=5000,
help='Port number')
@click.option('-H', '--hostname', default='127.0.0.1',
help='Hostname')
@click.option('-c','--config-dir','config',type=click.Path(exists=True), default=CONFIG_DIR,
help='the path of the configuration directory')
@click.option('--debug', 'debug', is_flag=True, default=False,
help='run app in debug mode')
@click.option('--wsgi-profile', 'wsgi_profile', is_flag=True, default=False,
help='to use WSGI profiler middleware for finding bottlenecks in web application')
def newendpoint(**kwargs):
"""
runs the REST api
"""
# Invoke the runner
run_api(App, NewApi, **kwargs)
# main program
if __name__ == '__main__':
"""
Run the app with the provided options. For example:
python example.py --hostname=127.0.0.2 --port=6000
"""
load_profile()
newendpoint()
|
scrapcore/tools.py | lantip/SerpScrap | 199 | 12753119 |
# -*- coding: utf-8 -*-
from collections import namedtuple
import csv
import json
import os
import threading
from scrapcore import database
class JsonStreamWriter():
"""Writes consecutive objects to an json output file."""
def __init__(self, filename):
self.file = open(filename, 'wt')
self.file.write('[')
self.last_object = None
def write(self, obj):
if self.last_object:
self.file.write(',')
json.dump(obj, self.file, indent=2, sort_keys=True)
self.last_object = id(obj)
def end(self):
self.file.write(']')
self.file.close()
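# A minimal usage sketch (hypothetical file name; commented example only):
#
#   writer = JsonStreamWriter('results.json')
#   writer.write({'query': 'example', 'rank': 1})
#   writer.write({'query': 'example', 'rank': 2})
#   writer.end()  # closes the JSON array -> [{...}, {...}]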
class CsvStreamWriter():
"""
Writes consecutive objects to an csv output file.
"""
def __init__(self, filename, csv_fieldnames):
self.csv_fieldnames = csv_fieldnames
self.file = open(filename, 'wt')
self.dict_writer = csv.DictWriter(
self.file,
fieldnames=csv_fieldnames,
delimiter=','
)
self.dict_writer.writeheader()
def write(self, data, serp):
for row in data['results']:
d = serp
d.update(row)
            d = {k: v if isinstance(v, str) else str(v) for k, v in d.items() if k in self.csv_fieldnames}
self.dict_writer.writerow(d)
def end(self):
self.file.close()
class ScrapeJobGenerator():
def get(self, keywords, search_engines, scrape_method, num_pages):
"""Get scrape jobs by keywords."""
for keyword in keywords:
for search_engine in search_engines:
for page in range(1, num_pages + 1):
yield {
'query': keyword,
'search_engine': search_engine,
'scrape_method': scrape_method,
'page_number': page
}
class Proxies():
Proxy = namedtuple('Proxy', 'proto, host, port, username, password')
def parse_proxy_file(self, fname):
"""Parses a proxy file
The format should be like the following:
socks5 XX.XXX.XX.XX:1080 username:password
socks4 XX.XXX.XX.XX:80 username:password
http XX.XXX.XX.XX:80
        If username and password aren't provided, we assume
that the proxy doesn't need auth credentials.
Args:
fname: The file name where to look for proxies.
Returns:
The parsed proxies.
Raises:
ValueError if no file with the path fname could be found.
"""
proxies = []
path = os.path.join(os.getcwd(), fname)
if os.path.exists(path):
with open(path, 'r') as pf:
for line in pf.readlines():
if not (line.strip().startswith('#') or
line.strip().startswith('//')):
tokens = line.replace('\n', '').split(' ')
try:
proto = tokens[0]
host, port = tokens[1].split(':')
except Exception:
raise Exception('''
Invalid proxy file.
Should have the following format: {}
'''.format(self.parse_proxy_file.__doc__)
)
if len(tokens) == 3:
username, password = tokens[2].split(':')
proxies.append(
self.Proxy(
proto=proto,
host=host,
port=port,
username=username,
password=password
)
)
else:
proxies.append(
self.Proxy(
proto=proto,
host=host,
port=port,
username='',
password=''
)
)
return proxies
else:
raise ValueError('No such file/directory')
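    # A sketch of the parsed result for the format documented above
    # (hypothetical proxy file; commented example only):
    #
    #   proxies = Proxies().parse_proxy_file('proxies.txt')
    #   # -> [Proxy(proto='socks5', host='10.0.0.1', port='1080',
    #   #           username='user', password='pass'), ...]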
def add_proxies_to_db(self, proxies, session):
"""Adds the list of proxies to the database.
If the proxy-ip already exists and the other data differs,
it will be overwritten.
Will not check the status of the proxy.
Args:
proxies: A list of proxies.
session: A database session to work with.
"""
for proxy in proxies:
if proxy:
p = session.query(database.Proxy).filter(proxy.host == database.Proxy.ip).first()
if not p:
p = database.Proxy(ip=proxy.host)
p.port = proxy.port
p.username = proxy.username
                p.password = proxy.password
p.proto = proxy.proto
session.add(p)
session.commit()
class ShowProgressQueue(threading.Thread):
"""Prints the number of keywords scraped already to show the user
the progress of the scraping process..
"""
def __init__(self, config, queue, num_keywords):
"""Create a ShowProgressQueue thread instance.
Args:
queue: A queue.Queue instance to share among the worker threads.
num_keywords: The number of total keywords that need to be scraped.
"""
super().__init__()
self.queue = queue
self.num_keywords = num_keywords
self.num_already_processed = 0
self.progress_fmt = '\033[92m{}/{} keywords processed.\033[0m'
def run(self):
while self.num_already_processed < self.num_keywords:
e = self.queue.get()
if e == 'done':
break
self.num_already_processed += 1
print(self.progress_fmt.format(self.num_already_processed, self.num_keywords), end='\r')
self.queue.task_done()
class Error(Exception):
pass
class ConfigurationError(Exception):
pass
class BlockedSearchException(Exception):
pass
|
yasql/apps/sqlorders/apps.py | Fanduzi/YaSQL | 443 | 12753123 |
from django.apps import AppConfig
class SqlordersConfig(AppConfig):
name = 'sqlorders'
verbose_name = 'SQL工单配置'
|
pkg/codegen/testing/test/testdata/output-funcs-edgeorder/python/pulumi_myedgeorder/_enums.py | goverdhan07/pulumi | 12,004 | 12753129 |
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'SupportedFilterTypes',
]
class SupportedFilterTypes(str, Enum):
"""
Type of product filter.
"""
SHIP_TO_COUNTRIES = "ShipToCountries"
"""
Ship to country
"""
DOUBLE_ENCRYPTION_STATUS = "DoubleEncryptionStatus"
"""
Double encryption status
"""
|
EventFilter/CSCRawToDigi/python/cscPacker_cfi.py | Purva-Chaudhari/cmssw | 852 | 12753157 | import FWCore.ParameterSet.Config as cms
## baseline configuration in the class itself
from EventFilter.CSCRawToDigi.cscPackerDef_cfi import cscPackerDef
cscpacker = cscPackerDef.clone()
## In Run-2 common: update the format version for new OTMBs in ME1/1
## Note: in the past, the packing with triggers and pretriggers was disabled
## for Run-2, Run-3 and Phase-2 scenarios. This should no longer be the case
## as of CMSSW_12_0_0_pre5
from Configuration.Eras.Modifier_run2_common_cff import run2_common
run2_common.toModify( cscpacker,
formatVersion = 2013)
## in Run-3 scenarios with GEM: pack GEM clusters
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
run3_GEM.toModify( cscpacker,
useGEMs = True)
|
server/www/packages/packages-windows/x86/ldap3/utils/config.py | tinygg/teleport | 640 | 12753194 |
"""
"""
# Created on 2016.08.31
#
# Author: <NAME>
#
# Copyright 2013 - 2020 <NAME>
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from sys import stdin, getdefaultencoding
from .. import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, NO_ATTRIBUTES, SEQUENCE_TYPES
from ..core.exceptions import LDAPConfigurationParameterError
# checks
_CLASSES_EXCLUDED_FROM_CHECK = ['subschema']
_ATTRIBUTES_EXCLUDED_FROM_CHECK = [ALL_ATTRIBUTES,
ALL_OPERATIONAL_ATTRIBUTES,
NO_ATTRIBUTES,
'ldapSyntaxes',
'matchingRules',
'matchingRuleUse',
'dITContentRules',
'dITStructureRules',
'nameForms',
'altServer',
'namingContexts',
'supportedControl',
'supportedExtension',
'supportedFeatures',
'supportedCapabilities',
'supportedLdapVersion',
'supportedSASLMechanisms',
'vendorName',
'vendorVersion',
'subschemaSubentry',
'ACL']
_UTF8_ENCODED_SYNTAXES = ['1.2.840.113556.1.4.904', # DN String [MICROSOFT]
'1.2.840.113556.1.4.1362', # String (Case) [MICROSOFT]
                          '1.3.6.1.4.1.1466.115.121.1.12',  # DN String [RFC4517]
                          '1.3.6.1.4.1.1466.115.121.1.15',  # Directory String [RFC4517]
                          '1.3.6.1.4.1.1466.115.121.1.41',  # Postal Address) [RFC4517]
                          '1.3.6.1.4.1.1466.115.121.1.58',  # Substring Assertion [RFC4517]
'2.16.840.1.113719.1.1.5.1.6', # Case Ignore List [NOVELL]
'2.16.840.1.113719.1.1.5.1.14', # Tagged String [NOVELL]
'2.16.840.1.113719.1.1.5.1.15', # Tagged Name and String [NOVELL]
'2.16.840.1.113719.1.1.5.1.23', # Tagged Name [NOVELL]
'2.16.840.1.113719.1.1.5.1.25'] # Typed Name [NOVELL]
_UTF8_ENCODED_TYPES = []
_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF = ['msds-memberOfTransitive', 'msds-memberTransitive', 'entryDN']
_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF = ['instanceType', 'nTSecurityDescriptor', 'objectCategory']
_CASE_INSENSITIVE_ATTRIBUTE_NAMES = True
_CASE_INSENSITIVE_SCHEMA_NAMES = True
# abstraction layer
_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX = 'OA_'
# communication
_POOLING_LOOP_TIMEOUT = 10 # number of seconds to wait before restarting a cycle to find an active server in the pool
_RESPONSE_SLEEPTIME = 0.05 # seconds to wait while waiting for a response in asynchronous strategies
_RESPONSE_WAITING_TIMEOUT = 3 # waiting timeout for receiving a response in asynchronous strategies
_SOCKET_SIZE = 4096 # socket byte size
_CHECK_AVAILABILITY_TIMEOUT = 2.5 # default timeout for socket connect when checking availability
_RESET_AVAILABILITY_TIMEOUT = 5 # default timeout for resetting the availability status when checking candidate addresses
_RESTARTABLE_SLEEPTIME = 2 # time to wait in a restartable strategy before retrying the request
_RESTARTABLE_TRIES = 30 # number of times to retry in a restartable strategy before giving up. Set to True for unlimited retries
_REUSABLE_THREADED_POOL_SIZE = 5
_REUSABLE_THREADED_LIFETIME = 3600 # 1 hour
_DEFAULT_THREADED_POOL_NAME = 'REUSABLE_DEFAULT_POOL'
_ADDRESS_INFO_REFRESH_TIME = 300 # seconds to wait before refreshing address info from dns
_ADDITIONAL_SERVER_ENCODINGS = ['latin-1', 'koi8-r'] # some broken LDAP implementations may use a different encoding than those expected by the RFCs
_ADDITIONAL_CLIENT_ENCODINGS = ['utf-8']
_IGNORE_MALFORMED_SCHEMA = False # some flaky LDAP servers return malformed schema. If True no exception is raised and the schema is thrown away
_DEFAULT_SERVER_ENCODING = 'utf-8' # should always be utf-8
if stdin and hasattr(stdin, 'encoding') and stdin.encoding:
_DEFAULT_CLIENT_ENCODING = stdin.encoding
elif getdefaultencoding():
_DEFAULT_CLIENT_ENCODING = getdefaultencoding()
else:
_DEFAULT_CLIENT_ENCODING = 'utf-8'
PARAMETERS = ['CASE_INSENSITIVE_ATTRIBUTE_NAMES',
'CASE_INSENSITIVE_SCHEMA_NAMES',
'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX',
'POOLING_LOOP_TIMEOUT',
'RESPONSE_SLEEPTIME',
'RESPONSE_WAITING_TIMEOUT',
'SOCKET_SIZE',
'CHECK_AVAILABILITY_TIMEOUT',
'RESTARTABLE_SLEEPTIME',
'RESTARTABLE_TRIES',
'REUSABLE_THREADED_POOL_SIZE',
'REUSABLE_THREADED_LIFETIME',
'DEFAULT_THREADED_POOL_NAME',
'ADDRESS_INFO_REFRESH_TIME',
'RESET_AVAILABILITY_TIMEOUT',
'DEFAULT_CLIENT_ENCODING',
'DEFAULT_SERVER_ENCODING',
'CLASSES_EXCLUDED_FROM_CHECK',
'ATTRIBUTES_EXCLUDED_FROM_CHECK',
'UTF8_ENCODED_SYNTAXES',
'UTF8_ENCODED_TYPES',
'ADDITIONAL_SERVER_ENCODINGS',
'ADDITIONAL_CLIENT_ENCODINGS',
'IGNORE_MALFORMED_SCHEMA',
'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF',
'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF'
]
def get_config_parameter(parameter):
if parameter == 'CASE_INSENSITIVE_ATTRIBUTE_NAMES': # Boolean
return _CASE_INSENSITIVE_ATTRIBUTE_NAMES
elif parameter == 'CASE_INSENSITIVE_SCHEMA_NAMES': # Boolean
return _CASE_INSENSITIVE_SCHEMA_NAMES
elif parameter == 'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX': # String
return _ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX
elif parameter == 'POOLING_LOOP_TIMEOUT': # Integer
return _POOLING_LOOP_TIMEOUT
elif parameter == 'RESPONSE_SLEEPTIME': # Integer
return _RESPONSE_SLEEPTIME
elif parameter == 'RESPONSE_WAITING_TIMEOUT': # Integer
return _RESPONSE_WAITING_TIMEOUT
elif parameter == 'SOCKET_SIZE': # Integer
return _SOCKET_SIZE
elif parameter == 'CHECK_AVAILABILITY_TIMEOUT': # Integer
return _CHECK_AVAILABILITY_TIMEOUT
elif parameter == 'RESTARTABLE_SLEEPTIME': # Integer
return _RESTARTABLE_SLEEPTIME
elif parameter == 'RESTARTABLE_TRIES': # Integer
return _RESTARTABLE_TRIES
elif parameter == 'REUSABLE_THREADED_POOL_SIZE': # Integer
return _REUSABLE_THREADED_POOL_SIZE
elif parameter == 'REUSABLE_THREADED_LIFETIME': # Integer
return _REUSABLE_THREADED_LIFETIME
elif parameter == 'DEFAULT_THREADED_POOL_NAME': # String
return _DEFAULT_THREADED_POOL_NAME
elif parameter == 'ADDRESS_INFO_REFRESH_TIME': # Integer
return _ADDRESS_INFO_REFRESH_TIME
elif parameter == 'RESET_AVAILABILITY_TIMEOUT': # Integer
return _RESET_AVAILABILITY_TIMEOUT
elif parameter in ['DEFAULT_CLIENT_ENCODING', 'DEFAULT_ENCODING']: # String - DEFAULT_ENCODING for backward compatibility
return _DEFAULT_CLIENT_ENCODING
elif parameter == 'DEFAULT_SERVER_ENCODING': # String
return _DEFAULT_SERVER_ENCODING
elif parameter == 'CLASSES_EXCLUDED_FROM_CHECK': # Sequence
if isinstance(_CLASSES_EXCLUDED_FROM_CHECK, SEQUENCE_TYPES):
return _CLASSES_EXCLUDED_FROM_CHECK
else:
return [_CLASSES_EXCLUDED_FROM_CHECK]
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_CHECK': # Sequence
if isinstance(_ATTRIBUTES_EXCLUDED_FROM_CHECK, SEQUENCE_TYPES):
return _ATTRIBUTES_EXCLUDED_FROM_CHECK
else:
return [_ATTRIBUTES_EXCLUDED_FROM_CHECK]
elif parameter == 'UTF8_ENCODED_SYNTAXES': # Sequence
if isinstance(_UTF8_ENCODED_SYNTAXES, SEQUENCE_TYPES):
return _UTF8_ENCODED_SYNTAXES
else:
return [_UTF8_ENCODED_SYNTAXES]
elif parameter == 'UTF8_ENCODED_TYPES': # Sequence
if isinstance(_UTF8_ENCODED_TYPES, SEQUENCE_TYPES):
return _UTF8_ENCODED_TYPES
else:
return [_UTF8_ENCODED_TYPES]
elif parameter in ['ADDITIONAL_SERVER_ENCODINGS', 'ADDITIONAL_ENCODINGS']: # Sequence - ADDITIONAL_ENCODINGS for backward compatibility
if isinstance(_ADDITIONAL_SERVER_ENCODINGS, SEQUENCE_TYPES):
return _ADDITIONAL_SERVER_ENCODINGS
else:
return [_ADDITIONAL_SERVER_ENCODINGS]
elif parameter in ['ADDITIONAL_CLIENT_ENCODINGS']: # Sequence
if isinstance(_ADDITIONAL_CLIENT_ENCODINGS, SEQUENCE_TYPES):
return _ADDITIONAL_CLIENT_ENCODINGS
else:
return [_ADDITIONAL_CLIENT_ENCODINGS]
elif parameter == 'IGNORE_MALFORMED_SCHEMA': # Boolean
return _IGNORE_MALFORMED_SCHEMA
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF': # Sequence
if isinstance(_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF, SEQUENCE_TYPES):
return _ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF
else:
return [_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF]
elif parameter == 'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF': # Sequence
if isinstance(_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF, SEQUENCE_TYPES):
return _IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF
else:
return [_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF]
raise LDAPConfigurationParameterError('configuration parameter %s not valid' % parameter)
def set_config_parameter(parameter, value):
if parameter == 'CASE_INSENSITIVE_ATTRIBUTE_NAMES':
global _CASE_INSENSITIVE_ATTRIBUTE_NAMES
_CASE_INSENSITIVE_ATTRIBUTE_NAMES = value
elif parameter == 'CASE_INSENSITIVE_SCHEMA_NAMES':
global _CASE_INSENSITIVE_SCHEMA_NAMES
_CASE_INSENSITIVE_SCHEMA_NAMES = value
elif parameter == 'ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX':
global _ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX
_ABSTRACTION_OPERATIONAL_ATTRIBUTE_PREFIX = value
elif parameter == 'POOLING_LOOP_TIMEOUT':
global _POOLING_LOOP_TIMEOUT
_POOLING_LOOP_TIMEOUT = value
elif parameter == 'RESPONSE_SLEEPTIME':
global _RESPONSE_SLEEPTIME
_RESPONSE_SLEEPTIME = value
elif parameter == 'RESPONSE_WAITING_TIMEOUT':
global _RESPONSE_WAITING_TIMEOUT
_RESPONSE_WAITING_TIMEOUT = value
elif parameter == 'SOCKET_SIZE':
global _SOCKET_SIZE
_SOCKET_SIZE = value
elif parameter == 'CHECK_AVAILABILITY_TIMEOUT':
global _CHECK_AVAILABILITY_TIMEOUT
_CHECK_AVAILABILITY_TIMEOUT = value
elif parameter == 'RESTARTABLE_SLEEPTIME':
global _RESTARTABLE_SLEEPTIME
_RESTARTABLE_SLEEPTIME = value
elif parameter == 'RESTARTABLE_TRIES':
global _RESTARTABLE_TRIES
_RESTARTABLE_TRIES = value
elif parameter == 'REUSABLE_THREADED_POOL_SIZE':
global _REUSABLE_THREADED_POOL_SIZE
_REUSABLE_THREADED_POOL_SIZE = value
elif parameter == 'REUSABLE_THREADED_LIFETIME':
global _REUSABLE_THREADED_LIFETIME
_REUSABLE_THREADED_LIFETIME = value
elif parameter == 'DEFAULT_THREADED_POOL_NAME':
global _DEFAULT_THREADED_POOL_NAME
_DEFAULT_THREADED_POOL_NAME = value
elif parameter == 'ADDRESS_INFO_REFRESH_TIME':
global _ADDRESS_INFO_REFRESH_TIME
_ADDRESS_INFO_REFRESH_TIME = value
elif parameter == 'RESET_AVAILABILITY_TIMEOUT':
global _RESET_AVAILABILITY_TIMEOUT
_RESET_AVAILABILITY_TIMEOUT = value
elif parameter in ['DEFAULT_CLIENT_ENCODING', 'DEFAULT_ENCODING']:
global _DEFAULT_CLIENT_ENCODING
_DEFAULT_CLIENT_ENCODING = value
elif parameter == 'DEFAULT_SERVER_ENCODING':
global _DEFAULT_SERVER_ENCODING
_DEFAULT_SERVER_ENCODING = value
elif parameter == 'CLASSES_EXCLUDED_FROM_CHECK':
global _CLASSES_EXCLUDED_FROM_CHECK
_CLASSES_EXCLUDED_FROM_CHECK = value
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_CHECK':
global _ATTRIBUTES_EXCLUDED_FROM_CHECK
_ATTRIBUTES_EXCLUDED_FROM_CHECK = value
elif parameter == 'UTF8_ENCODED_SYNTAXES':
global _UTF8_ENCODED_SYNTAXES
_UTF8_ENCODED_SYNTAXES = value
elif parameter == 'UTF8_ENCODED_TYPES':
global _UTF8_ENCODED_TYPES
_UTF8_ENCODED_TYPES = value
elif parameter in ['ADDITIONAL_SERVER_ENCODINGS', 'ADDITIONAL_ENCODINGS']:
global _ADDITIONAL_SERVER_ENCODINGS
_ADDITIONAL_SERVER_ENCODINGS = value if isinstance(value, SEQUENCE_TYPES) else [value]
elif parameter in ['ADDITIONAL_CLIENT_ENCODINGS']:
global _ADDITIONAL_CLIENT_ENCODINGS
_ADDITIONAL_CLIENT_ENCODINGS = value if isinstance(value, SEQUENCE_TYPES) else [value]
elif parameter == 'IGNORE_MALFORMED_SCHEMA':
global _IGNORE_MALFORMED_SCHEMA
_IGNORE_MALFORMED_SCHEMA = value
elif parameter == 'ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF':
global _ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF
_ATTRIBUTES_EXCLUDED_FROM_OBJECT_DEF = value
elif parameter == 'IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF':
global _IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF
_IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF = value
else:
raise LDAPConfigurationParameterError('unable to set configuration parameter %s' % parameter)
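# A minimal usage sketch of the two accessors above (illustrative values,
# shown only as commented examples):
#
#   set_config_parameter('RESTARTABLE_TRIES', 10)
#   get_config_parameter('RESTARTABLE_TRIES')  # -> 10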
|
google/ads/googleads/v8/resources/types/conversion_value_rule.py | JakobSteixner/google-ads-python | 285 | 12753208 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import conversion_value_rule_status
from google.ads.googleads.v8.enums.types import value_rule_device_type
from google.ads.googleads.v8.enums.types import (
value_rule_geo_location_match_type,
)
from google.ads.googleads.v8.enums.types import value_rule_operation
__protobuf__ = proto.module(
package="google.ads.googleads.v8.resources",
marshal="google.ads.googleads.v8",
manifest={"ConversionValueRule",},
)
class ConversionValueRule(proto.Message):
r"""A conversion value rule
Attributes:
resource_name (str):
Immutable. The resource name of the conversion value rule.
Conversion value rule resource names have the form:
``customers/{customer_id}/conversionValueRules/{conversion_value_rule_id}``
id (int):
Output only. The ID of the conversion value
rule.
action (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleAction):
Action applied when the rule is triggered.
geo_location_condition (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleGeoLocationCondition):
Condition for Geo location that must be
satisfied for the value rule to apply.
device_condition (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleDeviceCondition):
Condition for device type that must be
satisfied for the value rule to apply.
audience_condition (google.ads.googleads.v8.resources.types.ConversionValueRule.ValueRuleAudienceCondition):
Condition for audience that must be satisfied
for the value rule to apply.
owner_customer (str):
Output only. The resource name of the conversion value
rule's owner customer. When the value rule is inherited from
a manager customer, owner_customer will be the resource name
of the manager whereas the customer in the resource_name
will be of the requesting serving customer. \*\* Read-only
\*\*
status (google.ads.googleads.v8.enums.types.ConversionValueRuleStatusEnum.ConversionValueRuleStatus):
The status of the conversion value rule.
"""
class ValueRuleAction(proto.Message):
r"""Action applied when rule is applied.
Attributes:
operation (google.ads.googleads.v8.enums.types.ValueRuleOperationEnum.ValueRuleOperation):
Specifies applied operation.
value (float):
Specifies applied value.
"""
operation = proto.Field(
proto.ENUM,
number=1,
enum=value_rule_operation.ValueRuleOperationEnum.ValueRuleOperation,
)
value = proto.Field(proto.DOUBLE, number=2,)
class ValueRuleGeoLocationCondition(proto.Message):
r"""Condition on Geo dimension.
Attributes:
excluded_geo_target_constants (Sequence[str]):
Geo locations that advertisers want to
exclude.
excluded_geo_match_type (google.ads.googleads.v8.enums.types.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType):
Excluded Geo location match type.
geo_target_constants (Sequence[str]):
Geo locations that advertisers want to
include.
geo_match_type (google.ads.googleads.v8.enums.types.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType):
Included Geo location match type.
"""
excluded_geo_target_constants = proto.RepeatedField(
proto.STRING, number=1,
)
excluded_geo_match_type = proto.Field(
proto.ENUM,
number=2,
enum=value_rule_geo_location_match_type.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType,
)
geo_target_constants = proto.RepeatedField(proto.STRING, number=3,)
geo_match_type = proto.Field(
proto.ENUM,
number=4,
enum=value_rule_geo_location_match_type.ValueRuleGeoLocationMatchTypeEnum.ValueRuleGeoLocationMatchType,
)
class ValueRuleAudienceCondition(proto.Message):
r"""Condition on Audience dimension.
Attributes:
user_lists (Sequence[str]):
User Lists.
user_interests (Sequence[str]):
User Interests.
"""
user_lists = proto.RepeatedField(proto.STRING, number=1,)
user_interests = proto.RepeatedField(proto.STRING, number=2,)
class ValueRuleDeviceCondition(proto.Message):
r"""Condition on Device dimension.
Attributes:
device_types (Sequence[google.ads.googleads.v8.enums.types.ValueRuleDeviceTypeEnum.ValueRuleDeviceType]):
Value for device type condition.
"""
device_types = proto.RepeatedField(
proto.ENUM,
number=1,
enum=value_rule_device_type.ValueRuleDeviceTypeEnum.ValueRuleDeviceType,
)
resource_name = proto.Field(proto.STRING, number=1,)
id = proto.Field(proto.INT64, number=2,)
action = proto.Field(proto.MESSAGE, number=3, message=ValueRuleAction,)
geo_location_condition = proto.Field(
proto.MESSAGE, number=4, message=ValueRuleGeoLocationCondition,
)
device_condition = proto.Field(
proto.MESSAGE, number=5, message=ValueRuleDeviceCondition,
)
audience_condition = proto.Field(
proto.MESSAGE, number=6, message=ValueRuleAudienceCondition,
)
owner_customer = proto.Field(proto.STRING, number=7,)
status = proto.Field(
proto.ENUM,
number=8,
enum=conversion_value_rule_status.ConversionValueRuleStatusEnum.ConversionValueRuleStatus,
)
__all__ = tuple(sorted(__protobuf__.manifest))
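# A rough construction sketch (proto-plus messages accept keyword arguments
# for their fields; the resource name, id, and enum member below are
# illustrative values only, not taken from the API reference):
#
#     rule = ConversionValueRule(
#         resource_name="customers/1234567890/conversionValueRules/42",
#         id=42,
#     )
#     rule.action.operation = (
#         value_rule_operation.ValueRuleOperationEnum.ValueRuleOperation.MULTIPLY
#     )
#     rule.action.value = 1.5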
|
lldb/packages/Python/lldbsuite/test/commands/expression/radar_9673664/TestExprHelpExamples.py | dan-zheng/llvm-project | 765 | 12753219 | """
Test example snippets from the lldb 'help expression' output.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class Radar9673644TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.main_source = "main.c"
self.line = line_number(self.main_source, '// Set breakpoint here.')
def test_expr_commands(self):
"""The following expression commands should just work."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self,
self.main_source,
self.line,
num_expected_locations=1,
loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# rdar://problem/9673664 lldb expression evaluation problem
self.expect('expr char str[] = "foo"; str[0]',
substrs=["'f'"])
# runCmd: expr char c[] = "foo"; c[0]
# output: (char) $0 = 'f'
|
site/flask/lib/python2.7/site-packages/whoosh/multiproc.py | theholyhades1/tartanHacks2015 | 319 | 12753230 | # Copyright 2011 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import os
from multiprocessing import Process, Queue, cpu_count
from whoosh.compat import xrange, iteritems, pickle
from whoosh.codec import base
from whoosh.writing import PostingPool, SegmentWriter
from whoosh.externalsort import imerge
from whoosh.util import random_name
def finish_subsegment(writer, k=64):
# Tell the pool to finish up the current file
writer.pool.save()
# Tell the pool to merge any and all runs in the pool until there
# is only one run remaining. "k" is an optional parameter passed
# from the parent which sets the maximum number of files to open
# while reducing.
writer.pool.reduce_to(1, k)
# The filename of the single remaining run
runname = writer.pool.runs[0]
# The indexed field names
fieldnames = writer.pool.fieldnames
# The segment object (parent can use this to re-open the files created
# by the sub-writer)
segment = writer._partial_segment()
return runname, fieldnames, segment
# Multiprocessing Writer
class SubWriterTask(Process):
# This is a Process object that takes "jobs" off a job Queue, processes
# them, and when it's done, puts a summary of its work on a results Queue
def __init__(self, storage, indexname, jobqueue, resultqueue, kwargs,
multisegment):
Process.__init__(self)
self.storage = storage
self.indexname = indexname
self.jobqueue = jobqueue
self.resultqueue = resultqueue
self.kwargs = kwargs
self.multisegment = multisegment
self.running = True
def run(self):
# This is the main loop of the process. OK, so the way this works is
# kind of brittle and stupid, but I had to figure out how to use the
# multiprocessing module, work around bugs, and address performance
# issues, so there is at least some reasoning behind some of this
# The "parent" task farms individual documents out to the subtasks for
# indexing. You could pickle the actual documents and put them in the
# queue, but that is not very performant. Instead, we assume the tasks
# share a filesystem and use that to pass the information around. The
# parent task writes a certain number of documents to a file, then puts
# the filename on the "job queue". A subtask gets the filename off the
# queue and reads through the file processing the documents.
jobqueue = self.jobqueue
resultqueue = self.resultqueue
multisegment = self.multisegment
# Open a placeholder object representing the index
ix = self.storage.open_index(self.indexname)
# Open a writer for the index. The _lk=False parameter means to not try
# to lock the index (the parent object that started me takes care of
# locking the index)
writer = self.writer = SegmentWriter(ix, _lk=False, **self.kwargs)
# If the parent task calls cancel() on me, it will set self.running to
# False, so I'll notice the next time through the loop
while self.running:
# Take an object off the job queue
jobinfo = jobqueue.get()
# If the object is None, it means the parent task wants me to
# finish up
if jobinfo is None:
break
# The object from the queue is a tuple of (filename,
# number_of_docs_in_file). Pass those two pieces of information as
# arguments to _process_file().
self._process_file(*jobinfo)
if not self.running:
# I was cancelled, so I'll cancel my underlying writer
writer.cancel()
else:
if multisegment:
# Actually finish the segment and return it with no run
runname = None
fieldnames = writer.pool.fieldnames
segment = writer._finalize_segment()
else:
# Merge all runs in the writer's pool into one run, close the
# segment, and return the run name and the segment
k = self.kwargs.get("k", 64)
runname, fieldnames, segment = finish_subsegment(writer, k)
# Put the results (the run filename and the segment object) on the
# result queue
resultqueue.put((runname, fieldnames, segment), timeout=5)
def _process_file(self, filename, doc_count):
# This method processes a "job file" written out by the parent task. A
# job file is a series of pickled (code, arguments) tuples. Currently
        # the only command code is 0=add_document
writer = self.writer
tempstorage = writer.temp_storage()
load = pickle.load
with tempstorage.open_file(filename).raw_file() as f:
for _ in xrange(doc_count):
# Load the next pickled tuple from the file
code, args = load(f)
assert code == 0
writer.add_document(**args)
# Remove the job file
tempstorage.delete_file(filename)
def cancel(self):
self.running = False
class MpWriter(SegmentWriter):
def __init__(self, ix, procs=None, batchsize=100, subargs=None,
multisegment=False, **kwargs):
# This is the "main" writer that will aggregate the results created by
# the sub-tasks
SegmentWriter.__init__(self, ix, **kwargs)
self.procs = procs or cpu_count()
# The maximum number of documents in each job file submitted to the
# sub-tasks
self.batchsize = batchsize
# You can use keyword arguments or the "subargs" argument to pass
# keyword arguments to the sub-writers
self.subargs = subargs if subargs else kwargs
# If multisegment is True, don't merge the segments created by the
# sub-writers, just add them directly to the TOC
self.multisegment = multisegment
# A list to hold the sub-task Process objects
self.tasks = []
# A queue to pass the filenames of job files to the sub-tasks
self.jobqueue = Queue(self.procs * 4)
# A queue to get back the final results of the sub-tasks
self.resultqueue = Queue()
# A buffer for documents before they are flushed to a job file
self.docbuffer = []
self._grouping = 0
self._added_sub = False
def _new_task(self):
task = SubWriterTask(self.storage, self.indexname,
self.jobqueue, self.resultqueue, self.subargs,
self.multisegment)
self.tasks.append(task)
task.start()
return task
def _enqueue(self):
# Flush the documents stored in self.docbuffer to a file and put the
# filename on the job queue
docbuffer = self.docbuffer
dump = pickle.dump
length = len(docbuffer)
filename = "%s.doclist" % random_name()
with self.temp_storage().create_file(filename).raw_file() as f:
for item in docbuffer:
dump(item, f, -1)
if len(self.tasks) < self.procs:
self._new_task()
jobinfo = (filename, length)
self.jobqueue.put(jobinfo)
self.docbuffer = []
def cancel(self):
try:
for task in self.tasks:
task.cancel()
finally:
SegmentWriter.cancel(self)
def start_group(self):
self._grouping += 1
def end_group(self):
if not self._grouping:
raise Exception("Unbalanced end_group")
self._grouping -= 1
def add_document(self, **fields):
# Add the document to the docbuffer
self.docbuffer.append((0, fields))
# If the buffer is full, flush it to the job queue
if not self._grouping and len(self.docbuffer) >= self.batchsize:
self._enqueue()
self._added_sub = True
def _read_and_renumber_run(self, path, offset):
# Note that SortingPool._read_run() automatically deletes the run file
# when it's finished
gen = self.pool._read_run(path)
# If offset is 0, just return the items unchanged
if not offset:
return gen
else:
# Otherwise, add the offset to each docnum
return ((fname, text, docnum + offset, weight, value)
for fname, text, docnum, weight, value in gen)
def commit(self, mergetype=None, optimize=None, merge=None):
if self._added_sub:
# If documents have been added to sub-writers, use the parallel
# merge commit code
self._commit(mergetype, optimize, merge)
else:
# Otherwise, just do a regular-old commit
SegmentWriter.commit(self, mergetype=mergetype, optimize=optimize,
merge=merge)
def _commit(self, mergetype, optimize, merge):
# Index the remaining documents in the doc buffer
if self.docbuffer:
self._enqueue()
# Tell the tasks to finish
for task in self.tasks:
self.jobqueue.put(None)
# Merge existing segments
finalsegments = self._merge_segments(mergetype, optimize, merge)
# Wait for the subtasks to finish
for task in self.tasks:
task.join()
# Pull a (run_file_name, fieldnames, segment) tuple off the result
# queue for each sub-task, representing the final results of the task
results = []
for task in self.tasks:
results.append(self.resultqueue.get(timeout=5))
if self.multisegment:
# If we're not merging the segments, we don't care about the runname
# and fieldnames in the results... just pull out the segments and
# add them to the list of final segments
finalsegments += [s for _, _, s in results]
if self._added:
finalsegments.append(self._finalize_segment())
else:
self._close_segment()
assert self.perdocwriter.is_closed
else:
# Merge the posting sources from the sub-writers and my
# postings into this writer
self._merge_subsegments(results, mergetype)
self._close_segment()
self._assemble_segment()
finalsegments.append(self.get_segment())
assert self.perdocwriter.is_closed
self._commit_toc(finalsegments)
self._finish()
def _merge_subsegments(self, results, mergetype):
schema = self.schema
schemanames = set(schema.names())
storage = self.storage
codec = self.codec
sources = []
        # If information was added to this writer the conventional way (e.g.
# through add_reader or merging segments), add it as an extra source
if self._added:
sources.append(self.pool.iter_postings())
pdrs = []
for runname, fieldnames, segment in results:
fieldnames = set(fieldnames) | schemanames
pdr = codec.per_document_reader(storage, segment)
pdrs.append(pdr)
basedoc = self.docnum
docmap = self.write_per_doc(fieldnames, pdr)
assert docmap is None
items = self._read_and_renumber_run(runname, basedoc)
sources.append(items)
# Create a MultiLengths object combining the length files from the
# subtask segments
self.perdocwriter.close()
pdrs.insert(0, self.per_document_reader())
mpdr = base.MultiPerDocumentReader(pdrs)
try:
# Merge the iterators into the field writer
self.fieldwriter.add_postings(schema, mpdr, imerge(sources))
finally:
mpdr.close()
self._added = True
class SerialMpWriter(MpWriter):
# A non-parallel version of the MpWriter for testing purposes
def __init__(self, ix, procs=None, batchsize=100, subargs=None, **kwargs):
SegmentWriter.__init__(self, ix, **kwargs)
self.procs = procs or cpu_count()
self.batchsize = batchsize
self.subargs = subargs if subargs else kwargs
self.tasks = [SegmentWriter(ix, _lk=False, **self.subargs)
for _ in xrange(self.procs)]
self.pointer = 0
self._added_sub = False
def add_document(self, **fields):
self.tasks[self.pointer].add_document(**fields)
self.pointer = (self.pointer + 1) % len(self.tasks)
self._added_sub = True
def _commit(self, mergetype, optimize, merge):
# Pull a (run_file_name, segment) tuple off the result queue for each
# sub-task, representing the final results of the task
# Merge existing segments
finalsegments = self._merge_segments(mergetype, optimize, merge)
results = []
for writer in self.tasks:
results.append(finish_subsegment(writer))
self._merge_subsegments(results, mergetype)
self._close_segment()
self._assemble_segment()
finalsegments.append(self.get_segment())
self._commit_toc(finalsegments)
self._finish()
# For compatibility with old multiproc module
class MultiSegmentWriter(MpWriter):
def __init__(self, *args, **kwargs):
MpWriter.__init__(self, *args, **kwargs)
self.multisegment = True
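# A rough usage sketch (not part of this module's tests or docs): it assumes
# an existing Whoosh index directory "indexdir" whose schema defines a
# "title" field.
#
#     from whoosh import index
#     from whoosh.multiproc import MpWriter
#
#     ix = index.open_dir("indexdir")
#     writer = MpWriter(ix, procs=4, batchsize=256)
#     writer.add_document(title=u"hello world")
#     writer.commit()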
|
tests/integration/services/policy_engine/test_loaders.py | rbrady/anchore-engine | 1,484 | 12753258 | import json
import time
import pytest
from anchore_engine.configuration import localconfig
from anchore_engine.db import (
FeedGroupMetadata,
FeedMetadata,
GemMetadata,
Image,
NpmMetadata,
NvdMetadata,
Vulnerability,
session_scope,
)
from anchore_engine.services.policy_engine.engine.tasks import (
FeedsUpdateTask,
ImageLoadTask,
)
from anchore_engine.subsys import logger
logger.enable_test_logging()
localconfig.localconfig.update(
{"feeds": {"sync_enabled": True, "selective_sync": {"enabled": False, "feeds": {}}}}
)
@pytest.mark.skip("Skipping due to long run time, will fix later")
def test_feed_task(test_data_env, anchore_db):
logger.info("Running a feed sync with config: {}".format(localconfig.get_config()))
t = FeedsUpdateTask()
t.execute()
with session_scope() as db:
feeds = db.query(FeedMetadata).all()
logger.info("{}".format(feeds))
assert len(feeds) == 4 # packages, vulns, snyk, nvd
feed_groups = db.query(FeedGroupMetadata).all()
# See the tests/data/test_data_env/feeds dir for the proper count here
logger.info("{}".format(feed_groups))
assert len(feed_groups) == 11
# ToDo: set the source data to a small number and make this an exact count
assert db.query(Vulnerability).count() > 0
assert db.query(NpmMetadata).count() > 0
assert db.query(GemMetadata).count() > 0
assert db.query(NvdMetadata).count() == 0
def test_image_load(test_data_env):
for f in test_data_env.image_exports():
logger.info("Testing image export loading into the db")
with open(f[1]) as infile:
json_data = json.load(infile)
image_id = (
json_data[0]["image"]["imagedata"]["image_report"]["meta"]["imageId"]
if type(json_data) == list
else json_data["image_report"]["meta"]["imageId"]
)
logger.info("Using image id: " + image_id)
t = time.time()
task = ImageLoadTask(
user_id="0", image_id=image_id, url="file://" + f[1], force_reload=True
)
load_result = task.execute()
load_duration = time.time() - t
logger.info(
"Load complete for {}. Took: {} sec for db load. Result: {}".format(
f, load_duration, load_result
)
)
with session_scope() as db:
assert (
db.query(Image).filter_by(id=image_id, user_id="0").one_or_none()
is not None
)
|
tests/test_handler_metric_logger.py | KohYoungResearchAmerica/MONAI | 2,971 | 12753272 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from monai.utils import optional_import
from tests.utils import SkipIfNoModule
try:
_, has_ignite = optional_import("ignite")
from ignite.engine import Engine, Events
from monai.handlers import MetricLogger
except ImportError:
has_ignite = False
class TestHandlerMetricLogger(unittest.TestCase):
@SkipIfNoModule("ignite")
def test_metric_logging(self):
dummy_name = "dummy"
# set up engine
def _train_func(engine, batch):
return torch.tensor(0.0)
engine = Engine(_train_func)
# set up dummy metric
@engine.on(Events.EPOCH_COMPLETED)
def _update_metric(engine):
engine.state.metrics[dummy_name] = 1
# set up testing handler
handler = MetricLogger(loss_transform=lambda output: output.item())
handler.attach(engine)
engine.run(range(3), max_epochs=2)
expected_loss = [(1, 0.0), (2, 0.0), (3, 0.0), (4, 0.0), (5, 0.0), (6, 0.0)]
expected_metric = [(4, 1), (5, 1), (6, 1)]
self.assertSetEqual({dummy_name}, set(handler.metrics))
self.assertListEqual(expected_loss, handler.loss)
self.assertListEqual(expected_metric, handler.metrics[dummy_name])
if __name__ == "__main__":
unittest.main()
|
AWS Management Scripts/AWS Automation Script for AWS endorsement management/ec2.py | avinashkranjan/PraticalPythonProjects | 930 | 12753276 | from datetime import datetime, timedelta, timezone
import boto3
def get_delete_data(older_days):
delete_time = datetime.now(tz=timezone.utc) - timedelta(days=older_days)
return delete_time
def is_ignore_shutdown(tags):
for tag in tags:
print("K " + str(tag['Key']) + " is " + str(tag['Value']))
if str(tag['Key']) == 'excludepower' and str(tag['Value']) == 'true':
print("Not stopping K " +
str(tag['Key']) + " is " + str(tag['Value']))
return True
return False
def is_unassigned(tags):
if 'user' not in [t['Key'] for t in tags]:
return True
return False
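# Illustrative shape of the tag list these helpers expect (the key/value
# pairs below are made up for the example):
#     tags = [{'Key': 'excludepower', 'Value': 'true'},
#             {'Key': 'user', 'Value': 'alice'}]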
class Ec2Instances(object):
def __init__(self, region):
print("region " + region)
# if you are not using AWS Tool Kit tool you will be needing to pass your access key and secret key here
# client = boto3.client('rds', region_name=region_name, aws_access_key_id=aws_access_key_id,
# aws_secret_access_key=aws_secret_access_key)
self.ec2 = boto3.client('ec2', region_name=region)
def delete_snapshots(self, older_days=2):
delete_snapshots_num = 0
        snapshots = self.get_user_created_snapshots()
for snapshot in snapshots['Snapshots']:
fmt_start_time = snapshot['StartTime']
if fmt_start_time < get_delete_data(older_days):
try:
self.delete_snapshot(snapshot['SnapshotId'])
                    delete_snapshots_num += 1
except Exception as e:
print(e)
return delete_snapshots_num
def get_user_created_snapshots(self):
snapshots = self.ec2.describe_snapshots(
Filters=[{
'Name': 'owner-id', 'Values': ['your owner id'],
}]) # Filters=[{'Name': 'description', 'Values': ['Created by Nimesa']}]
return snapshots
def delete_available_volumes(self):
volumes = self.ec2.describe_volumes()['Volumes']
for volume in volumes:
if volume['State'] == "available":
self.ec2.delete_volume(VolumeId=volume['VolumeId'])
def delete_snapshot(self, snapshot_id):
self.ec2.delete_snapshot(SnapshotId=snapshot_id)
def shutdown(self):
instances = self.ec2.describe_instances()
instance_to_stop = []
instance_to_terminate = []
for res in instances['Reservations']:
for instance in res['Instances']:
tags = instance.get('Tags')
if tags is None:
instance_to_terminate.append(instance['InstanceId'])
continue
if is_unassigned(tags):
print("instance_to_terminate " + instance['InstanceId'])
instance_to_terminate.append(instance['InstanceId'])
if is_ignore_shutdown(tags):
continue
if instance['State']['Code'] == 16:
instance_to_stop.append(instance['InstanceId'])
if any(instance_to_stop):
self.ec2.stop_instances(
InstanceIds=instance_to_stop
)
if any(instance_to_terminate):
print(instance_to_terminate)
self.ec2.terminate_instances(
InstanceIds=instance_to_terminate
)
if __name__ == "__main__":
ec2 = Ec2Instances('us-east-1')
ec2.delete_snapshots(3)
ec2.shutdown()
|
tests/integration_tests/db_engine_specs/presto_tests.py | delorenzosoftware/superset | 18,621 | 12753314 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from unittest import mock, skipUnless
import pandas as pd
from sqlalchemy import types
from sqlalchemy.engine.result import RowProxy
from sqlalchemy.sql import select
from superset.db_engine_specs.presto import PrestoEngineSpec
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.sql_parse import ParsedQuery
from superset.utils.core import DatasourceName, GenericDataType
from tests.integration_tests.db_engine_specs.base_tests import TestDbEngineSpec
class TestPrestoDbEngineSpec(TestDbEngineSpec):
@skipUnless(TestDbEngineSpec.is_module_installed("pyhive"), "pyhive not installed")
def test_get_datatype_presto(self):
self.assertEqual("STRING", PrestoEngineSpec.get_datatype("string"))
def test_presto_get_view_names_return_empty_list(
self,
): # pylint: disable=invalid-name
self.assertEqual(
[], PrestoEngineSpec.get_view_names(mock.ANY, mock.ANY, mock.ANY)
)
@mock.patch("superset.db_engine_specs.presto.is_feature_enabled")
def test_get_view_names(self, mock_is_feature_enabled):
mock_is_feature_enabled.return_value = True
mock_execute = mock.MagicMock()
mock_fetchall = mock.MagicMock(return_value=[["a", "b,", "c"], ["d", "e"]])
database = mock.MagicMock()
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute = (
mock_execute
)
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.fetchall = (
mock_fetchall
)
result = PrestoEngineSpec.get_view_names(database, mock.Mock(), None)
mock_execute.assert_called_once_with(
"SELECT table_name FROM information_schema.views", {}
)
assert result == ["a", "d"]
@mock.patch("superset.db_engine_specs.presto.is_feature_enabled")
def test_get_view_names_with_schema(self, mock_is_feature_enabled):
mock_is_feature_enabled.return_value = True
mock_execute = mock.MagicMock()
mock_fetchall = mock.MagicMock(return_value=[["a", "b,", "c"], ["d", "e"]])
database = mock.MagicMock()
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute = (
mock_execute
)
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.fetchall = (
mock_fetchall
)
schema = "schema"
result = PrestoEngineSpec.get_view_names(database, mock.Mock(), schema)
mock_execute.assert_called_once_with(
"SELECT table_name FROM information_schema.views "
"WHERE table_schema=%(schema)s",
{"schema": schema},
)
assert result == ["a", "d"]
def verify_presto_column(self, column, expected_results):
inspector = mock.Mock()
inspector.engine.dialect.identifier_preparer.quote_identifier = mock.Mock()
keymap = {
"Column": (None, None, 0),
"Type": (None, None, 1),
"Null": (None, None, 2),
}
row = RowProxy(mock.Mock(), column, [None, None, None, None], keymap)
inspector.bind.execute = mock.Mock(return_value=[row])
results = PrestoEngineSpec.get_columns(inspector, "", "")
self.assertEqual(len(expected_results), len(results))
for expected_result, result in zip(expected_results, results):
self.assertEqual(expected_result[0], result["name"])
self.assertEqual(expected_result[1], str(result["type"]))
def test_presto_get_column(self):
presto_column = ("column_name", "boolean", "")
expected_results = [("column_name", "BOOLEAN")]
self.verify_presto_column(presto_column, expected_results)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_get_simple_row_column(self):
presto_column = ("column_name", "row(nested_obj double)", "")
expected_results = [("column_name", "ROW"), ("column_name.nested_obj", "FLOAT")]
self.verify_presto_column(presto_column, expected_results)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_get_simple_row_column_with_name_containing_whitespace(self):
presto_column = ("column name", "row(nested_obj double)", "")
expected_results = [("column name", "ROW"), ("column name.nested_obj", "FLOAT")]
self.verify_presto_column(presto_column, expected_results)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_get_simple_row_column_with_tricky_nested_field_name(self):
presto_column = ("column_name", 'row("Field Name(Tricky, Name)" double)', "")
expected_results = [
("column_name", "ROW"),
('column_name."Field Name(Tricky, Name)"', "FLOAT"),
]
self.verify_presto_column(presto_column, expected_results)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_get_simple_array_column(self):
presto_column = ("column_name", "array(double)", "")
expected_results = [("column_name", "ARRAY")]
self.verify_presto_column(presto_column, expected_results)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_get_row_within_array_within_row_column(self):
presto_column = (
"column_name",
"row(nested_array array(row(nested_row double)), nested_obj double)",
"",
)
expected_results = [
("column_name", "ROW"),
("column_name.nested_array", "ARRAY"),
("column_name.nested_array.nested_row", "FLOAT"),
("column_name.nested_obj", "FLOAT"),
]
self.verify_presto_column(presto_column, expected_results)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_get_array_within_row_within_array_column(self):
presto_column = (
"column_name",
"array(row(nested_array array(double), nested_obj double))",
"",
)
expected_results = [
("column_name", "ARRAY"),
("column_name.nested_array", "ARRAY"),
("column_name.nested_obj", "FLOAT"),
]
self.verify_presto_column(presto_column, expected_results)
def test_presto_get_fields(self):
cols = [
{"name": "column"},
{"name": "column.nested_obj"},
{"name": 'column."quoted.nested obj"'},
]
actual_results = PrestoEngineSpec._get_fields(cols)
expected_results = [
{"name": '"column"', "label": "column"},
{"name": '"column"."nested_obj"', "label": "column.nested_obj"},
{
"name": '"column"."quoted.nested obj"',
"label": 'column."quoted.nested obj"',
},
]
for actual_result, expected_result in zip(actual_results, expected_results):
self.assertEqual(actual_result.element.name, expected_result["name"])
self.assertEqual(actual_result.name, expected_result["label"])
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_expand_data_with_simple_structural_columns(self):
cols = [
{"name": "row_column", "type": "ROW(NESTED_OBJ VARCHAR)"},
{"name": "array_column", "type": "ARRAY(BIGINT)"},
]
data = [
{"row_column": ["a"], "array_column": [1, 2, 3]},
{"row_column": ["b"], "array_column": [4, 5, 6]},
]
actual_cols, actual_data, actual_expanded_cols = PrestoEngineSpec.expand_data(
cols, data
)
expected_cols = [
{"name": "row_column", "type": "ROW(NESTED_OBJ VARCHAR)"},
{"name": "row_column.nested_obj", "type": "VARCHAR"},
{"name": "array_column", "type": "ARRAY(BIGINT)"},
]
expected_data = [
{"array_column": 1, "row_column": ["a"], "row_column.nested_obj": "a"},
{"array_column": 2, "row_column": "", "row_column.nested_obj": ""},
{"array_column": 3, "row_column": "", "row_column.nested_obj": ""},
{"array_column": 4, "row_column": ["b"], "row_column.nested_obj": "b"},
{"array_column": 5, "row_column": "", "row_column.nested_obj": ""},
{"array_column": 6, "row_column": "", "row_column.nested_obj": ""},
]
expected_expanded_cols = [{"name": "row_column.nested_obj", "type": "VARCHAR"}]
self.assertEqual(actual_cols, expected_cols)
self.assertEqual(actual_data, expected_data)
self.assertEqual(actual_expanded_cols, expected_expanded_cols)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_expand_data_with_complex_row_columns(self):
cols = [
{
"name": "row_column",
"type": "ROW(NESTED_OBJ1 VARCHAR, NESTED_ROW ROW(NESTED_OBJ2 VARCHAR))",
}
]
data = [{"row_column": ["a1", ["a2"]]}, {"row_column": ["b1", ["b2"]]}]
actual_cols, actual_data, actual_expanded_cols = PrestoEngineSpec.expand_data(
cols, data
)
expected_cols = [
{
"name": "row_column",
"type": "ROW(NESTED_OBJ1 VARCHAR, NESTED_ROW ROW(NESTED_OBJ2 VARCHAR))",
},
{"name": "row_column.nested_obj1", "type": "VARCHAR"},
{"name": "row_column.nested_row", "type": "ROW(NESTED_OBJ2 VARCHAR)"},
{"name": "row_column.nested_row.nested_obj2", "type": "VARCHAR"},
]
expected_data = [
{
"row_column": ["a1", ["a2"]],
"row_column.nested_obj1": "a1",
"row_column.nested_row": ["a2"],
"row_column.nested_row.nested_obj2": "a2",
},
{
"row_column": ["b1", ["b2"]],
"row_column.nested_obj1": "b1",
"row_column.nested_row": ["b2"],
"row_column.nested_row.nested_obj2": "b2",
},
]
expected_expanded_cols = [
{"name": "row_column.nested_obj1", "type": "VARCHAR"},
{"name": "row_column.nested_row", "type": "ROW(NESTED_OBJ2 VARCHAR)"},
{"name": "row_column.nested_row.nested_obj2", "type": "VARCHAR"},
]
self.assertEqual(actual_cols, expected_cols)
self.assertEqual(actual_data, expected_data)
self.assertEqual(actual_expanded_cols, expected_expanded_cols)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_expand_data_with_complex_row_columns_and_null_values(self):
cols = [
{"name": "row_column", "type": "ROW(NESTED_ROW ROW(NESTED_OBJ VARCHAR))",}
]
data = [
{"row_column": '[["a"]]'},
{"row_column": "[[null]]"},
{"row_column": "[null]"},
{"row_column": "null"},
]
actual_cols, actual_data, actual_expanded_cols = PrestoEngineSpec.expand_data(
cols, data
)
expected_cols = [
{"name": "row_column", "type": "ROW(NESTED_ROW ROW(NESTED_OBJ VARCHAR))",},
{"name": "row_column.nested_row", "type": "ROW(NESTED_OBJ VARCHAR)"},
{"name": "row_column.nested_row.nested_obj", "type": "VARCHAR"},
]
expected_data = [
{
"row_column": [["a"]],
"row_column.nested_row": ["a"],
"row_column.nested_row.nested_obj": "a",
},
{
"row_column": [[None]],
"row_column.nested_row": [None],
"row_column.nested_row.nested_obj": None,
},
{
"row_column": [None],
"row_column.nested_row": None,
"row_column.nested_row.nested_obj": "",
},
{
"row_column": None,
"row_column.nested_row": "",
"row_column.nested_row.nested_obj": "",
},
]
expected_expanded_cols = [
{"name": "row_column.nested_row", "type": "ROW(NESTED_OBJ VARCHAR)"},
{"name": "row_column.nested_row.nested_obj", "type": "VARCHAR"},
]
self.assertEqual(actual_cols, expected_cols)
self.assertEqual(actual_data, expected_data)
self.assertEqual(actual_expanded_cols, expected_expanded_cols)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_expand_data_with_complex_array_columns(self):
cols = [
{"name": "int_column", "type": "BIGINT"},
{
"name": "array_column",
"type": "ARRAY(ROW(NESTED_ARRAY ARRAY(ROW(NESTED_OBJ VARCHAR))))",
},
]
data = [
{"int_column": 1, "array_column": [[[["a"], ["b"]]], [[["c"], ["d"]]]]},
{"int_column": 2, "array_column": [[[["e"], ["f"]]], [[["g"], ["h"]]]]},
]
actual_cols, actual_data, actual_expanded_cols = PrestoEngineSpec.expand_data(
cols, data
)
expected_cols = [
{"name": "int_column", "type": "BIGINT"},
{
"name": "array_column",
"type": "ARRAY(ROW(NESTED_ARRAY ARRAY(ROW(NESTED_OBJ VARCHAR))))",
},
{
"name": "array_column.nested_array",
"type": "ARRAY(ROW(NESTED_OBJ VARCHAR))",
},
{"name": "array_column.nested_array.nested_obj", "type": "VARCHAR"},
]
expected_data = [
{
"array_column": [[["a"], ["b"]]],
"array_column.nested_array": ["a"],
"array_column.nested_array.nested_obj": "a",
"int_column": 1,
},
{
"array_column": "",
"array_column.nested_array": ["b"],
"array_column.nested_array.nested_obj": "b",
"int_column": "",
},
{
"array_column": [[["c"], ["d"]]],
"array_column.nested_array": ["c"],
"array_column.nested_array.nested_obj": "c",
"int_column": "",
},
{
"array_column": "",
"array_column.nested_array": ["d"],
"array_column.nested_array.nested_obj": "d",
"int_column": "",
},
{
"array_column": [[["e"], ["f"]]],
"array_column.nested_array": ["e"],
"array_column.nested_array.nested_obj": "e",
"int_column": 2,
},
{
"array_column": "",
"array_column.nested_array": ["f"],
"array_column.nested_array.nested_obj": "f",
"int_column": "",
},
{
"array_column": [[["g"], ["h"]]],
"array_column.nested_array": ["g"],
"array_column.nested_array.nested_obj": "g",
"int_column": "",
},
{
"array_column": "",
"array_column.nested_array": ["h"],
"array_column.nested_array.nested_obj": "h",
"int_column": "",
},
]
expected_expanded_cols = [
{
"name": "array_column.nested_array",
"type": "ARRAY(ROW(NESTED_OBJ VARCHAR))",
},
{"name": "array_column.nested_array.nested_obj", "type": "VARCHAR"},
]
self.assertEqual(actual_cols, expected_cols)
self.assertEqual(actual_data, expected_data)
self.assertEqual(actual_expanded_cols, expected_expanded_cols)
def test_presto_extra_table_metadata(self):
db = mock.Mock()
db.get_indexes = mock.Mock(return_value=[{"column_names": ["ds", "hour"]}])
db.get_extra = mock.Mock(return_value={})
df = pd.DataFrame({"ds": ["01-01-19"], "hour": [1]})
db.get_df = mock.Mock(return_value=df)
PrestoEngineSpec.get_create_view = mock.Mock(return_value=None)
result = PrestoEngineSpec.extra_table_metadata(db, "test_table", "test_schema")
self.assertEqual({"ds": "01-01-19", "hour": 1}, result["partitions"]["latest"])
def test_presto_where_latest_partition(self):
db = mock.Mock()
db.get_indexes = mock.Mock(return_value=[{"column_names": ["ds", "hour"]}])
db.get_extra = mock.Mock(return_value={})
df = pd.DataFrame({"ds": ["01-01-19"], "hour": [1]})
db.get_df = mock.Mock(return_value=df)
columns = [{"name": "ds"}, {"name": "hour"}]
result = PrestoEngineSpec.where_latest_partition(
"test_table", "test_schema", db, select(), columns
)
query_result = str(result.compile(compile_kwargs={"literal_binds": True}))
self.assertEqual("SELECT \nWHERE ds = '01-01-19' AND hour = 1", query_result)
def test_convert_dttm(self):
dttm = self.get_dttm()
self.assertEqual(
PrestoEngineSpec.convert_dttm("DATE", dttm),
"from_iso8601_date('2019-01-02')",
)
self.assertEqual(
PrestoEngineSpec.convert_dttm("TIMESTAMP", dttm),
"from_iso8601_timestamp('2019-01-02T03:04:05.678900')",
)
def test_query_cost_formatter(self):
raw_cost = [
{
"inputTableColumnInfos": [
{
"table": {
"catalog": "hive",
"schemaTable": {
"schema": "default",
"table": "fact_passenger_state",
},
},
"columnConstraints": [
{
"columnName": "ds",
"typeSignature": "varchar",
"domain": {
"nullsAllowed": False,
"ranges": [
{
"low": {
"value": "2019-07-10",
"bound": "EXACTLY",
},
"high": {
"value": "2019-07-10",
"bound": "EXACTLY",
},
}
],
},
}
],
"estimate": {
"outputRowCount": 9.04969899e8,
"outputSizeInBytes": 3.54143678301e11,
"cpuCost": 3.54143678301e11,
"maxMemory": 0.0,
"networkCost": 0.0,
},
}
],
"estimate": {
"outputRowCount": 9.04969899e8,
"outputSizeInBytes": 3.54143678301e11,
"cpuCost": 3.54143678301e11,
"maxMemory": 0.0,
"networkCost": 3.54143678301e11,
},
}
]
formatted_cost = PrestoEngineSpec.query_cost_formatter(raw_cost)
expected = [
{
"Output count": "904 M rows",
"Output size": "354 GB",
"CPU cost": "354 G",
"Max memory": "0 B",
"Network cost": "354 G",
}
]
self.assertEqual(formatted_cost, expected)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"PRESTO_EXPAND_DATA": True},
clear=True,
)
def test_presto_expand_data_array(self):
cols = [
{"name": "event_id", "type": "VARCHAR", "is_date": False},
{"name": "timestamp", "type": "BIGINT", "is_date": False},
{
"name": "user",
"type": "ROW(ID BIGINT, FIRST_NAME VARCHAR, LAST_NAME VARCHAR)",
"is_date": False,
},
]
data = [
{
"event_id": "abcdef01-2345-6789-abcd-ef0123456789",
"timestamp": "1595895506219",
"user": '[1, "JOHN", "DOE"]',
}
]
actual_cols, actual_data, actual_expanded_cols = PrestoEngineSpec.expand_data(
cols, data
)
expected_cols = [
{"name": "event_id", "type": "VARCHAR", "is_date": False},
{"name": "timestamp", "type": "BIGINT", "is_date": False},
{
"name": "user",
"type": "ROW(ID BIGINT, FIRST_NAME VARCHAR, LAST_NAME VARCHAR)",
"is_date": False,
},
{"name": "user.id", "type": "BIGINT"},
{"name": "user.first_name", "type": "VARCHAR"},
{"name": "user.last_name", "type": "VARCHAR"},
]
expected_data = [
{
"event_id": "abcdef01-2345-6789-abcd-ef0123456789",
"timestamp": "1595895506219",
"user": [1, "JOHN", "DOE"],
"user.id": 1,
"user.first_name": "JOHN",
"user.last_name": "DOE",
}
]
expected_expanded_cols = [
{"name": "user.id", "type": "BIGINT"},
{"name": "user.first_name", "type": "VARCHAR"},
{"name": "user.last_name", "type": "VARCHAR"},
]
self.assertEqual(actual_cols, expected_cols)
self.assertEqual(actual_data, expected_data)
self.assertEqual(actual_expanded_cols, expected_expanded_cols)
def test_get_sqla_column_type(self):
column_spec = PrestoEngineSpec.get_column_spec("varchar(255)")
assert isinstance(column_spec.sqla_type, types.VARCHAR)
assert column_spec.sqla_type.length == 255
self.assertEqual(column_spec.generic_type, GenericDataType.STRING)
column_spec = PrestoEngineSpec.get_column_spec("varchar")
assert isinstance(column_spec.sqla_type, types.String)
assert column_spec.sqla_type.length is None
self.assertEqual(column_spec.generic_type, GenericDataType.STRING)
column_spec = PrestoEngineSpec.get_column_spec("char(10)")
assert isinstance(column_spec.sqla_type, types.CHAR)
assert column_spec.sqla_type.length == 10
self.assertEqual(column_spec.generic_type, GenericDataType.STRING)
column_spec = PrestoEngineSpec.get_column_spec("char")
assert isinstance(column_spec.sqla_type, types.CHAR)
assert column_spec.sqla_type.length is None
self.assertEqual(column_spec.generic_type, GenericDataType.STRING)
column_spec = PrestoEngineSpec.get_column_spec("integer")
assert isinstance(column_spec.sqla_type, types.Integer)
self.assertEqual(column_spec.generic_type, GenericDataType.NUMERIC)
column_spec = PrestoEngineSpec.get_column_spec("time")
assert isinstance(column_spec.sqla_type, types.Time)
assert type(column_spec.sqla_type).__name__ == "TemporalWrapperType"
self.assertEqual(column_spec.generic_type, GenericDataType.TEMPORAL)
column_spec = PrestoEngineSpec.get_column_spec("timestamp")
assert isinstance(column_spec.sqla_type, types.TIMESTAMP)
assert type(column_spec.sqla_type).__name__ == "TemporalWrapperType"
self.assertEqual(column_spec.generic_type, GenericDataType.TEMPORAL)
sqla_type = PrestoEngineSpec.get_sqla_column_type(None)
assert sqla_type is None
@mock.patch(
"superset.utils.feature_flag_manager.FeatureFlagManager.is_feature_enabled"
)
@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.get_table_names")
@mock.patch("superset.db_engine_specs.presto.PrestoEngineSpec.get_view_names")
def test_get_table_names_no_split_views_from_tables(
self, mock_get_view_names, mock_get_table_names, mock_is_feature_enabled
):
mock_get_view_names.return_value = ["view1", "view2"]
table_names = ["table1", "table2", "view1", "view2"]
mock_get_table_names.return_value = table_names
mock_is_feature_enabled.return_value = False
tables = PrestoEngineSpec.get_table_names(mock.Mock(), mock.Mock(), None)
assert tables == table_names
@mock.patch(
"superset.utils.feature_flag_manager.FeatureFlagManager.is_feature_enabled"
)
@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.get_table_names")
@mock.patch("superset.db_engine_specs.presto.PrestoEngineSpec.get_view_names")
def test_get_table_names_split_views_from_tables(
self, mock_get_view_names, mock_get_table_names, mock_is_feature_enabled
):
mock_get_view_names.return_value = ["view1", "view2"]
table_names = ["table1", "table2", "view1", "view2"]
mock_get_table_names.return_value = table_names
mock_is_feature_enabled.return_value = True
tables = PrestoEngineSpec.get_table_names(mock.Mock(), mock.Mock(), None)
assert sorted(tables) == sorted(table_names)
@mock.patch(
"superset.utils.feature_flag_manager.FeatureFlagManager.is_feature_enabled"
)
@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.get_table_names")
@mock.patch("superset.db_engine_specs.presto.PrestoEngineSpec.get_view_names")
def test_get_table_names_split_views_from_tables_no_tables(
self, mock_get_view_names, mock_get_table_names, mock_is_feature_enabled
):
mock_get_view_names.return_value = []
table_names = []
mock_get_table_names.return_value = table_names
mock_is_feature_enabled.return_value = True
tables = PrestoEngineSpec.get_table_names(mock.Mock(), mock.Mock(), None)
assert tables == []
def test_get_full_name(self):
names = [
("part1", "part2"),
("part11", "part22"),
]
result = PrestoEngineSpec._get_full_name(names)
assert result == "part1.part11"
def test_get_full_name_empty_tuple(self):
names = [
("part1", "part2"),
("", "part3"),
("part4", "part5"),
("", "part6"),
]
result = PrestoEngineSpec._get_full_name(names)
assert result == "part1.part4"
def test_split_data_type(self):
data_type = "value1 value2"
result = PrestoEngineSpec._split_data_type(data_type, " ")
assert result == ["value1", "value2"]
data_type = "value1,value2"
result = PrestoEngineSpec._split_data_type(data_type, ",")
assert result == ["value1", "value2"]
data_type = '"value,1",value2'
result = PrestoEngineSpec._split_data_type(data_type, ",")
assert result == ['"value,1"', "value2"]
def test_show_columns(self):
inspector = mock.MagicMock()
inspector.engine.dialect.identifier_preparer.quote_identifier = (
lambda x: f'"{x}"'
)
mock_execute = mock.MagicMock(return_value=["a", "b"])
inspector.bind.execute = mock_execute
table_name = "table_name"
result = PrestoEngineSpec._show_columns(inspector, table_name, None)
assert result == ["a", "b"]
mock_execute.assert_called_once_with(f'SHOW COLUMNS FROM "{table_name}"')
def test_show_columns_with_schema(self):
inspector = mock.MagicMock()
inspector.engine.dialect.identifier_preparer.quote_identifier = (
lambda x: f'"{x}"'
)
mock_execute = mock.MagicMock(return_value=["a", "b"])
inspector.bind.execute = mock_execute
table_name = "table_name"
schema = "schema"
result = PrestoEngineSpec._show_columns(inspector, table_name, schema)
assert result == ["a", "b"]
mock_execute.assert_called_once_with(
f'SHOW COLUMNS FROM "{schema}"."{table_name}"'
)
def test_is_column_name_quoted(self):
column_name = "mock"
assert PrestoEngineSpec._is_column_name_quoted(column_name) is False
column_name = '"mock'
assert PrestoEngineSpec._is_column_name_quoted(column_name) is False
column_name = '"moc"k'
assert PrestoEngineSpec._is_column_name_quoted(column_name) is False
column_name = '"moc"k"'
assert PrestoEngineSpec._is_column_name_quoted(column_name) is True
@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.select_star")
def test_select_star_no_presto_expand_data(self, mock_select_star):
database = mock.Mock()
table_name = "table_name"
engine = mock.Mock()
cols = [
{"col1": "val1"},
{"col2": "val2"},
]
PrestoEngineSpec.select_star(database, table_name, engine, cols=cols)
mock_select_star.assert_called_once_with(
database, table_name, engine, None, 100, False, True, True, cols
)
@mock.patch("superset.db_engine_specs.presto.is_feature_enabled")
@mock.patch("superset.db_engine_specs.base.BaseEngineSpec.select_star")
def test_select_star_presto_expand_data(
self, mock_select_star, mock_is_feature_enabled
):
mock_is_feature_enabled.return_value = True
database = mock.Mock()
table_name = "table_name"
engine = mock.Mock()
cols = [
{"name": "val1"},
{"name": "val2<?!@#$312,/'][p098"},
{"name": ".val2"},
{"name": "val2."},
{"name": "val.2"},
{"name": ".val2."},
]
PrestoEngineSpec.select_star(
database, table_name, engine, show_cols=True, cols=cols
)
mock_select_star.assert_called_once_with(
database,
table_name,
engine,
None,
100,
True,
True,
True,
[{"name": "val1"}, {"name": "val2<?!@#$312,/'][p098"},],
)
def test_estimate_statement_cost(self):
mock_cursor = mock.MagicMock()
estimate_json = {"a": "b"}
mock_cursor.fetchone.return_value = [
'{"a": "b"}',
]
result = PrestoEngineSpec.estimate_statement_cost(
"SELECT * FROM brth_names", mock_cursor
)
assert result == estimate_json
def test_estimate_statement_cost_invalid_syntax(self):
mock_cursor = mock.MagicMock()
mock_cursor.execute.side_effect = Exception()
with self.assertRaises(Exception):
PrestoEngineSpec.estimate_statement_cost(
"DROP TABLE brth_names", mock_cursor
)
def test_get_all_datasource_names(self):
df = pd.DataFrame.from_dict(
{"table_schema": ["schema1", "schema2"], "table_name": ["name1", "name2"]}
)
database = mock.MagicMock()
database.get_df.return_value = df
result = PrestoEngineSpec.get_all_datasource_names(database, "table")
expected_result = [
DatasourceName(schema="schema1", table="name1"),
DatasourceName(schema="schema2", table="name2"),
]
assert result == expected_result
def test_get_create_view(self):
mock_execute = mock.MagicMock()
mock_fetchall = mock.MagicMock(return_value=[["a", "b,", "c"], ["d", "e"]])
database = mock.MagicMock()
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute = (
mock_execute
)
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.fetchall = (
mock_fetchall
)
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.poll.return_value = (
False
)
schema = "schema"
table = "table"
result = PrestoEngineSpec.get_create_view(database, schema=schema, table=table)
assert result == "a"
mock_execute.assert_called_once_with(f"SHOW CREATE VIEW {schema}.{table}")
def test_get_create_view_exception(self):
mock_execute = mock.MagicMock(side_effect=Exception())
database = mock.MagicMock()
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute = (
mock_execute
)
schema = "schema"
table = "table"
with self.assertRaises(Exception):
PrestoEngineSpec.get_create_view(database, schema=schema, table=table)
def test_get_create_view_database_error(self):
from pyhive.exc import DatabaseError
mock_execute = mock.MagicMock(side_effect=DatabaseError())
database = mock.MagicMock()
database.get_sqla_engine.return_value.raw_connection.return_value.cursor.return_value.execute = (
mock_execute
)
schema = "schema"
table = "table"
result = PrestoEngineSpec.get_create_view(database, schema=schema, table=table)
assert result is None
def test_extract_error_message_orig(self):
DatabaseError = namedtuple("DatabaseError", ["error_dict"])
db_err = DatabaseError(
{"errorName": "name", "errorLocation": "location", "message": "msg"}
)
exception = Exception()
exception.orig = db_err
result = PrestoEngineSpec._extract_error_message(exception)
assert result == "name at location: msg"
    def test_extract_error_message_db_error(self):
from pyhive.exc import DatabaseError
exception = DatabaseError({"message": "Err message"})
result = PrestoEngineSpec._extract_error_message(exception)
assert result == "Err message"
def test_extract_error_message_general_exception(self):
exception = Exception("Err message")
result = PrestoEngineSpec._extract_error_message(exception)
assert result == "Err message"
def test_extract_errors(self):
msg = "Generic Error"
result = PrestoEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
message="Generic Error",
error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1002,
"message": "Issue 1002 - The database returned an unexpected error.",
}
],
},
)
]
msg = "line 1:8: Column 'bogus' cannot be resolved"
result = PrestoEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
message='We can\'t seem to resolve the column "bogus" at line 1:8.',
error_type=SupersetErrorType.COLUMN_DOES_NOT_EXIST_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1003,
"message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
},
{
"code": 1004,
"message": "Issue 1004 - The column was deleted or renamed in the database.",
},
],
},
)
]
msg = "line 1:15: Table 'tpch.tiny.region2' does not exist"
result = PrestoEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
message="The table \"'tpch.tiny.region2'\" does not exist. A valid table must be used to run this query.",
error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1003,
"message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
},
{
"code": 1005,
"message": "Issue 1005 - The table was deleted or renamed in the database.",
},
],
},
)
]
msg = "line 1:15: Schema 'tin' does not exist"
result = PrestoEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
message='The schema "tin" does not exist. A valid schema must be used to run this query.',
error_type=SupersetErrorType.SCHEMA_DOES_NOT_EXIST_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1003,
"message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
},
{
"code": 1016,
"message": "Issue 1005 - The schema was deleted or renamed in the database.",
},
],
},
)
]
msg = b"Access Denied: Invalid credentials"
result = PrestoEngineSpec.extract_errors(Exception(msg), {"username": "alice"})
assert result == [
SupersetError(
message='Either the username "alice" or the password is incorrect.',
error_type=SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1014,
"message": "Issue 1014 - Either the username or the password is wrong.",
}
],
},
)
]
msg = "Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known"
result = PrestoEngineSpec.extract_errors(
Exception(msg), {"hostname": "badhost"}
)
assert result == [
SupersetError(
message='The hostname "badhost" cannot be resolved.',
error_type=SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1007,
"message": "Issue 1007 - The hostname provided can't be resolved.",
}
],
},
)
]
msg = "Failed to establish a new connection: [Errno 60] Operation timed out"
result = PrestoEngineSpec.extract_errors(
Exception(msg), {"hostname": "badhost", "port": 12345}
)
assert result == [
SupersetError(
message='The host "badhost" might be down, and can\'t be reached on port 12345.',
error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1009,
"message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",
}
],
},
)
]
msg = "Failed to establish a new connection: [Errno 61] Connection refused"
result = PrestoEngineSpec.extract_errors(
Exception(msg), {"hostname": "badhost", "port": 12345}
)
assert result == [
SupersetError(
message='Port 12345 on hostname "badhost" refused the connection.',
error_type=SupersetErrorType.CONNECTION_PORT_CLOSED_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{"code": 1008, "message": "Issue 1008 - The port is closed."}
],
},
)
]
msg = "line 1:15: Catalog 'wrong' does not exist"
result = PrestoEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
message='Unable to connect to catalog named "wrong".',
error_type=SupersetErrorType.CONNECTION_UNKNOWN_DATABASE_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Presto",
"issue_codes": [
{
"code": 1015,
"message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.",
}
],
},
)
]
def test_is_readonly():
def is_readonly(sql: str) -> bool:
return PrestoEngineSpec.is_readonly_query(ParsedQuery(sql))
assert not is_readonly("SET hivevar:desc='Legislators'")
assert not is_readonly("UPDATE t1 SET col1 = NULL")
assert not is_readonly("INSERT OVERWRITE TABLE tabB SELECT a.Age FROM TableA")
assert is_readonly("SHOW LOCKS test EXTENDED")
assert is_readonly("EXPLAIN SELECT 1")
assert is_readonly("SELECT 1")
assert is_readonly("WITH (SELECT 1) bla SELECT * from bla")
|
Python3/790.py | rakhi2001/ecom7 | 854 | 12753324 | __________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def numTilings(self, N: int) -> int:
        # dp[N]   = dp[N-1] + dp[N-2] + 2*(dp[N-3] + dp[N-4] + ... + dp[1] + dp[0])   ...eq(1)
        # dp[N-1] = dp[N-2] + dp[N-3] + 2*(dp[N-4] + dp[N-5] + ... + dp[0])           ...eq(2)
        # eq(1) - eq(2) =>
        #     dp[N] = 2*dp[N-1] + dp[N-3]
        # dp[0] = dp[1] = 1
if N<=1: return 1
dp=[0]*(N+1)
dp[0]=1
dp[1]=1
dp[2]=2
md=int(1e9+7)
for i in range(3,N+1):
dp[i]=2*dp[i-1]+dp[i-3]
dp[i]=dp[i]%md
return dp[-1]
__________________________________________________________________________________________________
sample 13140 kb submission
class Solution:
def numTilings(self, N: int) -> int:
if N == 1:
return 1
elif N == 2:
return 2
else:
f0 = 1 # full
f1 = 2
l1 = 1 # l shape
r1 = 1 # r shape
for i in range(3, N + 1):
f2 = (f1 + f0 + l1 + r1) % 1000000007
l2 = (f0 + r1)
r2 = (f0 + l1)
f0 = f1
f1 = f2
l1 = l2
r1 = r2
return f2
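# Quick illustrative check of the recurrence above: for N = 3 there are
# 5 tilings, so either implementation should satisfy:
#     assert Solution().numTilings(3) == 5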
__________________________________________________________________________________________________
|
engines/ep/scripts/evictionVisualiser.py | BenHuddleston/kv_engine | 104 | 12753336 | #!/usr/bin/env python2.7
#
# Copyright 2017-Present Couchbase, Inc.
#
# Use of this software is governed by the Business Source License included in
# the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that
# file, in accordance with the Business Source License, use of this software
# will be governed by the Apache License, Version 2.0, included in the file
# licenses/APL2.txt.
#
import fileinput
import png
import re
# The script is to be used in conjunction with the ep-engine module test
# STHashTableEvictionItemPagerTest. The test is run as follows:
# cd to the kv_engine build directory
# run the test as follows, redirecting standard error to a file. For example:
# ./ep-engine_ep_unit_tests --gtest_filter=STHashTableEvictionTest\
# .#STHashTableEvictionItemPagerTest 2> eviction_data.txt
# The eviction_data.txt contains a textual output for which documents remain
# resident and which are evicted.
# The script is run as follows, using the example input file eviction_data.txt
# evictionVisualiser.py < eviction_data.txt
# The output from running the script is the generation of a png file
# evictionMap.png
# four vbucket colours and evicted(black)
green = (127, 201, 127);
purple = (190, 174, 212);
orange = (253, 192, 134);
yellow = (255, 255, 153);
black = (0, 0, 0);
colours = [ green, purple, orange, yellow, black ];
maxNoOfColumns = 500;
# contains a row of PNG values
row = [];
# contains the complete PNG image
image = [];
# count of the number of rows in the image
rowCount = 0;
# current count of columns
# reset when generating new row
columnCount = 0;
# regular expression to match document number, vbucket number, and whether
# evicted or not e.g."DOC_1 0 RESIDENT" or "DOC_2600 0 EVICT"
regex = r"DOC_(\d+) (\d+) (\w+)"
for line in fileinput.input():
matches = re.search(regex, line)
if matches:
vbucket = matches.group(2);
# RESIDENT or EVICT
state = matches.group(3);
if (columnCount == maxNoOfColumns) :
columnCount = 0;
rowCount += 1;
image += [row] * 10;
row = [];
num = int(vbucket);
if (state == 'EVICT'):
colour = black;
else:
colour = colours[num];
row += colour * 10;
columnCount += 1;
f = open('evictionMap.png', 'wb');
w = png.Writer(maxNoOfColumns * 10, rowCount * 10);
w.write(f, image);
f.close();
|
tools/vcrpy/tests/unit/test_vcr_import.py | rsdoherty/azure-sdk-for-python | 2,728 | 12753376 | <gh_stars>1000+
import sys
def test_vcr_import_deprecation(recwarn):
if "vcr" in sys.modules:
# Remove imported module entry if already loaded in another test
del sys.modules["vcr"]
import vcr # noqa: F401
if sys.version_info[0] == 2:
assert len(recwarn) == 1
assert issubclass(recwarn[0].category, DeprecationWarning)
else:
assert len(recwarn) == 0
|
homeassistant/components/temper/__init__.py | domwillcode/home-assistant | 30,023 | 12753387 | <filename>homeassistant/components/temper/__init__.py<gh_stars>1000+
"""The temper component."""
|
theano_lstm/__init__.py | JonathanRaiman/theano_lstm | 348 | 12753393 | <filename>theano_lstm/__init__.py
"""
Small Theano LSTM recurrent network module.
@author: <NAME>
@date: December 10th 2014
Implements most of the great things that came out
in 2014 concerning recurrent neural networks, and
some good optimizers for these types of networks.
Note (from 5 January 2015): Dropout api is a bit sophisticated due to the way
random number generators are dealt with in Theano's scan.
"""
import theano, theano.tensor as T
import numpy as np
from collections import OrderedDict
srng = theano.tensor.shared_randomstreams.RandomStreams(1234)
np_rng = np.random.RandomState(1234)
from .masked_loss import masked_loss, masked_loss_dx
from .shared_memory import wrap_params, borrow_memory, borrow_all_memories
class GradClip(theano.compile.ViewOp):
"""
Here we clip the gradients as <NAME> does in his
recurrent neural networks. In particular this prevents
explosion of gradients during backpropagation.
The original poster of this code was <NAME>,
[here](https://groups.google.com/forum/#!topic/theano-dev/GaJwGw6emK0).
"""
def __init__(self, clip_lower_bound, clip_upper_bound):
self.clip_lower_bound = clip_lower_bound
self.clip_upper_bound = clip_upper_bound
assert(self.clip_upper_bound >= self.clip_lower_bound)
def grad(self, args, g_outs):
return [T.clip(g_out, self.clip_lower_bound, self.clip_upper_bound) for g_out in g_outs]
def clip_gradient(x, bound):
grad_clip = GradClip(-bound, bound)
try:
T.opt.register_canonicalize(theano.gof.OpRemove(grad_clip), name='grad_clip_%.1f' % (bound))
except ValueError:
pass
return grad_clip(x)
def create_shared(out_size, in_size=None, name=None):
"""
Creates a shared matrix or vector
using the given in_size and out_size.
Inputs
------
out_size int : outer dimension of the
vector or matrix
in_size int (optional) : for a matrix, the inner
dimension.
Outputs
-------
theano shared : the shared matrix, with random numbers in it
"""
if in_size is None:
return theano.shared(random_initialization((out_size, )), name=name)
else:
return theano.shared(random_initialization((out_size, in_size)), name=name)
def random_initialization(size):
return (np_rng.standard_normal(size) * 1. / size[0]).astype(theano.config.floatX)
def Dropout(shape, prob):
"""
    Return a dropout mask of the given shape.
    The probability of a value in the mask being zero is prob.
    Inputs
    ------
    shape tuple(int, int) : size of the dropout mask.
    prob float, variable : probability of dropping an element.
    Outputs
    -------
    mask theano variable : binary mask, cast to floatX.
"""
mask = srng.binomial(n=1, p=1-prob, size=shape)
return T.cast(mask, theano.config.floatX)
def MultiDropout(shapes, dropout = 0.):
"""
Return all the masks needed for dropout outside of a scan loop.
"""
return [Dropout(shape, dropout) for shape in shapes]
class Layer(object):
"""
Base object for neural network layers.
A layer has an input set of neurons, and
a hidden activation. The activation, f, is a
function applied to the affine transformation
of x by the connection matrix W, and the bias
vector b.
> y = f ( W * x + b )
"""
def __init__(self, input_size, hidden_size, activation, clip_gradients=False):
self.input_size = input_size
self.hidden_size = hidden_size
self.activation = activation
self.clip_gradients = clip_gradients
self.is_recursive = False
self.create_variables()
def create_variables(self):
"""
Create the connection matrix and the bias vector
"""
self.linear_matrix = create_shared(self.hidden_size, self.input_size, name="Layer.linear_matrix")
self.bias_matrix = create_shared(self.hidden_size, name="Layer.bias_matrix")
def activate(self, x):
"""
The hidden activation of the network
"""
if self.clip_gradients is not False:
x = clip_gradient(x, self.clip_gradients)
if x.ndim > 1:
return self.activation(
T.dot(self.linear_matrix, x.T) + self.bias_matrix[:,None] ).T
else:
return self.activation(
T.dot(self.linear_matrix, x) + self.bias_matrix )
@property
def params(self):
return [self.linear_matrix, self.bias_matrix]
@params.setter
def params(self, param_list):
self.linear_matrix.set_value(param_list[0].get_value())
self.bias_matrix.set_value(param_list[1].get_value())
class Embedding(Layer):
"""
A Matrix useful for storing word vectors or other distributed
representations.
use #activate(T.iscalar()) or #activate(T.ivector()) to embed
a symbol.
"""
def __init__(self, vocabulary_size, hidden_size):
"""
Vocabulary size is the number of different symbols to store,
and hidden_size is the size of their embedding.
"""
self.vocabulary_size = vocabulary_size
self.hidden_size = hidden_size
self.create_variables()
self.is_recursive = False
def create_variables(self):
self.embedding_matrix = create_shared(self.vocabulary_size, self.hidden_size, name='Embedding.embedding_matrix')
def activate(self, x):
"""
Inputs
------
x T.ivector() or T.iscalar() : indices to embed
Output
------
embedding : self.embedding_matrix[x]
"""
return self.embedding_matrix[x]
@property
def params(self):
return [self.embedding_matrix]
@params.setter
def params(self, param_list):
self.embedding_matrix.set_value(param_list[0].get_value())
class RNN(Layer):
"""
Special recurrent layer than takes as input
a hidden activation, h, from the past and
an observation x.
> y = f ( W * [x, h] + b )
Note: x and h are concatenated in the activation.
"""
def __init__(self, *args, **kwargs):
super(RNN, self).__init__(*args, **kwargs)
self.is_recursive = True
def create_variables(self):
"""
Create the connection matrix and the bias vector,
and the base hidden activation.
"""
self.linear_matrix = create_shared(self.hidden_size, self.input_size+ self.hidden_size, name="RNN.linear_matrix")
self.bias_matrix = create_shared(self.hidden_size, name="RNN.bias_matrix")
self.initial_hidden_state = create_shared(self.hidden_size, name="RNN.initial_hidden_state")
def activate(self, x, h):
"""
The hidden activation of the network
"""
if self.clip_gradients is not False:
x = clip_gradient(x, self.clip_gradients)
h = clip_gradient(h, self.clip_gradients)
if x.ndim > 1:
return self.activation(
T.dot(
self.linear_matrix,
T.concatenate([x, h], axis=1).T
) + self.bias_matrix[:,None] ).T
else:
return self.activation(
T.dot(
self.linear_matrix,
T.concatenate([x, h])
) + self.bias_matrix )
@property
def params(self):
return [self.linear_matrix, self.bias_matrix]
@params.setter
def params(self, param_list):
self.linear_matrix.set_value(param_list[0].get_value())
self.bias_matrix.set_value(param_list[1].get_value())
class GRU(RNN):
def create_variables(self):
        self.reset_layer = RNN(self.input_size, self.hidden_size, activation=T.nnet.sigmoid)
        self.memory_interpolation_layer = RNN(self.input_size, self.hidden_size, activation=T.nnet.sigmoid)
        self.memory_to_memory_layer = RNN(self.input_size, self.hidden_size, activation=T.tanh)
self.internal_layers = [
self.reset_layer,
self.memory_interpolation_layer,
self.memory_to_memory_layer
]
@property
def params(self):
return [param for layer in self.internal_layers for param in layer.params]
@params.setter
def params(self, param_list):
assert(len(param_list) == 6)
self.reset_layer.params = param_list[0:2]
self.memory_interpolation_layer.params = param_list[2:4]
self.memory_to_memory_layer.params = param_list[4:6]
def activate(self, x, h):
reset_gate = self.reset_layer.activate(
x,
h
)
# the new state dampened by resetting
reset_h = reset_gate * h;
# the new hidden state:
candidate_h = self.memory_to_memory_layer.activate(
x,
reset_h
)
# how much to update the new hidden state:
update_gate = self.memory_interpolation_layer.activate(
x,
h
)
        # the new state interpolated between candidate and old:
new_h = (
h * (1.0 - update_gate) +
candidate_h * update_gate
)
return new_h
class LSTM(RNN):
"""
The structure of the LSTM allows it to learn on problems with
long term dependencies relatively easily. The "long term"
memory is stored in a vector of memory cells c.
Although many LSTM architectures differ in their connectivity
structure and activation functions, all LSTM architectures have
memory cells that are suitable for storing information for long
periods of time. Here we implement the LSTM from Graves et al.
(2013).
"""
def create_variables(self):
"""
Create the different LSTM gates and
their variables, along with the initial
hidden state for the memory cells and
the initial hidden activation.
"""
# input gate for cells
self.in_gate = Layer(self.input_size + self.hidden_size, self.hidden_size, T.nnet.sigmoid, self.clip_gradients)
# forget gate for cells
self.forget_gate = Layer(self.input_size + self.hidden_size, self.hidden_size, T.nnet.sigmoid, self.clip_gradients)
# input modulation for cells
self.in_gate2 = Layer(self.input_size + self.hidden_size, self.hidden_size, self.activation, self.clip_gradients)
# output modulation
self.out_gate = Layer(self.input_size + self.hidden_size, self.hidden_size, T.nnet.sigmoid, self.clip_gradients)
# keep these layers organized
self.internal_layers = [self.in_gate, self.forget_gate, self.in_gate2, self.out_gate]
# store the memory cells in first n spots, and store the current
# output in the next n spots:
self.initial_hidden_state = create_shared(self.hidden_size * 2, name="LSTM.initial_hidden_state")
@property
def params(self):
"""
Parameters given by the 4 gates and the
initial hidden activation of this LSTM cell
layer.
"""
return [param for layer in self.internal_layers for param in layer.params]
@params.setter
def params(self, param_list):
start = 0
for layer in self.internal_layers:
end = start + len(layer.params)
layer.params = param_list[start:end]
start = end
def postprocess_activation(self, x, *args):
if x.ndim > 1:
return x[:, self.hidden_size:]
else:
return x[self.hidden_size:]
def activate(self, x, h):
"""
The hidden activation, h, of the network, along
with the new values for the memory cells, c,
Both are concatenated as follows:
> y = f( x, past )
Or more visibly, with past = [prev_c, prev_h]
> [c, h] = f( x, [prev_c, prev_h] )
"""
if h.ndim > 1:
#previous memory cell values
prev_c = h[:, :self.hidden_size]
#previous activations of the hidden layer
prev_h = h[:, self.hidden_size:]
else:
#previous memory cell values
prev_c = h[:self.hidden_size]
#previous activations of the hidden layer
prev_h = h[self.hidden_size:]
# input and previous hidden constitute the actual
# input to the LSTM:
if h.ndim > 1:
obs = T.concatenate([x, prev_h], axis=1)
else:
obs = T.concatenate([x, prev_h])
# TODO could we combine these 4 linear transformations for efficiency? (e.g., http://arxiv.org/pdf/1410.4615.pdf, page 5)
# how much to add to the memory cells
in_gate = self.in_gate.activate(obs)
# how much to forget the current contents of the memory
forget_gate = self.forget_gate.activate(obs)
# modulate the input for the memory cells
in_gate2 = self.in_gate2.activate(obs)
# new memory cells
next_c = forget_gate * prev_c + in_gate2 * in_gate
# modulate the memory cells to create the new output
out_gate = self.out_gate.activate(obs)
# new hidden output
next_h = out_gate * T.tanh(next_c)
if h.ndim > 1:
return T.concatenate([next_c, next_h], axis=1)
else:
return T.concatenate([next_c, next_h])
class GatedInput(RNN):
def create_variables(self):
# input gate for cells
self.in_gate = Layer(self.input_size + self.hidden_size, 1, T.nnet.sigmoid, self.clip_gradients)
self.internal_layers = [self.in_gate]
@property
def params(self):
"""
Parameters given by the 4 gates and the
initial hidden activation of this LSTM cell
layer.
"""
return [param for layer in self.internal_layers
for param in layer.params]
@params.setter
def params(self, param_list):
start = 0
for layer in self.internal_layers:
end = start + len(layer.params)
layer.params = param_list[start:end]
start = end
def activate(self, x, h):
# input and previous hidden constitute the actual
# input to the LSTM:
if h.ndim > 1:
obs = T.concatenate([x, h], axis=1)
else:
obs = T.concatenate([x, h])
gate = self.in_gate.activate(obs)
if h.ndim > 1:
gate = gate[:,0][:,None]
else:
gate = gate[0]
return gate
def postprocess_activation(self, gate, x, h):
return gate * x
def apply_dropout(x, mask):
if mask is not None:
return mask * x
else:
return x
class StackedCells(object):
"""
Sequentially connect several recurrent layers.
celltypes can be RNN or LSTM.
"""
def __init__(self, input_size, celltype=RNN, layers=None,
activation=lambda x:x, clip_gradients=False):
if layers is None:
layers = []
self.input_size = input_size
self.clip_gradients = clip_gradients
self.create_layers(layers, activation, celltype)
def create_layers(self, layer_sizes, activation_type, celltype):
self.layers = []
prev_size = self.input_size
for k, layer_size in enumerate(layer_sizes):
layer = celltype(prev_size, layer_size, activation_type,
clip_gradients=self.clip_gradients)
self.layers.append(layer)
prev_size = layer_size
@property
def params(self):
return [param for layer in self.layers for param in layer.params]
@params.setter
def params(self, param_list):
start = 0
for layer in self.layers:
end = start + len(layer.params)
layer.params = param_list[start:end]
start = end
def forward(self, x, prev_hiddens=None, dropout=None):
"""
Return new hidden activations for all stacked RNNs
"""
if dropout is None:
dropout = []
if prev_hiddens is None:
prev_hiddens = [(T.repeat(T.shape_padleft(layer.initial_hidden_state),
x.shape[0], axis=0)
if x.ndim > 1 else layer.initial_hidden_state)
if hasattr(layer, 'initial_hidden_state') else None
for layer in self.layers]
out = []
layer_input = x
for k, layer in enumerate(self.layers):
level_out = layer_input
if len(dropout) > 0:
level_out = apply_dropout(layer_input, dropout[k])
if layer.is_recursive:
level_out = layer.activate(level_out, prev_hiddens[k])
else:
level_out = layer.activate(level_out)
out.append(level_out)
# deliberate choice to change the upward structure here
# in an RNN, there is only one kind of hidden values
if hasattr(layer, 'postprocess_activation'):
# in this case the hidden activation has memory cells
# that are not shared upwards
# along with hidden activations that can be sent
                # upwards
if layer.is_recursive:
level_out = layer.postprocess_activation(level_out, layer_input, prev_hiddens[k])
else:
level_out = layer.postprocess_activation(level_out, layer_input)
layer_input = level_out
return out
def create_optimization_updates(cost, params, updates=None, max_norm=5.0,
lr=0.01, eps=1e-6, rho=0.95,
method = "adadelta", gradients = None):
"""
Get the updates for a gradient descent optimizer using
SGD, AdaDelta, or AdaGrad.
Returns the shared variables for the gradient caches,
and the updates dictionary for compilation by a
theano function.
Inputs
------
cost theano variable : what to minimize
params list : list of theano variables
with respect to which
the gradient is taken.
max_norm float : cap on excess gradients
lr float : base learning rate for
adagrad and SGD
eps float : numerical stability value
to not divide by zero
sometimes
rho float : adadelta hyperparameter.
method str : 'adagrad', 'adadelta', or 'sgd'.
Outputs:
--------
updates OrderedDict : the updates to pass to a
theano function
gsums list : gradient caches for Adagrad
and Adadelta
xsums list : gradient caches for AdaDelta only
lr theano shared : learning rate
max_norm theano_shared : normalizing clipping value for
excessive gradients (exploding).
"""
lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
eps = np.float64(eps).astype(theano.config.floatX)
rho = theano.shared(np.float64(rho).astype(theano.config.floatX))
if max_norm is not None and max_norm is not False:
max_norm = theano.shared(np.float64(max_norm).astype(theano.config.floatX))
gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True))) if (method == 'adadelta' or method == 'adagrad') else None for param in params]
xsums = [theano.shared(np.zeros_like(param.get_value(borrow=True))) if method == 'adadelta' else None for param in params]
gparams = T.grad(cost, params) if gradients is None else gradients
if updates is None:
updates = OrderedDict()
for gparam, param, gsum, xsum in zip(gparams, params, gsums, xsums):
# clip gradients if they get too big
if max_norm is not None and max_norm is not False:
grad_norm = gparam.norm(L=2)
gparam = (T.minimum(max_norm, grad_norm)/ (grad_norm + eps)) * gparam
if method == 'adadelta':
updates[gsum] = T.cast(rho * gsum + (1. - rho) * (gparam **2), theano.config.floatX)
dparam = -T.sqrt((xsum + eps) / (updates[gsum] + eps)) * gparam
updates[xsum] = T.cast(rho * xsum + (1. - rho) * (dparam **2), theano.config.floatX)
updates[param] = T.cast(param + dparam, theano.config.floatX)
elif method == 'adagrad':
updates[gsum] = T.cast(gsum + (gparam ** 2), theano.config.floatX)
updates[param] = T.cast(param - lr * (gparam / (T.sqrt(updates[gsum] + eps))), theano.config.floatX)
else:
updates[param] = param - gparam * lr
if method == 'adadelta':
lr = rho
return updates, gsums, xsums, lr, max_norm
__all__ = [
"create_optimization_updates",
"masked_loss",
"masked_loss_dx",
"clip_gradient",
"create_shared",
"Dropout",
"apply_dropout",
"StackedCells",
"Layer",
"LSTM",
"RNN",
"GatedInput",
"Embedding",
"MultiDropout",
"wrap_params",
"borrow_memory",
"borrow_all_memories"
]
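# A minimal single-step usage sketch under assumed toy sizes (one time step per
# call; wrap the forward pass in theano.scan to unroll over a sequence). This
# is an illustrative example rather than part of the library API above.
def example_usage():
    x = T.matrix('x')            # (batch, 50) input for a single time step
    target = T.matrix('target')  # (batch, 1) regression target
    model = StackedCells(50, celltype=LSTM, layers=[20, 20], activation=T.tanh)
    model.layers.append(Layer(20, 1, lambda a: a))   # linear readout on top
    outputs = model.forward(x)                       # one activation per layer
    cost = T.mean((outputs[-1] - target) ** 2)
    updates, _, _, _, _ = create_optimization_updates(cost, model.params,
                                                      method="adadelta")
    return theano.function([x, target], cost, updates=updates,
                           allow_input_downcast=True)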
|
lib/django-1.5/django/contrib/gis/geos/__init__.py | MiCHiLU/google_appengine_sdk | 790 | 12753400 | """
The GeoDjango GEOS module. Please consult the GeoDjango documentation
for more details:
http://geodjango.org/docs/geos.html
"""
from django.contrib.gis.geos.geometry import GEOSGeometry, wkt_regex, hex_regex
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos.collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.io import WKTReader, WKTWriter, WKBReader, WKBWriter
from django.contrib.gis.geos.factory import fromfile, fromstr
from django.contrib.gis.geos.libgeos import geos_version, geos_version_info, GEOS_PREPARE
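# A small usage sketch (assumes the GEOS library is installed and configured):
# >>> pnt = GEOSGeometry('POINT(5 23)')  # from WKT
# >>> pnt.equals(Point(5, 23))
# True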
|
pytorch_toolkit/face_recognition/model/blocks/shufflenet_v2_blocks.py | AnastasiaaSenina/openvino_training_extensions | 158 | 12753427 | <gh_stars>100-1000
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn as nn
from model.blocks.shared_blocks import make_activation
def conv_bn(inp, oup, stride, activation=nn.ReLU):
conv = nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
make_activation(activation)
)
nn.init.kaiming_normal_(conv[0].weight, mode='fan_out')
return conv
def conv_1x1_bn(inp, oup, activation=nn.ReLU):
conv = nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
make_activation(activation)
)
nn.init.kaiming_normal_(conv[0].weight, mode='fan_out')
return conv
def channel_shuffle(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
class ShuffleInvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, benchmodel, activation=nn.ReLU):
super(ShuffleInvertedResidual, self).__init__()
self.benchmodel = benchmodel
self.stride = stride
assert stride in [1, 2]
oup_inc = oup//2
if self.benchmodel == 1:
# assert inp == oup_inc
self.branch2 = nn.Sequential(
# pw
nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
make_activation(activation),
# dw
nn.Conv2d(oup_inc, oup_inc, 3, stride, 1, groups=oup_inc, bias=False),
nn.BatchNorm2d(oup_inc),
# pw-linear
nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
make_activation(activation),
)
else:
self.branch1 = nn.Sequential(
# dw
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
# pw-linear
nn.Conv2d(inp, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
make_activation(activation),
)
self.branch2 = nn.Sequential(
# pw
nn.Conv2d(inp, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
make_activation(activation),
# dw
nn.Conv2d(oup_inc, oup_inc, 3, stride, 1, groups=oup_inc, bias=False),
nn.BatchNorm2d(oup_inc),
# pw-linear
nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup_inc),
make_activation(activation),
)
self.init_weights()
@staticmethod
def _concat(x, out):
# concatenate along channel axis
return torch.cat((x, out), 1)
def init_weights(self):
for m in self.children():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.benchmodel == 1:
x1 = x[:, :(x.shape[1]//2), :, :]
x2 = x[:, (x.shape[1]//2):, :, :]
out = self._concat(x1, self.branch2(x2))
elif self.benchmodel == 2:
out = self._concat(self.branch1(x), self.branch2(x))
return channel_shuffle(out, 2)
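# A small shape sanity check under assumed toy sizes (illustrative only): a
# stride-2 block halves the spatial resolution, a stride-1 block preserves it,
# and channel_shuffle only permutes channels without changing the shape.
if __name__ == '__main__':
    x = torch.randn(2, 24, 32, 32)
    down = ShuffleInvertedResidual(24, 48, stride=2, benchmodel=2)
    keep = ShuffleInvertedResidual(48, 48, stride=1, benchmodel=1)
    assert keep(down(x)).shape == (2, 48, 16, 16)
    assert channel_shuffle(x, 2).shape == x.shape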
|
utils/export_utils.py | Mithilesh1609/assembled-cnn | 363 | 12753430 | # coding=utf8
# This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tensorflow as tf
from official.utils.export import export
from utils import data_util
from functions import data_config
import numpy as np
from tqdm import tqdm
def export_test(bin_export_path, flags_obj, ir_eval):
ds = tf.data.Dataset.list_files(flags_obj.data_dir + '/' + flags_obj.val_regex)
ds = ds.interleave(tf.data.TFRecordDataset, cycle_length=10)
def parse_tfr(example_proto):
feature_def = {'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value='')}
features = tf.io.parse_single_example(serialized=example_proto, features=feature_def)
return features['image/encoded'], features['image/class/label']
ds = ds.map(parse_tfr)
ds = ds.batch(flags_obj.val_batch_size)
iterator = ds.make_one_shot_iterator()
images, labels = iterator.get_next()
dconf = data_config.get_config(flags_obj.dataset_name)
num_val_images = dconf.num_images['validation']
if flags_obj.zeroshot_eval or ir_eval:
feature_dim = flags_obj.embedding_size if flags_obj.embedding_size > 0 else flags_obj.num_features
np_features = np.zeros((num_val_images, feature_dim), dtype=np.float32)
np_labels = np.zeros(num_val_images, dtype=np.int64)
np_i = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.saved_model.load(sess=sess, export_dir=bin_export_path, tags={"serve"})
for _ in tqdm(range(int(num_val_images / flags_obj.val_batch_size) + 1)):
try:
np_image, np_label = sess.run([images, labels])
np_predict = sess.run('embedding_tensor:0',
feed_dict={'input_tensor:0': np_image})
np_features[np_i:np_i + np_predict.shape[0], :] = np_predict
np_labels[np_i:np_i + np_label.shape[0]] = np_label
np_i += np_predict.shape[0]
except tf.errors.OutOfRangeError:
break
assert np_i == num_val_images
from sklearn.preprocessing import normalize
x = normalize(np_features)
np_sim = x.dot(x.T)
np.fill_diagonal(np_sim, -10) # removing similarity for query.
num_correct = 0
for i in range(num_val_images):
cur_label = np_labels[i]
rank1_label = np_labels[np.argmax(np_sim[i, :])]
if rank1_label == cur_label:
num_correct += 1
recall_at_1 = num_correct / num_val_images
metric = recall_at_1
else:
np_i = 0
correct_cnt = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.saved_model.load(sess=sess, export_dir=bin_export_path, tags={"serve"})
for _ in tqdm(range(int(num_val_images / flags_obj.val_batch_size) + 1)):
try:
np_image, np_label = sess.run([images, labels])
np_predict = sess.run('ArgMax:0',
feed_dict={'input_tensor:0': np_image})
np_i += np_predict.shape[0]
correct_cnt += np.sum(np_predict == np_label)
except tf.errors.OutOfRangeError:
break
assert np_i == num_val_images
metric = correct_cnt / np_i
return metric
def image_bytes_serving_input_fn(image_shape, decoder_name, dtype=tf.float32, pptype='imagenet'):
"""Serving input fn for raw jpeg images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
# Bounding box around the whole image.
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
_, _, num_channels = image_shape
tf.logging.info("!!!!!!!!!! Preprocessing type for exporting pb: {} and decoder type: {}".format(pptype, decoder_name))
image = data_util.preprocess_image(
image_buffer=image_bytes, is_training=False, bbox=bbox,
num_channels=num_channels, dtype=dtype, use_random_crop=False,
decoder_name=decoder_name, dct_method='INTEGER_ACCURATE', preprocessing_type=pptype)
return image
image_bytes_list = tf.placeholder(
shape=[None], dtype=tf.string, name='input_tensor')
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def export_pb(flags_core, flags_obj, shape, classifier, ir_eval=False):
export_dtype = flags_core.get_tf_dtype(flags_obj)
if not flags_obj.data_format:
raise ValueError('The `data_format` must be specified: channels_first or channels_last ')
bin_export_path = os.path.join(flags_obj.export_dir, flags_obj.data_format, 'binary_input')
bin_input_receiver_fn = functools.partial(image_bytes_serving_input_fn, shape, flags_obj.export_decoder_type,
dtype=export_dtype, pptype=flags_obj.preprocessing_type)
pp_export_path = os.path.join(flags_obj.export_dir, flags_obj.data_format, 'preprocessed_input')
pp_input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
shape, batch_size=None, dtype=export_dtype)
result_bin_export_path = classifier.export_savedmodel(bin_export_path, bin_input_receiver_fn)
classifier.export_savedmodel(pp_export_path, pp_input_receiver_fn)
if flags_obj.export_decoder_type == 'jpeg':
metric = export_test(result_bin_export_path, flags_obj, ir_eval)
    msg = 'IMPORTANT! Evaluation metric of exported saved_model.pb is {}'.format(metric)
tf.logging.info(msg)
with tf.gfile.Open(result_bin_export_path.decode("utf-8") + '/model_performance.txt', 'w') as fp:
fp.write(msg)
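# A minimal inference sketch against the exported binary-input SavedModel
# (assumptions: TF1-style graph mode, a hypothetical export directory and JPEG
# path supplied by the caller, and the same 'input_tensor:0' / 'ArgMax:0'
# tensor names exercised in export_test above).
def classify_jpeg_example(saved_model_dir, jpeg_path):
  with tf.gfile.Open(jpeg_path, 'rb') as f:
    image_bytes = f.read()
  with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.load(sess=sess, export_dir=saved_model_dir, tags={"serve"})
    # Feed a batch of one encoded JPEG and read back the predicted class id.
    return sess.run('ArgMax:0', feed_dict={'input_tensor:0': [image_bytes]})[0]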
|
apps/xcode.py | zachbarrow/talon_community | 125 | 12753434 | # Talon voice commands for Xcode
# <NAME> <EMAIL>
from talon.voice import Key, Context
from ..misc.mouse import control_shift_click
ctx = Context("xcode", bundle="com.apple.dt.Xcode")
ctx.keymap(
{
"build it": Key("cmd-b"),
"stop it": Key("cmd-."),
"run it": Key("cmd-r"),
"go back": Key("cmd-ctrl-left"),
"go (fore | forward)": Key("cmd-ctrl-right"),
"find in (proj | project)": Key("cmd-shift-f"),
"(sell find in (proj | project) | find selection in project)": Key(
"cmd-e cmd-shift-f enter"
),
"(sell find ace in (proj | project) | replace selection in project)": Key(
"cmd-e cmd-shift-alt-f"
),
"next in (proj | project)": Key("cmd-ctrl-g"),
"prev in (proj | project)": Key("shift-cmd-ctrl-g"),
"split window": Key("cmd-alt-enter"),
"show editor": Key("cmd-enter"),
"(show | hide) debug": Key("cmd-shift-y"),
"(show | find) call hierarchy": Key("cmd-ctrl-shift-h"),
"show (recent | recent files)": [Key("ctrl-1"), "recent files\n"],
"show related": Key("ctrl-1"),
"show history": Key("ctrl-2"),
"show files": Key("ctrl-5"),
"show (methods | items)": Key("ctrl-6"),
"show navigator": Key("cmd-0"),
"hide (navigator | project | warnings | breakpoints | reports | build)": Key(
"cmd-0"
),
"show project": Key("cmd-1"),
"show warnings": Key("cmd-5"),
"show breakpoints": Key("cmd-8"),
"show (reports | build)": Key("cmd-9"),
"show diffs": Key("cmd-alt-shift-enter"),
"(next counterpart | show header | switcher)": Key("cmd-ctrl-down"),
"prev counterpart": Key("cmd-ctrl-up"),
"toggle comment": Key("cmd-/"),
"toggle breakpoint": Key("cmd-\\"),
"toggle all breakpoints": Key("cmd-y"),
"move line up": Key("cmd-alt-["),
"move line down": Key("cmd-alt-]"),
"go (deafen | definition)": Key("cmd-ctrl-j"),
"edit scheme": Key("cmd-shift-,"),
"quick open": Key("cmd-shift-o"),
"comm skoosh": "// ",
"(comm | comment) line": [
"//------------------------------------------------------------------------------",
Key("enter"),
],
"step in": Key("f7"),
"step over": Key("f6"),
"step out": Key("f8"),
"step (continue | go)": Key("ctrl-cmd-y"),
"show blame for line": Key("cmd-alt-ctrl-b"),
"(reveal file | show file in finder)": Key("cmd-alt-ctrl-shift-f"),
"(snipline | delete line)": Key("cmd-alt-ctrl-shift-backspace"),
"add cursor down": Key("ctrl-shift-down"),
"add cursor up": Key("ctrl-shift-up"),
"add cursor": control_shift_click,
"dub add cursor": lambda m: control_shift_click(m, 0, 2),
"((select | sell) (partial | sub) [word] left)": Key("shift-ctrl-left"),
"((select | sell) (partial | sub) [word] right)": Key("shift-ctrl-right"),
# the following require custom key bindings in xcode preferences
"((partial | sub) [word] left | wonkrim)": Key("alt-ctrl-left"),
"((partial | sub) [word] right | wonkrish)": Key("alt-ctrl-right"),
}
)
|
tools/deep_memory_profiler/subcommands/cat.py | iplo/Chain | 231 | 12753445 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import sys
from lib.bucket import BUCKET_ID, COMMITTED, ALLOC_COUNT, FREE_COUNT
from lib.ordered_dict import OrderedDict
from lib.subcommand import SubCommand
from lib.sorter import MallocUnit, MMapUnit, SorterSet, UnhookedUnit, UnitSet
LOGGER = logging.getLogger('dmprof')
class CatCommand(SubCommand):
def __init__(self):
super(CatCommand, self).__init__('Usage: %prog cat <first-dump>')
self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
metavar='/path/on/target@/path/on/host[:...]',
help='Read files in /path/on/host/ instead of '
'files in /path/on/target/.')
self._parser.add_option('--indent', dest='indent', action='store_true',
help='Indent the output.')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 1)
dump_path = args[1]
# TODO(dmikurube): Support shared memory.
alternative_dirs_dict = {}
if options.alternative_dirs:
for alternative_dir_pair in options.alternative_dirs.split(':'):
target_path, host_path = alternative_dir_pair.split('@', 1)
alternative_dirs_dict[target_path] = host_path
(bucket_set, dumps) = SubCommand.load_basic_files(
dump_path, True, alternative_dirs=alternative_dirs_dict)
# Load all sorters.
sorters = SorterSet()
json_root = OrderedDict()
json_root['version'] = 1
json_root['run_id'] = None
json_root['roots'] = []
for sorter in sorters:
if sorter.root:
json_root['roots'].append([sorter.world, sorter.name])
json_root['default_template'] = 'l2'
json_root['templates'] = sorters.templates.as_dict()
orders = OrderedDict()
orders['worlds'] = OrderedDict()
for world in ['vm', 'malloc']:
orders['worlds'][world] = OrderedDict()
orders['worlds'][world]['breakdown'] = OrderedDict()
for sorter in sorters.iter_world(world):
order = []
for rule in sorter.iter_rule():
if rule.name not in order:
order.append(rule.name)
orders['worlds'][world]['breakdown'][sorter.name] = order
json_root['orders'] = orders
json_root['snapshots'] = []
for dump in dumps:
if json_root['run_id'] and json_root['run_id'] != dump.run_id:
LOGGER.error('Inconsistent heap profile dumps.')
json_root['run_id'] = ''
else:
json_root['run_id'] = dump.run_id
LOGGER.info('Sorting a dump %s...' % dump.path)
json_root['snapshots'].append(
self._fill_snapshot(dump, bucket_set, sorters))
if options.indent:
json.dump(json_root, sys.stdout, indent=2)
else:
json.dump(json_root, sys.stdout)
print ''
@staticmethod
def _fill_snapshot(dump, bucket_set, sorters):
root = OrderedDict()
root['time'] = dump.time
root['worlds'] = OrderedDict()
root['worlds']['vm'] = CatCommand._fill_world(
dump, bucket_set, sorters, 'vm')
root['worlds']['malloc'] = CatCommand._fill_world(
dump, bucket_set, sorters, 'malloc')
return root
@staticmethod
def _fill_world(dump, bucket_set, sorters, world):
root = OrderedDict()
root['name'] = world
if world == 'vm':
root['unit_fields'] = ['size', 'reserved']
elif world == 'malloc':
root['unit_fields'] = ['size', 'alloc_count', 'free_count']
# Make { vm | malloc } units with their sizes.
root['units'] = OrderedDict()
unit_set = UnitSet(world)
if world == 'vm':
for unit in CatCommand._iterate_vm_unit(dump, None, bucket_set):
unit_set.append(unit)
for unit in unit_set:
root['units'][unit.unit_id] = [unit.committed, unit.reserved]
elif world == 'malloc':
for unit in CatCommand._iterate_malloc_unit(dump, bucket_set):
unit_set.append(unit)
for unit in unit_set:
root['units'][unit.unit_id] = [
unit.size, unit.alloc_count, unit.free_count]
# Iterate for { vm | malloc } sorters.
root['breakdown'] = OrderedDict()
for sorter in sorters.iter_world(world):
LOGGER.info(' Sorting with %s:%s.' % (sorter.world, sorter.name))
breakdown = OrderedDict()
for rule in sorter.iter_rule():
category = OrderedDict()
category['name'] = rule.name
subs = []
for sub_world, sub_breakdown in rule.iter_subs():
subs.append([sub_world, sub_breakdown])
if subs:
category['subs'] = subs
if rule.hidden:
category['hidden'] = True
category['units'] = []
breakdown[rule.name] = category
for unit in unit_set:
found = sorter.find(unit)
if found:
# Note that a bucket which doesn't match any rule is just dropped.
breakdown[found.name]['units'].append(unit.unit_id)
root['breakdown'][sorter.name] = breakdown
return root
@staticmethod
def _iterate_vm_unit(dump, pfn_dict, bucket_set):
unit_id = 0
for _, region in dump.iter_map:
unit_id += 1
if region[0] == 'unhooked':
if pfn_dict and dump.pageframe_length:
for pageframe in region[1]['pageframe']:
yield UnhookedUnit(unit_id, pageframe.size, pageframe.size,
region, pageframe, pfn_dict)
else:
yield UnhookedUnit(unit_id,
int(region[1]['committed']),
int(region[1]['reserved']),
region)
elif region[0] == 'hooked':
if pfn_dict and dump.pageframe_length:
for pageframe in region[1]['pageframe']:
yield MMapUnit(unit_id,
pageframe.size,
pageframe.size,
region, bucket_set, pageframe, pfn_dict)
else:
yield MMapUnit(unit_id,
int(region[1]['committed']),
int(region[1]['reserved']),
region,
bucket_set)
else:
LOGGER.error('Unrecognized mapping status: %s' % region[0])
@staticmethod
def _iterate_malloc_unit(dump, bucket_set):
for line in dump.iter_stacktrace:
words = line.split()
bucket = bucket_set.get(int(words[BUCKET_ID]))
if bucket and bucket.allocator_type == 'malloc':
yield MallocUnit(int(words[BUCKET_ID]),
int(words[COMMITTED]),
int(words[ALLOC_COUNT]),
int(words[FREE_COUNT]),
bucket)
elif not bucket:
# 'Not-found' buckets are all assumed as malloc buckets.
yield MallocUnit(int(words[BUCKET_ID]),
int(words[COMMITTED]),
int(words[ALLOC_COUNT]),
int(words[FREE_COUNT]),
None)
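# For reference, the JSON written to stdout has roughly the following shape
# (an illustrative sketch reconstructed from the code above, values elided):
#
# { "version": 1, "run_id": "...",
#   "roots": [["vm", "<sorter>"], ...],
#   "default_template": "l2", "templates": {...}, "orders": {...},
#   "snapshots": [
#     { "time": ..., "worlds": {
#         "vm": { "name": "vm", "unit_fields": ["size", "reserved"],
#                 "units": {...}, "breakdown": {...} },
#         "malloc": { "name": "malloc",
#                     "unit_fields": ["size", "alloc_count", "free_count"],
#                     "units": {...}, "breakdown": {...} } } } ] }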
|
scripts/mi_tests/test_mutual_information.py | vishalbelsare/crosscat | 207 | 12753448 | <reponame>vishalbelsare/crosscat<gh_stars>100-1000
#
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Lead Developers: <NAME> and <NAME>
# Authors: <NAME>, <NAME>, <NAME>, <NAME>
# Research Leads: <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# calculated the mutual information of various shapes of data
import numpy
import pylab as pl
import crosscat.utils.sample_utils as su
import crosscat.utils.inference_utils as iu
import crosscat.utils.data_utils as du
import crosscat.cython_code.State as State
import random
import math
def ring(n=200):
X = numpy.zeros((n,2))
for i in range(n):
angle = random.uniform(0,2*math.pi)
distance = random.uniform(1,1.5)
X[i,0] = math.cos(angle)*distance
X[i,1] = math.sin(angle)*distance
return X
def circle(n=200):
X = numpy.zeros((n,2))
for i in range(n):
angle = random.uniform(0,2*math.pi)
distance = random.uniform(0,1.5)
X[i,0] = math.cos(angle)*distance
X[i,1] = math.sin(angle)*distance
return X
def square(n=200):
X = numpy.zeros((n,2))
for i in range(n):
x = random.uniform(-1,1)
y = random.uniform(-1,1)
X[i,0] = x
X[i,1] = y
return X
def diamond(n=200):
X = square(n=n)
for i in range(n):
angle = math.atan(X[i,1]/X[i,0])
angle += math.pi/4
hyp = (X[i,0]**2.0+X[i,1]**2.0)**.5
x = math.cos(angle)*hyp
y = math.sin(angle)*hyp
X[i,0] = x
X[i,1] = y
return X
def four_dots(n=200):
X = numpy.zeros((n,2))
nb = n/4
mx = [ -1, 1, -1, 1]
my = [ -1, -1, 1, 1]
s = .25
for i in range(n):
n = random.randrange(4)
x = random.normalvariate(mx[n], s)
y = random.normalvariate(my[n], s)
X[i,0] = x
X[i,1] = y
return X
def correlated(r,n=200):
X = numpy.random.multivariate_normal([0,0], [[1, r],[r, 1]], n)
return X
def sample_from_view(M_c, X_L, X_D, get_next_seed):
view_col = X_L['column_partition']['assignments'][0]
view_col2 = X_L['column_partition']['assignments'][1]
same_view = True
if view_col2 != view_col:
same_view = False
view_state = X_L['view_state'][view_col]
view_state2 = X_L['view_state'][view_col2]
cluster_crps = numpy.exp(su.determine_cluster_crp_logps(view_state))
cluster_crps2 = numpy.exp(su.determine_cluster_crp_logps(view_state2))
assert( math.fabs(numpy.sum(cluster_crps) - 1) < .00000001 )
samples = numpy.zeros((n,2))
cluster_idx1 = numpy.nonzero(numpy.random.multinomial(1, cluster_crps))[0][0]
cluster_model1 = su.create_cluster_model_from_X_L(M_c, X_L, view_col, cluster_idx1)
if same_view:
cluster_idx2 = cluster_idx1
cluster_model2 = cluster_model1
else:
cluster_idx2 = numpy.nonzero(numpy.random.multinomial(1, cluster_crps2))[0][0]
cluster_model2 = su.create_cluster_model_from_X_L(M_c, X_L, view_col2, cluster_idx2)
component_model1 = cluster_model1[0]
x = component_model1.get_draw(get_next_seed())
component_model2 = cluster_model2[1]
y = component_model2.get_draw(get_next_seed())
return x, y
def sample_data_from_crosscat(M_c, X_Ls, X_Ds, get_next_seed, n):
X = numpy.zeros((n,2))
n_samples = len(X_Ls)
for i in range(n):
cc = random.randrange(n_samples)
x, y = sample_from_view(M_c, X_Ls[cc], X_Ds[cc], get_next_seed)
X[i,0] = x
X[i,1] = y
return X
def do_test(which_plot, max_plots, n, burn_in, cc_samples, which_test, correlation=0, do_plot=False):
if which_test is "correlated":
X = correlated(correlation, n=n)
elif which_test is "square":
X = square(n=n)
elif which_test is "ring":
X = ring(n=n)
elif which_test is "circle":
X = circle(n=n)
elif which_test is "diamond":
X = diamond(n=n)
elif which_test is "blob":
X = correlated(0.0, n=n)
elif which_test is "dots":
X = four_dots(n=n)
elif which_test is "mixed":
X = numpy.vstack((correlated(.95, n=n/2),correlated(0, n=n/2)))
get_next_seed = lambda : random.randrange(32000)
# Build a state
M_c = du.gen_M_c_from_T(X.tolist())
state = State.p_State(M_c, X.tolist())
X_Ls = []
X_Ds = []
# collect crosscat samples
for _ in range(cc_samples):
state = State.p_State(M_c, X.tolist())
state.transition(n_steps=burn_in)
X_Ds.append(state.get_X_D())
X_Ls.append(state.get_X_L())
SX = sample_data_from_crosscat(M_c, X_Ls, X_Ds, get_next_seed, n)
if do_plot:
pl.subplot(2,max_plots,which_plot)
pl.scatter(X[:,0],X[:,1],c='blue',alpha=.5)
pl.title("Original data")
pl.subplot(2,max_plots,max_plots+which_plot)
pl.scatter(SX[:,0],SX[:,1],c='red',alpha=.5)
pl.title("Sampled data")
pl.show
return M_c, X_Ls, X_Ds
def MI_test(n, burn_in, cc_samples, which_test, n_MI_samples=500, correlation=0):
get_next_seed = lambda : random.randrange(32000)
M_c, X_Ls, X_Ds = do_test(0, 0, n, burn_in, cc_samples, "correlated", correlation=correlation, do_plot=False)
# query column 0 and 1
MI, Linfoot = iu.mutual_information(M_c, X_Ls, X_Ds, [(0,1)],
get_next_seed, n_samples=n_MI_samples)
MI = numpy.mean(MI)
Linfoot = numpy.mean(Linfoot)
if which_test == "correlated":
test_strn = "Test: correlation (%1.2f), N: %i, burn_in: %i, samples: %i, MI_samples: %i\n\tMI: %f, Linfoot %f" % (correlation, n, burn_in, cc_samples, n_MI_samples, MI, Linfoot)
else:
test_strn = "Test: %s, N: %i, burn_in: %i, samples: %i, MI_samples: %i\n\tMI: %f, Linfoot %f" % (which_test, n, burn_in, cc_samples, n_MI_samples, MI, Linfoot)
print(test_strn)
return test_strn
do_plot = False
n_mi_samples = 500
N = [10, 100, 1000]
burn_in = 200
cc_samples = 10
print(" ")
for n in N:
strn = MI_test(n, burn_in, cc_samples, "correlated", correlation=.3)
strn = MI_test(n, burn_in, cc_samples, "correlated", correlation=.6)
strn = MI_test(n, burn_in, cc_samples, "correlated", correlation=.9)
strn = MI_test(n, burn_in, cc_samples, "ring")
strn = MI_test(n, burn_in, cc_samples, "dots")
strn = MI_test(n, burn_in, cc_samples, "mixed")
|
xdl-algorithm-solution/ESMM/data/third_party/protobuf/protobuf-3.5.0/conformance/update_failure_list.py | hitflame/x-deeplearning | 4,071 | 12753477 | <gh_stars>1000+
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script to update a failure list file to add/remove failures.
This is sort of like comm(1), except it recognizes comments and ignores them.
"""
import argparse
parser = argparse.ArgumentParser(
description='Adds/removes failures from the failure list.')
parser.add_argument('filename', type=str, help='failure list file to update')
parser.add_argument('--add', dest='add_list', action='append')
parser.add_argument('--remove', dest='remove_list', action='append')
args = parser.parse_args()
add_set = set()
remove_set = set()
for add_file in (args.add_list or []):
with open(add_file) as f:
for line in f:
add_set.add(line)
for remove_file in (args.remove_list or []):
with open(remove_file) as f:
for line in f:
if line in add_set:
raise Exception("Asked to both add and remove test: " + line)
remove_set.add(line.strip())
add_list = sorted(add_set, reverse=True)
with open(args.filename) as in_file:
existing_list = in_file.read()
with open(args.filename, "w") as f:
for line in existing_list.splitlines(True):
test = line.split("#")[0].strip()
while len(add_list) > 0 and test > add_list[-1]:
f.write(add_list.pop())
if test not in remove_set:
f.write(line)
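# An assumed invocation sketch (the file names below are placeholders):
#
#   update_failure_list.py failure_list_java.txt \
#       --add newly_failing_tests.txt --remove newly_passing_tests.txt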
|
common_test.py | iostermann/deeplab2 | 587 | 12753491 | <reponame>iostermann/deeplab2
# coding=utf-8
# Copyright 2021 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common.py."""
import tensorflow as tf
from deeplab2 import common
class CommonTest(tf.test.TestCase):
def test_constants_keys(self):
self.assertEqual(common.PRED_PANOPTIC_KEY, 'panoptic_pred')
self.assertEqual(common.PRED_SEMANTIC_KEY, 'semantic_pred')
self.assertEqual(common.PRED_INSTANCE_CENTER_KEY, 'instance_center_pred')
self.assertEqual(common.PRED_INSTANCE_KEY, 'instance_pred')
self.assertEqual(common.PRED_SEMANTIC_LOGITS_KEY, 'semantic_logits')
self.assertEqual(common.PRED_CENTER_HEATMAP_KEY, 'center_heatmap')
self.assertEqual(common.PRED_OFFSET_MAP_KEY, 'offset_map')
self.assertEqual(common.PRED_FRAME_OFFSET_MAP_KEY, 'frame_offset_map')
self.assertEqual(common.GT_PANOPTIC_KEY, 'panoptic_gt')
self.assertEqual(common.GT_SEMANTIC_KEY, 'semantic_gt')
self.assertEqual(common.GT_INSTANCE_CENTER_KEY, 'instance_center_gt')
self.assertEqual(common.GT_FRAME_OFFSET_KEY, 'frame_offset_gt')
self.assertEqual(common.GT_INSTANCE_REGRESSION_KEY,
'instance_regression_gt')
self.assertEqual(common.GT_PANOPTIC_RAW, 'panoptic_raw')
self.assertEqual(common.GT_SEMANTIC_RAW, 'semantic_raw')
self.assertEqual(common.GT_SIZE_RAW, 'size_raw')
self.assertEqual(common.SEMANTIC_LOSS_WEIGHT_KEY, 'semantic_loss_weight')
self.assertEqual(common.CENTER_LOSS_WEIGHT_KEY, 'center_loss_weight')
self.assertEqual(common.REGRESSION_LOSS_WEIGHT_KEY,
'regression_loss_weight')
self.assertEqual(common.FRAME_REGRESSION_LOSS_WEIGHT_KEY,
'frame_regression_loss_weight')
self.assertEqual(common.RESIZED_IMAGE, 'resized_image')
self.assertEqual(common.IMAGE, 'image')
self.assertEqual(common.IMAGE_NAME, 'image_name')
self.assertEqual(common.SEQUENCE_ID, 'sequence_id')
self.assertEqual(common.KEY_FRAME_ID, 'video/frame_id')
self.assertEqual(common.KEY_SEQUENCE_ID, 'video/sequence_id')
self.assertEqual(common.KEY_LABEL_FORMAT, 'image/segmentation/class/format')
self.assertEqual(common.KEY_ENCODED_PREV_LABEL,
'prev_image/segmentation/class/encoded')
self.assertEqual(common.KEY_ENCODED_LABEL,
'image/segmentation/class/encoded')
self.assertEqual(common.KEY_IMAGE_CHANNELS, 'image/channels')
self.assertEqual(common.KEY_IMAGE_WIDTH, 'image/width')
self.assertEqual(common.KEY_IMAGE_HEIGHT, 'image/height')
self.assertEqual(common.KEY_IMAGE_FORMAT, 'image/format')
self.assertEqual(common.KEY_IMAGE_FILENAME, 'image/filename')
self.assertEqual(common.KEY_ENCODED_PREV_IMAGE, 'prev_image/encoded')
self.assertEqual(common.KEY_ENCODED_IMAGE, 'image/encoded')
if __name__ == '__main__':
tf.test.main()
|
benchmarks/report.py | nammingi/haste | 291 | 12753516 | <gh_stars>100-1000
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
def extract(x, predicate):
return np.array(list(filter(predicate, x)))
def main(args):
np.set_printoptions(suppress=True)
A = np.loadtxt(args.A, delimiter=',')
B = np.loadtxt(args.B, delimiter=',')
faster = 1.0 - A[:,-1] / B[:,-1]
print(f'A is faster than B by:')
print(f' mean: {np.mean(faster)*100:7.4}%')
print(f' std: {np.std(faster)*100:7.4}%')
print(f' median: {np.median(faster)*100:7.4}%')
print(f' min: {np.min(faster)*100:7.4}%')
print(f' max: {np.max(faster)*100:7.4}%')
for batch_size in np.unique(A[:,0]):
for input_size in np.unique(A[:,2]):
a = extract(A, lambda x: x[0] == batch_size and x[2] == input_size)
b = extract(B, lambda x: x[0] == batch_size and x[2] == input_size)
fig, ax = plt.subplots(dpi=200)
ax.set_xticks(a[:,1])
ax.set_xticklabels(a[:,1].astype(np.int32), rotation=60)
ax.tick_params(axis='y', which='both', length=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.title(f'batch size={int(batch_size)}, input size={int(input_size)}')
plt.plot(a[:,1], a[:,-1], color=args.color[0])
plt.plot(a[:,1], b[:,-1], color=args.color[1])
plt.xlabel('hidden size')
plt.ylabel('time (ms)')
plt.legend(args.name, frameon=False)
plt.tight_layout()
if args.save:
os.makedirs(args.save[0], exist_ok=True)
plt.savefig(f'{args.save[0]}/report_n={int(batch_size)}_c={int(input_size)}.png', dpi=200)
else:
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', nargs=2, default=['A', 'B'])
parser.add_argument('--color', nargs=2, default=['#1f77b4', '#2ca02c'])
parser.add_argument('--save', nargs=1, default=None)
parser.add_argument('A')
parser.add_argument('B')
main(parser.parse_args())
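# The CSV inputs are assumed to contain comma-separated rows of the form
# batch_size,hidden_size,input_size,...,time_ms; only columns 0, 1, 2 and the
# last column are used above. A hypothetical example row: 64,1024,256,1.234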
|
boto3_type_annotations/boto3_type_annotations/elasticbeanstalk/client.py | cowboygneox/boto3_type_annotations | 119 | 12753525 | <reponame>cowboygneox/boto3_type_annotations
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def abort_environment_update(self, EnvironmentId: str = None, EnvironmentName: str = None):
pass
def apply_environment_managed_action(self, ActionId: str, EnvironmentName: str = None, EnvironmentId: str = None) -> Dict:
pass
def can_paginate(self, operation_name: str = None):
pass
def check_dns_availability(self, CNAMEPrefix: str) -> Dict:
pass
def compose_environments(self, ApplicationName: str = None, GroupName: str = None, VersionLabels: List = None) -> Dict:
pass
def create_application(self, ApplicationName: str, Description: str = None, ResourceLifecycleConfig: Dict = None, Tags: List = None) -> Dict:
pass
def create_application_version(self, ApplicationName: str, VersionLabel: str, Description: str = None, SourceBuildInformation: Dict = None, SourceBundle: Dict = None, BuildConfiguration: Dict = None, AutoCreateApplication: bool = None, Process: bool = None, Tags: List = None) -> Dict:
pass
def create_configuration_template(self, ApplicationName: str, TemplateName: str, SolutionStackName: str = None, PlatformArn: str = None, SourceConfiguration: Dict = None, EnvironmentId: str = None, Description: str = None, OptionSettings: List = None, Tags: List = None) -> Dict:
pass
def create_environment(self, ApplicationName: str, EnvironmentName: str = None, GroupName: str = None, Description: str = None, CNAMEPrefix: str = None, Tier: Dict = None, Tags: List = None, VersionLabel: str = None, TemplateName: str = None, SolutionStackName: str = None, PlatformArn: str = None, OptionSettings: List = None, OptionsToRemove: List = None) -> Dict:
pass
def create_platform_version(self, PlatformName: str, PlatformVersion: str, PlatformDefinitionBundle: Dict, EnvironmentName: str = None, OptionSettings: List = None, Tags: List = None) -> Dict:
pass
def create_storage_location(self) -> Dict:
pass
def delete_application(self, ApplicationName: str, TerminateEnvByForce: bool = None):
pass
def delete_application_version(self, ApplicationName: str, VersionLabel: str, DeleteSourceBundle: bool = None):
pass
def delete_configuration_template(self, ApplicationName: str, TemplateName: str):
pass
def delete_environment_configuration(self, ApplicationName: str, EnvironmentName: str):
pass
def delete_platform_version(self, PlatformArn: str = None) -> Dict:
pass
def describe_account_attributes(self) -> Dict:
pass
def describe_application_versions(self, ApplicationName: str = None, VersionLabels: List = None, MaxRecords: int = None, NextToken: str = None) -> Dict:
pass
def describe_applications(self, ApplicationNames: List = None) -> Dict:
pass
def describe_configuration_options(self, ApplicationName: str = None, TemplateName: str = None, EnvironmentName: str = None, SolutionStackName: str = None, PlatformArn: str = None, Options: List = None) -> Dict:
pass
def describe_configuration_settings(self, ApplicationName: str, TemplateName: str = None, EnvironmentName: str = None) -> Dict:
pass
def describe_environment_health(self, EnvironmentName: str = None, EnvironmentId: str = None, AttributeNames: List = None) -> Dict:
pass
def describe_environment_managed_action_history(self, EnvironmentId: str = None, EnvironmentName: str = None, NextToken: str = None, MaxItems: int = None) -> Dict:
pass
def describe_environment_managed_actions(self, EnvironmentName: str = None, EnvironmentId: str = None, Status: str = None) -> Dict:
pass
def describe_environment_resources(self, EnvironmentId: str = None, EnvironmentName: str = None) -> Dict:
pass
def describe_environments(self, ApplicationName: str = None, VersionLabel: str = None, EnvironmentIds: List = None, EnvironmentNames: List = None, IncludeDeleted: bool = None, IncludedDeletedBackTo: datetime = None, MaxRecords: int = None, NextToken: str = None) -> Dict:
pass
def describe_events(self, ApplicationName: str = None, VersionLabel: str = None, TemplateName: str = None, EnvironmentId: str = None, EnvironmentName: str = None, PlatformArn: str = None, RequestId: str = None, Severity: str = None, StartTime: datetime = None, EndTime: datetime = None, MaxRecords: int = None, NextToken: str = None) -> Dict:
pass
def describe_instances_health(self, EnvironmentName: str = None, EnvironmentId: str = None, AttributeNames: List = None, NextToken: str = None) -> Dict:
pass
def describe_platform_version(self, PlatformArn: str = None) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_available_solution_stacks(self) -> Dict:
pass
def list_platform_versions(self, Filters: List = None, MaxRecords: int = None, NextToken: str = None) -> Dict:
pass
def list_tags_for_resource(self, ResourceArn: str) -> Dict:
pass
def rebuild_environment(self, EnvironmentId: str = None, EnvironmentName: str = None):
pass
def request_environment_info(self, InfoType: str, EnvironmentId: str = None, EnvironmentName: str = None):
pass
def restart_app_server(self, EnvironmentId: str = None, EnvironmentName: str = None):
pass
def retrieve_environment_info(self, InfoType: str, EnvironmentId: str = None, EnvironmentName: str = None) -> Dict:
pass
def swap_environment_cnames(self, SourceEnvironmentId: str = None, SourceEnvironmentName: str = None, DestinationEnvironmentId: str = None, DestinationEnvironmentName: str = None):
pass
def terminate_environment(self, EnvironmentId: str = None, EnvironmentName: str = None, TerminateResources: bool = None, ForceTerminate: bool = None) -> Dict:
pass
def update_application(self, ApplicationName: str, Description: str = None) -> Dict:
pass
def update_application_resource_lifecycle(self, ApplicationName: str, ResourceLifecycleConfig: Dict) -> Dict:
pass
def update_application_version(self, ApplicationName: str, VersionLabel: str, Description: str = None) -> Dict:
pass
def update_configuration_template(self, ApplicationName: str, TemplateName: str, Description: str = None, OptionSettings: List = None, OptionsToRemove: List = None) -> Dict:
pass
def update_environment(self, ApplicationName: str = None, EnvironmentId: str = None, EnvironmentName: str = None, GroupName: str = None, Description: str = None, Tier: Dict = None, VersionLabel: str = None, TemplateName: str = None, SolutionStackName: str = None, PlatformArn: str = None, OptionSettings: List = None, OptionsToRemove: List = None) -> Dict:
pass
def update_tags_for_resource(self, ResourceArn: str, TagsToAdd: List = None, TagsToRemove: List = None):
pass
def validate_configuration_settings(self, ApplicationName: str, OptionSettings: List, TemplateName: str = None, EnvironmentName: str = None) -> Dict:
pass
|
src/exabgp/bgp/message/update/attribute/med.py | pierky/exabgp | 1,560 | 12753528 | <gh_stars>1000+
# encoding: utf-8
"""
med.py
Created by <NAME> on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from struct import pack
from struct import unpack
from exabgp.bgp.message.update.attribute.attribute import Attribute
# ====================================================================== MED (4)
#
@Attribute.register()
class MED(Attribute):
ID = Attribute.CODE.MED
FLAG = Attribute.Flag.OPTIONAL
CACHING = True
def __init__(self, med, packed=None):
self.med = med
self._packed = self._attribute(packed if packed is not None else pack('!L', med))
def __eq__(self, other):
return self.ID == other.ID and self.FLAG == other.FLAG and self.med == other.med
def __ne__(self, other):
return not self.__eq__(other)
def pack(self, negotiated=None):
return self._packed
def __len__(self):
return 4
def __repr__(self):
return str(self.med)
def __hash__(self):
return hash(self.med)
@classmethod
def unpack(cls, data, direction, negotiated):
return cls(unpack('!L', data)[0])
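# Hedged usage sketch (not part of the original module): the value below is
# invented for illustration, and `direction`/`negotiated` are passed as None on
# the assumption that unpack() does not consult them for MED.
#
#     med = MED(100)
#     wire = pack('!L', 100)  # raw 4-byte value, without the attribute header
#     assert MED.unpack(wire, None, None).med == med.med
#     assert len(med) == 4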
|
tests/test_managed_object.py | zhgcao/pyvmomi | 1,894 | 12753537 | <reponame>zhgcao/pyvmomi<filename>tests/test_managed_object.py
# VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tests
from pyVim import connect
class ManagedObjectTests(tests.VCRTestBase):
@tests.VCRTestBase.my_vcr.use_cassette('root_folder_parent.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='once')
def test_root_folder_parent(self):
# see: http://python3porting.com/noconv.html
si = connect.SmartConnect(host='vcsa',
user='my_user',
pwd='<PASSWORD>')
root_folder = si.content.rootFolder
self.assertTrue(hasattr(root_folder, 'parent'))
# NOTE (hartsock): assertIsNone does not work in Python 2.6
self.assertTrue(root_folder.parent is None)
|
yolo_tf2/core/evaluation.py | emadboctorx/yolov3-keras-tf2 | 650 | 12753544 | <gh_stars>100-1000
import numpy as np
import pandas as pd
def get_true_positives(actual, detections, iou_threshold):
"""
Identify and flag true positive detections.
Args:
actual: Ground truth data as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`
as columns.
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
iou_threshold: Percentage above which detections overlapping with
ground truths are considered true positive.
Returns:
pd.DataFrame containing filtered out true positives.
"""
if 'detection_key' not in detections.columns:
detections['detection_key'] = np.random.default_rng().choice(
detections.shape[0], size=detections.shape[0], replace=False
)
merged = actual.merge(detections, on=['image', 'object_name'])
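    # Intersection box per (ground truth, detection) pair: it is bounded by the
    # max of the two top-left corners and the min of the two bottom-right
    # corners; pairs without a positive-area overlap are dropped below.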
merged['x0'] = merged[['x0_x', 'x0_y']].max(1)
merged['x1'] = merged[['x1_x', 'x1_y']].min(1)
merged['y0'] = merged[['y0_x', 'y0_y']].max(1)
merged['y1'] = merged[['y1_x', 'y1_y']].min(1)
true_intersect = (merged['x1'] > merged['x0']) & (merged['y1'] > merged['y0'])
merged = merged[true_intersect]
actual_areas = (merged['x1_x'] - merged['x0_x']) * (merged['y1_x'] - merged['y0_x'])
predicted_areas = (merged['x1_y'] - merged['x0_y']) * (
merged['y1_y'] - merged['y0_y']
)
intersection_areas = (merged['x1'] - merged['x0']) * (merged['y1'] - merged['y0'])
merged['iou'] = intersection_areas / (
actual_areas + predicted_areas - intersection_areas
)
merged['true_positive'] = True
merged['false_positive'] = False
merged = merged[merged['iou'] >= iou_threshold]
return merged.drop_duplicates(subset='detection_key')
def get_false_positives(detections, true_positives):
"""
Filter out False positives.
Args:
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
true_positives: `pd.DataFrame` of true positive detections, the result
of `get_true_positives`.
Returns:
`pd.DataFrame` containing filtered out false positives.
"""
keys_before = detections['detection_key'].values
keys_after = true_positives['detection_key'].values
false_keys = np.where(np.isin(keys_before, keys_after, invert=True))
false_keys = keys_before[false_keys]
false_positives = detections.set_index('detection_key').loc[false_keys]
false_positives['true_positive'] = False
false_positives['false_positive'] = True
return false_positives.reset_index()
def calculate_ap(combined, total_actual):
"""
Calculate single object average precision.
Args:
combined: `pd.DataFrame` containing true positives + false positives.
total_actual: Total instances of an object in the dataset.
Returns:
Updated combined with average precision calculated.
"""
combined = combined.sort_values(by='score', ascending=False).reset_index(drop=True)
combined['acc_tp'] = combined['true_positive'].cumsum()
combined['acc_fp'] = combined['false_positive'].cumsum()
combined['precision'] = combined['acc_tp'] / (
combined['acc_tp'] + combined['acc_fp']
)
combined['recall'] = combined['acc_tp'] / total_actual
combined['m_pre1'] = combined['precision'].shift(1, fill_value=0)
combined['m_pre'] = combined[['m_pre1', 'precision']].max(axis=1)
combined['m_rec1'] = combined['recall'].shift(1, fill_value=0)
combined.loc[combined['m_rec1'] != combined['recall'], 'valid_m_rec'] = 1
combined['average_precision'] = (
combined['recall'] - combined['m_rec1']
) * combined['m_pre']
return combined
def calculate_stats(
actual,
detections,
true_positives,
false_positives,
combined,
):
"""
Calculate display data including total actual, total true positives, total false
positives and sort resulting `pd.DataFrame` by object average precision.
Args:
actual: Ground truth data as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`
as columns.
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
true_positives: `pd.DataFrame` of true positive detections, the result
of `get_true_positives`.
false_positives: `pd.DataFrame` of false positive detections, the result
of `get_false_positives`.
combined: `pd.DataFrame` containing true positives + false positives.
Returns:
`pd.DataFrame` with calculated average precisions per dataset object.
"""
class_stats = []
for object_name in actual['object_name'].drop_duplicates().values:
stats = dict()
stats['object_name'] = object_name
stats['average_precision'] = combined[combined['object_name'] == object_name][
'average_precision'
].sum()
stats['actual'] = actual[actual['object_name'] == object_name].shape[0]
stats['detections'] = detections[
detections['object_name'] == object_name
].shape[0]
stats['true_positives'] = true_positives[
true_positives['object_name'] == object_name
].shape[0]
stats['false_positives'] = false_positives[
false_positives['object_name'] == object_name
].shape[0]
stats['combined'] = combined[combined['object_name'] == object_name].shape[0]
class_stats.append(stats)
total_stats = pd.DataFrame(class_stats).sort_values(
by='average_precision', ascending=False
)
return total_stats
def calculate_map(actual, detections, iou_threshold):
"""
Calculate average precision per dataset object. The mean of the resulting
`pd.DataFrame` `average_precision` column is the mAP score.
Args:
actual: Ground truth data as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`
as columns.
detections: Detections as `pd.DataFrame` having
`image`, `object_name`, `object_index`, `x0`, `y0`, `x1`, `y1`,
`score` as columns.
iou_threshold: Percentage above which detections overlapping with
ground truths are considered true positive.
Returns:
`pd.DataFrame`, the result of `calculate_stats`.
"""
class_counts = actual['object_name'].value_counts().to_dict()
true_positives = get_true_positives(actual, detections, iou_threshold)
false_positives = get_false_positives(detections, true_positives)
true_positives = true_positives[
[*set(true_positives.columns) & set(false_positives.columns)]
]
false_positives = false_positives[
[*set(true_positives.columns) & set(false_positives.columns)]
]
combined = pd.concat([true_positives, false_positives])
combined = pd.concat(
[
calculate_ap(group, class_counts.get(object_name))
for object_name, group in combined.groupby('object_name')
]
)
return calculate_stats(
actual, detections, true_positives, false_positives, combined
)
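# Hedged usage sketch (not part of the original module): a toy run of the mAP
# helpers above. Image names, classes, boxes and scores are invented purely for
# illustration; column names follow the docstrings of the functions above.
if __name__ == '__main__':
    example_actual = pd.DataFrame(
        [
            ['img1.jpg', 'car', 0, 10, 10, 50, 50],
            ['img1.jpg', 'dog', 1, 60, 60, 90, 90],
        ],
        columns=['image', 'object_name', 'object_index', 'x0', 'y0', 'x1', 'y1'],
    )
    example_detections = pd.DataFrame(
        [
            ['img1.jpg', 'car', 0, 12, 11, 48, 49, 0.9],  # good overlap -> TP
            ['img1.jpg', 'dog', 1, 0, 0, 20, 20, 0.6],  # no overlap -> FP
        ],
        columns=['image', 'object_name', 'object_index', 'x0', 'y0', 'x1', 'y1', 'score'],
    )
    stats = calculate_map(example_actual, example_detections, iou_threshold=0.5)
    print(stats)
    # The mean of the per-class average precisions is the mAP score.
    print('mAP:', stats['average_precision'].mean())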
|
tests/thread/stress_heap.py | learnforpractice/micropython-cpp | 692 | 12753546 | <filename>tests/thread/stress_heap.py
# stress test for the heap by allocating lots of objects within threads
# allocates about 5mb on the heap
#
# MIT license; Copyright (c) 2016 <NAME> on behalf of Pycom Ltd
try:
import utime as time
except ImportError:
import time
import _thread
def last(l):
return l[-1]
def thread_entry(n):
# allocate a bytearray and fill it
data = bytearray(i for i in range(256))
# run a loop which allocates a small list and uses it each iteration
lst = 8 * [0]
sum = 0
for i in range(n):
sum += last(lst)
lst = [0, 0, 0, 0, 0, 0, 0, i + 1]
# check that the bytearray still has the right data
for i, b in enumerate(data):
assert i == b
# print the result of the loop and indicate we are finished
with lock:
print(sum, lst[-1])
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 10
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(thread_entry, (10000,))
# wait for threads to finish
while n_finished < n_thread:
time.sleep(1)
|
test/integration/component/test_ss_volume_usage.py | ycyun/ablestack-cloud | 1,131 | 12753559 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test cases for checking that the secondary Storage usage is accounted. This is verified by checking the usage_event table
for a volume in 'Uploaded' state.
This test case does the following:
1.Creates an account and uploads a volume.
2.After the volume is uploaded successfully, connects to the database
3.From the database verifies that an entry is added to cloud.events table for the uploaded volume.
4.Cleans up the resources.
"""
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
from marvin.sshClient import SshClient
from marvin.codes import (BACKED_UP, PASS, FAIL)
import time
def verify_vm(self, vmid, state):
list_vm = list_virtual_machines(self.userapiclient,
account=self.account.name,
domainid=self.account.domainid,
id=vmid
)
self.assertEqual(
validateList(list_vm)[0],
PASS,
"Check List vm response for vmid: %s" %
vmid)
self.assertGreater(
len(list_vm),
0,
"Check the list vm response for vm id: %s" %
vmid)
vm = list_vm[0]
self.assertEqual(
vm.id,
str(vmid),
"Vm deployed is different from the test")
self.assertEqual(vm.state, state, "VM is in %s state" %state)
def uploadVolume(self):
# upload a volume
self.debug("Upload volume format is '%s'" %self.uploadVolumeformat)
self.testdata["configurableData"]["upload_volume"]["format"] = self.uploadVolumeformat
self.testdata["configurableData"]["upload_volume"]["url"] = self.uploadvolumeUrl
upload_volume = Volume.upload(
self.apiclient,
self.testdata["configurableData"]["upload_volume"],
account=self.account.name,
domainid=self.domain.id,
zoneid=self.zone.id
)
upload_volume.wait_for_upload(self.apiclient)
return upload_volume.id
def restartUsageServer(self):
#Restart usage server
sshClient = SshClient(
self.mgtSvrDetails["mgtSvrIp"],
22,
self.mgtSvrDetails["user"],
self.mgtSvrDetails["passwd"]
)
command = "service cloudstack-usage restart"
sshClient.execute(command)
return
def checkUsage(self, uuid_upload_volume_id):
volume_id = self.dbclient.execute("SELECT id from cloud.volumes where uuid='%s';" % uuid_upload_volume_id)
self.debug("Volume id of uploaded volume is= %s" %volume_id[0]);
qryresult_after_usageServerExecution = self.dbclient.execute(
"SELECT type FROM cloud.usage_event where resource_id = '%s';" % (volume_id[0]))
self.debug("Usage Type is %s " % qryresult_after_usageServerExecution[0][0])
self.assertEqual(qryresult_after_usageServerExecution[0][0], 'VOLUME.UPLOAD')
class TestSecondaryVolumeUsage(cloudstackTestCase):
@classmethod
def setUpClass(cls):
testClient = super(TestSecondaryVolumeUsage, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.dbclient = testClient.getDbConnection()
cls.testdata = testClient.getParsedTestDataConfig()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.storagetype = 'shared'
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
cls._cleanup = []
# Create an account
cls.account = Account.create(
cls.apiclient,
cls.testdata["account"],
domainid=cls.domain.id
)
cls._cleanup.append(cls.account)
# Create user api client of the account
cls.userapiclient = testClient.getUserApiClient(
UserName=cls.account.name,
DomainName=cls.account.domain
)
# Create Service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offering"],
)
cls._cleanup.append(cls.service_offering)
cls.disk_offering = DiskOffering.create(
cls.apiclient,
cls.testdata["disk_offering"],
)
cls._cleanup.append(cls.disk_offering)
cls.skip = 0
hosts = list_hosts(
cls.apiclient,
type="Routing"
)
for hypervisorhost in hosts:
if hypervisorhost.hypervisor.lower() in ["xenserver"]:
cls.uploadVolumeformat = "VHD"
cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.0.0/systemvm.vhd.bz2"
break
elif hypervisorhost.hypervisor.lower() in ["vmware"]:
cls.uploadVolumeformat = "OVA"
cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.2.0/systemvm-redundant-router.ova"
break
elif hypervisorhost.hypervisor == "KVM":
cls.uploadVolumeformat = "QCOW2"
cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2"
break
elif hypervisorhost.hypervisor == "LXC":
                cls.uploadVolumeformat = "QCOW2"
cls.uploadvolumeUrl = "http://download.cloudstack.org/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2"
break
else:
break
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.testdata["ostype"])
try:
cls.vm = VirtualMachine.create(
cls.userapiclient,
cls.testdata["small"],
templateid=cls.template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
zoneid=cls.zone.id
)
except Exception as e:
cls.tearDownClass()
raise e
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
@attr(tags=["basic", "advanced"], required_hardware="true")
def test_01_SecondaryUsageUploadedVolume(self):
try:
uploaded_volume_id_uuid = uploadVolume(self)
checkUsage(self, uploaded_volume_id_uuid)
except Exception as e:
self.tearDown()
raise e
return
|
ocr/utils/sclite_helper.py | vee51/Hand | 435 | 12753578 | <gh_stars>100-1000
import os
import subprocess
import re
import uuid
class ScliteHelper():
'''
    The Sclite helper class calculates the word error rate (WER) and character error rate (CER)
given a predicted and actual text.
This class uses sclite2.4 (ftp://jaguar.ncsl.nist.gov/pub/sctk-2.4.10-20151007-1312Z.tar.bz2)
    and formats the data accordingly.
Parameters
----------
    sclite_location: optional, default="../SCTK/bin"
        Location of the sclite program
tmp_file_location: optional, default=tmp
folder to store the temporary text files.
'''
def __init__(self, sclite_location=os.path.join("..", "SCTK", "bin"),
tmp_file_location="tmp", use_uuid=True):
# Check if sclite exists
assert os.path.isdir(sclite_location), "{} does not exist".format(sclite_location)
sclite_error = "{} doesn't contain sclite".format(sclite_location)
retries = 10
for i in range(retries):
if self._test_sclite(sclite_location):
break
elif i == retries-1:
                raise RuntimeError(sclite_error)
self.sclite_location = sclite_location
if use_uuid:
tmp_file_location += "/" + str(uuid.uuid4())
# Check if tmp_file_location exists
if not os.path.isdir(tmp_file_location):
os.makedirs(tmp_file_location)
self.tmp_file_location = tmp_file_location
self.predicted_text = []
self.actual_text = []
def clear(self):
'''
Clear the class for new calculations.
'''
self.predicted_text = []
self.actual_text = []
def _test_sclite(self, sclite_location):
sclite_path = os.path.join(sclite_location, "sclite")
command_line_options = [sclite_path]
try:
subprocess.check_output(command_line_options, stderr=subprocess.STDOUT)
except OSError:
return False
except subprocess.CalledProcessError:
return True
return True
def _write_string_to_sclite_file(self, sentences_arrays, filename):
SPEAKER_LABEL = "(spk{}_{})"
# Split string into sentences
converted_string = ''
for i, sentences_array in enumerate(sentences_arrays):
for line, sentence in enumerate(sentences_array):
converted_string += sentence + SPEAKER_LABEL.format(i+1, line+1) + "\n"
# Write converted_string into file
filepath = os.path.join(self.tmp_file_location, filename)
with open(filepath, "w") as f:
f.write(converted_string)
def _run_sclite(self, predicted_filename, actual_filename, mode, output):
'''
Run command line for sclite.
Parameters
---------
predicted_filename: str
file containing output string of the network
actual_filename: str
file containing string of the label
mode: string, Options = ["CER", "WER"]
Choose between CER or WER
output: string, Options = ["print", "string"]
Choose between printing the output or returning a string
Returns
-------
        stdout
            If "string" was chosen as the output option, this function returns a
            file object containing the stdout of sclite.
'''
        assert mode in ["CER", "WER"], "mode {} is not in ['CER', 'WER']".format(mode)
assert output in ["print", "string"], "output {} is not in ['print', 'string']".format(
output)
command_line = [os.path.join(self.sclite_location, "sclite"),
"-h", os.path.join(self.tmp_file_location, predicted_filename),
"-r", os.path.join(self.tmp_file_location, actual_filename),
"-i", "rm"]
if mode == "WER":
pass # Word error rate is by default
        if mode == "CER":
            command_line.append("-c")
        retries = 10
        for i in range(retries):
            try:
                if output == "print":
                    subprocess.call(command_line)
                    return
                elif output == "string":
                    cmd = subprocess.Popen(command_line, stdout=subprocess.PIPE)
                    return cmd.stdout
            except:
                print("There was an error")
def _print_error_rate_summary(self, mode, predicted_filename="predicted.txt",
actual_filename="actual.txt"):
'''
Print the error rate summary of sclite
Parameters
----------
mode: string, Options = ["CER", "WER"]
Choose between CER or WER
'''
self._run_sclite(predicted_filename, actual_filename, mode, output="print")
def _get_error_rate(self, mode, predicted_filename="predicted.txt",
actual_filename="actual.txt"):
'''
Get the error rate by analysing the output of sclite
Parameters
----------
mode: string, Options = ["CER", "WER"]
Choose between CER or WER
Returns
-------
number: int
The number of characters or words depending on the mode selected.
error_rate: float
'''
number = None
er = None
output_file = self._run_sclite(predicted_filename, actual_filename,
mode, output="string")
match_tar = r'.*Mean.*\|.* (\d*.\d) .* (\d*.\d).* \|'
for line in output_file.readlines():
match = re.match(match_tar, line.decode('utf-8'), re.M|re.I)
if match:
number = match.group(1)
er = match.group(2)
assert number != None and er != None, "Error in parsing output."
return float(number), 100.0 - float(er)
def _make_sclite_files(self, predicted_filename="predicted.txt",
actual_filename="actual.txt"):
'''
Run command line for sclite.
Parameters
---------
predicted_filename: str, default: predicted.txt
filename of the predicted file
actual_filename: str, default: actual.txt
filename of the actual file
'''
self._write_string_to_sclite_file(self.predicted_text, filename=predicted_filename)
self._write_string_to_sclite_file(self.actual_text, filename=actual_filename)
def add_text(self, predicted_text, actual_text):
'''
Function to save predicted and actual text pairs in memory.
Running the future fuctions will generate the required text files.
'''
self.predicted_text.append(predicted_text)
self.actual_text.append(actual_text)
def print_wer_summary(self):
'''
see _print_error_rate_summary for docstring
'''
self._make_sclite_files()
self._print_error_rate_summary(mode="WER")
def print_cer_summary(self):
'''
see _print_error_rate_summary for docstring
'''
self._make_sclite_files()
self._print_error_rate_summary(mode="CER")
def get_wer(self):
'''
See _get_error_rate for docstring
'''
self._make_sclite_files()
return self._get_error_rate(mode="WER")
def get_cer(self):
'''
See _get_error_rate for docstring
'''
self._make_sclite_files()
return self._get_error_rate(mode="CER")
if __name__ == "__main__":
cls = ScliteHelper()
actual1 = 'Jonathan loves to eat apples. This is the second sentence.'
predicted1 = 'Jonothon loves to eot. This is the second santense.'
cls.add_text(predicted1, actual1)
actual2 = 'Jonathan loves to eat apples. This is the second sentence.'
predicted2 = 'Jonothan loves to eot. This is the second santense.'
cls.add_text(predicted2, actual2)
cls.print_cer_summary()
num, er = cls.get_cer()
print(num, er) |
tests/ssr.py | xuhao1/taichi_three | 152 | 12753584 | <filename>tests/ssr.py
import taichi as ti
import tina
ti.init(ti.gpu)
scene = tina.Scene((640, 480), smoothing=True, ssr=True, taa=True)
monkey_material = tina.PBR(metallic=0.0, roughness=0.4)
monkey = tina.MeshModel('assets/monkey.obj')
scene.add_object(monkey, monkey_material)
param_metallic = tina.Param()
param_roughness = tina.Param()
plane_material = tina.PBR(metallic=param_metallic, roughness=param_roughness)
plane = tina.MeshTransform(tina.MeshGrid(32),
tina.scale(2) @ tina.eularXYZ([-ti.pi / 2, 0, 0]))
scene.add_object(plane, plane_material)
gui = ti.GUI(res=scene.res)
nsteps = gui.slider('nsteps', 1, 128, 1)
nsamples = gui.slider('nsamples', 1, 128, 1)
stepsize = gui.slider('stepsize', 0, 32, 0.1)
tolerance = gui.slider('tolerance', 0, 64, 0.1)
blurring = gui.slider('blurring', 1, 8, 1)
metallic = gui.slider('metallic', 0, 1, 0.01)
roughness = gui.slider('roughness', 0, 1, 0.01)
nsteps.value = 64
nsamples.value = 12
blurring.value = 4
stepsize.value = 2
tolerance.value = 15
metallic.value = 1.0
roughness.value = 0.0
while gui.running:
scene.ssr.nsteps[None] = int(nsteps.value)
scene.ssr.nsamples[None] = int(nsamples.value)
scene.ssr.blurring[None] = int(blurring.value)
scene.ssr.stepsize[None] = stepsize.value
scene.ssr.tolerance[None] = tolerance.value
param_metallic.value[None] = metallic.value
param_roughness.value[None] = roughness.value
scene.input(gui)
scene.render()
gui.set_image(scene.img)
gui.show()
|
tests/unit/sagemaker/test_deserializers.py | LastRemote/sagemaker-python-sdk | 1,690 | 12753646 | <reponame>LastRemote/sagemaker-python-sdk<gh_stars>1000+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import io
import json
import numpy as np
import pandas as pd
import pytest
from sagemaker.deserializers import (
StringDeserializer,
BytesDeserializer,
CSVDeserializer,
StreamDeserializer,
NumpyDeserializer,
JSONDeserializer,
PandasDeserializer,
JSONLinesDeserializer,
)
def test_string_deserializer():
deserializer = StringDeserializer()
result = deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
assert result == "[1, 2, 3]"
def test_bytes_deserializer():
deserializer = BytesDeserializer()
result = deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
assert result == b"[1, 2, 3]"
@pytest.fixture
def csv_deserializer():
return CSVDeserializer()
def test_csv_deserializer_single_element(csv_deserializer):
result = csv_deserializer.deserialize(io.BytesIO(b"1"), "text/csv")
assert result == [["1"]]
def test_csv_deserializer_array(csv_deserializer):
result = csv_deserializer.deserialize(io.BytesIO(b"1,2,3"), "text/csv")
assert result == [["1", "2", "3"]]
def test_csv_deserializer_2dimensional(csv_deserializer):
result = csv_deserializer.deserialize(io.BytesIO(b"1,2,3\n3,4,5"), "text/csv")
assert result == [["1", "2", "3"], ["3", "4", "5"]]
def test_csv_deserializer_posix_compliant(csv_deserializer):
result = csv_deserializer.deserialize(io.BytesIO(b"1,2,3\n3,4,5\n"), "text/csv")
assert result == [["1", "2", "3"], ["3", "4", "5"]]
def test_stream_deserializer():
deserializer = StreamDeserializer()
stream, content_type = deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
try:
result = stream.read()
finally:
stream.close()
assert result == b"[1, 2, 3]"
assert content_type == "application/json"
@pytest.fixture
def numpy_deserializer():
return NumpyDeserializer()
def test_numpy_deserializer_from_csv(numpy_deserializer):
stream = io.BytesIO(b"1,2,3\n4,5,6")
array = numpy_deserializer.deserialize(stream, "text/csv")
assert np.array_equal(array, np.array([[1, 2, 3], [4, 5, 6]]))
def test_numpy_deserializer_from_csv_ragged(numpy_deserializer):
stream = io.BytesIO(b"1,2,3\n4,5,6,7")
with pytest.raises(ValueError) as error:
numpy_deserializer.deserialize(stream, "text/csv")
assert "errors were detected" in str(error)
def test_numpy_deserializer_from_csv_alpha():
numpy_deserializer = NumpyDeserializer(dtype="U5")
stream = io.BytesIO(b"hello,2,3\n4,5,6")
array = numpy_deserializer.deserialize(stream, "text/csv")
assert np.array_equal(array, np.array([["hello", 2, 3], [4, 5, 6]]))
def test_numpy_deserializer_from_json(numpy_deserializer):
stream = io.BytesIO(b"[[1,2,3],\n[4,5,6]]")
array = numpy_deserializer.deserialize(stream, "application/json")
assert np.array_equal(array, np.array([[1, 2, 3], [4, 5, 6]]))
# Sadly, ragged arrays work fine in JSON (giving us a 1D array of Python lists)
def test_numpy_deserializer_from_json_ragged(numpy_deserializer):
stream = io.BytesIO(b"[[1,2,3],\n[4,5,6,7]]")
array = numpy_deserializer.deserialize(stream, "application/json")
assert np.array_equal(array, np.array([[1, 2, 3], [4, 5, 6, 7]]))
def test_numpy_deserializer_from_json_alpha():
numpy_deserializer = NumpyDeserializer(dtype="U5")
stream = io.BytesIO(b'[["hello",2,3],\n[4,5,6]]')
array = numpy_deserializer.deserialize(stream, "application/json")
assert np.array_equal(array, np.array([["hello", 2, 3], [4, 5, 6]]))
def test_numpy_deserializer_from_npy(numpy_deserializer):
array = np.ones((2, 3))
stream = io.BytesIO()
np.save(stream, array)
stream.seek(0)
result = numpy_deserializer.deserialize(stream, "application/x-npy")
assert np.array_equal(array, result)
def test_numpy_deserializer_from_npy_object_array(numpy_deserializer):
array = np.array([{"a": "", "b": ""}, {"c": "", "d": ""}])
stream = io.BytesIO()
np.save(stream, array)
stream.seek(0)
result = numpy_deserializer.deserialize(stream, "application/x-npy")
assert np.array_equal(array, result)
def test_numpy_deserializer_from_npy_object_array_with_allow_pickle_false():
numpy_deserializer = NumpyDeserializer(allow_pickle=False)
array = np.array([{"a": "", "b": ""}, {"c": "", "d": ""}])
stream = io.BytesIO()
np.save(stream, array)
stream.seek(0)
with pytest.raises(ValueError):
numpy_deserializer.deserialize(stream, "application/x-npy")
@pytest.fixture
def json_deserializer():
return JSONDeserializer()
def test_json_deserializer_array(json_deserializer):
result = json_deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
assert result == [1, 2, 3]
def test_json_deserializer_2dimensional(json_deserializer):
result = json_deserializer.deserialize(
io.BytesIO(b"[[1, 2, 3], [3, 4, 5]]"), "application/json"
)
assert result == [[1, 2, 3], [3, 4, 5]]
def test_json_deserializer_invalid_data(json_deserializer):
with pytest.raises(ValueError) as error:
json_deserializer.deserialize(io.BytesIO(b"[[1]"), "application/json")
assert "column" in str(error)
@pytest.fixture
def pandas_deserializer():
return PandasDeserializer()
def test_pandas_deserializer_json(pandas_deserializer):
data = {"col 1": {"row 1": "a", "row 2": "c"}, "col 2": {"row 1": "b", "row 2": "d"}}
stream = io.BytesIO(json.dumps(data).encode("utf-8"))
result = pandas_deserializer.deserialize(stream, "application/json")
expected = pd.DataFrame(
[["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
)
assert result.equals(expected)
def test_pandas_deserializer_csv(pandas_deserializer):
stream = io.BytesIO(b"col 1,col 2\na,b\nc,d")
result = pandas_deserializer.deserialize(stream, "text/csv")
expected = pd.DataFrame([["a", "b"], ["c", "d"]], columns=["col 1", "col 2"])
assert result.equals(expected)
@pytest.fixture
def json_lines_deserializer():
return JSONLinesDeserializer()
@pytest.mark.parametrize(
"source, expected",
[
(b'["Name", "Score"]\n["Gilbert", 24]', [["Name", "Score"], ["Gilbert", 24]]),
(b'["Name", "Score"]\n["Gilbert", 24]\n', [["Name", "Score"], ["Gilbert", 24]]),
(
b'{"Name": "Gilbert", "Score": 24}\n{"Name": "Alexa", "Score": 29}',
[{"Name": "Gilbert", "Score": 24}, {"Name": "Alexa", "Score": 29}],
),
],
)
def test_json_lines_deserializer(json_lines_deserializer, source, expected):
stream = io.BytesIO(source)
content_type = "application/jsonlines"
actual = json_lines_deserializer.deserialize(stream, content_type)
assert actual == expected
|
python/ht/pyfilter/utils.py | Hengle/Houdini-Toolbox | 136 | 12753664 | <gh_stars>100-1000
"""This module contains functions related to Mantra Python filtering."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library
import logging
import os
from typing import List, Optional
_logger = logging.getLogger(__name__)
# =============================================================================
# FUNCTIONS
# =============================================================================
def build_pyfilter_command(
pyfilter_args: Optional[List[str]] = None, pyfilter_path: Optional[str] = None
) -> str:
"""Build a PyFilter -P command.
:param pyfilter_args: Optional list of args to pass to the command.
:param pyfilter_path: Optional path to the filter script.
:return: The constructed PyFilter command.
"""
import hou
if pyfilter_args is None:
pyfilter_args = []
# If no path was passed, use the one located in the HOUDINI_PATH.
if pyfilter_path is None:
try:
pyfilter_path = hou.findFile("pyfilter/ht-pyfilter.py")
# If we can't find the script them log an error and return nothing.
except hou.OperationFailed:
_logger.error("Could not find pyfilter/ht-pyfilter.py")
return ""
else:
# Ensure the script path exists.
if not os.path.isfile(pyfilter_path):
raise OSError("No such file: {}".format(pyfilter_path))
cmd = '-P "{} {}"'.format(pyfilter_path, " ".join(pyfilter_args))
return cmd
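# Hedged usage sketch (not part of the original module): the script path and
# the argument names below are invented purely for illustration.
#
#     cmd = build_pyfilter_command(
#         pyfilter_args=["-logging", "debug"],
#         pyfilter_path="/path/to/pyfilter/ht-pyfilter.py",
#     )
#     # cmd == '-P "/path/to/pyfilter/ht-pyfilter.py -logging debug"'
#     # The returned string is typically appended to the mantra command line.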
|
sortedm2m_tests/compat.py | Freston2021/dfconfecciones | 187 | 12753669 | def m2m_set(instance, field_name, objs):
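    # Compatibility shim: assigns the related objects on a many-to-many field
    # via the modern Django API, e.g. m2m_set(book, "authors", [a1, a2]),
    # where the model instance, field name and objects are hypothetical.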
getattr(instance, field_name).set(objs)
|
Codes/gracekoo/4_find_median_sorted_array.py | ghoslation/algorithm | 256 | 12753689 | <filename>Codes/gracekoo/4_find_median_sorted_array.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# @Time: 2019/12/4 12:14 AM
# @Author: GraceKoo
# @File: 4_find_median_sorted_array.py
# @Desc: https://leetcode-cn.com/problems/median-of-two-sorted-arrays/
import timeit
class Solution:
def findMedianSortedArrays(self, nums1, nums2) -> float:
m = len(nums1)
n = len(nums2)
k = (m + n) % 2
if k == 1:
return self.find_k(nums1, nums2, (m + n) // 2)
else:
return (
self.find_k(nums1, nums2, (m + n) // 2 - 1)
+ self.find_k(nums1, nums2, (m + n) // 2)
) / 2
def find_k(self, nums1, nums2, k):
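        # Returns the k-th smallest element (0-indexed) of the two sorted arrays
        # taken together. Each call compares the middle elements nums1[i] and
        # nums2[j]: if k lies beyond i + j, the lower half of the array with the
        # smaller middle can be discarded; otherwise the upper half of the array
        # with the larger middle can be, halving the search space per call.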
if not nums1:
return nums2[k]
if not nums2:
return nums1[k]
# print("len nums1:", len(nums1), "len nums2:", len(nums2))
i = len(nums1) // 2
j = len(nums2) // 2
# print(i, j, k)
if k > i + j:
if nums1[i] > nums2[j]:
return self.find_k(nums1, nums2[j + 1 :], k - j - 1)
else:
return self.find_k(nums1[i + 1 :], nums2, k - i - 1)
else:
if nums1[i] > nums2[j]:
return self.find_k(nums1[:i], nums2, k)
else:
return self.find_k(nums1, nums2[:j], k)
if __name__ == "__main__":
so = Solution()
nums1 = [1, 2, 3]
nums2 = [1, 2, 3]
start = timeit.default_timer()
print(so.findMedianSortedArrays(nums1, nums2))
end = timeit.default_timer()
    print(str((end - start) * 1000), "ms")
nums1 = [1, 2, 3]
nums2 = [4, 5, 6]
print(so.findMedianSortedArrays(nums1, nums2))
nums1 = [1, 2, 3]
nums2 = [4, 5]
print(so.findMedianSortedArrays(nums1, nums2))
nums1 = [1, 4, 6]
nums2 = [2, 5]
print(so.findMedianSortedArrays(nums1, nums2))
|
fonts/vector/symbol.py | szczys/st7789_mpy | 153 | 12753697 | <gh_stars>100-1000
WIDTH = 87
HEIGHT = 87
FIRST = 0x20
LAST = 0x7f
_font =\
b'\x00\x4a\x5a\x02\x44\x60\x44\x52\x60\x52\x02\x44\x60\x44\x60'\
b'\x60\x44\x02\x52\x52\x52\x3e\x52\x66\x02\x44\x60\x44\x44\x60'\
b'\x60\x02\x44\x60\x44\x52\x60\x52\x02\x46\x5e\x46\x59\x5e\x4b'\
b'\x02\x4b\x59\x4b\x5e\x59\x46\x02\x52\x52\x52\x44\x52\x60\x02'\
b'\x4b\x59\x4b\x46\x59\x5e\x02\x46\x5e\x46\x4b\x5e\x59\x02\x4b'\
b'\x59\x4b\x52\x59\x52\x02\x4d\x57\x4d\x57\x57\x4d\x02\x52\x52'\
b'\x52\x4b\x52\x59\x02\x4d\x57\x4d\x4d\x57\x57\x07\x47\x52\x52'\
b'\x47\x50\x47\x4d\x48\x4a\x4a\x48\x4d\x47\x50\x47\x52\x07\x47'\
b'\x52\x47\x52\x47\x54\x48\x57\x4a\x5a\x4d\x5c\x50\x5d\x52\x5d'\
b'\x07\x52\x5d\x52\x5d\x54\x5d\x57\x5c\x5a\x5a\x5c\x57\x5d\x54'\
b'\x5d\x52\x07\x52\x5d\x5d\x52\x5d\x50\x5c\x4d\x5a\x4a\x57\x48'\
b'\x54\x47\x52\x47\x08\x44\x60\x44\x4f\x47\x51\x4b\x53\x50\x54'\
b'\x54\x54\x59\x53\x5d\x51\x60\x4f\x08\x50\x55\x55\x44\x53\x47'\
b'\x51\x4b\x50\x50\x50\x54\x51\x59\x53\x5d\x55\x60\x08\x4f\x54'\
b'\x4f\x44\x51\x47\x53\x4b\x54\x50\x54\x54\x53\x59\x51\x5d\x4f'\
b'\x60\x08\x44\x60\x44\x55\x47\x53\x4b\x51\x50\x50\x54\x50\x59'\
b'\x51\x5d\x53\x60\x55\x04\x4b\x59\x52\x4a\x59\x4e\x4b\x56\x52'\
b'\x5a\x04\x4a\x5a\x4a\x52\x4e\x4b\x56\x59\x5a\x52\x04\x4b\x59'\
b'\x4b\x56\x4b\x4e\x59\x56\x59\x4e\x04\x4a\x5a\x4c\x58\x4a\x50'\
b'\x5a\x54\x58\x4c\x16\x4a\x5a\x4a\x5d\x4c\x5d\x4f\x5c\x51\x5b'\
b'\x54\x58\x55\x56\x56\x53\x56\x4f\x55\x4c\x54\x4a\x53\x49\x51'\
b'\x49\x50\x4a\x4f\x4c\x4e\x4f\x4e\x53\x4f\x56\x50\x58\x53\x5b'\
b'\x55\x5c\x58\x5d\x5a\x5d\x16\x49\x5d\x5d\x5a\x5d\x58\x5c\x55'\
b'\x5b\x53\x58\x50\x56\x4f\x53\x4e\x4f\x4e\x4c\x4f\x4a\x50\x49'\
b'\x51\x49\x53\x4a\x54\x4c\x55\x4f\x56\x53\x56\x56\x55\x58\x54'\
b'\x5b\x51\x5c\x4f\x5d\x4c\x5d\x4a\x16\x4a\x5a\x5a\x47\x58\x47'\
b'\x55\x48\x53\x49\x50\x4c\x4f\x4e\x4e\x51\x4e\x55\x4f\x58\x50'\
b'\x5a\x51\x5b\x53\x5b\x54\x5a\x55\x58\x56\x55\x56\x51\x55\x4e'\
b'\x54\x4c\x51\x49\x4f\x48\x4c\x47\x4a\x47\x16\x47\x5b\x47\x4a'\
b'\x47\x4c\x48\x4f\x49\x51\x4c\x54\x4e\x55\x51\x56\x55\x56\x58'\
b'\x55\x5a\x54\x5b\x53\x5b\x51\x5a\x50\x58\x4f\x55\x4e\x51\x4e'\
b'\x4e\x4f\x4c\x50\x49\x53\x48\x55\x47\x58\x47\x5a\x14\x45\x5b'\
b'\x45\x50\x46\x52\x48\x54\x4a\x55\x4d\x56\x51\x56\x55\x55\x58'\
b'\x53\x5a\x50\x5b\x4e\x5a\x4c\x57\x4c\x53\x4d\x51\x4e\x4e\x50'\
b'\x4c\x53\x4b\x56\x4b\x59\x4c\x5c\x4d\x5e\x12\x45\x59\x45\x54'\
b'\x48\x56\x4b\x57\x50\x57\x53\x56\x56\x54\x58\x51\x59\x4e\x59'\
b'\x4c\x58\x4b\x56\x4b\x53\x4c\x50\x4e\x4e\x51\x4d\x54\x4d\x59'\
b'\x4e\x5c\x50\x5f\x19\x4f\x55\x51\x4f\x4f\x51\x4f\x53\x51\x55'\
b'\x53\x55\x55\x53\x55\x51\x53\x4f\x51\x4f\x20\x52\x51\x50\x50'\
b'\x51\x50\x53\x51\x54\x53\x54\x54\x53\x54\x51\x53\x50\x51\x50'\
b'\x20\x52\x52\x51\x51\x52\x52\x53\x53\x52\x52\x51\x0a\x52\x57'\
b'\x52\x4d\x53\x4d\x55\x4e\x56\x4f\x57\x51\x57\x53\x56\x55\x55'\
b'\x56\x53\x57\x52\x57\x08\x44\x60\x44\x52\x4a\x52\x20\x52\x4f'\
b'\x52\x55\x52\x20\x52\x5a\x52\x60\x52\x04\x44\x60\x44\x55\x44'\
b'\x4f\x60\x4f\x60\x55\x05\x4a\x5a\x52\x44\x4a\x52\x20\x52\x52'\
b'\x44\x5a\x52\x08\x44\x60\x44\x52\x60\x52\x20\x52\x4a\x59\x5a'\
b'\x59\x20\x52\x50\x60\x54\x60\x08\x44\x60\x44\x52\x60\x52\x20'\
b'\x52\x44\x52\x52\x62\x20\x52\x60\x52\x52\x62\x11\x4b\x59\x51'\
b'\x4b\x4e\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59'\
b'\x53\x59\x56\x58\x58\x56\x59\x53\x59\x51\x58\x4e\x56\x4c\x53'\
b'\x4b\x51\x4b\x05\x4c\x58\x4c\x4c\x4c\x58\x58\x58\x58\x4c\x4c'\
b'\x4c\x04\x4b\x59\x52\x4a\x4b\x56\x59\x56\x52\x4a\x05\x4c\x58'\
b'\x52\x48\x4c\x52\x52\x5c\x58\x52\x52\x48\x0b\x4a\x5a\x52\x49'\
b'\x50\x4f\x4a\x4f\x4f\x53\x4d\x59\x52\x55\x57\x59\x55\x53\x5a'\
b'\x4f\x54\x4f\x52\x49\x05\x4b\x59\x52\x4b\x52\x59\x20\x52\x4b'\
b'\x52\x59\x52\x05\x4d\x57\x4d\x4d\x57\x57\x20\x52\x57\x4d\x4d'\
b'\x57\x08\x4d\x57\x52\x4c\x52\x58\x20\x52\x4d\x4f\x57\x55\x20'\
b'\x52\x57\x4f\x4d\x55\x22\x4e\x56\x51\x4e\x4f\x4f\x4e\x51\x4e'\
b'\x53\x4f\x55\x51\x56\x53\x56\x55\x55\x56\x53\x56\x51\x55\x4f'\
b'\x53\x4e\x51\x4e\x20\x52\x4f\x51\x4f\x53\x20\x52\x50\x50\x50'\
b'\x54\x20\x52\x51\x4f\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52'\
b'\x53\x4f\x53\x55\x20\x52\x54\x50\x54\x54\x20\x52\x55\x51\x55'\
b'\x53\x1a\x4e\x56\x4e\x4e\x4e\x56\x56\x56\x56\x4e\x4e\x4e\x20'\
b'\x52\x4f\x4f\x4f\x55\x20\x52\x50\x4f\x50\x55\x20\x52\x51\x4f'\
b'\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53\x55\x20'\
b'\x52\x54\x4f\x54\x55\x20\x52\x55\x4f\x55\x55\x10\x4d\x57\x52'\
b'\x4c\x4d\x55\x57\x55\x52\x4c\x20\x52\x52\x4f\x4f\x54\x20\x52'\
b'\x52\x4f\x55\x54\x20\x52\x52\x52\x51\x54\x20\x52\x52\x52\x53'\
b'\x54\x10\x4c\x55\x4c\x52\x55\x57\x55\x4d\x4c\x52\x20\x52\x4f'\
b'\x52\x54\x55\x20\x52\x4f\x52\x54\x4f\x20\x52\x52\x52\x54\x53'\
b'\x20\x52\x52\x52\x54\x51\x10\x4d\x57\x52\x58\x57\x4f\x4d\x4f'\
b'\x52\x58\x20\x52\x52\x55\x55\x50\x20\x52\x52\x55\x4f\x50\x20'\
b'\x52\x52\x52\x53\x50\x20\x52\x52\x52\x51\x50\x10\x4f\x58\x58'\
b'\x52\x4f\x4d\x4f\x57\x58\x52\x20\x52\x55\x52\x50\x4f\x20\x52'\
b'\x55\x52\x50\x55\x20\x52\x52\x52\x50\x51\x20\x52\x52\x52\x50'\
b'\x53\x0a\x52\x59\x52\x4b\x52\x59\x20\x52\x52\x4b\x59\x4e\x52'\
b'\x51\x20\x52\x53\x4d\x56\x4e\x53\x4f\x14\x49\x5b\x52\x47\x52'\
b'\x56\x20\x52\x4d\x4a\x57\x50\x20\x52\x57\x4a\x4d\x50\x20\x52'\
b'\x49\x56\x4c\x5c\x20\x52\x5b\x56\x58\x5c\x20\x52\x49\x56\x5b'\
b'\x56\x20\x52\x4c\x5c\x58\x5c\x0c\x4d\x57\x52\x4c\x52\x58\x20'\
b'\x52\x4f\x4f\x55\x4f\x20\x52\x4d\x55\x4f\x57\x51\x58\x53\x58'\
b'\x55\x57\x57\x55\x0a\x4c\x58\x52\x4c\x52\x58\x20\x52\x4c\x51'\
b'\x4d\x4f\x57\x4f\x58\x51\x20\x52\x50\x57\x54\x57\x0d\x4b\x59'\
b'\x4d\x4e\x57\x58\x20\x52\x57\x4e\x4d\x58\x20\x52\x4f\x4c\x4c'\
b'\x4f\x4b\x51\x20\x52\x55\x4c\x58\x4f\x59\x51\x11\x49\x5b\x4e'\
b'\x49\x49\x5b\x20\x52\x56\x49\x5b\x5b\x20\x52\x4d\x4d\x5b\x5b'\
b'\x20\x52\x57\x4d\x49\x5b\x20\x52\x4e\x49\x56\x49\x20\x52\x4d'\
b'\x4d\x57\x4d\x02\x4b\x59\x4b\x46\x59\x5e\x0a\x47\x5b\x4d\x4a'\
b'\x53\x56\x20\x52\x4b\x50\x53\x4c\x20\x52\x47\x5c\x5b\x5c\x5b'\
b'\x52\x47\x5c\x0d\x4c\x58\x50\x4c\x50\x50\x4c\x50\x4c\x54\x50'\
b'\x54\x50\x58\x54\x58\x54\x54\x58\x54\x58\x50\x54\x50\x54\x4c'\
b'\x50\x4c\x1f\x4b\x59\x59\x50\x58\x4e\x56\x4c\x53\x4b\x51\x4b'\
b'\x4e\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59\x53'\
b'\x59\x56\x58\x58\x56\x59\x54\x20\x52\x59\x50\x57\x4e\x55\x4d'\
b'\x53\x4d\x51\x4e\x50\x4f\x4f\x51\x4f\x53\x50\x55\x51\x56\x53'\
b'\x57\x55\x57\x57\x56\x59\x54\x09\x4b\x59\x52\x4a\x4b\x56\x59'\
b'\x56\x52\x4a\x20\x52\x52\x5a\x59\x4e\x4b\x4e\x52\x5a\x21\x47'\
b'\x5d\x50\x49\x50\x47\x51\x46\x53\x46\x54\x47\x54\x49\x20\x52'\
b'\x47\x5a\x48\x58\x4a\x56\x4b\x54\x4c\x50\x4c\x4b\x4d\x4a\x4f'\
b'\x49\x55\x49\x57\x4a\x58\x4b\x58\x50\x59\x54\x5a\x56\x5c\x58'\
b'\x5d\x5a\x20\x52\x47\x5a\x5d\x5a\x20\x52\x51\x5a\x50\x5b\x51'\
b'\x5c\x53\x5c\x54\x5b\x53\x5a\x3f\x4a\x5a\x52\x4d\x52\x53\x20'\
b'\x52\x52\x53\x51\x5c\x20\x52\x52\x53\x53\x5c\x20\x52\x51\x5c'\
b'\x53\x5c\x20\x52\x52\x4d\x51\x4a\x50\x48\x4e\x47\x20\x52\x51'\
b'\x4a\x4e\x47\x20\x52\x52\x4d\x53\x4a\x54\x48\x56\x47\x20\x52'\
b'\x53\x4a\x56\x47\x20\x52\x52\x4d\x4e\x4b\x4c\x4b\x4a\x4d\x20'\
b'\x52\x50\x4c\x4c\x4c\x4a\x4d\x20\x52\x52\x4d\x56\x4b\x58\x4b'\
b'\x5a\x4d\x20\x52\x54\x4c\x58\x4c\x5a\x4d\x20\x52\x52\x4d\x50'\
b'\x4e\x4f\x4f\x4f\x52\x20\x52\x52\x4d\x50\x4f\x4f\x52\x20\x52'\
b'\x52\x4d\x54\x4e\x55\x4f\x55\x52\x20\x52\x52\x4d\x54\x4f\x55'\
b'\x52\x5d\x4a\x5a\x52\x49\x52\x4b\x20\x52\x52\x4e\x52\x50\x20'\
b'\x52\x52\x53\x52\x55\x20\x52\x52\x59\x51\x5c\x20\x52\x52\x59'\
b'\x53\x5c\x20\x52\x51\x5c\x53\x5c\x20\x52\x52\x47\x51\x49\x50'\
b'\x4a\x20\x52\x52\x47\x53\x49\x54\x4a\x20\x52\x50\x4a\x52\x49'\
b'\x54\x4a\x20\x52\x52\x4b\x50\x4e\x4e\x4f\x4d\x4e\x20\x52\x52'\
b'\x4b\x54\x4e\x56\x4f\x57\x4e\x20\x52\x4e\x4f\x50\x4f\x52\x4e'\
b'\x54\x4f\x56\x4f\x20\x52\x52\x50\x50\x53\x4e\x54\x4c\x54\x4b'\
b'\x52\x4b\x53\x4c\x54\x20\x52\x52\x50\x54\x53\x56\x54\x58\x54'\
b'\x59\x52\x59\x53\x58\x54\x20\x52\x4e\x54\x50\x54\x52\x53\x54'\
b'\x54\x56\x54\x20\x52\x52\x55\x50\x58\x4f\x59\x4d\x5a\x4c\x5a'\
b'\x4b\x59\x4a\x57\x4a\x59\x4c\x5a\x20\x52\x52\x55\x54\x58\x55'\
b'\x59\x57\x5a\x58\x5a\x59\x59\x5a\x57\x5a\x59\x58\x5a\x20\x52'\
b'\x4d\x5a\x4f\x5a\x52\x59\x55\x5a\x57\x5a\x27\x4a\x5a\x52\x59'\
b'\x51\x5c\x20\x52\x52\x59\x53\x5c\x20\x52\x51\x5c\x53\x5c\x20'\
b'\x52\x52\x59\x55\x5a\x58\x5a\x5a\x58\x5a\x55\x59\x54\x57\x54'\
b'\x59\x52\x5a\x4f\x59\x4d\x57\x4c\x55\x4d\x56\x4a\x55\x48\x53'\
b'\x47\x51\x47\x4f\x48\x4e\x4a\x4f\x4d\x4d\x4c\x4b\x4d\x4a\x4f'\
b'\x4b\x52\x4d\x54\x4b\x54\x4a\x55\x4a\x58\x4c\x5a\x4f\x5a\x52'\
b'\x59\x1f\x4a\x5a\x52\x59\x51\x5c\x20\x52\x52\x59\x53\x5c\x20'\
b'\x52\x51\x5c\x53\x5c\x20\x52\x52\x59\x56\x58\x56\x56\x58\x55'\
b'\x58\x52\x5a\x51\x5a\x4c\x59\x49\x58\x48\x56\x48\x54\x47\x50'\
b'\x47\x4e\x48\x4c\x48\x4b\x49\x4a\x4c\x4a\x51\x4c\x52\x4c\x55'\
b'\x4e\x56\x4e\x58\x52\x59\x0e\x49\x5b\x49\x50\x4b\x52\x20\x52'\
b'\x4c\x4b\x4e\x50\x20\x52\x52\x47\x52\x4f\x20\x52\x58\x4b\x56'\
b'\x50\x20\x52\x5b\x50\x59\x52\x1b\x47\x5d\x49\x49\x4a\x4b\x4b'\
b'\x4f\x4b\x55\x4a\x59\x49\x5b\x20\x52\x5b\x49\x5a\x4b\x59\x4f'\
b'\x59\x55\x5a\x59\x5b\x5b\x20\x52\x49\x49\x4b\x4a\x4f\x4b\x55'\
b'\x4b\x59\x4a\x5b\x49\x20\x52\x49\x5b\x4b\x5a\x4f\x59\x55\x59'\
b'\x59\x5a\x5b\x5b\x36\x46\x5e\x52\x52\x52\x5b\x51\x5c\x20\x52'\
b'\x52\x56\x51\x5c\x20\x52\x52\x49\x51\x48\x4f\x48\x4e\x49\x4e'\
b'\x4b\x4f\x4e\x52\x52\x20\x52\x52\x49\x53\x48\x55\x48\x56\x49'\
b'\x56\x4b\x55\x4e\x52\x52\x20\x52\x52\x52\x4e\x4f\x4c\x4e\x4a'\
b'\x4e\x49\x4f\x49\x51\x4a\x52\x20\x52\x52\x52\x56\x4f\x58\x4e'\
b'\x5a\x4e\x5b\x4f\x5b\x51\x5a\x52\x20\x52\x52\x52\x4e\x55\x4c'\
b'\x56\x4a\x56\x49\x55\x49\x53\x4a\x52\x20\x52\x52\x52\x56\x55'\
b'\x58\x56\x5a\x56\x5b\x55\x5b\x53\x5a\x52\x2d\x4a\x5a\x55\x49'\
b'\x54\x4a\x55\x4b\x56\x4a\x56\x49\x55\x47\x53\x46\x51\x46\x4f'\
b'\x47\x4e\x49\x4e\x4b\x4f\x4d\x51\x4f\x56\x52\x20\x52\x4f\x4d'\
b'\x54\x50\x56\x52\x57\x54\x57\x56\x56\x58\x54\x5a\x20\x52\x50'\
b'\x4e\x4e\x50\x4d\x52\x4d\x54\x4e\x56\x50\x58\x55\x5b\x20\x52'\
b'\x4e\x56\x53\x59\x55\x5b\x56\x5d\x56\x5f\x55\x61\x53\x62\x51'\
b'\x62\x4f\x61\x4e\x5f\x4e\x5e\x4f\x5d\x50\x5e\x4f\x5f\x1d\x4a'\
b'\x5a\x52\x46\x51\x48\x52\x4a\x53\x48\x52\x46\x20\x52\x52\x46'\
b'\x52\x62\x20\x52\x52\x51\x51\x54\x52\x62\x53\x54\x52\x51\x20'\
b'\x52\x4c\x4d\x4e\x4e\x50\x4d\x4e\x4c\x4c\x4d\x20\x52\x4c\x4d'\
b'\x58\x4d\x20\x52\x54\x4d\x56\x4e\x58\x4d\x56\x4c\x54\x4d\x37'\
b'\x4a\x5a\x52\x46\x51\x48\x52\x4a\x53\x48\x52\x46\x20\x52\x52'\
b'\x46\x52\x54\x20\x52\x52\x50\x51\x52\x53\x56\x52\x58\x51\x56'\
b'\x53\x52\x52\x50\x20\x52\x52\x54\x52\x62\x20\x52\x52\x5e\x51'\
b'\x60\x52\x62\x53\x60\x52\x5e\x20\x52\x4c\x4d\x4e\x4e\x50\x4d'\
b'\x4e\x4c\x4c\x4d\x20\x52\x4c\x4d\x58\x4d\x20\x52\x54\x4d\x56'\
b'\x4e\x58\x4d\x56\x4c\x54\x4d\x20\x52\x4c\x5b\x4e\x5c\x50\x5b'\
b'\x4e\x5a\x4c\x5b\x20\x52\x4c\x5b\x58\x5b\x20\x52\x54\x5b\x56'\
b'\x5c\x58\x5b\x56\x5a\x54\x5b\x11\x45\x5f\x52\x49\x51\x4a\x52'\
b'\x4b\x53\x4a\x52\x49\x20\x52\x49\x59\x48\x5a\x49\x5b\x4a\x5a'\
b'\x49\x59\x20\x52\x5b\x59\x5a\x5a\x5b\x5b\x5c\x5a\x5b\x59\x20'\
b'\x46\x5e\x52\x48\x4e\x4c\x4b\x50\x4a\x53\x4a\x55\x4b\x57\x4d'\
b'\x58\x4f\x58\x51\x57\x52\x55\x20\x52\x52\x48\x56\x4c\x59\x50'\
b'\x5a\x53\x5a\x55\x59\x57\x57\x58\x55\x58\x53\x57\x52\x55\x20'\
b'\x52\x52\x55\x51\x59\x50\x5c\x20\x52\x52\x55\x53\x59\x54\x5c'\
b'\x20\x52\x50\x5c\x54\x5c\x19\x46\x5e\x52\x4e\x51\x4b\x50\x49'\
b'\x4e\x48\x4d\x48\x4b\x49\x4a\x4b\x4a\x4f\x4b\x52\x4c\x54\x4e'\
b'\x57\x52\x5c\x20\x52\x52\x4e\x53\x4b\x54\x49\x56\x48\x57\x48'\
b'\x59\x49\x5a\x4b\x5a\x4f\x59\x52\x58\x54\x56\x57\x52\x5c\x13'\
b'\x46\x5e\x52\x47\x50\x4a\x4c\x4f\x49\x52\x20\x52\x52\x47\x54'\
b'\x4a\x58\x4f\x5b\x52\x20\x52\x49\x52\x4c\x55\x50\x5a\x52\x5d'\
b'\x20\x52\x5b\x52\x58\x55\x54\x5a\x52\x5d\x2f\x46\x5e\x52\x54'\
b'\x54\x57\x56\x58\x58\x58\x5a\x57\x5b\x55\x5b\x53\x5a\x51\x58'\
b'\x50\x56\x50\x53\x51\x20\x52\x53\x51\x55\x4f\x56\x4d\x56\x4b'\
b'\x55\x49\x53\x48\x51\x48\x4f\x49\x4e\x4b\x4e\x4d\x4f\x4f\x51'\
b'\x51\x20\x52\x51\x51\x4e\x50\x4c\x50\x4a\x51\x49\x53\x49\x55'\
b'\x4a\x57\x4c\x58\x4e\x58\x50\x57\x52\x54\x20\x52\x52\x54\x51'\
b'\x59\x50\x5c\x20\x52\x52\x54\x53\x59\x54\x5c\x20\x52\x50\x5c'\
b'\x54\x5c\x2f\x49\x5b\x56\x2b\x53\x2d\x51\x2f\x50\x31\x4f\x34'\
b'\x4f\x38\x50\x3c\x54\x44\x55\x47\x55\x4a\x54\x4d\x52\x50\x20'\
b'\x52\x53\x2d\x51\x30\x50\x34\x50\x38\x51\x3b\x55\x43\x56\x47'\
b'\x56\x4a\x55\x4d\x52\x50\x4e\x52\x52\x54\x55\x57\x56\x5a\x56'\
b'\x5d\x55\x61\x51\x69\x50\x6c\x50\x70\x51\x74\x53\x77\x20\x52'\
b'\x52\x54\x54\x57\x55\x5a\x55\x5d\x54\x60\x50\x68\x4f\x6c\x4f'\
b'\x70\x50\x73\x51\x75\x53\x77\x56\x79\x2f\x49\x5b\x4e\x2b\x51'\
b'\x2d\x53\x2f\x54\x31\x55\x34\x55\x38\x54\x3c\x50\x44\x4f\x47'\
b'\x4f\x4a\x50\x4d\x52\x50\x20\x52\x51\x2d\x53\x30\x54\x34\x54'\
b'\x38\x53\x3b\x4f\x43\x4e\x47\x4e\x4a\x4f\x4d\x52\x50\x56\x52'\
b'\x52\x54\x4f\x57\x4e\x5a\x4e\x5d\x4f\x61\x53\x69\x54\x6c\x54'\
b'\x70\x53\x74\x51\x77\x20\x52\x52\x54\x50\x57\x4f\x5a\x4f\x5d'\
b'\x50\x60\x54\x68\x55\x6c\x55\x70\x54\x73\x53\x75\x51\x77\x4e'\
b'\x79\x1f\x49\x5b\x56\x2e\x53\x31\x51\x34\x4f\x38\x4e\x3d\x4e'\
b'\x43\x4f\x49\x50\x4d\x53\x58\x54\x5c\x55\x62\x55\x67\x54\x6c'\
b'\x53\x6f\x51\x73\x20\x52\x53\x31\x51\x35\x50\x38\x4f\x3d\x4f'\
b'\x42\x50\x48\x51\x4c\x54\x57\x55\x5b\x56\x61\x56\x67\x55\x6c'\
b'\x53\x70\x51\x73\x4e\x76\x1f\x49\x5b\x4e\x2e\x51\x31\x53\x34'\
b'\x55\x38\x56\x3d\x56\x43\x55\x49\x54\x4d\x51\x58\x50\x5c\x4f'\
b'\x62\x4f\x67\x50\x6c\x51\x6f\x53\x73\x20\x52\x51\x31\x53\x35'\
b'\x54\x38\x55\x3d\x55\x42\x54\x48\x53\x4c\x50\x57\x4f\x5b\x4e'\
b'\x61\x4e\x67\x4f\x6c\x51\x70\x53\x73\x56\x76\x0d\x37\x5a\x3a'\
b'\x52\x41\x52\x52\x6f\x20\x52\x40\x52\x51\x6f\x20\x52\x3f\x52'\
b'\x52\x72\x20\x52\x5a\x22\x56\x4a\x52\x72\x1a\x49\x5b\x54\x4d'\
b'\x56\x4e\x58\x50\x58\x4f\x57\x4e\x54\x4d\x51\x4d\x4e\x4e\x4d'\
b'\x4f\x4c\x51\x4c\x53\x4d\x55\x4f\x57\x53\x5a\x20\x52\x51\x4d'\
b'\x4f\x4e\x4e\x4f\x4d\x51\x4d\x53\x4e\x55\x53\x5a\x54\x5c\x54'\
b'\x5e\x53\x5f\x51\x5f\x2c\x47\x5d\x4c\x4d\x4b\x4e\x4a\x50\x4a'\
b'\x52\x4b\x55\x4f\x59\x50\x5b\x20\x52\x4a\x52\x4b\x54\x4f\x58'\
b'\x50\x5b\x50\x5d\x4f\x60\x4d\x62\x4c\x62\x4b\x61\x4a\x5f\x4a'\
b'\x5c\x4b\x58\x4d\x54\x4f\x51\x52\x4e\x54\x4d\x56\x4d\x59\x4e'\
b'\x5a\x50\x5a\x54\x59\x58\x57\x5a\x55\x5b\x54\x5b\x53\x5a\x53'\
b'\x58\x54\x57\x55\x58\x54\x59\x20\x52\x56\x4d\x58\x4e\x59\x50'\
b'\x59\x54\x58\x58\x57\x5a\x44\x45\x5f\x59\x47\x58\x48\x59\x49'\
b'\x5a\x48\x59\x47\x57\x46\x54\x46\x51\x47\x4f\x49\x4e\x4b\x4d'\
b'\x4e\x4c\x52\x4a\x5b\x49\x5f\x48\x61\x20\x52\x54\x46\x52\x47'\
b'\x50\x49\x4f\x4b\x4e\x4e\x4c\x57\x4b\x5b\x4a\x5e\x49\x60\x48'\
b'\x61\x46\x62\x44\x62\x43\x61\x43\x60\x44\x5f\x45\x60\x44\x61'\
b'\x20\x52\x5f\x47\x5e\x48\x5f\x49\x60\x48\x60\x47\x5f\x46\x5d'\
b'\x46\x5b\x47\x5a\x48\x59\x4a\x58\x4d\x55\x5b\x54\x5f\x53\x61'\
b'\x20\x52\x5d\x46\x5b\x48\x5a\x4a\x59\x4e\x57\x57\x56\x5b\x55'\
b'\x5e\x54\x60\x53\x61\x51\x62\x4f\x62\x4e\x61\x4e\x60\x4f\x5f'\
b'\x50\x60\x4f\x61\x20\x52\x49\x4d\x5e\x4d\x33\x46\x5e\x5b\x47'\
b'\x5a\x48\x5b\x49\x5c\x48\x5b\x47\x58\x46\x55\x46\x52\x47\x50'\
b'\x49\x4f\x4b\x4e\x4e\x4d\x52\x4b\x5b\x4a\x5f\x49\x61\x20\x52'\
b'\x55\x46\x53\x47\x51\x49\x50\x4b\x4f\x4e\x4d\x57\x4c\x5b\x4b'\
b'\x5e\x4a\x60\x49\x61\x47\x62\x45\x62\x44\x61\x44\x60\x45\x5f'\
b'\x46\x60\x45\x61\x20\x52\x59\x4d\x57\x54\x56\x58\x56\x5a\x57'\
b'\x5b\x5a\x5b\x5c\x59\x5d\x57\x20\x52\x5a\x4d\x58\x54\x57\x58'\
b'\x57\x5a\x58\x5b\x20\x52\x4a\x4d\x5a\x4d\x35\x46\x5e\x59\x47'\
b'\x58\x48\x59\x49\x5a\x48\x5a\x47\x58\x46\x20\x52\x5c\x46\x55'\
b'\x46\x52\x47\x50\x49\x4f\x4b\x4e\x4e\x4d\x52\x4b\x5b\x4a\x5f'\
b'\x49\x61\x20\x52\x55\x46\x53\x47\x51\x49\x50\x4b\x4f\x4e\x4d'\
b'\x57\x4c\x5b\x4b\x5e\x4a\x60\x49\x61\x47\x62\x45\x62\x44\x61'\
b'\x44\x60\x45\x5f\x46\x60\x45\x61\x20\x52\x5b\x46\x57\x54\x56'\
b'\x58\x56\x5a\x57\x5b\x5a\x5b\x5c\x59\x5d\x57\x20\x52\x5c\x46'\
b'\x58\x54\x57\x58\x57\x5a\x58\x5b\x20\x52\x4a\x4d\x59\x4d\x55'\
b'\x40\x63\x54\x47\x53\x48\x54\x49\x55\x48\x54\x47\x52\x46\x4f'\
b'\x46\x4c\x47\x4a\x49\x49\x4b\x48\x4e\x47\x52\x45\x5b\x44\x5f'\
b'\x43\x61\x20\x52\x4f\x46\x4d\x47\x4b\x49\x4a\x4b\x49\x4e\x47'\
b'\x57\x46\x5b\x45\x5e\x44\x60\x43\x61\x41\x62\x3f\x62\x3e\x61'\
b'\x3e\x60\x3f\x5f\x40\x60\x3f\x61\x20\x52\x60\x47\x5f\x48\x60'\
b'\x49\x61\x48\x60\x47\x5d\x46\x5a\x46\x57\x47\x55\x49\x54\x4b'\
b'\x53\x4e\x52\x52\x50\x5b\x4f\x5f\x4e\x61\x20\x52\x5a\x46\x58'\
b'\x47\x56\x49\x55\x4b\x54\x4e\x52\x57\x51\x5b\x50\x5e\x4f\x60'\
b'\x4e\x61\x4c\x62\x4a\x62\x49\x61\x49\x60\x4a\x5f\x4b\x60\x4a'\
b'\x61\x20\x52\x5e\x4d\x5c\x54\x5b\x58\x5b\x5a\x5c\x5b\x5f\x5b'\
b'\x61\x59\x62\x57\x20\x52\x5f\x4d\x5d\x54\x5c\x58\x5c\x5a\x5d'\
b'\x5b\x20\x52\x44\x4d\x5f\x4d\x57\x40\x63\x54\x47\x53\x48\x54'\
b'\x49\x55\x48\x54\x47\x52\x46\x4f\x46\x4c\x47\x4a\x49\x49\x4b'\
b'\x48\x4e\x47\x52\x45\x5b\x44\x5f\x43\x61\x20\x52\x4f\x46\x4d'\
b'\x47\x4b\x49\x4a\x4b\x49\x4e\x47\x57\x46\x5b\x45\x5e\x44\x60'\
b'\x43\x61\x41\x62\x3f\x62\x3e\x61\x3e\x60\x3f\x5f\x40\x60\x3f'\
b'\x61\x20\x52\x5e\x47\x5d\x48\x5e\x49\x5f\x48\x5f\x47\x5d\x46'\
b'\x20\x52\x61\x46\x5a\x46\x57\x47\x55\x49\x54\x4b\x53\x4e\x52'\
b'\x52\x50\x5b\x4f\x5f\x4e\x61\x20\x52\x5a\x46\x58\x47\x56\x49'\
b'\x55\x4b\x54\x4e\x52\x57\x51\x5b\x50\x5e\x4f\x60\x4e\x61\x4c'\
b'\x62\x4a\x62\x49\x61\x49\x60\x4a\x5f\x4b\x60\x4a\x61\x20\x52'\
b'\x60\x46\x5c\x54\x5b\x58\x5b\x5a\x5c\x5b\x5f\x5b\x61\x59\x62'\
b'\x57\x20\x52\x61\x46\x5d\x54\x5c\x58\x5c\x5a\x5d\x5b\x20\x52'\
b'\x44\x4d\x5e\x4d\x13\x4c\x59\x4d\x51\x4e\x4f\x50\x4d\x53\x4d'\
b'\x54\x4e\x54\x51\x52\x57\x52\x5a\x53\x5b\x20\x52\x52\x4d\x53'\
b'\x4e\x53\x51\x51\x57\x51\x5a\x52\x5b\x55\x5b\x57\x59\x58\x57'\
b'\x15\x4c\x58\x52\x4c\x4e\x57\x58\x50\x4c\x50\x56\x57\x52\x4c'\
b'\x20\x52\x52\x52\x52\x4c\x20\x52\x52\x52\x4c\x50\x20\x52\x52'\
b'\x52\x4e\x57\x20\x52\x52\x52\x56\x57\x20\x52\x52\x52\x58\x50'\
b'\x17\x46\x5e\x49\x55\x49\x53\x4a\x50\x4c\x4f\x4e\x4f\x50\x50'\
b'\x54\x53\x56\x54\x58\x54\x5a\x53\x5b\x51\x20\x52\x49\x53\x4a'\
b'\x51\x4c\x50\x4e\x50\x50\x51\x54\x54\x56\x55\x58\x55\x5a\x54'\
b'\x5b\x51\x5b\x4f'
_index =\
b'\x00\x00\x03\x00\x0a\x00\x11\x00\x18\x00\x1f\x00\x26\x00\x2d'\
b'\x00\x34\x00\x3b\x00\x42\x00\x49\x00\x50\x00\x57\x00\x5e\x00'\
b'\x65\x00\x76\x00\x87\x00\x98\x00\xa9\x00\xbc\x00\xcf\x00\xe2'\
b'\x00\xf5\x00\x00\x01\x0b\x01\x16\x01\x21\x01\x50\x01\x7f\x01'\
b'\xae\x01\xdd\x01\x08\x02\x2f\x02\x64\x02\x7b\x02\x8e\x02\x99'\
b'\x02\xa6\x02\xb9\x02\xcc\x02\xf1\x02\xfe\x02\x09\x03\x16\x03'\
b'\x2f\x03\x3c\x03\x49\x03\x5c\x03\xa3\x03\xda\x03\xfd\x03\x20'\
b'\x04\x43\x04\x66\x04\x7d\x04\xa8\x04\xc3\x04\xda\x04\xf7\x04'\
b'\x1c\x05\x23\x05\x3a\x05\x57\x05\x98\x05\xad\x05\xf2\x05\x73'\
b'\x06\x30\x07\x81\x07\xc2\x07\xe1\x07\x1a\x08\x89\x08\xe6\x08'\
b'\x23\x09\x94\x09\xb9\x09\xfc\x09\x31\x0a\x5a\x0a\xbb\x0a\x1c'\
b'\x0b\x7d\x0b\xbe\x0b\xff\x0b\x1c\x0c\x53\x0c\xae\x0c\x39\x0d'\
b'\xa2\x0d\x0f\x0e\xbc\x0e\x6d\x0f\x96\x0f\xc3\x0f'
INDEX = memoryview(_index)
FONT = memoryview(_font)
|
scripts/release18/colorchooser/colorchooser_savePreset.py | tdapper/cinema4d_py_sdk | 113 | 12753704 | <reponame>tdapper/cinema4d_py_sdk
# This example loads the swatches of the given BaseDocument and stores them as a preset.
# Note: The ColorSwatchData of the active document must contain some colors.
import c4d
from c4d.modules import colorchooser as cc
def main():
# Creates a new ColorSwatchData
swatchData = cc.ColorSwatchData()
if swatchData is None:
return
# Loads color groups from the active document
if not swatchData.Load(doc):
return
# Builds preset URL
url = cc.GetColorSwatchPresetDirectory()
url = url + "/newColorPreset"
# Saves color swatches preset
if swatchData.SavePresetByURL(url, "User", "This is my preset"):
print "Color swatch preset saved successfully"
else:
print "Color swatch preset could not be saved!"
c4d.EventAdd()
if __name__=='__main__':
main()
|
satchless/process/test_process.py | sahil2128/satchless | 381 | 12753710 | from unittest import TestCase
from . import ProcessManager, Step, InvalidData
class AddSwallows(Step):
def __init__(self, data):
self.data = data
def __str__(self):
return 'swallows-needed'
def validate(self):
if self.data.swallows < 2:
raise InvalidData('Not enough swallows')
class AddCoconuts(Step):
def __init__(self, data):
self.data = data
def __str__(self):
return 'coconuts-needed'
def validate(self):
if self.data.coconuts < 1:
raise InvalidData('Need a coconut')
class CoconutDelivery(ProcessManager):
def __init__(self):
self.swallows = 0
self.coconuts = 0
def __iter__(self):
yield AddSwallows(self)
yield AddCoconuts(self)
class ProcessManagerTest(TestCase):
def test_iter(self):
'ProcessManager.__iter__() returns the steps'
process = CoconutDelivery()
steps = list(map(str, list(process)))
self.assertEqual(steps, ['swallows-needed', 'coconuts-needed'])
def test_get_next_step(self):
'ProcessManager.get_next_step() returns the first step with invalid data'
process = CoconutDelivery()
process.coconuts = 1
self.assertEqual(str(process.get_next_step()), 'swallows-needed')
process.swallows = 2
self.assertEqual(process.get_next_step(), None)
process.coconuts = 0
self.assertEqual(str(process.get_next_step()), 'coconuts-needed')
def test_is_complete(self):
'ProcessManager.is_complete() returns true if all steps are satisfied'
process = CoconutDelivery()
self.assertFalse(process.is_complete())
process.coconuts = 1
process.swallows = 2
self.assertTrue(process.is_complete())
def test_item_access(self):
'You can index a ProcessManager using step names'
process = CoconutDelivery()
self.assertTrue(isinstance(process['coconuts-needed'], AddCoconuts))
def invalid():
return process['spam-needed']
self.assertRaises(KeyError, invalid)
def test_errors(self):
'ProcessManager.get_errors() returns a dict of all invalid steps'
process = CoconutDelivery()
process.swallows = 2
errors = process.get_errors()
self.assertFalse('swallows-needed' in errors)
self.assertTrue('coconuts-needed' in errors)
|
tests/models/torchvision_models/retinanet/lightning/test_train.py | ai-fast-track/mantisshrimp | 580 | 12753729 | import pytest
from icevision.all import *
from icevision.models.torchvision import retinanet
@pytest.fixture
def light_model_cls():
class LightModel(retinanet.lightning.ModelAdapter):
def configure_optimizers(self):
return SGD(self.parameters(), lr=1e-4)
return LightModel
@pytest.mark.parametrize("metrics", [[], [COCOMetric()]])
def test_lightining_retinanet_train(
fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls, metrics
):
train_dl, valid_dl = fridge_faster_rcnn_dls
light_model = light_model_cls(fridge_retinanet_model, metrics=metrics)
trainer = pl.Trainer(
max_epochs=1,
weights_summary=None,
num_sanity_val_steps=0,
logger=False,
checkpoint_callback=False,
)
trainer.fit(light_model, train_dl, valid_dl)
|
tests/test_cwa.py | al3pht/cloud-custodian | 2,415 | 12753732 | <reponame>al3pht/cloud-custodian
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
class AlarmTest(BaseTest):
def test_delete(self):
alarm_name = "c7n-test-alarm-delete"
factory = self.replay_flight_data("test_alarm_delete")
client = factory().client("cloudwatch")
client.put_metric_alarm(
AlarmName=alarm_name,
MetricName="CPUUtilization",
Namespace="AWS/EC2",
Statistic="Average",
Period=3600,
EvaluationPeriods=5,
Threshold=10,
ComparisonOperator="GreaterThanThreshold",
)
p = self.load_policy(
{
"name": "delete-alarm",
"resource": "alarm",
"filters": [{"AlarmName": alarm_name}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"], []
)
def test_filter_tags(self):
factory = self.replay_flight_data("test_alarm_tags_filter")
p = self.load_policy(
{
"name": "filter-alarm-tags",
"resource": "alarm",
"filters": [
{
'type': 'value',
'key': 'tag:some-tag',
'value': 'some-value',
'op': 'eq'
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].get('c7n:MatchedFilters'), ['tag:some-tag'])
def test_add_alarm_tags(self):
factory = self.replay_flight_data("test_alarm_add_tags")
p = self.load_policy(
{
"name": "add-alarm-tags",
"resource": "alarm",
"actions": [{
"type": "tag",
"key": "OwnerName",
"value": "SomeName"
}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertTrue({'Key': 'OwnerName', 'Value': 'SomeName'} in resources[0].get('Tags'))
|
venv/Lib/site-packages/IPython/utils/shimmodule.py | ajayiagbebaku/NFL-Model | 6,989 | 12753741 | """A shim module for deprecated imports
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
import types
from importlib import import_module
from .importstring import import_item
class ShimWarning(Warning):
"""A warning to show when a module has moved, and a shim is in its place."""
class ShimImporter(object):
"""Import hook for a shim.
This ensures that submodule imports return the real target module,
not a clone that will confuse `is` and `isinstance` checks.
"""
def __init__(self, src, mirror):
self.src = src
self.mirror = mirror
def _mirror_name(self, fullname):
"""get the name of the mirrored module"""
return self.mirror + fullname[len(self.src):]
def find_module(self, fullname, path=None):
"""Return self if we should be used to import the module."""
if fullname.startswith(self.src + '.'):
mirror_name = self._mirror_name(fullname)
try:
mod = import_item(mirror_name)
except ImportError:
return
else:
if not isinstance(mod, types.ModuleType):
# not a module
return None
return self
def load_module(self, fullname):
"""Import the mirrored module, and insert it into sys.modules"""
mirror_name = self._mirror_name(fullname)
mod = import_item(mirror_name)
sys.modules[fullname] = mod
return mod
class ShimModule(types.ModuleType):
def __init__(self, *args, **kwargs):
self._mirror = kwargs.pop("mirror")
src = kwargs.pop("src", None)
if src:
kwargs['name'] = src.rsplit('.', 1)[-1]
super(ShimModule, self).__init__(*args, **kwargs)
# add import hook for descendent modules
if src:
sys.meta_path.append(
ShimImporter(src=src, mirror=self._mirror)
)
@property
def __path__(self):
return []
@property
def __spec__(self):
"""Don't produce __spec__ until requested"""
return import_module(self._mirror).__spec__
def __dir__(self):
return dir(import_module(self._mirror))
@property
def __all__(self):
"""Ensure __all__ is always defined"""
mod = import_module(self._mirror)
try:
return mod.__all__
except AttributeError:
return [name for name in dir(mod) if not name.startswith('_')]
def __getattr__(self, key):
# Use the equivalent of import_item(name), see below
name = "%s.%s" % (self._mirror, key)
try:
return import_item(name)
except ImportError:
raise AttributeError(key)
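# Illustrative usage sketch, not part of the original module: a package that has
# moved can install a shim so the old import path keeps working. The names
# "oldpkg" and "newpkg" below are hypothetical placeholders.
#
#     import sys
#     sys.modules["oldpkg"] = ShimModule(src="oldpkg", mirror="newpkg")
#     # "import oldpkg.sub" then resolves to the real "newpkg.sub" module, and
#     # callers may emit a ShimWarning to flag the move.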
|
datasets/data.py | gongda0e/AVT | 102 | 12753744 | <reponame>gongda0e/AVT<gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import torch
from importlib import import_module
from tqdm import tqdm
import omegaconf
import hydra
from common import utils
__all__ = [
"get_dataset",
]
def get_dataset(dataset_cfg, data_cfg, transform, logger):
    # If a _precomputed_metadata file is passed in, load it
kwargs = {}
precomp_metadata_fpath = None
if '_precomputed_metadata_file' in dataset_cfg:
precomp_metadata_fpath = dataset_cfg._precomputed_metadata_file
# Remove from the config since otherwise can't init the obj
with omegaconf.open_dict(dataset_cfg):
del dataset_cfg['_precomputed_metadata_file']
if os.path.exists(precomp_metadata_fpath):
_precomputed_metadata = torch.load(precomp_metadata_fpath)
kwargs['_precomputed_metadata'] = _precomputed_metadata
kwargs['transform'] = transform
kwargs['frame_rate'] = data_cfg.frame_rate
kwargs['frames_per_clip'] = data_cfg.num_frames
# Have to call dict() here since relative interpolation somehow doesn't
# work once I get the subclips object
kwargs['subclips_options'] = dict(data_cfg.subclips)
kwargs['load_seg_labels'] = data_cfg.load_seg_labels
logger.info('Creating the dataset object...')
# Not recursive since many of the sub-instantiations would need positional
# arguments
_dataset = hydra.utils.instantiate(dataset_cfg,
_recursive_=False,
**kwargs)
try:
logger.info('Computing clips...')
_dataset.video_clips.compute_clips(data_cfg.num_frames,
1,
frame_rate=data_cfg.frame_rate)
logger.info('Done')
except AttributeError: # if video_clips not in _dataset
logger.warning('No video_clips present')
logger.info(f'Created dataset with {len(_dataset)} elts')
if precomp_metadata_fpath and not os.path.exists(precomp_metadata_fpath):
utils.save_on_master(_dataset.metadata, precomp_metadata_fpath)
return _dataset
|
rl_coach/tests/exploration_policies/test_additive_noise.py | Lpallett4/coach | 1,960 | 12753794 | <gh_stars>1000+
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
from rl_coach.spaces import DiscreteActionSpace, BoxActionSpace
from rl_coach.exploration_policies.additive_noise import AdditiveNoise
from rl_coach.schedules import LinearSchedule
import numpy as np
@pytest.mark.unit_test
def test_init():
# discrete control
action_space = DiscreteActionSpace(3)
noise_schedule = LinearSchedule(1.0, 1.0, 1000)
# additive noise requires a bounded range for the actions
action_space = BoxActionSpace(np.array([10]))
with pytest.raises(ValueError):
policy = AdditiveNoise(action_space, noise_schedule, 0)
@pytest.mark.unit_test
def test_get_action():
# make sure noise is in range
action_space = BoxActionSpace(np.array([10]), -1, 1)
noise_schedule = LinearSchedule(1.0, 1.0, 1000)
policy = AdditiveNoise(action_space, noise_schedule, 0)
# the action range is 2, so there is a ~0.1% chance that the noise will be larger than 3*std=3*2=6
for i in range(1000):
action = policy.get_action(np.zeros([10]))
assert np.all(action < 10)
# make sure there is no clipping of the action since it should be the environment that clips actions
assert np.all(action != 1.0)
assert np.all(action != -1.0)
# make sure that each action element has a different value
assert np.all(action[0] != action[1:])
|
test-framework/test-suites/integration/files/list/mock_switch_mac_test_x1052.py | kmcm0/stacki | 123 | 12753813 | from unittest.mock import patch, create_autospec, PropertyMock
from stack.expectmore import ExpectMore
from stack.switch.x1052 import SwitchDellX1052
# Switch data to mock MAC address table
SWITCH_DATA = """
show mac address-table
show mac address-table
Flags: I - Internal usage VLAN
Aging time is 300 sec
Vlan Mac Address Port Type
------------ --------------------- ---------- ----------
1 00:00:00:00:00:00 gi1/0/10 dynamic
1 f4:8e:38:44:10:15 0 self
console#:
"""
# Intercept expectmore calls
mock_expectmore = patch(target = "stack.switch.x1052.ExpectMore", autospec = True).start()
# Need to set the instance mock returned from calling ExpectMore()
mock_expectmore.return_value = create_autospec(
spec = ExpectMore,
spec_set = True,
instance = True,
)
# Need to set the match_index to the base console prompt so that the switch thinks it is at the
# correct prompt, and won't try to page through output.
type(mock_expectmore.return_value).match_index = PropertyMock(
return_value = SwitchDellX1052.CONSOLE_PROMPTS.index(SwitchDellX1052.CONSOLE_PROMPT)
)
# Return our SWITCH_DATA from ExpectMore().ask()
mock_expectmore.return_value.ask.return_value = SWITCH_DATA.splitlines()
|
src/azure-cli/azure/cli/command_modules/eventhubs/_completers.py | YuanyuanNi/azure-cli | 3,287 | 12753827 | <filename>src/azure-cli/azure/cli/command_modules/eventhubs/_completers.py<gh_stars>1000+
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument
# pylint: disable=line-too-long
# pylint: disable=no-value-for-parameter
from azure.cli.core.decorators import Completer
@Completer
def get_eventhubs_command_completion_list(cmd, prefix, namespace):
from ._client_factory import event_hub_mgmt_client_factory
resource_group_name = namespace.resource_group_name
namespace_name = namespace.name
result = event_hub_mgmt_client_factory(cmd.cli_ctx).list_by_namespace(resource_group_name, namespace_name)
return [r.name for r in result]
@Completer
def get_consumergroup_command_completion_list(cmd, prefix, namespace):
from ._client_factory import consumer_groups_mgmt_client_factory
resource_group_name = namespace.resource_group_name
namespace_name = namespace.namespace_name
eventhub_name = namespace.name
result = consumer_groups_mgmt_client_factory(cmd.cli_ctx).list_by_event_hub(resource_group_name, namespace_name, eventhub_name)
return [r.name for r in result]
|
setup.py | nadabyte/openmaptiles-tools | 254 | 12753832 | <reponame>nadabyte/openmaptiles-tools
import setuptools
import re
from pathlib import Path
path = Path('.')
with (path / "README.md").open() as fh:
long_description = fh.read()
version_file = path / "openmaptiles" / "__init__.py"
with version_file.open() as fh:
m = re.search(r"^__version__\s*=\s*(['\"])([^'\"]*)\1", fh.read().strip(), re.M)
if not m:
raise ValueError(f"Version string is not found in {version_file}")
version = m.group(2)
with (path / "requirements.txt").open(encoding="utf-8") as fh:
# Requirements will contain a list of libraries without version restrictions
# It seems this is a common practice for the setup.py vs requirements.txt
requirements = [m.group(1) for m in
(re.match(r'^[ \t]*([^>=<!#\n]+).*', line) for line in fh.readlines())
if m]
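    # e.g. a hypothetical line "requests>=2.22.0  # http client" contributes just "requests"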
scripts = [str(p) for p in path.glob('bin/*') if p.is_file()]
setuptools.setup(
name='openmaptiles-tools',
version=version,
packages=['openmaptiles'],
description="The OpenMapTiles tools for generating TM2Source projects, imposm3 mappings and SQL instructions from "
"OpenMapTiles layers. We encourage other people to use this for their vector tile projects as well "
"since this approach works well for us.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/openmaptiles/openmaptiles-tools",
license='MIT',
scripts=scripts,
install_requires=requirements,
)
|
src/saml2/ws/wsutil.py | cnelson/pysaml2 | 5,079 | 12753853 | #!/usr/bin/env python
#
# Generated Sun Jun 14 12:18:10 2015 by parse_xsd.py version 0.5.
#
import saml2
from saml2 import SamlBase
NAMESPACE = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd'
class TTimestampFault_(SamlBase):
"""The http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd:tTimestampFault element """
c_tag = 'tTimestampFault'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def t_timestamp_fault__from_string(xml_string):
return saml2.create_class_from_xml_string(TTimestampFault_, xml_string)
class AttributedDateTime_(SamlBase):
"""The http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd:AttributedDateTime element """
c_tag = 'AttributedDateTime'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Id'] = ('Id', 'anyURI', False)
def __init__(self,
Id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.Id=Id
def attributed_date_time__from_string(xml_string):
return saml2.create_class_from_xml_string(AttributedDateTime_, xml_string)
class AttributedURI_(SamlBase):
"""The http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd:AttributedURI element """
c_tag = 'AttributedURI'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Id'] = ('Id', 'anyURI', False)
def __init__(self,
Id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.Id=Id
def attributed_ur_i__from_string(xml_string):
return saml2.create_class_from_xml_string(AttributedURI_, xml_string)
class Expires(AttributedDateTime_):
"""The http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd:Expires element """
c_tag = 'Expires'
c_namespace = NAMESPACE
c_children = AttributedDateTime_.c_children.copy()
c_attributes = AttributedDateTime_.c_attributes.copy()
c_child_order = AttributedDateTime_.c_child_order[:]
c_cardinality = AttributedDateTime_.c_cardinality.copy()
def expires_from_string(xml_string):
return saml2.create_class_from_xml_string(Expires, xml_string)
class Created(AttributedDateTime_):
"""The http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd:Created element """
c_tag = 'Created'
c_namespace = NAMESPACE
c_children = AttributedDateTime_.c_children.copy()
c_attributes = AttributedDateTime_.c_attributes.copy()
c_child_order = AttributedDateTime_.c_child_order[:]
c_cardinality = AttributedDateTime_.c_cardinality.copy()
def created_from_string(xml_string):
return saml2.create_class_from_xml_string(Created, xml_string)
class TimestampType_(SamlBase):
"""The http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd:TimestampType element """
c_tag = 'TimestampType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd}Created'] = ('created', Created)
c_cardinality['created'] = {"min":0, "max":1}
c_children['{http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd}Expires'] = ('expires', Expires)
c_cardinality['expires'] = {"min":0, "max":1}
c_attributes['Id'] = ('Id', 'anyURI', False)
c_child_order.extend(['created', 'expires'])
def __init__(self,
created=None,
expires=None,
Id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.created=created
self.expires=expires
self.Id=Id
def timestamp_type__from_string(xml_string):
return saml2.create_class_from_xml_string(TimestampType_, xml_string)
class Timestamp(TimestampType_):
"""The http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd:Timestamp element """
c_tag = 'Timestamp'
c_namespace = NAMESPACE
c_children = TimestampType_.c_children.copy()
c_attributes = TimestampType_.c_attributes.copy()
c_child_order = TimestampType_.c_child_order[:]
c_cardinality = TimestampType_.c_cardinality.copy()
def timestamp_from_string(xml_string):
return saml2.create_class_from_xml_string(Timestamp, xml_string)
#..................
AG_commonAtts = [
('Id', '', False),
]
ELEMENT_FROM_STRING = {
TTimestampFault_.c_tag: t_timestamp_fault__from_string,
AttributedDateTime_.c_tag: attributed_date_time__from_string,
AttributedURI_.c_tag: attributed_ur_i__from_string,
TimestampType_.c_tag: timestamp_type__from_string,
Timestamp.c_tag: timestamp_from_string,
Expires.c_tag: expires_from_string,
Created.c_tag: created_from_string,
}
ELEMENT_BY_TAG = {
'tTimestampFault': TTimestampFault_,
'AttributedDateTime': AttributedDateTime_,
'AttributedURI': AttributedURI_,
'TimestampType': TimestampType_,
'Timestamp': Timestamp,
'Expires': Expires,
'Created': Created,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
|
argoverse/utils/plane_visualization_utils.py | gargrohin/argoverse-api | 560 | 12753868 | # <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
from typing import List, Optional
import numpy as np
from argoverse.utils import mayavi_wrapper
from argoverse.utils.mesh_grid import get_mesh_grid_as_point_cloud
from argoverse.visualization.mayavi_utils import (
Figure,
draw_mayavi_line_segment,
plot_3d_clipped_bbox_mayavi,
plot_points_3D_mayavi,
)
def populate_frustum_voxels(planes: List[np.ndarray], fig: Figure, axis_pair: str) -> Figure:
"""
Generate grid in xy plane, and then treat it as grid in xz (ground) plane
in camera coordinate system.
Args:
planes: list of length 5. Each list element is a Numpy array
of shape (4,) representing the equation of a plane,
e.g. (a,b,c,d) in ax+by+cz=d
fig: Mayavi figure to draw on
axis_pair: Either "xz" or "yz"
Returns:
Mayavi figure
"""
sparse_xz_voxel_grid = get_mesh_grid_as_point_cloud(-20, 20, 0, 40, downsample_factor=0.1)
sparse_voxel_grid = np.zeros((sparse_xz_voxel_grid.shape[0], 3))
if axis_pair == "xz":
sparse_voxel_grid[:, 0] = sparse_xz_voxel_grid[:, 0]
sparse_voxel_grid[:, 2] = sparse_xz_voxel_grid[:, 1]
elif axis_pair == "yz":
sparse_voxel_grid[:, 1] = sparse_xz_voxel_grid[:, 0]
sparse_voxel_grid[:, 2] = sparse_xz_voxel_grid[:, 1]
# keep only the points that have signed distance > 0 (inside the frustum, plane
# normals also point into the frustum)
for plane in planes:
signed_d = np.matmul(sparse_voxel_grid, plane[:3]) + plane[3]
sparse_voxel_grid = sparse_voxel_grid[np.where(signed_d > 0)]
plot_points_3D_mayavi(sparse_voxel_grid, fig, fixed_color=(1, 0, 0))
return fig
def plot_frustum_planes_and_normals(
planes: List[np.ndarray],
cuboid_verts: Optional[np.ndarray] = None,
near_clip_dist: float = 0.5,
) -> None:
"""
Args:
planes: list of length 5. Each list element is a Numpy array
of shape (4,) representing the equation of a plane,
e.g. (a,b,c,d) in ax+by+cz=d
cuboid_verts: Numpy array of shape (N,3) representing
cuboid vertices
Returns:
None
"""
fig = mayavi_wrapper.mlab.figure(bgcolor=(1, 1, 1), size=(2000, 1000)) # type: ignore
if cuboid_verts is not None:
# fig = plot_bbox_3d_mayavi(fig, cuboid_verts)
fig = plot_3d_clipped_bbox_mayavi(fig, planes, cuboid_verts)
P = np.array([0.0, 0.0, 0.0])
for i, plane in enumerate(planes):
(a, b, c, d) = plane
if i == 0:
color = (1, 0, 0) # red left
elif i == 1:
color = (0, 0, 1) # blue right
elif i == 2:
color = (1, 1, 0) # near yellow
P = np.array([0.0, 0.0, near_clip_dist])
elif i == 3:
color = (0, 1, 0) # low is green
elif i == 4:
color = (0, 1, 1) # top is teal
plane_pts = generate_grid_on_plane(a, b, c, d, P)
fig = plot_points_3D_mayavi(plane_pts, fig, color)
# plot the normals at (0,0,0.5) and normal vector (u,v,w) given by (a,b,c)
mayavi_wrapper.mlab.quiver3d( # type: ignore
0,
0,
0.5,
a * 1000,
b * 1000,
c * 1000,
color=color,
figure=fig,
line_width=8,
)
# draw teal line at top below the camera
pt1 = np.array([-5, 0, -5])
pt2 = np.array([5, 0, -5])
color = (0, 1, 1)
draw_mayavi_line_segment(fig, [pt1, pt2], color=color, line_width=8)
# draw blue line in middle
pt1 = np.array([-5, 5, -5])
pt2 = np.array([5, 5, -5])
color = (0, 0, 1)
draw_mayavi_line_segment(fig, [pt1, pt2], color=color, line_width=8)
# draw yellow, lowest line (+y axis is down)
pt1 = np.array([-5, 10, -5])
pt2 = np.array([5, 10, -5])
color = (1, 1, 0)
draw_mayavi_line_segment(fig, [pt1, pt2], color=color, line_width=8)
fig = populate_frustum_voxels(planes, fig, "xz")
fig = populate_frustum_voxels(planes, fig, "yz")
mayavi_wrapper.mlab.view(distance=200) # type: ignore
mayavi_wrapper.mlab.show() # type: ignore
def get_perpendicular(n: np.ndarray) -> np.ndarray:
"""
    Return a vector perpendicular to n: dot(n, get_perpendicular(n)) is zero,
    which is the orthogonality condition, while the magnitude of the result is
    kept as high as possible. Note that setting the component with the smallest
    magnitude to 0 also guarantees that you don't get a (0, 0, 0) vector as a
    result, unless that is already your input.
Args:
n: Numpy array of shape (3,)
Returns:
result: Numpy array of shape (3,)
"""
# find smallest component
i = np.argmin(n)
# get the other two indices
a = (i + 1) % 3
b = (i + 2) % 3
result = np.zeros(3)
result[i] = 0.0
result[a] = n[b]
result[b] = -n[a]
return result
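# Worked example (illustrative, not from the original file): for n = [1, 2, 3],
# np.argmin(n) picks index 0, so the result is [0, n[2], -n[1]] = [0, 3, -2],
# and np.dot([1, 2, 3], [0, 3, -2]) == 0 as required.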
def generate_grid_on_plane(a: float, b: float, c: float, d: float, P: np.ndarray, radius: float = 15) -> np.ndarray:
"""
Args:
a,b,c,d: Coefficients of ``ax + by + cz = d`` defining plane
P: Numpy array of shape (3,) representing point on the plane
radius: Radius (default 15)
Returns:
plane_pts: Numpy array of shape (N,3) with points on the input plane
"""
n = np.array([a, b, c]) # a,b,c from your equation
perp = get_perpendicular(n)
u = perp / np.linalg.norm(perp)
v = np.cross(u, n)
N = 100
# delta and epsilon are floats:
delta = radius / N # N is how many points you want max in one direction
epsilon = delta * 0.5
n_pts = int((2 * radius + epsilon) / delta)
pts = np.linspace(-radius, radius + epsilon, n_pts)
plane_pts: List[float] = []
for y in pts:
for x in pts:
# if (x*x+y*y < radius*radius): # only in the circle:
plane_pts += [P + x * u + y * v] # P is the point on the plane
return np.array(plane_pts)
|
{{ cookiecutter.project_slug }}/tests/test_sample.py | gphillips8frw/cookiecutter-docker-science | 308 | 12753877 | import unittest
class TestSample(unittest.TestCase):
def setUp(self):
pass
def test_add(self):
self.assertEqual((3 + 4), 7)
|
tools/coresctostandoff.py | pombredanne/brat | 1,406 | 12753879 | <gh_stars>1000+
#!/usr/bin/env python
import re
import sys
try:
import cElementTree as ET
except BaseException:
import xml.etree.cElementTree as ET
# tags of elements to exclude from standoff output
# (not used now; anything not explicitly converted is excluded)
EXCLUDED_TAGS = [
# "SP",
# "IT",
# "SB",
# "REF",
# "P",
# "B",
# "TITLE",
# "PAPER",
# "HEADER",
# "DIV",
# "BODY",
# "ABSTRACT",
# "THEAD",
# "TGROUP",
# "TBODY",
# "SUP",
# "EQN",
# "ENTRY",
# "XREF",
# "ROW",
# "EQ-S",
# "text",
# "datasection",
# "s",
# "mode2",
]
EXCLUDED_TAG = {t: True for t in EXCLUDED_TAGS}
# string to use to indicate elided text in output
ELIDED_TEXT_STRING = "[[[...]]]"
# maximum length of text strings printed without elision
MAXIMUM_TEXT_DISPLAY_LENGTH = 1000
# c-style string escaping for just newline, tab and backslash.
# (s.encode('string_escape') does too much for utf-8)
def c_escape(s):
return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n')
def strip_ns(tag):
# remove namespace spec from tag, if any
return tag if tag[0] != '{' else re.sub(r'\{.*?\}', '', tag)
class Standoff:
def __init__(self, sid, element, start, end, text):
self.sid = sid
self.element = element
self.start = start
self.end = end
self.text = text
def compress_text(self, l):
if len(self.text) >= l:
el = len(ELIDED_TEXT_STRING)
            sl = (l - el) // 2  # integer division keeps the slice bounds below ints
self.text = (
self.text[:sl] + ELIDED_TEXT_STRING + self.text[-(l - sl - el):])
def tag(self):
return strip_ns(self.element.tag)
def attrib(self):
# remove namespace specs from attribute names, if any
attrib = {}
for a in self.element.attrib:
if a[0] == "{":
an = re.sub(r'\{.*?\}', '', a)
else:
an = a
attrib[an] = self.element.attrib[a]
return attrib
def __str__(self):
return "X%d\t%s %d %d\t%s\t%s" % \
(self.sid, self.tag(), self.start, self.end,
c_escape(self.text.encode("utf-8")),
" ".join(['%s="%s"' % (k.encode("utf-8"), v.encode("utf-8"))
for k, v in list(self.attrib().items())]))
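# Illustrative format note (values below are made up): __str__ is meant to yield a
# tab-separated standoff line of the form
#   X<sid>\t<TAG> <start> <end>\t<escaped text>\t<key="value" attributes>
# e.g. X1\tTITLE 0 10\tSome title\tid="t1"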
def txt(s):
return s if s is not None else ""
next_free_so_id = 1
def text_and_standoffs(e, curroff=0, standoffs=None):
global next_free_so_id
if standoffs is None:
standoffs = []
startoff = curroff
# to keep standoffs in element occurrence order, append
# a placeholder before recursing
so = Standoff(next_free_so_id, e, 0, 0, "")
next_free_so_id += 1
standoffs.append(so)
setext, dummy = subelem_text_and_standoffs(
e, curroff + len(txt(e.text)), standoffs)
text = txt(e.text) + setext
curroff += len(text)
so.start = startoff
so.end = curroff
so.text = text
return (text, standoffs)
def subelem_text_and_standoffs(e, curroff, standoffs):
startoff = curroff
text = ""
for s in e:
stext, dummy = text_and_standoffs(s, curroff, standoffs)
text += stext
text += txt(s.tail)
curroff = startoff + len(text)
return (text, standoffs)
def empty_elements(e, tags=None):
if tags is None or strip_ns(e.tag) in tags:
e.clear()
for c in e:
empty_elements(c, tags)
def add_space(e):
if strip_ns(e.tag) in ('title', ):
e.tail = (e.tail if e.tail is not None else '') + '\n'
for c in e:
add_space(c)
def convert_coresc1(s):
sostrings = []
# create a textbound of the type specified by the "type"
# attribute.
tid = "T%d" % convert_coresc1._idseq
sostrings.append('%s\t%s %d %d\t%s' %
(tid, s.attrib()['type'], s.start, s.end,
s.text.encode('utf-8')))
# TODO: consider converting "advantage" and "novelty" attributes
convert_coresc1._idseq += 1
return sostrings
convert_coresc1._idseq = 1
convert_function = {
'CoreSc1': convert_coresc1,
'annotationART': convert_coresc1,
}
def main(argv=[]):
if len(argv) != 4:
print("Usage:", argv[0], "IN-XML OUT-TEXT OUT-SO", file=sys.stderr)
return -1
in_fn, out_txt_fn, out_so_fn = argv[1:]
# "-" for STDIN / STDOUT
if in_fn == "-":
in_fn = "/dev/stdin"
if out_txt_fn == "-":
out_txt_fn = "/dev/stdout"
if out_so_fn == "-":
out_so_fn = "/dev/stdout"
tree = ET.parse(in_fn)
root = tree.getroot()
# remove unannotated, (primarily) non-content elements
empty_elements(root, set(['article-categories',
'copyright-statement', 'license',
'copyright-holder', 'copyright-year',
'journal-meta', 'article-id',
'back',
'fig', 'table-wrap',
'contrib-group',
'aff', 'author-notes',
'pub-date',
'volume', 'issue',
'fpage', 'lpage',
'history'
]))
add_space(root)
text, standoffs = text_and_standoffs(root)
# filter
standoffs = [s for s in standoffs if not s.tag() in EXCLUDED_TAG]
# convert selected elements
converted = []
for s in standoffs:
if s.tag() in convert_function:
converted.extend(convert_function[s.tag()](s))
# else:
# converted.append(s)
standoffs = converted
for so in standoffs:
try:
so.compress_text(MAXIMUM_TEXT_DISPLAY_LENGTH)
except AttributeError:
pass
# open output files
out_txt = open(out_txt_fn, "wt")
out_so = open(out_so_fn, "wt")
out_txt.write(text.encode("utf-8"))
for so in standoffs:
print(so, file=out_so)
out_txt.close()
out_so.close()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
nogotofail/mitm/util/tls/types/errors.py | kbfl0912/nogotofail | 1,594 | 12753896 | <reponame>kbfl0912/nogotofail<gh_stars>1000+
r'''
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class TlsNotEnoughDataError(Exception):
"""Error in TLS parsing where the TLS record is so far valid but incomplete"""
pass
class TlsRecordIncompleteError(TlsNotEnoughDataError):
"""Error for when a TLS Record appears valid but is not enough data is present to parse
the record"""
def __init__(self, data_available, record_size):
self.data_available = data_available
self.record_size = record_size
class TlsMessageFragmentedError(TlsNotEnoughDataError):
"""Error for when not enough data is present to parse a TLS message because of
fragmentation"""
def __init__(self, fragment_data, data_consumed):
self.fragment_data = fragment_data
self.data_consumed = data_consumed
|
tests/esp32/uart2.py | rodgergr/pycom-micropython-sigfox-1 | 198 | 12753956 | <gh_stars>100-1000
'''
P11 and P12 must be connected together for this test to pass.
'''
from machine import UART
from machine import Pin
import os
import time
# do not execute this test on the GPy and FiPy
if os.uname().sysname == 'GPy' or os.uname().sysname == 'FiPy':
print("SKIP")
import sys
sys.exit()
uart = UART(2, 115200)
print(uart)
uart.init(57600, 8, None, 1, pins=('P11', 'P12'))
uart.init(baudrate=9600, stop=2, parity=UART.EVEN, pins=('P11', 'P12'))
uart.init(baudrate=115200, parity=UART.ODD, stop=1, pins=('P11', 'P12'))
uart.read()
print (uart.read())
print (uart.readline())
buff = bytearray(1)
print (uart.readinto(buff, 1))
print (uart.read())
print (uart.any())
print (uart.write('a'))
uart.deinit()
uart = UART(2, 1000000, pins=('P12', 'P11'))
print(uart)
uart.read()
print(uart.write(b'123456') == 6)
print(uart.read() == b'123456')
uart.deinit()
uart = UART(2, 1000000, pins=('P11', 'P12'))
print(uart)
uart.read()
print(uart.write(b'123456') == 6)
print(uart.read() == b'123456')
uart.deinit()
uart = UART(2, 1000000, pins=('P11', 'P12'))
print(uart.write(b'123') == 3)
print(uart.read(1) == b'1')
print(uart.read(2) == b'23')
print(uart.read() == None)
uart.write(b'123')
buf = bytearray(3)
print(uart.readinto(buf, 1) == 1)
print(buf)
print(uart.readinto(buf) == 2)
print(buf)
uart.deinit()
# check for memory leaks...
for i in range (0, 1000):
uart = UART(2, 1000000)
uart.deinit()
# next ones must raise
try:
UART(2, 9600, parity=None, pins=('GP12', 'GP13', 'GP7'))
except Exception:
print('Exception')
try:
UART(2, 9600, parity=UART.ODD, pins=('GP12', 'GP7'))
except Exception:
print('Exception')
# buffer overflow
uart = UART(2, 1000000, pins=('P11', 'P12'))
buf = bytearray([0x55AA] * 567)
for i in range(200):
r = uart.write(buf)
r = uart.read()
r = uart.read()
print(r)
print(uart.write(b'123456') == 6)
print(uart.read() == b'123456')
uart.deinit()
|
salt/utils/error.py | markgras/salt | 9,425 | 12753960 | <gh_stars>1000+
"""
Utilities to enable exception reraising across the master commands
"""
import builtins
import salt.exceptions
import salt.utils.event
def raise_error(name=None, args=None, message=""):
"""
    Raise an exception whose type is looked up by ``name``, built from ``args``
    when given, otherwise from ``message``.
    If ``name`` is empty then "Exception" is used as the lookup name.
"""
name = name or "Exception"
if hasattr(salt.exceptions, name):
ex = getattr(salt.exceptions, name)
elif hasattr(builtins, name):
ex = getattr(builtins, name)
else:
name = "SaltException"
ex = getattr(salt.exceptions, name)
if args is not None:
raise ex(*args)
else:
raise ex(message)
def pack_exception(exc):
if hasattr(exc, "pack"):
packed_exception = exc.pack()
else:
packed_exception = {"message": exc.__unicode__(), "args": exc.args}
return packed_exception
def fire_exception(exc, opts, job=None, node="minion"):
"""
Fire raw exception across the event bus
"""
if job is None:
job = {}
event = salt.utils.event.SaltEvent(node, opts=opts, listen=False)
event.fire_event(pack_exception(exc), "_salt_error")
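# Illustrative sketch, not part of the original module: the packed form produced
# by pack_exception() can be re-raised on the receiving side with raise_error().
# "CommandExecutionError" is assumed to exist in salt.exceptions; unknown names
# fall back to SaltException.
#
#     packed = {"message": "unable to run cmd", "args": ("unable to run cmd",)}
#     raise_error(name="CommandExecutionError", args=packed["args"])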
|
sdk/batch/microsoft-azure-batch/tools/publish.py | ppartarr/azure-sdk-for-java | 1,350 | 12753981 | #!/usr/bin/python
import sys
import os
import fnmatch
import shutil
import re
def main(argv):
# validation
if len(argv) < 4:
print "Usage: " + get_usage()
exit(1)
# figure out source dir
folder = os.path.dirname(os.path.realpath(__file__))
if len(argv) == 5:
folder = argv[4]
if not os.path.isdir(folder):
print "Cannot find directory " + folder
exit(1)
folder = folder.strip('/')
# make working dir
version = argv[1]
working = folder + "/" + version
if os.path.exists(working):
print "Cowardly exiting because " + working + " already exists"
exit(1)
os.mkdir(working)
# copy over all jars
for i in get_jars(folder):
shutil.copy(i, working)
# copy over all poms
pkgs = []
for i in get_poms(folder):
assert isinstance(i, str)
parts = i.rsplit("\\")
pkg_name = parts[len(parts) - 2]
if len(parts) == len(folder.rsplit("\\")) + 1:
# root folder
shutil.copyfile(i, "%s/%s-%s.pom" % (working, "azure-bom", version))
pkg_name = "azure-bom"
elif pkg_name == "azure":
# parent folder
shutil.copyfile(i, "%s/%s-%s.pom" % (working, "azure-parent", version))
pkg_name = "azure-parent"
else:
shutil.copyfile(i, "%s/%s-%s.pom" % (working, pkg_name, version))
pkgs.append(pkg_name)
# filter out packages
print "Publishing the following: "
to_pub = []
for pkg in pkgs:
if re.match(argv[3], pkg) is not None:
to_pub.append(os.path.join(working, pkg))
print pkg
if len(to_pub) == 0:
print "No compiled package matches the regex. Exiting."
exit(1)
for pkg in to_pub:
cmd = "mvn gpg:sign-and-deploy-file"
cmd += " -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/"
cmd += " -DrepositoryId=sonatype-nexus-staging -DpomFile=%s-%s.pom" % (pkg, version)
assert isinstance(pkg, str)
if pkg.endswith("azure-parent") or pkg.endswith("azure-bom"):
cmd += " -Dfile=%s-%s.pom -Dgpg.passphrase=%s" % (pkg, version, argv[2])
os.system(cmd)
else:
os.system(cmd + " -Dfile=%s-%s.jar -Dgpg.passphrase=%s" % (pkg, version, argv[2]))
os.system(cmd + " -Dfile=%s-%s-javadoc.jar -Dclassifier=javadoc -Dgpg.passphrase=%s" % (pkg, version, argv[2]))
os.system(cmd + " -Dfile=%s-%s-sources.jar -Dclassifier=sources -Dgpg.passphrase=%s" % (pkg, version, argv[2]))
print "Finished."
def mvn_package():
cmd = "mvn package source:jar javadoc:jar"
print "Shell: " + cmd
os.system(cmd)
def get_poms(folder):
matches = []
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, 'pom.xml'):
matches.append(os.path.join(root, filename))
return matches
def get_jars(folder):
matches = []
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, '*.jar'):
matches.append(os.path.join(root, filename))
return matches
def get_usage():
return "publish.py version gpg_passphrase package_grep_string [root_directory]"
main(sys.argv)
|
paco/thunk.py | lmicra/paco | 208 | 12753997 | # -*- coding: utf-8 -*-
import asyncio
from .assertions import assert_corofunction
def thunk(coro):
"""
A thunk is a subroutine that is created, often automatically, to assist
a call to another subroutine.
Creates a thunk coroutine which returns coroutine function that accepts no
arguments and when invoked it schedules the wrapper coroutine and
returns the final result.
See Wikipedia page for more information about Thunk subroutines:
https://en.wikipedia.org/wiki/Thunk
Arguments:
value (coroutinefunction): wrapped coroutine function to invoke.
Returns:
coroutinefunction
Usage::
async def task():
return 'foo'
coro = paco.thunk(task)
await coro()
# => 'foo'
await coro()
# => 'foo'
"""
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper():
return (yield from coro())
return wrapper
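# Illustrative driver for the docstring example, not part of the original module:
# from synchronous code the returned thunk can be run on an event loop, e.g.
#
#     loop = asyncio.get_event_loop()
#     result = loop.run_until_complete(coro())  # -> 'foo'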
|
dts/utils/__init__.py | albertogaspar/dts | 139 | 12754004 | <filename>dts/utils/__init__.py
from dts.utils.utils import get_args
from dts.utils.losses import r2, smape, nrmse_a, nrmse_b, nrmse_c, nrmsd
from dts.utils.experiments import DTSExperiment, log_metrics, run_single_experiment, run_grid_search
metrics = ['mse',
'mae',
nrmse_a,
nrmse_b,
nrmsd,
r2,
smape,
'mape'] |
mammoth/cli.py | cockcrow/python-mammoth | 557 | 12754018 | import argparse
import io
import os
import shutil
import sys
import mammoth
from . import writers
def main():
args = _parse_args()
if args.style_map is None:
style_map = None
else:
with open(args.style_map) as style_map_fileobj:
style_map = style_map_fileobj.read()
with open(args.path, "rb") as docx_fileobj:
if args.output_dir is None:
convert_image = None
output_path = args.output
else:
convert_image = mammoth.images.img_element(ImageWriter(args.output_dir))
output_filename = "{0}.html".format(os.path.basename(args.path).rpartition(".")[0])
output_path = os.path.join(args.output_dir, output_filename)
result = mammoth.convert(
docx_fileobj,
style_map=style_map,
convert_image=convert_image,
output_format=args.output_format,
)
for message in result.messages:
sys.stderr.write(message.message)
sys.stderr.write("\n")
_write_output(output_path, result.value)
class ImageWriter(object):
def __init__(self, output_dir):
self._output_dir = output_dir
self._image_number = 1
def __call__(self, element):
extension = element.content_type.partition("/")[2]
image_filename = "{0}.{1}".format(self._image_number, extension)
with open(os.path.join(self._output_dir, image_filename), "wb") as image_dest:
with element.open() as image_source:
shutil.copyfileobj(image_source, image_dest)
self._image_number += 1
return {"src": image_filename}
def _write_output(path, contents):
if path is None:
if sys.version_info[0] <= 2:
stdout = sys.stdout
else:
stdout = sys.stdout.buffer
stdout.write(contents.encode("utf-8"))
stdout.flush()
else:
with io.open(path, "w", encoding="utf-8") as fileobj:
fileobj.write(contents)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"path",
metavar="docx-path",
help="Path to the .docx file to convert.")
output_group = parser.add_mutually_exclusive_group()
output_group.add_argument(
"output",
nargs="?",
metavar="output-path",
help="Output path for the generated document. Images will be stored inline in the output document. Output is written to stdout if not set.")
output_group.add_argument(
"--output-dir",
help="Output directory for generated HTML and images. Images will be stored in separate files. Mutually exclusive with output-path.")
parser.add_argument(
"--output-format",
required=False,
choices=writers.formats(),
help="Output format.")
parser.add_argument(
"--style-map",
required=False,
help="File containg a style map.")
return parser.parse_args()
if __name__ == "__main__":
main()
|
2018-evaluation-script/program/task1_eval.py | yolochai/scisumm-corpus | 198 | 12754026 | import os
import sys
import json, csv
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
from copy import copy
def dictify(r,root=True):
if root:
return {r.tag : dictify(r, False)}
d=copy(r.attrib)
if r.text:
d["_text"]=r.text
for x in r.findall("./*"):
if x.tag not in d:
d[x.tag]=[]
d[x.tag].append(dictify(x,False))
return d
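# Illustrative example, not part of the original script: for the element tree of
# "<root><S sid='1'>foo</S></root>", dictify(root) returns
# {"root": {"S": [{"sid": "1", "_text": "foo"}]}}.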
def parse(file):
print("parsing: " + str(file))
parse_data = {}
with open(file, "r") as f:
data = f.read().strip().split("\n")
for line in data:
line = line.strip()
if len(line) == 0:
continue
if line[-1] == "|":
line = line[0:-1]
# print("Old line: " + line)
line = line.replace("a | s, ", "a PIPE s, ")
# print("New line: " + line)
items = line.split(" | ")
line_data = {}
for kvpair in items:
if len(kvpair) == 0:
continue
# print kvpair
key = kvpair.strip().split(":", 1)[0].strip()
value = kvpair.strip().split(":", 1)[1].strip()
# print key + ":" + value
line_data[key] = value
if "Discourse Facet" not in line_data:
line_data["Discourse Facet"] = "None"
line_data["Reference Article"] = line_data["Reference Article"].replace(".xml", "")
line_data["Citing Article"] = line_data["Citing Article"].replace(".xml", "")
print("original cit marker offset is " + line_data["Citation Marker Offset"])
if line_data["Citation Marker Offset"].startswith("["):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
if line_data["Citation Marker Offset"].endswith("]"):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
if line_data["Citation Marker Offset"].startswith("\'"):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
if line_data["Citation Marker Offset"].endswith("\'"):
line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
if line_data["Citation Offset"].startswith("["):
line_data["Citation Offset"] = line_data["Citation Offset"][1:]
if line_data["Citation Offset"].endswith("]"):
line_data["Citation Offset"] = line_data["Citation Offset"][:-1]
print("new cit marker offset is " + line_data["Citation Marker Offset"])
if line_data["Reference Article"] not in parse_data:
parse_data[line_data["Reference Article"]] = {}
if line_data["Citing Article"] not in parse_data[line_data["Reference Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]] = {}
if line_data["Citation Marker Offset"] not in parse_data[line_data["Reference Article"]][line_data["Citing Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]] = {"original": line_data, "comparable": False}
ref_offset = line_data["Reference Offset"]
if ref_offset.startswith("["):
ref_offset = ref_offset[1:]
if ref_offset.endswith("]"):
ref_offset = ref_offset[:-1]
parsed_ref_offset_tmp = [x.strip() for x in ref_offset.split(",")]
print("\n\n")
print(parsed_ref_offset_tmp)
parsed_ref_offset = []
for ref in parsed_ref_offset_tmp:
print(ref)
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_ref_offset.append(ref)
print(parsed_ref_offset)
# print("<root>" + line_data["Reference Text"] + "</root>")
line = "<root>" + line_data["Reference Text"] + "</root>"
# print("Line is:")
# print(line)
line = line.replace("&", "&")
line = str(BeautifulSoup(line, "xml"))
# line = line.replace("<\s>", "</s>")
# print("Line is:")
# print(line)
root = ET.fromstring(line)
ref_text_dict = dictify(root)
# print(ref_text_dict)
ref_text_dict_clean = {}
cnt = 0
for item in ref_text_dict["root"]["S"]:
cnt += 1
ref_text_dict_clean[item.get("sid", cnt)] = item["_text"]
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Text"] = ref_text_dict_clean
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Offset"] = parsed_ref_offset
ref_discourse_facet = line_data["Discourse Facet"]
parsed_discourse_facet = []
if len(ref_discourse_facet) > 0:
if ref_discourse_facet[0] == "[":
parsed_discourse_facet_tmp = [x.strip().lower().replace(" ", "_") for x in ref_discourse_facet[1:-1].split(",")]
parsed_discourse_facet = []
for ref in parsed_discourse_facet_tmp:
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
else:
ref = ref_discourse_facet.lower().replace(" ", "_")
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Discourse Facet"] = parsed_discourse_facet
# print(json.dumps(parse_data, sort_keys=True, indent=4))
# print("###################################################################################################################")
return parse_data
def parse_csv(file):
print("parsing: " + str(file))
parse_data = {}
csv_obj = csv.reader(open(file,"r"))
items_list = None
for i, row in enumerate(csv_obj):
if i==0: # first line
items_list = row # Citance Number,Reference Article, ...
continue
line_data = {}
if len(row) != len(items_list):
print "Error: # of items mismatch"
print items_list
print row
continue
for key, value in zip(items_list, row):
# print kvpair
line_data[key] = value
if line_data["Reference Text"] == "NA":
continue
# print items_list
print line_data["Reference Text"]
line_data["Reference Article"] = line_data["Reference Article"].replace(".xml", "")
line_data["Citing Article"] = line_data["Citing Article"].replace(".xml", "")
print("original cit marker offset is " + line_data["Citation Marker Offset"])
# if line_data["Citation Marker Offset"].startswith("["):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
# if line_data["Citation Marker Offset"].endswith("]"):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
# if line_data["Citation Marker Offset"].startswith("\'"):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][1:]
# if line_data["Citation Marker Offset"].endswith("\'"):
# line_data["Citation Marker Offset"] = line_data["Citation Marker Offset"][:-1]
# if line_data["Citation Offset"].startswith("["):
# line_data["Citation Offset"] = line_data["Citation Offset"][1:]
# if line_data["Citation Offset"].endswith("]"):
# line_data["Citation Offset"] = line_data["Citation Offset"][:-1]
line_data["Citation Marker Offset"] = '0'
line_data["Citation Offset"] = '0'
print("new cit marker offset is " + line_data["Citation Marker Offset"])
if line_data["Reference Article"] not in parse_data:
parse_data[line_data["Reference Article"]] = {}
if line_data["Citing Article"] not in parse_data[line_data["Reference Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]] = {}
if line_data["Citation Marker Offset"] not in parse_data[line_data["Reference Article"]][line_data["Citing Article"]]:
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]] = {"original": line_data, "comparable": False}
ref_offset = line_data["Reference Offset"]
if ref_offset.startswith("["):
ref_offset = ref_offset[1:]
if ref_offset.endswith("]"):
ref_offset = ref_offset[:-1]
parsed_ref_offset_tmp = [x.strip() for x in ref_offset.split(",")]
print("\n\n")
print(parsed_ref_offset_tmp)
parsed_ref_offset = []
for ref in parsed_ref_offset_tmp:
print(ref)
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_ref_offset.append(ref)
print(parsed_ref_offset)
# print("<root>" + line_data["Reference Text"] + "</root>")
line = "<root>" + line_data["Reference Text"] + "</root>"
# print("Line is:")
# print(line)
line = line.replace("&", "&")
line = line.replace("&", "&")
line = str(BeautifulSoup(line, "xml"))
# line = line.replace("<\s>", "</s>")
# print("Line is:")
# print(line)
root = ET.fromstring(line)
ref_text_dict = dictify(root)
# print(ref_text_dict)
ref_text_dict_clean = {}
cnt = 0
# if "S" not in ref_text_dict["root"]:
# # print "Key Error at", file
# continue
try:
for item in ref_text_dict["root"]["S"]:
cnt += 1
ref_text_dict_clean[item.get("sid", cnt)] = item["_text"]
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Text"] = ref_text_dict_clean
# print "ref_text_dict_clean", ref_text_dict_clean
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Reference Offset"] = parsed_ref_offset
except:
print "Error in Reference Offset"
continue
try:
ref_discourse_facet = line_data["Discourse Facet"]
parsed_discourse_facet = []
if len(ref_discourse_facet) > 0:
if ref_discourse_facet[0] == "[":
parsed_discourse_facet_tmp = [x.strip().lower().replace(" ", "_") for x in ref_discourse_facet[1:-1].split(",")]
parsed_discourse_facet = []
for ref in parsed_discourse_facet_tmp:
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
else:
parsed_discourse_facet_tmp = [x.strip().lower().replace(" ", "_") for x in ref_discourse_facet.split(",")]
parsed_discourse_facet = []
for ref in parsed_discourse_facet_tmp:
if ref.startswith("\'") or ref.startswith("\""):
ref = ref[1:]
if ref.endswith("\'") or ref.endswith("\""):
ref = ref[:-1]
parsed_discourse_facet.append(ref)
print "parsed_discourse_facet", parsed_discourse_facet
parse_data[line_data["Reference Article"]][line_data["Citing Article"]][line_data["Citation Marker Offset"]]["Discourse Facet"] = parsed_discourse_facet
except:
print "Error in Discourse Facet"
continue
# print(json.dumps(parse_data, sort_keys=True, indent=4))
# print("###################################################################################################################")
return parse_data
def calculate(gold_data, submit_data):
# print(json.dumps(gold_data, indent=4, sort_keys=True))
# print(json.dumps(submit_data, indent=4, sort_keys=True))
[TP_ref, FN_ref, FP_ref, TP_facet, FN_facet, FP_facet] = [0, 0, 0, 0, 0, 0]
for ref_article in gold_data:
for cit_article in gold_data[ref_article]:
for cit_marker_offset in gold_data[ref_article][cit_article]:
old_TP_ref = TP_ref
try:
for ref_offset in gold_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]:
try:
ref_offset_list = submit_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]
if ref_offset in ref_offset_list:
TP_ref += 1
gold_data[ref_article][cit_article][cit_marker_offset]["comparable"] = True
else:
FN_ref += 1
except KeyError as e:
print("IGNORE THIS: key error 1")
FN_ref += 1
except: continue
for ref_article in submit_data:
for cit_article in submit_data[ref_article]:
for cit_marker_offset in submit_data[ref_article][cit_article]:
try:
for ref_offset in submit_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]:
try:
ref_offset_list = gold_data[ref_article][cit_article][cit_marker_offset]["Reference Offset"]
if ref_offset not in ref_offset_list:
FP_ref += 1
except KeyError as e:
print("IGNORE THIS: key error 2")
FP_ref += 1
except: continue
[precision_ref, recall_ref, f_ref] = [0.0, 0.0, 0.0]
try:
precision_ref = TP_ref / float(TP_ref + FP_ref)
except ZeroDivisionError as e:
precision_ref = 0
try:
recall_ref = TP_ref / float(TP_ref + FN_ref)
except ZeroDivisionError as e:
recall_ref = 0
try:
f_ref = 2.0 * precision_ref * recall_ref / float(precision_ref + recall_ref)
except ZeroDivisionError as e:
f_ref = 0
for ref_article in gold_data:
for cit_article in gold_data[ref_article]:
for cit_marker_offset in gold_data[ref_article][cit_article]:
try:
for facet in gold_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
if gold_data[ref_article][cit_article][cit_marker_offset]["comparable"]:
print("\n\n")
print(ref_article)
print(cit_article)
print(cit_marker_offset)
print(facet)
print(submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"])
try:
if facet in submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
TP_facet += 1
else:
FN_facet += 1
except KeyError as e:
print("IGNORE THIS: Key error 4")
FN_facet += 1
else:
FN_facet += 1
except: continue
for ref_article in submit_data:
for cit_article in submit_data[ref_article]:
for cit_marker_offset in submit_data[ref_article][cit_article]:
try:
for facet in submit_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
try:
if gold_data[ref_article][cit_article][cit_marker_offset]["comparable"]:
if facet not in gold_data[ref_article][cit_article][cit_marker_offset]["Discourse Facet"]:
FP_facet += 1
except KeyError as e:
print("IGNORE THIS: Key error 5")
FP_facet += 1
except: continue
[precision_facet, recall_facet, f_facet] = [0.0, 0.0, 0.0]
try:
precision_facet = TP_facet / float(TP_facet + FP_facet)
except ZeroDivisionError as e:
precision_facet = 0
try:
recall_facet = TP_facet / float(TP_facet + FN_facet)
except ZeroDivisionError as e:
recall_facet = 0
try:
f_facet = 2.0 * precision_facet * recall_facet / float(precision_facet + recall_facet)
except ZeroDivisionError as e:
f_facet = 0
return (precision_ref, recall_ref, f_ref, precision_facet, recall_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet)
def evaluate(gold_file, submit_file, score_file):
# print(gold_file)
# print(submit_file)
gold_data = parse_csv(gold_file)
submit_data = parse_csv(submit_file)
(p_ref, r_ref, f_ref, p_facet, r_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet) = calculate(gold_data, submit_data)
with open(score_file, "a") as f:
f.write(os.path.basename(gold_file) + "_task1a_precision: " + str(p_ref) + "\n")
f.write(os.path.basename(gold_file) + "_task1a_recall: " + str(r_ref) + "\n")
f.write(os.path.basename(gold_file) + "_task1a_f1: " + str(f_ref) + "\n")
f.write(os.path.basename(gold_file) + "_task1b_precision: " + str(p_facet) + "\n")
f.write(os.path.basename(gold_file) + "_task1b_recall: " + str(r_facet) + "\n")
f.write(os.path.basename(gold_file) + "_task1b_f1: " + str(f_facet) + "\n")
return (p_ref, r_ref, f_ref, p_facet, r_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet)
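# Aggregate scores over all papers. Micro-averaging pools the TP/FP/FN counts
# before computing metrics; macro-averaging takes the mean of the per-paper
# precision and recall values.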
def main(input_dir, output_dir):
if not os.path.exists(input_dir):
print("%s not a valid director" % input_dir)
if not os.path.exists(output_dir):
print("%s not a valid director" % output_dir)
truth_dir = os.path.join(input_dir, "ref", "Task1")
if not os.path.exists(truth_dir):
print("%s not a valid director" % truth_dir)
submit_dir = os.path.join(input_dir, "res", "Task1")
if not os.path.exists(submit_dir):
print("%s not a valid director" % submit_dir)
score_file = os.path.join(output_dir, "scores.txt")
if os.path.exists(score_file):
os.remove(score_file)
P_ref_list = []
P_facet_list = []
R_ref_list = []
R_facet_list = []
F_ref_list = []
F_facet_list = []
TP_ref_list = []
FP_ref_list = []
FN_ref_list = []
TP_facet_list = []
FP_facet_list = []
FN_facet_list = []
for gold_file in os.listdir(truth_dir):
if gold_file.startswith('.'):
continue
paper_id = gold_file.split('_')[0]
submit_file = os.path.join(submit_dir, paper_id +".csv")
if not os.path.exists(submit_file):
continue
(p_ref, r_ref, f_ref, p_facet, r_facet, f_facet, TP_ref, FP_ref, FN_ref, TP_facet, FP_facet, FN_facet) = evaluate(os.path.join(truth_dir, gold_file), submit_file, score_file)
P_ref_list.append(p_ref)
P_facet_list.append(p_facet)
R_ref_list.append(r_ref)
R_facet_list.append(r_facet)
F_ref_list.append(f_ref)
F_facet_list.append(f_facet)
TP_ref_list.append(TP_ref)
FP_ref_list.append(FP_ref)
FN_ref_list.append(FN_ref)
TP_facet_list.append(TP_facet)
FP_facet_list.append(FP_facet)
FN_facet_list.append(FN_facet)
TP_ref_sum = sum(TP_ref_list)
FP_ref_sum = sum(FP_ref_list)
FN_ref_sum = sum(FN_ref_list)
TP_facet_sum = sum(TP_facet_list)
FP_facet_sum = sum(FP_facet_list)
FN_facet_sum = sum(FN_facet_list)
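# Micro-averaged Task 1a metrics, computed from the pooled TP/FP/FN counts.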
try:
precision_ref_micro = TP_ref_sum / float(TP_ref_sum + FP_ref_sum)
except ZeroDivisionError as e:
precision_ref_micro = 0
try:
recall_ref_micro = TP_ref_sum / float(TP_ref_sum + FN_ref_sum)
except ZeroDivisionError as e:
recall_ref_micro = 0
try:
f_ref_micro = 2.0 * precision_ref_micro * recall_ref_micro / float(precision_ref_micro + recall_ref_micro)
except ZeroDivisionError as e:
f_ref_micro = 0
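# Macro-averaged Task 1a metrics; note that F1 is derived from the
# macro-averaged precision and recall rather than by averaging per-paper F1.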
try:
precision_ref_macro = sum(P_ref_list) / len(P_ref_list)
except ZeroDivisionError as e:
precision_ref_macro = 0
try:
recall_ref_macro = sum(R_ref_list) / len(R_ref_list)
except ZeroDivisionError as e:
recall_ref_macro = 0
try:
f_ref_macro = 2.0 * precision_ref_macro * recall_ref_macro / float(precision_ref_macro + recall_ref_macro)
except ZeroDivisionError as e:
f_ref_macro = 0
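# Task 1b (discourse facet) micro and macro averages, computed the same way.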
try:
# precision_facet_micro = TP_ref_sum / float(TP_ref_sum + FP_ref_sum)
precision_facet_micro = TP_facet_sum / float(TP_facet_sum + FP_facet_sum)
except ZeroDivisionError as e:
precision_facet_micro = 0
try:
# recall_facet_micro = TP_ref_sum / float(TP_ref_sum + FN_ref_sum)
recall_facet_micro = TP_facet_sum / float(TP_facet_sum + FN_facet_sum)
except ZeroDivisionError as e:
recall_facet_micro = 0
try:
# f_facet_micro = 2.0 * precision_ref_micro * recall_ref_micro / float(precision_ref_micro + recall_ref_micro)
f_facet_micro = 2.0 * precision_facet_micro * recall_facet_micro / float(precision_facet_micro + recall_facet_micro)
except ZeroDivisionError as e:
f_facet_micro = 0
try:
precision_facet_macro = sum(P_facet_list) / len(P_facet_list)
except ZeroDivisionError as e:
precision_facet_macro = 0
try:
recall_facet_macro = sum(R_facet_list) / len(R_facet_list)
except ZeroDivisionError as e:
recall_facet_macro = 0
try:
f_facet_macro = 2.0 * precision_facet_macro * recall_facet_macro / float(precision_facet_macro + recall_facet_macro)
except ZeroDivisionError as e:
f_facet_macro = 0
with open(score_file, "a") as f:
f.write("task1a_precision_micro_avg: " + str(precision_ref_micro) + "\n")
f.write("task1a_precision_macro_avg: " + str(precision_ref_macro) + "\n")
f.write("task1a_recall_micro_avg: " + str(recall_ref_micro) + "\n")
f.write("task1a_recall_macro_avg: " + str(recall_ref_macro) + "\n")
f.write("task1a_f1_micro_avg: " + str(f_ref_micro) + "\n")
f.write("task1a_f1_macro_avg: " + str(f_ref_macro) + "\n")
f.write("task1b_precision_micro_avg: " + str(precision_facet_micro) + "\n")
f.write("task1b_precision_macro_avg: " + str(precision_facet_macro) + "\n")
f.write("task1b_recall_micro_avg: " + str(recall_facet_micro) + "\n")
f.write("task1b_recall_macro_avg: " + str(recall_facet_macro) + "\n")
f.write("task1b_f1_micro_avg: " + str(f_facet_micro) + "\n")
f.write("task1b_f1_macro_avg: " + str(f_facet_macro) + "\n")
if __name__ == "__main__":
input_dir = sys.argv[1]
output_dir = sys.argv[2]
main(input_dir, output_dir)
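# Example invocation (the script filename below is illustrative; the code
# expects gold CSVs under <input_dir>/ref/Task1, submitted CSVs under
# <input_dir>/res/Task1, and writes scores.txt into <output_dir>):
#   python evaluate_task1.py /path/to/input /path/to/output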