ext | sha | content
---|---|---|
py | 1a489369451796825651cb21879453f4897c9a10 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
u"""
OMD Livestatus dynamic inventory script
=======================================
If running as an OMD site user, i.e. if ${OMD_ROOT} is set, we try to
connect to the Livestatus socket at the default location
${OMD_ROOT}/tmp/run/live
Alternatively, the path to the Livestatus socket can be set from the
environment via
export OMD_LIVESTATUS_SOCKET=/omd/sites/mysite/tmp/run/live
or on the command-line with --socket.
Inspired by the DigitalOcean inventory script:
https://github.com/ansible/ansible/blob/devel/contrib/inventory/digital_ocean.py
:author: Andreas Härpfer <[email protected]>
:updated by: Samuel López @elchicodepython
"""
from __future__ import print_function
__version__ = '0.2'
import datetime
import os
import sys
import optparse # Legacy ... 2.6 still out there
import socket
import subprocess
try:
import json
except ImportError:
import simplejson as json
try:
maketrans = str.maketrans # Python 3
except AttributeError:
from string import maketrans # Python 2
class OMDLivestatusInventory(object):
#: default socket path
_def_socket_path = u'/tmp/run/live'
#: Livestatus query string
_def_host_query = (u'GET hosts\n'
'Columns: address name alias groups host_custom_variables\n'
'OutputFormat: json\n')
#: string of bad characters in host or group names
_bad_chars = u'.,;:[]/ '
#: replacement char for bad chars
_replacement_char = u'_'
def __init__(self, location=None, method='socket', by_ip=False):
self.data = {}
self.inventory = {}
self.method = method
#: translation table for sanitizing group names
#
# See the following to find out why this can't be a class variable:
# http://stackoverflow.com/questions/13905741/accessing-class-variables-from-a-list-comprehension-in-the-class-definition
# This version only works for byte strings but not for unicode :-(
#self._trans_table = maketrans(
# self._bad_chars, self._replacement_char * len(_bad_chars))
# Unicode version; see also:
# http://stackoverflow.com/questions/1324067/how-do-i-get-str-translate-to-work-with-unicode-strings
self._trans_table = dict((ord(char), self._replacement_char)
for char in self._bad_chars)
if not location:
if 'OMD_LIVESTATUS_SOCKET' in os.environ:
self.location = os.environ['OMD_LIVESTATUS_SOCKET']
elif 'OMD_ROOT' in os.environ:
self.location = (os.environ['OMD_ROOT']
+ OMDLivestatusInventory._def_socket_path)
else:
raise EnvironmentError(
'Unable to determine location of Livestatus socket. '
'Try setting OMD_LIVESTATUS_SOCKET environment variable.'
)
else:
self.location = location
self.load_from_omd()
if by_ip:
self.build_inventory_by_ip()
else:
self.build_inventory_by_name()
def load_from_omd(self):
"""Read host data from livestatus socket.
Populates self.data['hosts'].
"""
self.data['hosts'] = []
if self.method == 'ssh':
answer = json.loads(self._read_from_ssh())
else:
answer = json.loads(self._read_from_socket())
for host in answer:
self.data['hosts'].append(
dict(zip((u'ip', u'name', u'alias', u'groups', u'custom_vars'),
host)))
def _read_from_socket(self):
"""Read data from local Livestatus socket."""
if ':' in self.location:
s = self._get_tcp_socket()
else:
s = self._get_unix_socket()
s.send(self._def_host_query.encode('utf-8'))
s.shutdown(socket.SHUT_WR)
chunks = []
        # recv() returns bytes (or str on Python 2); an empty chunk signals
        # that the server has closed the connection and the reply is complete.
        while len(chunks) == 0 or chunks[-1] != b"":
            chunks.append(s.recv(4096))
        s.close()
        reply = b"".join(chunks).decode('utf-8')
return reply
def _get_unix_socket(self):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(self.location)
return s
def _get_tcp_socket(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address, port = self.location.split(':')
s.connect((address, int(port)))
return s
def _read_from_ssh(self):
"""Read data from remote Livestatus socket via SSH.
Assumes non-interactive (e.g. via ssh-agent) access to the
remote host. The `unixcat` command (part of Livestatus) has to
be available via $PATH at the remote end.
"""
l = self.location.split(':', 1)
l.append('.' + OMDLivestatusInventory._def_socket_path)
host, path = l[0], l[1]
cmd = ['ssh', host,
'-o', 'BatchMode=yes',
'-o', 'ConnectTimeout=10',
'unixcat {0}'.format(path)]
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(
input=OMDLivestatusInventory._def_host_query.encode('utf-8'))
if p.returncode:
raise RuntimeError(err)
return out.decode('utf-8')
def build_inventory_by_ip(self):
"""Create Ansible inventory by IP address instead of by name.
        Caveat: contrary to host names, IP addresses are not guaranteed to
        be unique in OMD!  Since there is only one set of hostvars for a
        given IP, duplicate IPs might mean that you are losing data.
When creating static inventory output we issue a warning for
duplicate IPs. For the default JSON output this warning is
suppressed since Ansible discards any output on STDERR.
Group names are sanitized to not contain characters that Ansible
can't digest. In particular group names in Ansible must not
contain blanks!
"""
inventory = {}
hostvars = {}
for host in self.data['hosts']:
for group in host['groups'] or [u'_NOGROUP']:
sanitized_group = group.translate(self._trans_table)
if sanitized_group in inventory:
inventory[sanitized_group].append(host['ip'])
else:
inventory[sanitized_group] = [host['ip']]
            # Detect duplicate IPs in inventory.  Keep first occurrence
# in hostvars instead of overwriting with later data.
ip = host['ip']
if ip not in hostvars:
hostvars[ip] = {
'omd_name': host['name'],
'omd_alias': host['alias'],
'omd_custom_vars': host['custom_vars'],
}
#else:
# # duplicate IP
# pass
self.inventory = inventory
self.inventory['_meta'] = {
'hostvars': hostvars
}
def build_inventory_by_name(self):
"""Create Ansible inventory by OMD name.
Group names are sanitized to not contain characters that Ansible
can't digest. In particular group names in Ansible must not
contain blanks!
"""
inventory = {}
hostvars = {}
for host in self.data['hosts']:
for group in host['groups'] or [u'_NOGROUP']:
sanitized_group = group.translate(self._trans_table)
if sanitized_group in inventory:
inventory[sanitized_group].append(host['name'])
else:
inventory[sanitized_group] = [host['name']]
hostvars[host['name']] = {
'ansible_host': host['ip'],
'omd_alias': host['alias'],
'omd_custom_vars': host['custom_vars'],
}
self.inventory = inventory
self.inventory['_meta'] = {
'hostvars': hostvars
}
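    # Illustrative shape of self.inventory after build_inventory_by_name()
    # (group names, host names and values are made up):
    #
    #   {"webservers": ["host1", "host2"],
    #    "_NOGROUP": ["host3"],
    #    "_meta": {"hostvars": {
    #        "host1": {"ansible_host": "10.0.0.1",
    #                  "omd_alias": "Host 1",
    #                  "omd_custom_vars": {}},
    #        ...}}}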
def list(self, indent=None, sort_keys=False):
"""Return full inventory data as JSON."""
return json.dumps(self.inventory, indent=indent, sort_keys=sort_keys)
def host(self, name, indent=None, sort_keys=False):
"""Return hostvars for a single host as JSON."""
if name in self.inventory['_meta']['hostvars']:
return(json.dumps(
self.inventory['_meta']['hostvars'][name],
indent=indent,
sort_keys=sort_keys
))
else:
return("{}")
def static(self):
"""Return data in static inventory format."""
out = []
out.append('# File created: {}'.format(datetime.datetime.now()))
for group in [k for k in self.inventory.keys() if k != '_meta']:
out.append('\n[{0}]'.format(group))
for host in self.inventory[group]:
vars = self.inventory['_meta']['hostvars'][host]
hostvars = []
for varname in vars.keys():
hostvars.append('{0}="{1}"'.format(varname, vars[varname]))
out.append('{0}\t{1}'.format(host, ' '.join(hostvars)))
return '\n'.join(out)
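    # Illustrative static-format output (values are made up); hosts are listed
    # per group with their hostvars appended on the same line:
    #
    #   [webservers]
    #   host1    ansible_host="10.0.0.1" omd_alias="Host 1" omd_custom_vars="{}"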
def _save_method(option, opt_str, value, parser):
parser.values.method = opt_str.lstrip('-')
parser.values.location = value
def parse_arguments():
"""Parse command line arguments."""
parser = optparse.OptionParser(version='%prog {0}'.format(__version__))
parser.set_defaults(method='socket')
output_group = optparse.OptionGroup(parser, 'Output formats')
output_group.add_option(
'--list', action='store_true', dest='list', default=False,
help='Return full Ansible inventory as JSON (default action).')
output_group.add_option(
'--host', type='string', dest='host', default=None,
help='Return Ansible hostvars for HOST as JSON.')
output_group.add_option(
'--static', action='store_true', dest='static', default=False,
help='Print inventory in static file format to stdout.')
output_group.add_option(
'--by-ip', action='store_true', dest='by_ip', default=False,
help='Create inventory by IP (instead of the default by name).')
parser.add_option_group(output_group)
connect_group = optparse.OptionGroup(parser, 'Connection options')
connect_group.add_option(
'--socket', type='string', dest='location', default=None,
action='callback', callback=_save_method,
help=('Set path to Livestatus socket. If omitted, try to use '
'$OMD_LIVESTATUS_SOCKET or $OMD_ROOT/tmp/run/live.'
))
connect_group.add_option(
'--ssh', type='string', dest='location', default=None,
action='callback', callback=_save_method,
help=('Connect to Livestatus socket via SSH. LOCATION has the '
'form [user@]host[:path], the default path is ./tmp/run/live.'
))
parser.add_option_group(connect_group)
opts, args = parser.parse_args()
# Make `list` the default action.
if not opts.host:
opts.list = True
return opts, args
if __name__ == '__main__':
opts, args = parse_arguments()
inv = OMDLivestatusInventory(opts.location,
method=opts.method,
by_ip=opts.by_ip)
if opts.static:
print(inv.static())
elif opts.list:
print(inv.list(indent=4, sort_keys=True))
elif opts.host:
print(inv.host(opts.host, indent=4, sort_keys=True))
else:
print('Missing command.')
sys.exit(1)
|
py | 1a489388ae335955dd311a3500714cef6b041bfd | import ssadata
"""
How many names are subsets of other names?
"""
names_in_other_names = []
for boy_name in ssadata.boys.keys():
for other_boy_name in ssadata.boys.keys():
if boy_name in other_boy_name:
if boy_name != other_boy_name:
names_in_other_names.append(boy_name)
break
else:
pass
else:
pass
for girl_name in ssadata.girls.keys():
for other_girl_name in ssadata.girls.keys():
if girl_name in other_girl_name:
#this time through, we check for duplicates
if girl_name != other_girl_name and girl_name not in names_in_other_names:
names_in_other_names.append(girl_name)
break
else:
pass
else:
pass
print(names_in_other_names)
num_subset_names = len(names_in_other_names)
print("%d baby names are subsets of other baby names." % (num_subset_names))
#OUTPUT
#9046 baby names are subsets of other baby names. |
py | 1a4893a7a2fd953dd17f53c474194be56bf012bc | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Allocates IP address as per DHCP server in the uplink network.
"""
import datetime
import logging
import threading
import time
from ipaddress import IPv4Network, ip_address
from threading import Condition
from typing import MutableMapping, Optional
from magma.mobilityd.dhcp_desc import DHCPDescriptor, DHCPState
from magma.mobilityd.mac import MacAddress, hex_to_mac
from magma.mobilityd.uplink_gw import UplinkGatewayInfo
from scapy.all import AsyncSniffer
from scapy.layers.dhcp import BOOTP, DHCP
from scapy.layers.inet import IP, UDP
from scapy.layers.l2 import Dot1Q, Ether
from scapy.sendrecv import sendp
LOG = logging.getLogger('mobilityd.dhcp.sniff')
DHCP_ACTIVE_STATES = [DHCPState.ACK, DHCPState.OFFER]
class DHCPClient:
THREAD_YIELD_TIME = .1
def __init__(
self,
dhcp_store: MutableMapping[str, DHCPDescriptor],
gw_info: UplinkGatewayInfo,
dhcp_wait: Condition,
iface: str = "dhcp0",
lease_renew_wait_min: int = 200,
):
"""
Implement DHCP client to allocate IP for given Mac address.
DHCP client state is maintained in user provided hash table.
Args:
dhcp_store: maintain DHCP transactions, key is mac address.
            gw_info: stores GW IP info from DHCP server
dhcp_wait: notify users on new DHCP packet
iface: DHCP egress and ingress interface.
"""
self._sniffer = AsyncSniffer(
iface=iface,
filter="udp and (port 67 or 68)",
store=False,
prn=self._rx_dhcp_pkt,
)
self.dhcp_client_state = dhcp_store # mac => DHCP_State
self.dhcp_gw_info = gw_info
self._dhcp_notify = dhcp_wait
self._dhcp_interface = iface
self._msg_xid = 0
self._lease_renew_wait_min = lease_renew_wait_min
self._monitor_thread = threading.Thread(
target=self._monitor_dhcp_state,
)
self._monitor_thread.daemon = True
self._monitor_thread_event = threading.Event()
def run(self):
"""
Start DHCP sniffer thread.
        This initializes the state required for the DHCP sniffer thread and starts it.
Returns: None
"""
self._sniffer.start()
LOG.info("DHCP sniffer started")
# give it time to schedule the thread and start sniffing.
time.sleep(self.THREAD_YIELD_TIME)
self._monitor_thread.start()
def stop(self):
self._sniffer.stop()
self._monitor_thread_event.set()
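    # Typical lifecycle, sketched from the methods in this class (the
    # dhcp_store mapping, gw_info and dhcp_wait objects are supplied by the
    # caller and are not constructed here):
    #   1. client.run()                       -> start sniffer + monitor threads
    #   2. client.send_dhcp_packet(mac, vlan, DHCPState.DISCOVER)
    #   3. _rx_dhcp_pkt()/_process_dhcp_pkt() handle the OFFER, send a REQUEST
    #      and record the ACK in dhcp_store, notifying waiters on dhcp_wait
    #   4. _monitor_dhcp_state() renews or rediscovers leases in the background
    #   5. client.release_ip_address(mac, vlan) and client.stop() to shut down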
def send_dhcp_packet(
self, mac: MacAddress, vlan: int,
state: DHCPState,
dhcp_desc: Optional[DHCPDescriptor] = None,
) -> None:
"""
Send DHCP packet and record state in dhcp_client_state.
Args:
mac: MAC address of interface
state: state of DHCP packet
dhcp_desc: DHCP protocol state.
Returns:
"""
ciaddr = None
# generate DHCP request packet
if state == DHCPState.DISCOVER:
dhcp_opts = [("message-type", "discover")]
dhcp_desc = DHCPDescriptor(
mac=mac, ip="", vlan=vlan,
state_requested=DHCPState.DISCOVER,
)
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
elif state == DHCPState.REQUEST and dhcp_desc:
dhcp_opts = [
("message-type", "request"),
("requested_addr", dhcp_desc.ip),
("server_id", dhcp_desc.server_ip),
]
dhcp_desc.state_requested = DHCPState.REQUEST
pkt_xid = dhcp_desc.xid
ciaddr = dhcp_desc.ip
elif state == DHCPState.RELEASE and dhcp_desc:
dhcp_opts = [
("message-type", "release"),
("server_id", dhcp_desc.server_ip),
]
dhcp_desc.state_requested = DHCPState.RELEASE
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
ciaddr = dhcp_desc.ip
else:
LOG.warning(
"Unknown egress request mac %s state %s",
str(mac),
state,
)
return
dhcp_opts.append("end") # type: ignore[arg-type]
dhcp_desc.xid = pkt_xid
with self._dhcp_notify:
self.dhcp_client_state[mac.as_redis_key(vlan)] = dhcp_desc
pkt = Ether(src=str(mac), dst="ff:ff:ff:ff:ff:ff")
if vlan and vlan != "0":
pkt /= Dot1Q(vlan=int(vlan))
pkt /= IP(src="0.0.0.0", dst="255.255.255.255")
pkt /= UDP(sport=68, dport=67)
pkt /= BOOTP(op=1, chaddr=mac.as_hex(), xid=pkt_xid, ciaddr=ciaddr)
pkt /= DHCP(options=dhcp_opts)
LOG.debug("DHCP pkt xmit %s", pkt.show(dump=True))
sendp(pkt, iface=self._dhcp_interface, verbose=0)
def get_dhcp_desc(
self, mac: MacAddress,
vlan: str,
) -> Optional[DHCPDescriptor]:
"""
Get DHCP description for given MAC.
Args:
mac: Mac address of the client
vlan: vlan id if the IP allocated in a VLAN
Returns: Current DHCP info.
"""
key = mac.as_redis_key(vlan)
if key in self.dhcp_client_state:
return self.dhcp_client_state[key]
LOG.debug("lookup error for %s", str(key))
return None
def release_ip_address(self, mac: MacAddress, vlan: str):
"""
Release DHCP allocated IP.
Args:
mac: MAC address of the IP allocated.
vlan: vlan id if the IP allocated in a VLAN
Returns: None
"""
key = mac.as_redis_key(vlan)
if key not in self.dhcp_client_state:
LOG.error("Unallocated DHCP release for MAC: %s", key)
return
dhcp_desc = self.dhcp_client_state[key]
self.send_dhcp_packet(
mac,
dhcp_desc.vlan,
DHCPState.RELEASE,
dhcp_desc,
)
del self.dhcp_client_state[key]
def _monitor_dhcp_state(self):
"""
monitor DHCP client state.
"""
while True:
wait_time = self._lease_renew_wait_min
with self._dhcp_notify:
for dhcp_record in self.dhcp_client_state.values():
logging.debug("monitor: %s", dhcp_record)
# Only process active records.
if dhcp_record.state not in DHCP_ACTIVE_STATES:
continue
now = datetime.datetime.now()
logging.debug("monitor time: %s", now)
request_state = DHCPState.REQUEST
# in case of lost DHCP lease rediscover it.
if now >= dhcp_record.lease_expiration_time:
request_state = DHCPState.DISCOVER
if now >= dhcp_record.lease_renew_deadline:
logging.debug("sending lease renewal")
self.send_dhcp_packet(
dhcp_record.mac, dhcp_record.vlan,
request_state, dhcp_record,
)
else:
# Find next renewal wait time.
time_to_renew = dhcp_record.lease_renew_deadline - now
wait_time = min(
wait_time, time_to_renew.total_seconds(),
)
            # never wait less than the configured minimum renewal wait time
wait_time = max(wait_time, self._lease_renew_wait_min)
logging.debug("lease renewal check after: %s sec", wait_time)
self._monitor_thread_event.wait(wait_time)
if self._monitor_thread_event.is_set():
break
@staticmethod
def _get_option(packet, name):
for opt in packet[DHCP].options:
if opt[0] == name:
return opt[1]
return None
def _process_dhcp_pkt(self, packet, state: DHCPState):
LOG.debug("DHCP pkt recv %s", packet.show(dump=True))
mac_addr = MacAddress(hex_to_mac(packet[BOOTP].chaddr.hex()[0:12]))
vlan = ""
if Dot1Q in packet:
vlan = str(packet[Dot1Q].vlan)
mac_addr_key = mac_addr.as_redis_key(vlan)
with self._dhcp_notify:
if mac_addr_key in self.dhcp_client_state:
state_requested = self.dhcp_client_state[mac_addr_key].state_requested
if BOOTP not in packet or packet[BOOTP].yiaddr is None:
LOG.error("no ip offered")
return
ip_offered = packet[BOOTP].yiaddr
subnet_mask = self._get_option(packet, "subnet_mask")
if subnet_mask is not None:
ip_subnet = IPv4Network(
ip_offered + "/" + subnet_mask, strict=False,
)
else:
ip_subnet = IPv4Network(
ip_offered + "/" + "32", strict=False,
)
dhcp_server_ip = None
if IP in packet:
dhcp_server_ip = packet[IP].src
dhcp_router_opt = self._get_option(packet, "router")
if dhcp_router_opt is not None:
router_ip_addr = ip_address(dhcp_router_opt)
else:
                    # use the DHCP server as upstream router in case Option 3 (router) is missing.
router_ip_addr = dhcp_server_ip
self.dhcp_gw_info.update_ip(router_ip_addr, vlan)
lease_expiration_time = self._get_option(packet, "lease_time")
dhcp_state = DHCPDescriptor(
mac=mac_addr,
ip=ip_offered,
state=state,
vlan=vlan,
state_requested=state_requested,
subnet=str(ip_subnet),
server_ip=dhcp_server_ip,
router_ip=router_ip_addr,
lease_expiration_time=lease_expiration_time,
xid=packet[BOOTP].xid,
)
LOG.info(
"Record DHCP for: %s state: %s",
mac_addr_key,
dhcp_state,
)
self.dhcp_client_state[mac_addr_key] = dhcp_state
self._dhcp_notify.notifyAll()
if state == DHCPState.OFFER:
# let other thread work on fulfilling IP allocation
# request.
threading.Event().wait(self.THREAD_YIELD_TIME)
self.send_dhcp_packet(
mac_addr, vlan, DHCPState.REQUEST, dhcp_state,
)
else:
LOG.debug("Unknown MAC: %s ", packet.summary())
return
# ref: https://fossies.org/linux/scapy/scapy/layers/dhcp.py
def _rx_dhcp_pkt(self, packet):
if DHCP not in packet:
return
# Match DHCP offer
if packet[DHCP].options[0][1] == int(DHCPState.OFFER):
self._process_dhcp_pkt(packet, DHCPState.OFFER)
# Match DHCP ack
elif packet[DHCP].options[0][1] == int(DHCPState.ACK):
self._process_dhcp_pkt(packet, DHCPState.ACK)
# TODO handle other DHCP protocol events.
|
py | 1a48949e6adb2a25b089b2b7bf0217a22ffbe565 | import device_module
def test_validate_device_json():
"""A test for validate_device_json"""
test = "test_files/device_test.json"
data = device_module.validate_device_json(test)
assert data == {'device_id': 1,
'patient_assigned': 12,
'device_type': 'temperature',
'measurement': 98.6,
'MAC': '30-65-EC-6F-C4-58',
'purchase_date': '01-01-2001',
'model_number': 1234,
'model_name': 'temp-o-matic',
'serial_number': 56789}
|
py | 1a4894a765c51ac2f8fd9fdb1f0870857c6728ca | def shout(word='yes'):
return word.capitalize() + '!'
print(shout())
# outputs : 'Yes!'
# As an object, you can assign the function to a variable like any
# other object
scream = shout
# Notice we don’t use parentheses: we are not calling the function, we are
# putting the function `shout` into the variable `scream`.
# It means you can then call `shout` from `scream`:
print(scream())
# outputs : 'Yes!'
# More than that, it means you can remove the old name `shout`, and
# the function will still be accessible from `scream`
del shout
try:
print(shout())
except NameError as e:
print(e)
#outputs: "name 'shout' is not defined"
print(scream())
# outputs: 'Yes!'
def talk():
# You can define a function on the fly in `talk` ...
def whisper(word='yes'):
return word.lower() + '...'
# ... and use it right away!
print(whisper())
# You call `talk`, that defines `whisper` EVERY TIME you call it, then
# `whisper` is called in `talk`.
talk()
# outputs:
# "yes..."
# But `whisper` DOES NOT EXIST outside `talk`:
try:
print(whisper())
except NameError as e:
print(e)
#outputs : "name 'whisper' is not defined"*
#Python's functions are objects
def getTalk(kind='shout'):
# We define functions on the fly
def shout(word='yes'):
return word.capitalize() + '!'
def whisper(word='yes'):
return word.lower() + '...'
# Then we return one of them
if kind == 'shout':
# We don’t use '()'. We are not calling the function;
# instead, we’re returning the function object
return shout
else:
return whisper
# How do you use this strange beast?
# Get the function and assign it to a variable
talk = getTalk()
# You can see that `talk` is here a function object:
print(talk)
#outputs : <function shout at 0xb7ea817c>
# The object is the one returned by the function:
print(talk())
#outputs : Yes!
# And you can even use it directly if you feel wild:
print(getTalk('whisper')())
#outputs : yes...
def doSomethingBefore(func):
print('I do something before then I call the function you gave me')
print(func())
doSomethingBefore(scream)
#outputs:
#I do something before then I call the function you gave me
#Yes!
|
py | 1a48961e7a7a5fce4c4933b6a04b47e0ed3be0f9 | #!/usr/bin/env python
#===============================================================================
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
try:
from packaging.version import Version
except ImportError:
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
import warnings
from sklearn.neighbors._base import NeighborsBase as sklearn_NeighborsBase
from sklearn.neighbors._ball_tree import BallTree
from sklearn.neighbors._kd_tree import KDTree
from sklearn.neighbors._base import _check_weights
from sklearn.neighbors._base import VALID_METRICS
from sklearn.neighbors._classification import KNeighborsClassifier as \
sklearn_KNeighborsClassifier
from sklearn.neighbors._unsupervised import NearestNeighbors as \
sklearn_NearestNeighbors
from sklearn.utils.validation import _deprecate_positional_args, check_is_fitted
from onedal.datatypes import _check_array, _num_features, _num_samples
from onedal.neighbors import KNeighborsClassifier as onedal_KNeighborsClassifier
from .._device_offload import dispatch, wrap_output_data
import numpy as np
from scipy import sparse as sp
if Version(sklearn_version) >= Version("0.24"):
class KNeighborsClassifier_(sklearn_KNeighborsClassifier):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = \
weights if Version(sklearn_version) >= Version("1.0") \
else _check_weights(weights)
elif Version(sklearn_version) >= Version("0.22"):
from sklearn.neighbors._base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(sklearn_KNeighborsClassifier,
BaseSupervisedIntegerMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
else:
from sklearn.neighbors.base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(sklearn_KNeighborsClassifier,
BaseSupervisedIntegerMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
class KNeighborsClassifier(KNeighborsClassifier_):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
def fit(self, X, y):
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=True)
if self.metric_params is not None and 'p' in self.metric_params:
if self.p is not None:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=2)
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.metric_params["p"]
else:
self.effective_metric_params_ = {}
effective_p = self.p
if self.metric in ["minkowski"]:
if effective_p < 1:
raise ValueError("p must be greater or equal to one for minkowski metric")
self.effective_metric_params_["p"] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == "minkowski":
p = self.effective_metric_params_.pop("p", 2)
if p < 1:
raise ValueError(
"p must be greater or equal to one for minkowski metric"
)
if p == 1:
self.effective_metric_ = "manhattan"
elif p == 2:
self.effective_metric_ = "euclidean"
elif p == np.inf:
self.effective_metric_ = "chebyshev"
else:
self.effective_metric_params_["p"] = p
if self.metric == "manhattan":
self.p = 1
if not isinstance(X, (KDTree, BallTree, sklearn_NeighborsBase)):
self._fit_X = _check_array(
X, dtype=[np.float64, np.float32], accept_sparse=True)
self.n_samples_fit_ = _num_samples(self._fit_X)
self.n_features_in_ = _num_features(self._fit_X)
if self.algorithm == "auto":
# A tree approach is better for small number of neighbors or small
# number of features, with KDTree generally faster when available
is_n_neighbors_valid_for_brute = self.n_neighbors is not None and \
self.n_neighbors >= self._fit_X.shape[0] // 2
if self._fit_X.shape[1] > 15 or is_n_neighbors_valid_for_brute:
self._fit_method = "brute"
else:
if self.effective_metric_ in VALID_METRICS["kd_tree"]:
self._fit_method = "kd_tree"
elif callable(self.effective_metric_) or \
self.effective_metric_ in \
VALID_METRICS["ball_tree"]:
self._fit_method = "ball_tree"
else:
self._fit_method = "brute"
else:
self._fit_method = self.algorithm
if hasattr(self, '_onedal_estimator'):
delattr(self, '_onedal_estimator')
# To cover test case when we pass patched
# estimator as an input for other estimator
if isinstance(X, sklearn_NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
self.n_samples_fit_ = X.n_samples_fit_
self.n_features_in_ = X.n_features_in_
if hasattr(X, '_onedal_estimator'):
if self._fit_method == "ball_tree":
X._tree = BallTree(
X._fit_X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "kd_tree":
X._tree = KDTree(
X._fit_X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "brute":
X._tree = None
else:
raise ValueError("algorithm = '%s' not recognized" % self.algorithm)
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
self.n_samples_fit_ = X.data.shape[0]
self.n_features_in_ = X.data.shape[1]
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
self.n_samples_fit_ = X.data.shape[0]
self.n_features_in_ = X.data.shape[1]
dispatch(self, 'neighbors.KNeighborsClassifier.fit', {
'onedal': self.__class__._onedal_fit,
'sklearn': sklearn_KNeighborsClassifier.fit,
}, X, y)
return self
@wrap_output_data
def predict(self, X):
check_is_fitted(self)
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=False)
return dispatch(self, 'neighbors.KNeighborsClassifier.predict', {
'onedal': self.__class__._onedal_predict,
'sklearn': sklearn_KNeighborsClassifier.predict,
}, X)
@wrap_output_data
def predict_proba(self, X):
check_is_fitted(self)
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=False)
return dispatch(self, 'neighbors.KNeighborsClassifier.predict_proba', {
'onedal': self.__class__._onedal_predict_proba,
'sklearn': sklearn_KNeighborsClassifier.predict_proba,
}, X)
@wrap_output_data
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
check_is_fitted(self)
if Version(sklearn_version) >= Version("1.0"):
self._check_feature_names(X, reset=False)
return dispatch(self, 'neighbors.KNeighborsClassifier.kneighbors', {
'onedal': self.__class__._onedal_kneighbors,
'sklearn': sklearn_KNeighborsClassifier.kneighbors,
}, X, n_neighbors, return_distance)
@wrap_output_data
def radius_neighbors(self, X=None, radius=None, return_distance=True,
sort_results=False):
_onedal_estimator = getattr(self, '_onedal_estimator', None)
if _onedal_estimator is not None or getattr(self, '_tree', 0) is None and \
self._fit_method == 'kd_tree':
if Version(sklearn_version) >= Version("0.24"):
sklearn_NearestNeighbors.fit(self, self._fit_X, getattr(self, '_y', None))
else:
sklearn_NearestNeighbors.fit(self, self._fit_X)
if Version(sklearn_version) >= Version("0.22"):
result = sklearn_NearestNeighbors.radius_neighbors(
self, X, radius, return_distance, sort_results)
else:
result = sklearn_NearestNeighbors.radius_neighbors(
self, X, radius, return_distance)
return result
def _onedal_gpu_supported(self, method_name, *data):
X_incorrect_type = isinstance(data[0], (KDTree, BallTree, sklearn_NeighborsBase))
if X_incorrect_type:
return False
if self._fit_method in ['auto', 'ball_tree']:
condition = self.n_neighbors is not None and \
self.n_neighbors >= self.n_samples_fit_ // 2
if self.n_features_in_ > 15 or condition:
result_method = 'brute'
else:
if self.effective_metric_ in ['euclidean']:
result_method = 'kd_tree'
else:
result_method = 'brute'
else:
result_method = self._fit_method
is_sparse = sp.isspmatrix(data[0])
is_single_output = False
class_count = 1
if len(data) > 1 or hasattr(self, '_onedal_estimator'):
# To check multioutput, might be overhead
if len(data) > 1:
y = np.asarray(data[1])
class_count = len(np.unique(y))
if hasattr(self, '_onedal_estimator'):
y = self._onedal_estimator._y
is_single_output = y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1
is_valid_for_brute = result_method in ['brute'] and \
self.effective_metric_ in ['manhattan',
'minkowski',
'euclidean',
'chebyshev',
'cosine']
is_valid_weights = self.weights in ['uniform', "distance"]
main_condition = is_valid_for_brute and not is_sparse and \
is_single_output and is_valid_weights
if method_name == 'neighbors.KNeighborsClassifier.fit':
return main_condition and class_count >= 2
if method_name in ['neighbors.KNeighborsClassifier.predict',
'neighbors.KNeighborsClassifier.predict_proba',
'neighbors.KNeighborsClassifier.kneighbors']:
return main_condition and hasattr(self, '_onedal_estimator')
raise RuntimeError(f'Unknown method {method_name} in {self.__class__.__name__}')
def _onedal_cpu_supported(self, method_name, *data):
X_incorrect_type = isinstance(data[0], (KDTree, BallTree, sklearn_NeighborsBase))
if X_incorrect_type:
return False
if self._fit_method in ['auto', 'ball_tree']:
condition = self.n_neighbors is not None and \
self.n_neighbors >= self.n_samples_fit_ // 2
if self.n_features_in_ > 15 or condition:
result_method = 'brute'
else:
if self.effective_metric_ in ['euclidean']:
result_method = 'kd_tree'
else:
result_method = 'brute'
else:
result_method = self._fit_method
is_sparse = sp.isspmatrix(data[0])
is_single_output = False
class_count = 1
if len(data) > 1 or hasattr(self, '_onedal_estimator'):
# To check multioutput, might be overhead
if len(data) > 1:
y = np.asarray(data[1])
class_count = len(np.unique(y))
if hasattr(self, '_onedal_estimator'):
y = self._onedal_estimator._y
is_single_output = y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1
is_valid_for_kd_tree = \
result_method in ['kd_tree'] and self.effective_metric_ in ['euclidean']
is_valid_for_brute = result_method in ['brute'] and \
self.effective_metric_ in ['manhattan',
'minkowski',
'euclidean',
'chebyshev',
'cosine']
is_valid_weights = self.weights in ['uniform', "distance"]
main_condition = (is_valid_for_kd_tree or is_valid_for_brute) and \
not is_sparse and is_single_output and is_valid_weights
if method_name == 'neighbors.KNeighborsClassifier.fit':
return main_condition and class_count >= 2
if method_name in ['neighbors.KNeighborsClassifier.predict',
'neighbors.KNeighborsClassifier.predict_proba',
'neighbors.KNeighborsClassifier.kneighbors']:
return main_condition and hasattr(self, '_onedal_estimator')
raise RuntimeError(f'Unknown method {method_name} in {self.__class__.__name__}')
def _onedal_fit(self, X, y, queue=None):
onedal_params = {
'n_neighbors': self.n_neighbors,
'weights': self.weights,
'algorithm': self.algorithm,
'metric': self.effective_metric_,
'p': self.p,
}
try:
requires_y = self._get_tags()["requires_y"]
except KeyError:
requires_y = False
self._onedal_estimator = onedal_KNeighborsClassifier(**onedal_params)
self._onedal_estimator.requires_y = requires_y
self._onedal_estimator.effective_metric_ = self.effective_metric_
self._onedal_estimator.effective_metric_params_ = self.effective_metric_params_
self._onedal_estimator.fit(X, y, queue=queue)
self._save_attributes()
def _onedal_predict(self, X, queue=None):
return self._onedal_estimator.predict(X, queue=queue)
def _onedal_predict_proba(self, X, queue=None):
return self._onedal_estimator.predict_proba(X, queue=queue)
def _onedal_kneighbors(self, X=None, n_neighbors=None,
return_distance=True, queue=None):
return self._onedal_estimator.kneighbors(
X, n_neighbors, return_distance, queue=queue)
def _save_attributes(self):
self.classes_ = self._onedal_estimator.classes_
self.n_features_in_ = self._onedal_estimator.n_features_in_
self.n_samples_fit_ = self._onedal_estimator.n_samples_fit_
self._fit_X = self._onedal_estimator._fit_X
self._y = self._onedal_estimator._y
self._fit_method = self._onedal_estimator._fit_method
self.outputs_2d_ = self._onedal_estimator.outputs_2d_
self._tree = self._onedal_estimator._tree
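# Minimal usage sketch (illustrative only; the import path and the X/y arrays
# are assumptions, not defined in this module):
#
#   from sklearnex.neighbors import KNeighborsClassifier
#   clf = KNeighborsClassifier(n_neighbors=5, algorithm='brute')
#   clf.fit(X_train, y_train)            # dispatched to oneDAL when supported
#   proba = clf.predict_proba(X_test)
#   dist, ind = clf.kneighbors(X_test, n_neighbors=3)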
|
py | 1a48964951031ccace5b38e4e37faf6e19fe9b71 | for _ in range(int(input())):
a, b = input().split()
print(b * int(a))
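# Example (illustrative): for a test-case line "3 ab" this prints "ababab".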
|
py | 1a489671560fe7b8c1004c5f675ae7c6a575e4f6 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..graph.model_transformer import ModelTransformer
from ..configs.config import Config
from ..utils.save import save_model
class CompressionLoss:
"""
Used to calculate additional loss to be added to the base loss during the
training process. It uses the model graph to measure variables and activations
values of the layers during the loss construction. For example, the $L_0$-based
sparsity algorithm calculates the number of non-zero weights in convolutional
and fully-connected layers to construct the loss function.
"""
def call(self):
"""
Returns the compression loss value.
"""
return 0
def statistics(self):
"""
Returns a dictionary of printable statistics.
"""
return {}
def __call__(self, *args, **kwargs):
"""
Invokes the `CompressionLoss` instance.
Returns:
the compression loss value.
"""
return self.call(*args, **kwargs)
def get_config(self):
"""
Returns the config dictionary for a `CompressionLoss` instance.
"""
return {}
@classmethod
def from_config(cls, config):
"""
Instantiates a `CompressionLoss` from its config (output of `get_config()`).
Arguments:
config: Output of `get_config()`.
Returns:
A `CompressionLoss` instance.
"""
return cls(**config)
class CompressionScheduler:
"""
Implements the logic of compression method control during the training process.
May change the method hyperparameters in regards to the current training step or
epoch. For example, the sparsity method can smoothly increase the sparsity rate
over several epochs.
"""
def __init__(self):
self.last_epoch = 0
self.last_step = 0
def step(self, last=None):
"""
Should be called after each optimizer step during training.
Arguments:
`last` - specifies the initial "previous" step.
"""
if last is None:
last = self.last_step + 1
self.last_step = last
def epoch_step(self, last=None):
"""
Should be called after each training epoch.
Arguments:
`last` - specifies the initial "previous" epoch.
"""
if last is None:
last = self.last_epoch + 1
self.last_epoch = last
def get_config(self):
"""
Returns the config dictionary for a `CompressionScheduler` instance.
"""
return {}
@classmethod
def from_config(cls, config):
"""
Instantiates a `CompressionScheduler` from its config (output of `get_config()`).
Arguments:
config: Output of `get_config()`.
Returns:
A `CompressionScheduler` instance.
"""
return cls(**config)
class CompressionAlgorithmInitializer:
"""
Configures certain parameters of the algorithm that require access to the dataset
(for example, in order to do range initialization for activation quantizers) or
to the loss function to be used during fine-tuning (for example, to determine
quantizer precision bitwidth using HAWQ).
"""
def call(self, model, dataset=None, loss=None):
pass
def __call__(self, *args, **kwargs):
self.call(*args, **kwargs)
class CompressionAlgorithmController:
"""
Serves as a handle to the additional modules, parameters and hooks inserted
into the original uncompressed model in order to enable algorithm-specific compression.
Hosts entities that are to be used during the training process, such as compression scheduler and
compression loss.
"""
def __init__(self, target_model):
"""
Arguments:
`target_model` - model with additional modifications necessary to enable algorithm-specific
compression during fine-tuning built by the `CompressionAlgorithmBuilder`.
"""
self._model = target_model
self._loss = CompressionLoss()
self._scheduler = CompressionScheduler()
self._initializer = CompressionAlgorithmInitializer()
@property
def model(self):
return self._model
@property
def loss(self):
return self._loss
@property
def scheduler(self):
return self._scheduler
def initialize(self, dataset=None, loss=None):
"""
Configures certain parameters of the algorithm that require access to the dataset
(for example, in order to do range initialization for activation quantizers) or
to the loss function to be used during fine-tuning (for example, to determine
quantizer precision bitwidth using HAWQ).
"""
self._initializer(self._model, dataset, loss)
def statistics(self):
"""
Returns a dictionary of printable statistics.
"""
return self._loss.statistics()
def export_model(self, save_path, save_format='frozen_graph'):
"""
Used to export the compressed model to the Frozen Graph, TensorFlow SavedModel or
Keras H5 formats. Makes method-specific preparations of the model, (e.g. removing
auxiliary layers that were used for the model compression), then exports the model
in specified path.
Arguments:
`save_path` - a path to export model.
`save_format` - saving format (`frozen_graph` for Frozen Graph,
`tf` for Tensorflow SavedModel, `h5` for Keras H5 format).
"""
stripped_model = self.strip_model(self.model)
save_model(stripped_model, save_path, save_format)
def strip_model(self, model):
"""
Strips auxiliary layers that were used for the model compression, as it's only needed
for training. The method is used before exporting model in the target format.
Arguments:
model: compressed model.
Returns:
A stripped model.
"""
return model
class CompressionAlgorithmBuilder:
"""
Determines which modifications should be made to the original model in
order to enable algorithm-specific compression during fine-tuning.
"""
def __init__(self, config: Config):
"""
Arguments:
`config` - a dictionary that contains parameters of compression method.
"""
self.config = config
def apply_to(self, model):
"""
Applies algorithm-specific modifications to the model.
Arguments:
model: original uncompressed model.
Returns:
Model with additional modifications necessary to enable algorithm-specific
compression during fine-tuning.
"""
#transformation_layout = self.get_transformation_layout(model)
#return ModelTransformer(model, transformation_layout).transform()
return model
def build_controller(self, model):
"""
Builds `CompressionAlgorithmController` to handle to the additional modules, parameters
and hooks inserted into the model in order to enable algorithm-specific compression.
Arguments:
model: model with additional modifications necessary to enable
algorithm-specific compression during fine-tuning.
Returns:
An instance of the `CompressionAlgorithmController`.
"""
return CompressionAlgorithmController(model)
def get_transformation_layout(self, model):
"""
Computes necessary model transformations to enable algorithm-specific compression.
Arguments:
model: original uncompressed model.
Returns:
An instance of the `TransformationLayout` class containing a list of
algorithm-specific modifications.
"""
raise NotImplementedError
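# Minimal sketch of how the classes above fit together during fine-tuning.
# The "NoOp" builder and the training-loop flow in the comment below are
# illustrative assumptions, not part of this module.
class NoOpCompressionAlgorithmBuilder(CompressionAlgorithmBuilder):
    """Builder that applies no modifications; useful only as an example."""
    def apply_to(self, model):
        # No algorithm-specific transformations in this sketch.
        return model
# Typical usage (commented, since model/dataset/loss are not defined here):
#   builder = NoOpCompressionAlgorithmBuilder(config)
#   compressed_model = builder.apply_to(model)
#   compression_ctrl = builder.build_controller(compressed_model)
#   compression_ctrl.initialize(dataset, loss)
#   for epoch in range(num_epochs):
#       for batch in dataset:
#           total_loss = task_loss(batch) + compression_ctrl.loss()
#           # ... backward pass / optimizer step ...
#           compression_ctrl.scheduler.step()
#       compression_ctrl.scheduler.epoch_step()
#   compression_ctrl.export_model('compressed_model.pb')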
|
py | 1a48971555e2e1e25f35bf1377b986a1e57d31f6 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image41.xlsx')
# Despite a lot of effort and testing I can't match Excel's
        # calculations exactly for EMF files. The differences are small
        # (<1%) and in general aren't visible. The following ignores the
        # elements where these differences occur until they can be
        # resolved. This issue doesn't occur for any other image type.
self.ignore_elements = {'xl/drawings/drawing1.xml': ['<xdr:rowOff>', '<xdr:colOff>', '<a:ext cx=']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('E9', self.image_dir + 'logo.emf')
workbook.close()
self.assertExcelEqual()
|
py | 1a48978af2248cf6b489fc76291268969ea4054d | import sys
sys.path.append("..")
from intcode import IntCodeMachine
from collections import defaultdict
def read_input():
f = open("input_day09.txt")
l = [int(n) for n in f.readline().strip().split(",")]
return defaultdict(int, enumerate(l))
def run():
input_list = read_input()
m = IntCodeMachine(input_list, lambda: 2, lambda x: sys.stdout.write(str(x)))
m.run()
print()
if __name__ == '__main__':
run()
|
py | 1a48982278605d7eafc165c42170399a188cf979 | # Copyright (c) 2019 Remi Salmon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# imports
import gpxpy
import numpy as np
from datetime import datetime
from scipy.interpolate import interp1d, splprep, splev
# constants
EARTH_RADIUS = 6371e3 # meters
# functions
def gpx_interpolate(gpx_data, res, deg = 1):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# res = float
# deg = int
# output: gpx_data_interp = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
if not type(deg) is int:
raise TypeError('deg must be int')
if not 1 <= deg <= 5:
raise ValueError('deg must be in [1-5]')
if not len(gpx_data['lat']) > deg:
raise ValueError('number of data points must be > deg')
# interpolate spatial data
_gpx_data = gpx_remove_duplicate(gpx_data)
_gpx_dist = gpx_calculate_distance(_gpx_data, use_ele = True)
x = [_gpx_data[i] for i in ('lat', 'lon', 'ele') if _gpx_data[i]]
tck, _ = splprep(x, u = np.cumsum(_gpx_dist), k = deg, s = 0)
u_interp = np.linspace(0, np.sum(_gpx_dist), num = 1+int(np.sum(_gpx_dist)/res))
x_interp = splev(u_interp, tck)
# interpolate time data linearly to preserve monotonicity
if _gpx_data['tstamp']:
f = interp1d(np.cumsum(_gpx_dist), _gpx_data['tstamp'], fill_value = 'extrapolate')
tstamp_interp = f(u_interp)
gpx_data_interp = {'lat':list(x_interp[0]),
'lon':list(x_interp[1]),
'ele':list(x_interp[2]) if gpx_data['ele'] else None,
'tstamp':list(tstamp_interp) if gpx_data['tstamp'] else None,
'tzinfo':gpx_data['tzinfo']}
return gpx_data_interp
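# Tiny illustrative call (coordinates are made up; no elevation or timestamps):
#
#   data = {'lat': [48.000, 48.001, 48.002], 'lon': [2.000, 2.001, 2.002],
#           'ele': None, 'tstamp': None, 'tzinfo': None}
#   dense = gpx_interpolate(data, res=10.0, deg=1)  # roughly one point every 10 m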
def gpx_calculate_distance(gpx_data, use_ele = True):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# use_ele = bool
# output: gpx_dist = numpy.ndarray[float]
gpx_dist = np.zeros(len(gpx_data['lat']))
for i in range(len(gpx_dist)-1):
lat1 = np.radians(gpx_data['lat'][i])
lon1 = np.radians(gpx_data['lon'][i])
lat2 = np.radians(gpx_data['lat'][i+1])
lon2 = np.radians(gpx_data['lon'][i+1])
delta_lat = lat2-lat1
delta_lon = lon2-lon1
c = 2.0*np.arcsin(np.sqrt(np.sin(delta_lat/2.0)**2+np.cos(lat1)*np.cos(lat2)*np.sin(delta_lon/2.0)**2)) # haversine formula
dist_latlon = EARTH_RADIUS*c # great-circle distance
if gpx_data['ele'] and use_ele:
dist_ele = gpx_data['ele'][i+1]-gpx_data['ele'][i]
gpx_dist[i+1] = np.sqrt(dist_latlon**2+dist_ele**2)
else:
gpx_dist[i+1] = dist_latlon
return gpx_dist
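# For reference, the haversine computation above in conventional notation:
#   c = 2 * arcsin( sqrt( sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2) ) )
#   great-circle distance = EARTH_RADIUS * c   (meters)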
def gpx_calculate_speed(gpx_data):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# output: gpx_speed = numpy.ndarray[float]
gpx_dist = gpx_calculate_distance(gpx_data, use_ele = True)
gpx_dtstamp = np.diff(gpx_data['tstamp'], prepend = gpx_data['tstamp'][0])
gpx_dtstamp[gpx_dtstamp < 1e-6] = np.nan
gpx_speed = np.nan_to_num(gpx_dist/gpx_dtstamp, nan = 0.0)
return gpx_speed
def gpx_remove_duplicate(gpx_data):
# input: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# output: gpx_data_nodup = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
gpx_dist = gpx_calculate_distance(gpx_data)
i_dist = np.concatenate(([0], np.nonzero(gpx_dist)[0])) # keep gpx_dist[0] = 0.0
if not len(gpx_dist) == len(i_dist):
print('Removed {} duplicate trackpoint(s)'.format(len(gpx_dist)-len(i_dist)))
gpx_data_nodup = {'lat':[], 'lon':[], 'ele':[], 'tstamp':[], 'tzinfo':gpx_data['tzinfo']}
for k in ('lat', 'lon', 'ele', 'tstamp'):
gpx_data_nodup[k] = [gpx_data[k][i] for i in i_dist] if gpx_data[k] else None
return gpx_data_nodup
def gpx_read(gpx_file):
# input: gpx_file = str
# output: gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
gpx_data = {'lat':[], 'lon':[], 'ele':[], 'tstamp':[], 'tzinfo':None}
i = 0
i_latlon = []
i_tstamp = []
with open(gpx_file, 'r') as file:
gpx = gpxpy.parse(file)
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
gpx_data['lat'].append(point.latitude)
gpx_data['lon'].append(point.longitude)
i_latlon.append(i)
try:
gpx_data['ele'].append(point.elevation)
except:
pass
try:
gpx_data['tstamp'].append(point.time.timestamp())
except:
pass
else:
if not gpx_data['tzinfo']:
gpx_data['tzinfo'] = point.time.tzinfo
i_tstamp.append(i)
i += 1
if i_tstamp and not len(i_latlon) == len(i_tstamp):
for k in ('lat', 'lon', 'ele', 'tstamp'):
gpx_data[k] = [gpx_data[k][i] for i in i_tstamp] if gpx_data[k] else None
return gpx_data
def gpx_write(gpx_file, gpx_data, write_speed = False):
# input: gpx_file = str
# gpx_data = dict{'lat':list[float], 'lon':list[float], 'ele':list[float], 'tstamp':list[float], 'tzinfo':datetime.tzinfo}
# write_speed = bool
# output: None
if write_speed:
gpx_speed = gpx_calculate_speed(gpx_data)
gpx = gpxpy.gpx.GPX()
gpx_track = gpxpy.gpx.GPXTrack()
gpx_segment = gpxpy.gpx.GPXTrackSegment()
gpx.tracks.append(gpx_track)
gpx_track.segments.append(gpx_segment)
for i in range(len(gpx_data['lat'])):
lat = gpx_data['lat'][i]
lon = gpx_data['lon'][i]
ele = gpx_data['ele'][i] if gpx_data['ele'] else None
time = datetime.fromtimestamp(gpx_data['tstamp'][i], tz = gpx_data['tzinfo']) if gpx_data['tstamp'] else None
speed = gpx_speed[i] if write_speed else None
gpx_point = gpxpy.gpx.GPXTrackPoint(lat, lon, ele, time, speed = speed)
gpx_segment.points.append(gpx_point)
try:
with open(gpx_file, 'w') as file:
file.write(gpx.to_xml(version = '1.0' if write_speed else '1.1'))
except:
exit('ERROR Failed to save {}'.format(gpx_file))
return
# main
def main():
import argparse
parser = argparse.ArgumentParser(description = 'interpolate GPX file(s) using linear or spline interpolation')
parser.add_argument('gpx_files', metavar = 'FILE', nargs = '+', help = 'GPX file(s)')
parser.add_argument('-d', '--deg', type = int, default = 1, help = 'interpolation degree, 1=linear, 2-5=spline (default: 1)')
parser.add_argument('-r', '--res', type = float, default = 1.0, help = 'interpolation resolution in meters (default: 1)')
parser.add_argument('-s', '--speed', action = 'store_true', help = 'Save interpolated speed')
args = parser.parse_args()
for gpx_file in args.gpx_files:
if not gpx_file.endswith('_interpolated.gpx'):
gpx_data = gpx_read(gpx_file)
print('Read {} trackpoints from {}'.format(len(gpx_data['lat']), gpx_file))
gpx_data_interp = gpx_interpolate(gpx_data, args.res, args.deg)
output_file = '{}_interpolated.gpx'.format(gpx_file[:-4])
gpx_write(output_file, gpx_data_interp, write_speed = args.speed)
print('Saved {} trackpoints to {}'.format(len(gpx_data_interp['lat']), output_file))
if __name__ == '__main__':
main()
|
py | 1a489831a69f94d1a9455ca084d48104a0762611 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import service
import survey_common
import survey_utils
import validation_utils
import validation_common
import errors
import json
import time
import StringIO
import uuid
from botocore.exceptions import ClientError
@service.api
def post(request, survey_id, question):
validation_common.validate_survey_id(survey_id)
validation_common.validate_question(question)
survey_common.ensure_survey_exists(survey_id)
question_id = str(uuid.uuid4())
survey_utils.get_survey_table().update_item(
Key={'survey_id':survey_id},
UpdateExpression='SET questions = list_append(questions, :question) ADD num_active_questions :one',
ExpressionAttributeValues={
':question': [{'id': question_id, 'active': True}],
':one': 1
}
)
item = {}
item['survey_id'] = survey_id
item['question_id'] = question_id
item['metadata'] = survey_common.extract_question_metadata(question)
item['title'] = question['title']
item['type'] = question['type']
survey_utils.get_question_table().put_item(
Item=item
)
return {
'question_id': question_id
}
@service.api
def put(request, survey_id, question_id, question):
validation_utils.validate_param(survey_id, 'survey_id', validation_utils.is_not_blank_str)
validation_utils.validate_param(question_id, 'question_id', validation_utils.is_not_blank_str)
validation_common.validate_question(question)
survey_common.ensure_question_belongs_to_survey(survey_id, question_id)
item = {}
item['survey_id'] = survey_id
item['question_id'] = question_id
item['metadata'] = survey_common.extract_question_metadata(question)
item['title'] = question['title']
item['type'] = question['type']
survey_utils.get_question_table().put_item(
Item=item
)
return 'success'
@service.api
def delete(request, survey_id, question_id):
validation_utils.validate_param(survey_id, 'survey_id', validation_utils.is_not_blank_str)
validation_utils.validate_param(question_id, 'question_id', validation_utils.is_not_blank_str)
survey_metadata = survey_utils.get_survey_metadata_by_id(survey_id, ['questions'], True)
for i, question in enumerate(survey_metadata['questions']):
if question.get('id') == question_id:
try:
if question.get('active'):
survey_utils.get_survey_table().update_item(
Key={'survey_id':survey_id},
UpdateExpression='REMOVE questions[{}] ADD num_active_questions :minus_one'.format(i),
ConditionExpression='questions[{}].id = :question_id and questions[{}].active = :true'.format(i,i),
ExpressionAttributeValues={':question_id':question_id, ':minus_one':-1, ':true': True}
)
else:
survey_utils.get_survey_table().update_item(
Key={'survey_id':survey_id},
UpdateExpression='REMOVE questions[{}]'.format(i),
ConditionExpression='questions[{}].id = :question_id and questions[{}].active = :false'.format(i,i),
ExpressionAttributeValues={':question_id':question_id, ':false': False}
)
except ClientError as e:
if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
raise errors.ClientError('Survey has been modified before update')
else:
raise RuntimeError('Failed to update DynamoDB')
break
else:
raise errors.ClientError('No question with id [{}] found for survey [{}]'.format(question_id, survey_id))
survey_utils.get_question_table().delete_item(
Key={
'survey_id': survey_id,
'question_id': question_id
}
)
return 'success'
|
py | 1a489a5e2c5a36cfef7d8ebda98207dfbaec55be | # Simula o rolamento de uma quantidade de dados definida pelo usuario.
import random # Importa o modulo de geracao automatica de numeros.
# Le a quantidade de dados a ser rolada.
rolls = int( raw_input( "Entre com o numero de rolagens: " ) )
for i in range( 1, rolls + 1 ) :
print "%10d" % ( random.randrange( 1, 7 ) ),
# Solta uma linha entre cada 4 rolagens.
if not ( i % 8 ) :
print |
py | 1a489af714fee1bf0fc75ad0f73ff706af6d54e3 | import inspect
import os
import unittest
import pytest
from mockito import mock, when, unstub, ANY
from robot.utils import JYTHON, WINDOWS
from selenium import webdriver
from SeleniumLibrary.keywords.webdrivertools import SeleniumOptions, WebDriverCreator
try:
from approvaltests.approvals import verify_all
from approvaltests.reporters.generic_diff_reporter_factory import GenericDiffReporterFactory
except ImportError:
if JYTHON:
verify = None
GenericDiffReporterFactory = None
else:
raise
@pytest.fixture(scope='module')
def options():
return SeleniumOptions()
@pytest.fixture(scope='module')
def reporter():
if JYTHON:
return None
else:
path = os.path.dirname(__file__)
reporter_json = os.path.abspath(os.path.join(path, '..', 'approvals_reporters.json'))
factory = GenericDiffReporterFactory()
factory.load(reporter_json)
return factory.get_first_working()
def teardown_function():
unstub()
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_parse_options_string(options, reporter):
results = []
results.append(options._parse('method("arg1")'))
results.append(options._parse('method("arg1", "arg2")'))
results.append(options._parse('method(True)'))
results.append(options._parse('method(1)'))
results.append(options._parse('method("arg1", 2, None, False, "arg2")'))
results.append(options._parse('method ( " arg1 " , 2 , None , False , " arg2 " )'))
results.append(options._parse('attribute="arg1"'))
results.append(options._parse(' attribute = True '))
results.append(options._parse('method("arg1");attribute=True'))
results.append(options._parse('method("arg1") ; attribute=True ; method("arg2")'))
results.append(options._parse('attribute'))
results.append(options._parse('method()'))
results.append(options._parse('method(None)'))
results.append(options._parse('method("--proxy 10.10.1.3:2345")'))
results.append(options._parse('method(";arg1")'))
results.append(options._parse('method ( "arg1" , 2 ,"arg2" )'))
results.append(options._parse("method('arg1')"))
results.append(options._parse('add_argument("-profile"); add_argument("C:\\\\path\\to\\\\profile")'))
results.append(options._parse(r'add_argument("-profile"); add_argument("C:\\path\\to\\profile")'))
results.append(options._parse('attribute=None'))
results.append(options._parse('method("foo", {"key": False});attribute=True;method("bar", {"key": None})'))
verify_all('Selenium options string to dict', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_index_of_separator(options, reporter):
results = []
results.append(options._get_arument_index('method({"key": "value"})'))
results.append(options._get_arument_index('attribute={"key": "value"}'))
results.append(options._get_arument_index('method(foo={"key": "value"})'))
results.append(options._get_arument_index('attribute=("value1", "value2")'))
verify_all('Get argument index', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_parse_complex_object(options, reporter):
results = []
results.append(options._parse_to_tokens('method({"key": "value"})'))
results.append(options._parse_to_tokens('attribute={"key": "value"}'))
results.append(options._parse_to_tokens('attribute=("value1", "value2")'))
results.append(options._parse_to_tokens('method("foo", {"key": "value"})'))
verify_all('Parse complex Python object', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_parse_arguments(options, reporter):
results = []
results.append(options._parse_arguments(("arg1", ), True))
results.append(options._parse_arguments("arg1", False))
results.append(options._parse_arguments({"key": "value"}, False))
results.append(options._parse_arguments(["value1", "value2"], False))
results.append(options._parse_arguments(("foo", {"key": "value"}), False))
verify_all('Parse arguments from complex object', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_parse_options_string_errors(options, reporter):
results = []
results.append(error_formatter(options._parse, 'method("arg1)', True))
results.append(error_formatter(options._parse, 'method(arg1")', True))
results.append(error_formatter(options._parse, 'method(arg1)', True))
results.append(error_formatter(options._parse, 'attribute=arg1', True))
results.append(error_formatter(options._parse, 'attribute=webdriver', True))
results.append(error_formatter(options._parse, 'method(argument="value")', True))
verify_all('Selenium options string errors', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_split_options(options, reporter):
results = []
results.append(options._split('method("arg1");method("arg2")'))
results.append(options._split('method("arg1")'))
results.append(options._split('attribute=True'))
results.append(options._split('attribute="semi;colons;middle";other_attribute=True'))
results.append(options._split('method("arg1;");method(";arg2;")'))
results.append(options._split(' method ( " arg1 ") ; method ( " arg2 " ) '))
verify_all('Selenium options string splitting', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_options_create(options, reporter):
results = []
options_str = 'add_argument("--disable-dev-shm-usage")'
sel_options = options.create('chrome', options_str)
results.append(sel_options.arguments)
options_str = '%s;add_argument("--headless")' % options_str
sel_options = options.create('chrome', options_str)
results.append(sel_options.arguments)
options_str = '%s;add_argument("--proxy-server=66.97.38.58:80")' % options_str
sel_options = options.create('chrome', options_str)
results.append(sel_options.arguments)
options_str = '%s;binary_location("too", "many", "args")' % options_str
try:
options.create('chrome', options_str)
except Exception as error:
results.append(error.__str__()[:7])
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--disable-dev-shm-usage')
sel_options = options.create('chrome', chrome_options)
results.append(sel_options.arguments)
sel_options = options.create('chrome', None)
results.append(sel_options)
sel_options = options.create('chrome', 'None')
results.append(sel_options)
verify_all('Selenium options', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_create_with_android(options, reporter):
results = []
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option('androidPackage', 'com.android.chrome')
sel_options = options.create('android', chrome_options)
results.append([sel_options.arguments, sel_options.experimental_options])
verify_all('Selenium options with android', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_get_options(options, reporter):
options_str = 'add_argument("--proxy-server=66.97.38.58:80")'
sel_options = options.create('chrome', options_str)
results = [sel_options.arguments]
verify_all('Selenium options with string.', results, reporter=reporter)
@unittest.skipIf(JYTHON, 'ApprovalTest does not work with Jython')
@unittest.skipIf(WINDOWS, reason='ApprovalTest does not support different line feeds')
def test_importer(options, reporter):
results = []
results.append(options._import_options('firefox'))
results.append(options._import_options('headless_firefox'))
results.append(options._import_options('chrome'))
results.append(options._import_options('headless_chrome'))
results.append(options._import_options('ie'))
results.append(options._import_options('opera'))
results.append(options._import_options('edge'))
results.append(error_formatter(options._import_options, 'phantomjs'))
results.append(error_formatter(options._import_options, 'safari'))
results.append(error_formatter(options._import_options, 'htmlunit'))
results.append(error_formatter(options._import_options, 'htmlunit_with_js'))
results.append(options._import_options('android'))
results.append(error_formatter(options._import_options, 'iphone'))
verify_all('Selenium options import', results, reporter=reporter)
def error_formatter(method, arg, full=False):
try:
return method(arg)
except Exception as error:
if full:
return '%s %s' % (arg, error)
return '%s %s' % (arg, error.__str__()[:15])
@pytest.fixture(scope='module')
def creator():
curr_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.abspath(
os.path.join(curr_dir, '..', '..', 'output_dir'))
return WebDriverCreator(output_dir)
@pytest.fixture(scope='module')
def output_dir():
curr_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.abspath(
os.path.join(curr_dir, '..', '..', 'output_dir'))
return output_dir
def test_create_chrome_with_options(creator):
options = mock()
expected_webdriver = mock()
when(webdriver).Chrome(service_log_path=None, options=options,
executable_path='chromedriver').thenReturn(expected_webdriver)
driver = creator.create_chrome({}, None, options=options)
assert driver == expected_webdriver
def test_create_chrome_with_options_and_remote_url(creator):
url = 'http://localhost:4444/wd/hub'
caps = webdriver.DesiredCapabilities.CHROME.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor=url,
desired_capabilities=caps,
browser_profile=None, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_chrome({}, url, options=options)
assert driver == expected_webdriver
def test_create_headless_chrome_with_options(creator):
options = mock()
expected_webdriver = mock()
when(webdriver).Chrome(service_log_path=None, options=options,
executable_path='chromedriver').thenReturn(expected_webdriver)
driver = creator.create_headless_chrome({}, None, options=options)
assert driver == expected_webdriver
def test_create_firefox_with_options(creator, output_dir):
log_file = os.path.join(output_dir, 'geckodriver-1.log')
options = mock()
profile = mock()
expected_webdriver = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
when(webdriver).Firefox(options=options, firefox_profile=profile, executable_path='geckodriver',
service_log_path=log_file).thenReturn(expected_webdriver)
driver = creator.create_firefox({}, None, None, options=options)
assert driver == expected_webdriver
def test_create_firefox_with_options_and_remote_url(creator):
url = 'http://localhost:4444/wd/hub'
profile = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
caps = webdriver.DesiredCapabilities.FIREFOX.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor=url,
desired_capabilities=caps,
browser_profile=profile, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_firefox({}, url, None, options=options)
assert driver == expected_webdriver
def test_create_headless_firefox_with_options(creator, output_dir):
log_file = os.path.join(output_dir, 'geckodriver-1.log')
options = mock()
profile = mock()
expected_webdriver = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
when(webdriver).Firefox(options=options, firefox_profile=profile, executable_path='geckodriver',
service_log_path=log_file).thenReturn(expected_webdriver)
driver = creator.create_headless_firefox({}, None, None, options=options)
assert driver == expected_webdriver
def test_create_ie_with_options(creator):
options = mock()
expected_webdriver = mock()
when(webdriver).Ie(service_log_path=None, options=options,
executable_path='IEDriverServer.exe').thenReturn(expected_webdriver)
driver = creator.create_ie({}, None, options=options)
assert driver == expected_webdriver
def test_create_ie_with_options_and_remote_url(creator):
url = 'http://localhost:4444/wd/hub'
caps = webdriver.DesiredCapabilities.INTERNETEXPLORER.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor=url,
desired_capabilities=caps,
browser_profile=None, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_ie({}, url, options=options)
assert driver == expected_webdriver
def test_create_ie_with_options_and_log_path(creator):
options = mock()
expected_webdriver = mock()
when(webdriver).Ie(options=options, service_log_path=None,
executable_path='IEDriverServer.exe').thenReturn(expected_webdriver)
driver = creator.create_ie({}, None, options=options)
assert driver == expected_webdriver
def test_has_options(creator):
assert creator._has_options(webdriver.Chrome)
assert creator._has_options(webdriver.Firefox)
assert creator._has_options(webdriver.Ie)
assert creator._has_options(webdriver.Edge) is False
assert creator._has_options(webdriver.Opera)
assert creator._has_options(webdriver.Safari) is False
@unittest.skipIf('options' not in inspect.getargspec(webdriver.Edge.__init__), "Requires Selenium 4.0.")
def test_create_edge_with_options(creator):
# TODO: This test requires Selenium 4.0 in Travis
options = mock()
expected_webdriver = mock()
when(creator)._has_options(ANY).thenReturn(True)
when(webdriver).Edge(service_log_path=None, options=options).thenReturn(expected_webdriver)
driver = creator.create_edge({}, None, options=options)
assert driver == expected_webdriver
def test_create_opera_with_options(creator):
options = mock()
expected_webdriver = mock()
executable_path = 'operadriver'
when(webdriver).Opera(options=options, service_log_path=None,
executable_path=executable_path).thenReturn(expected_webdriver)
driver = creator.create_opera({}, None, options=options)
assert driver == expected_webdriver
def test_create_opera_with_options_and_remote_url(creator):
url = 'http://localhost:4444/wd/hub'
caps = webdriver.DesiredCapabilities.OPERA.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor=url,
desired_capabilities=caps,
browser_profile=None, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_opera({}, url, options=options)
assert driver == expected_webdriver
def test_create_safari_no_options_support(creator):
options = mock()
expected_webdriver = mock()
executable_path = '/usr/bin/safaridriver'
when(webdriver).Safari(executable_path=executable_path).thenReturn(expected_webdriver)
driver = creator.create_safari({}, None, options=options)
assert driver == expected_webdriver
def test_create_phantomjs_no_options_support(creator):
options = mock()
expected_webdriver = mock()
executable_path = 'phantomjs'
when(webdriver).PhantomJS(service_log_path=None, executable_path=executable_path).thenReturn(expected_webdriver)
driver = creator.create_phantomjs({}, None, options=options)
assert driver == expected_webdriver
def test_create_htmlunit_no_options_support(creator):
caps = webdriver.DesiredCapabilities.HTMLUNIT.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor='None',
desired_capabilities=caps,
browser_profile=None, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_htmlunit({'desired_capabilities': caps}, None, options=options)
assert driver == expected_webdriver
def test_create_htmlunit_with_js_no_options_support(creator):
caps = webdriver.DesiredCapabilities.HTMLUNITWITHJS.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor='None',
desired_capabilities=caps,
browser_profile=None, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_htmlunit_with_js({}, None, options=options)
assert driver == expected_webdriver
def test_android_options_support(creator):
caps = webdriver.DesiredCapabilities.ANDROID.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor='None',
desired_capabilities=caps,
browser_profile=None, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_android({}, None, options=options)
assert driver == expected_webdriver
def test_iphone_options_support(creator):
caps = webdriver.DesiredCapabilities.IPHONE.copy()
options = mock()
expected_webdriver = mock()
file_detector = mock_file_detector(creator)
when(webdriver).Remote(command_executor='None',
desired_capabilities=caps,
browser_profile=None, options=options,
file_detector=file_detector).thenReturn(expected_webdriver)
driver = creator.create_iphone({}, None, options=options)
assert driver == expected_webdriver
def test_create_driver_chrome(creator):
str_options = 'add_argument:--disable-dev-shm-usage'
options = mock()
expected_webdriver = mock()
when(creator.selenium_options).create('chrome', str_options).thenReturn(options)
executable_path = 'chromedriver'
when(creator)._get_executable_path(ANY).thenReturn(executable_path)
when(webdriver).Chrome(service_log_path=None, options=options,
executable_path=executable_path).thenReturn(expected_webdriver)
driver = creator.create_driver('Chrome', desired_capabilities={}, remote_url=None,
options=str_options)
assert driver == expected_webdriver
def test_create_driver_firefox(creator, output_dir):
log_file = os.path.join(output_dir, 'geckodriver-1.log')
str_options = 'add_argument:--disable-dev-shm-usage'
options = mock()
profile = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
expected_webdriver = mock()
when(creator.selenium_options).create('firefox', str_options).thenReturn(options)
executable_path = 'geckodriver'
when(creator)._get_executable_path(ANY).thenReturn(executable_path)
when(webdriver).Firefox(options=options, firefox_profile=profile, executable_path=executable_path,
service_log_path=log_file).thenReturn(expected_webdriver)
driver = creator.create_driver('FireFox', desired_capabilities={}, remote_url=None,
options=str_options)
assert driver == expected_webdriver
def mock_file_detector(creator):
file_detector = mock()
when(creator)._get_sl_file_detector().thenReturn(file_detector)
return file_detector
|
py | 1a489b9563122ec876320dfb0b2b8670498ba004 | import json
from . import eosapi
from . import config
def create_account_on_chain(from_account, new_account, balance, public_key):
assert len(new_account) == 12
assert balance <= 1.0
assert len(public_key) == 53 and public_key[:3] == 'EOS'
memo = '%s-%s'%(new_account, public_key)
return eosapi.transfer(from_account, 'signupeoseos', balance, memo)
def buyrambytes(payer, receiver, _bytes):
args = {"payer":payer,"receiver":receiver,"bytes":_bytes}
return eosapi.push_action(config.system_contract, 'buyrambytes', args, {payer:'active'})
def buyram(payer, receiver, quant):
args = {'payer':payer, 'receiver':receiver, 'quant':'%.4f %s'%(quant, config.main_token)}
return eosapi.push_action(config.system_contract, 'buyram', args, {payer:'active'})
def sellram(account, _bytes):
return eosapi.push_action(config.system_contract, 'sellram', {'account':account, 'bytes':_bytes}, {account:'active'})
def dbw(_from, _to, net, cpu, transfer=False):
args = {'from':_from,
'receiver':_to,
'stake_net_quantity':'%.4f %s'%(net, config.main_token),
'stake_cpu_quantity':'%.4f %s'%(cpu, config.main_token),
'transfer':transfer
}
return eosapi.push_action(config.system_contract, 'delegatebw', args, {_from:'active'})
def undbw(_from, _to, net, cpu, transfer=False):
args = {'from':_from,
'receiver':_to,
'unstake_net_quantity':'%.4f %s'%(net, config.main_token),
'unstake_cpu_quantity':'%.4f %s'%(cpu, config.main_token),
'transfer':transfer
}
return eosapi.push_action(config.system_contract, 'undelegatebw', args, {_from:'active'})
|
py | 1a489bcaab0d448ead8d08fef74f7471978107d1 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class NodePool(pulumi.CustomResource):
autoscaling: pulumi.Output[dict]
"""
Configuration required by cluster autoscaler to adjust
the size of the node pool to the current cluster usage. Structure is documented below.
"""
cluster: pulumi.Output[str]
"""
The cluster to create the node pool for. Cluster must be present in `zone` provided for zonal clusters.
"""
initial_node_count: pulumi.Output[float]
"""
The initial node count for the pool. Changing this will force
recreation of the resource.
"""
instance_group_urls: pulumi.Output[list]
location: pulumi.Output[str]
"""
The location (region or zone) in which the cluster
resides.
"""
management: pulumi.Output[dict]
"""
Node management configuration, wherein auto-repair and
auto-upgrade is configured. Structure is documented below.
"""
max_pods_per_node: pulumi.Output[float]
"""
    The maximum number of pods per node in this node pool.
Note that this does not work on node pools which are "route-based" - that is, node
pools belonging to clusters that do not have IP Aliasing enabled.
"""
name: pulumi.Output[str]
"""
The name of the node pool. If left blank, Terraform will
auto-generate a unique name.
"""
name_prefix: pulumi.Output[str]
node_config: pulumi.Output[dict]
"""
The node configuration of the pool. See
google_container_cluster for schema.
"""
node_count: pulumi.Output[float]
"""
The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside `autoscaling`.
"""
project: pulumi.Output[str]
"""
The ID of the project in which to create the node pool. If blank,
the provider-configured project will be used.
"""
region: pulumi.Output[str]
"""
The region in which the cluster resides (for
regional clusters). `zone` has been deprecated in favor of `location`.
"""
version: pulumi.Output[str]
"""
The Kubernetes version for the nodes in this pool. Note that if this field
and `auto_upgrade` are both specified, they will fight each other for what the node version should
be, so setting both is highly discouraged. While a fuzzy version can be specified, it's
recommended that you specify explicit versions as Terraform will see spurious diffs
when fuzzy versions are used. See the `google_container_engine_versions` data source's
`version_prefix` field to approximate fuzzy versions in a Terraform-compatible way.
"""
zone: pulumi.Output[str]
"""
The zone in which the cluster resides. `zone`
has been deprecated in favor of `location`.
"""
def __init__(__self__, resource_name, opts=None, autoscaling=None, cluster=None, initial_node_count=None, location=None, management=None, max_pods_per_node=None, name=None, name_prefix=None, node_config=None, node_count=None, project=None, region=None, version=None, zone=None, __name__=None, __opts__=None):
"""
Manages a node pool in a Google Kubernetes Engine (GKE) cluster separately from
the cluster control plane. For more information see [the official documentation](https://cloud.google.com/container-engine/docs/node-pools)
and [the API reference](https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters.nodePools).
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] autoscaling: Configuration required by cluster autoscaler to adjust
the size of the node pool to the current cluster usage. Structure is documented below.
:param pulumi.Input[str] cluster: The cluster to create the node pool for. Cluster must be present in `zone` provided for zonal clusters.
:param pulumi.Input[float] initial_node_count: The initial node count for the pool. Changing this will force
recreation of the resource.
:param pulumi.Input[str] location: The location (region or zone) in which the cluster
resides.
:param pulumi.Input[dict] management: Node management configuration, wherein auto-repair and
auto-upgrade is configured. Structure is documented below.
        :param pulumi.Input[float] max_pods_per_node: The maximum number of pods per node in this node pool.
Note that this does not work on node pools which are "route-based" - that is, node
pools belonging to clusters that do not have IP Aliasing enabled.
:param pulumi.Input[str] name: The name of the node pool. If left blank, Terraform will
auto-generate a unique name.
:param pulumi.Input[dict] node_config: The node configuration of the pool. See
google_container_cluster for schema.
:param pulumi.Input[float] node_count: The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside `autoscaling`.
:param pulumi.Input[str] project: The ID of the project in which to create the node pool. If blank,
the provider-configured project will be used.
:param pulumi.Input[str] region: The region in which the cluster resides (for
regional clusters). `zone` has been deprecated in favor of `location`.
:param pulumi.Input[str] version: The Kubernetes version for the nodes in this pool. Note that if this field
and `auto_upgrade` are both specified, they will fight each other for what the node version should
be, so setting both is highly discouraged. While a fuzzy version can be specified, it's
recommended that you specify explicit versions as Terraform will see spurious diffs
when fuzzy versions are used. See the `google_container_engine_versions` data source's
`version_prefix` field to approximate fuzzy versions in a Terraform-compatible way.
:param pulumi.Input[str] zone: The zone in which the cluster resides. `zone`
has been deprecated in favor of `location`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['autoscaling'] = autoscaling
if cluster is None:
raise TypeError('Missing required property cluster')
__props__['cluster'] = cluster
__props__['initial_node_count'] = initial_node_count
__props__['location'] = location
__props__['management'] = management
__props__['max_pods_per_node'] = max_pods_per_node
__props__['name'] = name
__props__['name_prefix'] = name_prefix
__props__['node_config'] = node_config
__props__['node_count'] = node_count
__props__['project'] = project
__props__['region'] = region
__props__['version'] = version
__props__['zone'] = zone
__props__['instance_group_urls'] = None
super(NodePool, __self__).__init__(
'gcp:container/nodePool:NodePool',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
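# A minimal, hypothetical usage sketch (not part of the generated module): it
# assumes an existing `gcp.container.Cluster` resource named `primary` and a
# provider-configured project/zone; the node_config keys shown are illustrative.
#
#     import pulumi_gcp as gcp
#
#     pool = gcp.container.NodePool(
#         "primary-pool",
#         cluster=primary.name,
#         location="us-central1-a",
#         node_count=3,
#         node_config={
#             "machineType": "n1-standard-1",
#             "oauthScopes": ["https://www.googleapis.com/auth/cloud-platform"],
#         },
#     )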
|
py | 1a489bf8d60c2138cd3dacd4f31f464a3c7f7f6c | from pm4pymdl.algo.mvp.utils import filter_metaclass
from pm4pymdl.algo.mvp.utils import succint_mdl_to_exploded_mdl
def filter_ot(df, act, ot, v1, v2, parameters=None):
if parameters is None:
parameters = {}
print(len(df))
try:
if df.type == "succint":
df = succint_mdl_to_exploded_mdl.apply(df)
except:
pass
red_df0 = df[df["event_activity"] == act]
red_df = red_df0.dropna(subset=[ot])
dct = red_df.groupby("event_id").size().to_dict()
lst = [x for x,y in dct.items() if v1 <= y <= v2]
red_df = red_df0[red_df0["event_id"].isin(lst)]
return filter_metaclass.do_filtering(df, red_df, parameters=parameters)
|
py | 1a489e74c06444c1652f99c9fd32c9ede1ad5627 | import os
f=open("/tmp/yy.txt")
q=f.read()
s=''
for i in str(q):
# print(ord(i)+2,end="")
s=s+str(ord(i)+2)
#print()
#print(s)
s1=open("/tmp/yy1.txt",'w')
s1.write(s)
s1.close()
#print(q)
|
py | 1a489e7b669c299161b7cd0ffcb5a86ac6064deb | # Builds the course graph with networkx to extract information and make the required changes
import networkx as nx
import json
from networkx.algorithms.simple_paths import all_simple_paths
with open("./data/disciplinas.json", 'r') as f:
line = f.readline()
disciplinas = json.loads(line)
G = nx.DiGraph()
G.add_nodes_from(list(disciplinas.keys()))
for key in list(disciplinas.keys()):
for req in disciplinas[key]['requisitos']:
G.add_edge(req, key)
if(len(disciplinas[key]['requisitos']) == 0):
G.add_edge('START', key)
# Compute the longest chain of prerequisite courses required for each course.
# Used to set the vertex position when building the visualization in the app.
for key in list(disciplinas.keys()):
max_path = [len(p) for p in all_simple_paths(G, 'START', key)]
if (max_path):
disciplinas[key]['maxpath'] = max(max_path)-2
else:
disciplinas[key]['maxpath'] = 0
with open("./public/assets/data/disciplinas.json", 'w+') as f:
json.dump(disciplinas, f) |
py | 1a489ead26e4938db1493ba707aaa73a50b7095f | from .middleware import RoleBasedPolicy # NOQA
|
py | 1a489f787399a450328b1a0fded0cb1a77f17f26 | """Contains methods and classes to collect data from
tushare API
"""
import pandas as pd
import tushare as ts
from tqdm import tqdm
class TushareDownloader :
"""Provides methods for retrieving daily stock data from
tushare API
Attributes
----------
start_date : str
start date of the data (modified from config.py)
end_date : str
end date of the data (modified from config.py)
ticker_list : list
a list of stock tickers (modified from config.py)
Methods
-------
fetch_data()
Fetches data from tushare API
date:date
Open: opening price
High: the highest price
Close: closing price
Low: lowest price
Volume: volume
Price_change: price change
P_change: fluctuation
ma5: 5-day average price
Ma10: 10 average daily price
Ma20:20 average daily price
V_ma5:5 daily average
V_ma10:10 daily average
V_ma20:20 daily average
"""
def __init__(self, start_date: str, end_date: str, ticker_list: list):
self.start_date = start_date
self.end_date = end_date
self.ticker_list = ticker_list
def fetch_data(self) -> pd.DataFrame:
"""Fetches data from Yahoo API
Parameters
----------
Returns
-------
`pd.DataFrame`
7 columns: A date, open, high, low, close, volume and tick symbol
for the specified stock ticker
"""
# Download and save the data in a pandas DataFrame:
data_df = pd.DataFrame()
for tic in tqdm(self.ticker_list, total=len(self.ticker_list)):
temp_df = ts.get_hist_data(tic[0:6],start=self.start_date,end=self.end_date)
temp_df["tic"] = tic[0:6]
data_df = data_df.append(temp_df)
data_df = data_df.reset_index(level="date")
# create day of the week column (monday = 0)
data_df = data_df.drop(["price_change","p_change","ma5","ma10","ma20","v_ma5","v_ma10","v_ma20"], 1)
data_df["day"] = pd.to_datetime(data_df["date"]).dt.dayofweek
#rank desc
data_df = data_df.sort_index(axis=0,ascending=False)
data_df = data_df.reset_index(drop=True)
# convert date to standard string format, easy to filter
data_df["date"] = pd.to_datetime(data_df["date"])
data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
# drop missing data
data_df = data_df.dropna()
print("Shape of DataFrame: ", data_df.shape)
# print("Display DataFrame: ", data_df.head())
print(data_df)
data_df = data_df.sort_values(by=['date','tic']).reset_index(drop=True)
return data_df
def select_equal_rows_stock(self, df):
df_check = df.tic.value_counts()
df_check = pd.DataFrame(df_check).reset_index()
df_check.columns = ["tic", "counts"]
mean_df = df_check.counts.mean()
equal_list = list(df.tic.value_counts() >= mean_df)
names = df.tic.value_counts().index
select_stocks_list = list(names[equal_list])
df = df[df.tic.isin(select_stocks_list)]
return df
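# Hypothetical usage sketch (not part of the original module): assumes the
# tushare package is installed and that the legacy free `get_hist_data`
# endpoint used above is reachable; tickers and dates are placeholders.
if __name__ == "__main__":
    downloader = TushareDownloader(
        start_date="2020-01-01",
        end_date="2020-12-31",
        ticker_list=["600000.SH", "600036.SH"],
    )
    daily_df = downloader.fetch_data()
    print(daily_df.head())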
|
py | 1a48a10c933f18a36dc5c2fb78f28173b0c3efa4 | from twisted.internet.defer import DeferredLock as _Lock
class Lock(_Lock):
pass
|
py | 1a48a1b567272806505da10a92321c04d11b078a | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""You may copy this file as the starting point of your own model."""
import fastestimator as fe
from fastestimator.dataset.data import cifar10
from fastestimator.op.numpyop.univariate import Normalize
from openfl.federated import FastEstimatorDataLoader
class FastEstimatorCifarInMemory(FastEstimatorDataLoader):
"""TensorFlow Data Loader for MNIST Dataset."""
def __init__(self, data_path, batch_size, **kwargs):
"""
Initialize.
Args:
data_path: File path for the dataset
batch_size (int): The batch size for the data loader
**kwargs: Additional arguments, passed to super init and
load_mnist_shard
"""
# TODO: We should be downloading the dataset shard into a directory
# TODO: There needs to be a method to ask how many collaborators and
# what index/rank is this collaborator.
# Then we have a way to automatically shard based on rank and size
# of collaborator list.
train_data, eval_data = cifar10.load_data()
test_data = eval_data.split(0.5)
collaborator_count = kwargs['collaborator_count']
train_data, eval_data, test_data = self.split_data(
train_data,
eval_data,
test_data,
int(data_path),
collaborator_count
)
print(f'train_data = {train_data}')
print(f'eval_data = {eval_data}')
print(f'test_data = {test_data}')
print(f'batch_size = {batch_size}')
super().__init__(fe.Pipeline(
train_data=train_data,
eval_data=eval_data,
test_data=test_data,
batch_size=batch_size,
ops=[
Normalize(inputs='x', outputs='x',
mean=(0.4914, 0.4822, 0.4465),
std=(0.2471, 0.2435, 0.2616))
]), **kwargs)
def split_data(self, train, eva, test, rank, collaborator_count):
"""Split data into N parts, where N is the collaborator count."""
if collaborator_count == 1:
return train, eva, test
fraction = [1.0 / float(collaborator_count)]
fraction *= (collaborator_count - 1)
# Expand the split list into individual parameters
train_split = train.split(*fraction)
eva_split = eva.split(*fraction)
test_split = test.split(*fraction)
train = [train]
eva = [eva]
test = [test]
if type(train_split) is not list:
train.append(train_split)
eva.append(eva_split)
test.append(test_split)
else:
# Combine all partitions into a single list
train = [train] + train_split
eva = [eva] + eva_split
test = [test] + test_split
# Extract the right shard
train = train[rank - 1]
eva = eva[rank - 1]
test = test[rank - 1]
return train, eva, test
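# Hypothetical usage sketch (not part of the original module): in OpenFL the
# plan normally constructs this loader, passing the collaborator's shard number
# as `data_path`; the values below are placeholders.
#
#     loader = FastEstimatorCifarInMemory(data_path='1', batch_size=32,
#                                         collaborator_count=2)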
|
py | 1a48a1f9e69250b893f30c7eea6ec5d782154777 | #!/home/knielbo/virtenvs/teki/bin/python
"""
# front page only for us
$ python infomedia_parser.py --dataset ../dat/NEWS-DATA/berglinske-print --pagecontrol True --page 1 --sort True --verbose 10
# all pages for Peter
$ python infomedia_parser.py --dataset ../dat/NEWS-DATA/berglinske-print --pagecontrol False --page 1 --sort True --verbose 10
"""
import os
import argparse
import json
import re
import glob
import newlinejson
def preprocess(dobj):
# filters
stopwords = [r"forsidehenvisning", r" side "]#, r"side", r"SIDE"]
pat0 = re.compile(r"<.*?>")# remove html tags
pat1 = re.compile(r" +")# remove extra spacing to deal with p1 header
text = dobj["BodyText"]
heading = dobj["Heading"]
subheading = dobj["SubHeading"]
text = text + " " + heading + " " + subheading
text = re.sub(pat0, " ", text)
for word in stopwords:
text = re.sub(word, " ", text, flags=re.IGNORECASE)
text = re.sub(pat1, " ", text)
title = dobj["Paragraph"]
date = dobj["PublishDate"]
return text, title, date
flatten = lambda l: [item for sublist in l for item in sublist]
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to folder with input data")
ap.add_argument("-c", "--pagecontrol", required=False, default=False, help="if extraction should be focused on specific page")
ap.add_argument("-p", "--page", required=False, type=int, default=1, help="which page to be focused on, default is front page")
ap.add_argument("-s", "--sort", required=False, type=bool, default=True, help="sort data in date")
ap.add_argument("-v", "--verbose", required=False, type=int, default=-1, help="verbose mode (number of object to print), -1 to deactivate")
ap.add_argument('-fn', '--filename', required=False, type=bool, default=False, help='Print filenames during processing')
args = vars(ap.parse_args())
TEXT, TITLE, DATE = list(), list(), list()
error = list()
filenames = glob.glob(os.path.join(args["dataset"], "*.ndjson"))
for i, filename in enumerate(filenames):
if args['filename']:
print(filename)
with open(filename, "r") as fobj:
lignes = fobj.readlines()
if lignes:
texts = list()
titles = list()
dates = list()
for ligne in lignes:
dobj = json.loads(ligne)
# control for missing PageIds
if dobj["PageIds"][0]:
pageid = int(dobj["PageIds"][0])
else:
pageid = 'NA (PageIds blank in API)'
# extract date from page
if args["pagecontrol"]:
if pageid == args["page"]:
text, title, date = preprocess(dobj)
texts.append(text)
titles.append(title)
dates.append(date)
# get all data
else:
text, title, date = preprocess(dobj)
texts.append(text)
titles.append(title)
dates.append(date)
# concatenate all content on page
if args["pagecontrol"]:
# control for empty pages
if texts and dates and titles:
texts = [" ".join(texts)]
dates = [dates[0]]
titles = [" ".join(titles)]
else:
texts = []
dates = []
titles = []
TEXT.append(texts)
DATE.append(dates)
TITLE.append(titles)
# record empty files
else:
error.append(os.path.basename(filename))
if args["verbose"] > 0 and i > 0 and (i + 1) % args["verbose"] == 0:
print("[INFO] processed {}/{}".format(i + 1, len(filenames)))
print("[INFO] {} of {} are empty in {} ...".format(len(error),len(filenames), os.path.basename(args["dataset"])))
# flatten ls of ls
TEXT = flatten(TEXT)
TITLE = flatten(TITLE)
DATE = flatten(DATE)
# sort data on date
if args["sort"]:
TEXT = [text for _,text in sorted(zip(DATE, TEXT))]
TITLE = [title for _,title in sorted(zip(DATE, TITLE))]
DATE = sorted(DATE)
# write to external
lignes = list()
for i, date in enumerate(DATE):
d = dict()
d["date"] = date
d["text"] = TEXT[i]
d["title"] = TITLE[i]
lignes.append(d)
# folder
if args['pagecontrol']:
outdir = 'FrontPage'
else:
outdir = 'AllPages'
if not os.path.exists(outdir):
os.mkdir(outdir)
fname = os.path.join(outdir,
os.path.basename(
os.path.normpath(args["dataset"])
) + ".ndjson")
print("[INFO] writing target data to: {}".format(fname))
with open(fname, "w") as f:
newlinejson.dump(lignes, f, ensure_ascii=False)
if __name__=="__main__":
main() |
py | 1a48a41e1700a3234cea3b7bcf5e358a3d4c3487 | __version__ = '6.2.1 R-5'
|
py | 1a48a4973ba864db855eaca25b8be8b397bd3346 | import argparse
import cPickle as pickle
from glob import glob
import multiprocessing as mp
import numpy as np
import os
from scipy.io import loadmat
from skimage.io import imread
from sklearn.datasets import dump_svmlight_file
from subprocess import call
import sys
import xgboost as xgb
import tempfile as tm
import configs
from configs import HAIR, FACE, BKG
import data
from utils import *
EPS = np.finfo(float).eps
def pr_calc(yt, yp):
tp = np.sum((yt == yp) & (yt == 1))
tn = np.sum((yt == yp) & (yt == 0))
    fp = np.sum((yt != yp) & (yp == 1))  # predicted positive, actually negative
    fn = np.sum((yt != yp) & (yp == 0))  # predicted negative, actually positive
return tp, tn, fp, fn
def evaluate(names, keyps, model_fname, q):
models = hr_name_to_models(model_fname)
ttp, ttn, tfp, tfn = 0, 0, 0, 0
for k, (name, keyp) in enumerate(zip(names, keyps)):
if not os.path.exists(name): continue
im = imread(name)
pr = hr_predict_single(im, keyp, models, overlap=0.5)
gt = data.img2gt(name)
tp, tn, fp, fn = pr_calc((gt==HAIR), (pr==HAIR))
ttp += tp; ttn += tn; tfp += fp; tfn += fn
# if k % 50 == 0: print "[{}] Done {}".format(os.getpid(), k)
q.put((ttp, ttn, tfp, tfn))
def eval(model_fname, mat_viz_file):
print "=================================="
q = mp.Queue()
names, keypoints = data.mat_to_name_keyp(mat_viz_file)
NUM_TRAIN_SAMPS = len(names)
nprocs = mp.cpu_count()
chunksize = int(NUM_TRAIN_SAMPS // nprocs)
procs = []
for i in range(nprocs):
lim = chunksize * (i+1) if i < nprocs - 1 else NUM_TRAIN_SAMPS
p = mp.Process(target=evaluate,
args=(names[chunksize*i:lim],
keypoints[chunksize*i:lim],
model_fname, q))
procs.append(p)
p.start()
for p in procs:
p.join()
ttp, ttn, tfp, tfn = 0., 0., 0., 0.
for i in range(nprocs):
tp, tn, fp, fn = q.get()
ttp += tp; ttn += tn; tfp += fp; tfn += fn
print "Model: {} pixel level:".format(model_fname)
print "\thair accuracy = {:.03f}".format(1. - (tfp + tfn) / (EPS + tfn + tfp + ttp + ttn))
print "\tprec \t= {:.03f}".format((ttp) / (EPS + ttp + tfp))
print "\trec \t= {:.03f}".format((ttp) / (EPS + ttp + tfn))
def args():
args = argparse.ArgumentParser()
args.add_argument('model_file', help='')
args.add_argument('mat_file', help='')
return args.parse_args()
if __name__ == '__main__':
parse = args()
eval(parse.model_file, parse.mat_file)
|
py | 1a48a567ae43276bd3874dd320c0987e240ddff7 | # Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
import traceback
from colcon_core.logging import colcon_logger
from colcon_core.plugin_system import instantiate_extensions
from colcon_core.plugin_system import order_extensions_by_priority
logger = colcon_logger.getChild(__name__)
class ArgcompleteCompleterExtensionPoint:
"""
The interface for argcomplete completer extensions.
An argcomplete completer extension provides completion proposals for
command line arguments.
For each instance the attribute `ARGCOMPLETE_COMPLETER_NAME` is being set
to the basename of the entry point registering the extension.
"""
"""The version of the argcomplete completer extension interface."""
EXTENSION_POINT_VERSION = '1.0'
"""The default priority of argcomplete completer extensions."""
PRIORITY = 100
def get_completer(self, parser, *args, **kwargs):
"""
Get a completer for a specific argument.
The argument is identified by the same `args` and `kwargs` which are
passed to the `add_argument()` function of the parser.
This method must be overridden in a subclass.
:param parser: The argument parser on which `add_argument()` was
called on
:param args: The positional arguments to `add_argument()`
:param kwargs: The keyword arguments to `add_argument()`
:returns: An argcomplete completer, or None
"""
raise NotImplementedError()
def get_argcomplete_completer_extensions(*, exclude_names=None):
"""Get the argcomplete completer extensions in priority order."""
extensions = instantiate_extensions(__name__, exclude_names=exclude_names)
for name, extension in extensions.items():
extension.ARGCOMPLETE_COMPLETER_NAME = name
return order_extensions_by_priority(extensions)
def get_argcomplete_completer(parser, *args, **kwargs):
"""Get the completer for given arguments."""
extensions = get_argcomplete_completer_extensions()
# try extensions in priority order
for extension in extensions.values():
# check if extension provides a completer
logger.log(
1,
'get_argcomplete_completer('
'{extension.ARGCOMPLETE_COMPLETER_NAME}) for {args}'
.format_map(locals()))
try:
completer = extension.get_completer(parser, *args, **kwargs)
assert callable(completer) or completer is None, \
'get_completer() should return a callable or None'
except Exception as e: # noqa: F841
# catch exceptions raised in completer extension
exc = traceback.format_exc()
logger.error(
'Exception in argcomplete completer extension '
"'{extension.ARGCOMPLETE_COMPLETER_NAME}': {e}\n{exc}"
.format_map(locals()))
# skip failing extension, continue with next one
continue
# if not continue with next extension
if completer is None:
continue
# return provided completer
logger.log(
5,
'get_argcomplete_completer('
'{extension.ARGCOMPLETE_COMPLETER_NAME}) provided a completer for '
'{args}'.format_map(locals()))
return completer
return None
|
py | 1a48a61b65720e84563fea320dbec07ac8c099ed | import os
import errno
import random
import numpy as np
import torch as th
def set_random_seeds(seed, cuda):
"""Set seeds for python random module numpy.random and torch.
Parameters
----------
seed: int
Random seed.
cuda: bool
Whether to set cuda seed with torch.
"""
random.seed(seed)
th.manual_seed(seed)
if cuda:
th.cuda.manual_seed_all(seed)
np.random.seed(seed)
def np_to_var(
X, requires_grad=False, dtype=None, pin_memory=False, **tensor_kwargs
):
"""
Convenience function to transform numpy array to `torch.Tensor`.
Converts `X` to ndarray using asarray if necessary.
Parameters
----------
X: ndarray or list or number
Input arrays
requires_grad: bool
passed on to Variable constructor
dtype: numpy dtype, optional
var_kwargs:
passed on to Variable constructor
Returns
-------
var: `torch.Tensor`
"""
if not hasattr(X, "__len__"):
X = [X]
X = np.asarray(X)
if dtype is not None:
X = X.astype(dtype)
X_tensor = th.tensor(X, requires_grad=requires_grad, **tensor_kwargs)
if pin_memory:
X_tensor = X_tensor.pin_memory()
return X_tensor
def var_to_np(var):
"""Convenience function to transform `torch.Tensor` to numpy
array.
Should work both for CPU and GPU."""
return var.cpu().data.numpy()
def corr(a, b):
"""
Computes correlation only between terms of a and terms of b, not within
a and b.
Parameters
----------
a, b: 2darray, features x samples
Returns
-------
Correlation between features in x and features in y
"""
# Difference to numpy:
# Correlation only between terms of x and y
# not between x and x or y and y
this_cov = cov(a, b)
return _cov_to_corr(this_cov, a, b)
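# Illustrative sketch (shapes assumed): with `a` of shape (3, 100) and `b` of
# shape (2, 100), corr(a, b) returns a (3, 2) matrix of correlations between
# every feature of `a` and every feature of `b`.
#
#     a = np.random.randn(3, 100)
#     b = np.random.randn(2, 100)
#     assert corr(a, b).shape == (3, 2)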
def cov(a, b):
"""
Computes covariance only between terms of a and terms of b, not within
a and b.
Parameters
----------
a, b: 2darray, features x samples
Returns
-------
Covariance between features in x and features in y
"""
demeaned_a = a - np.mean(a, axis=1, keepdims=True)
demeaned_b = b - np.mean(b, axis=1, keepdims=True)
this_cov = np.dot(demeaned_a, demeaned_b.T) / (b.shape[1] - 1)
return this_cov
def _cov_to_corr(this_cov, a, b):
# computing "unbiased" corr
# ddof=1 for unbiased..
var_a = np.var(a, axis=1, ddof=1)
var_b = np.var(b, axis=1, ddof=1)
return _cov_and_var_to_corr(this_cov, var_a, var_b)
def _cov_and_var_to_corr(this_cov, var_a, var_b):
divisor = np.outer(np.sqrt(var_a), np.sqrt(var_b))
return this_cov / divisor
def wrap_reshape_apply_fn(stat_fn, a, b, axis_a, axis_b):
"""
Reshape two nd-arrays into 2d-arrays, apply function and reshape
result back.
Parameters
----------
stat_fn: function
Function to apply to 2d-arrays
a: nd-array: nd-array
b: nd-array
axis_a: int or list of int
sample axis
axis_b: int or list of int
sample axis
Returns
-------
result: nd-array
The result reshaped to remaining_dims_a + remaining_dims_b
"""
if not hasattr(axis_a, "__len__"):
axis_a = [axis_a]
if not hasattr(axis_b, "__len__"):
axis_b = [axis_b]
other_axis_a = [i for i in range(a.ndim) if i not in axis_a]
other_axis_b = [i for i in range(b.ndim) if i not in axis_b]
transposed_topo_a = a.transpose(tuple(other_axis_a) + tuple(axis_a))
n_stat_axis_a = [a.shape[i] for i in axis_a]
n_other_axis_a = [a.shape[i] for i in other_axis_a]
flat_topo_a = transposed_topo_a.reshape(
np.prod(n_other_axis_a), np.prod(n_stat_axis_a)
)
transposed_topo_b = b.transpose(tuple(other_axis_b) + tuple(axis_b))
n_stat_axis_b = [b.shape[i] for i in axis_b]
n_other_axis_b = [b.shape[i] for i in other_axis_b]
flat_topo_b = transposed_topo_b.reshape(
np.prod(n_other_axis_b), np.prod(n_stat_axis_b)
)
assert np.array_equal(n_stat_axis_a, n_stat_axis_b)
stat_result = stat_fn(flat_topo_a, flat_topo_b)
topo_result = stat_result.reshape(
tuple(n_other_axis_a) + tuple(n_other_axis_b)
)
return topo_result
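# Illustrative sketch (shapes assumed): correlating over the trailing sample
# axes of a (4, 5, 100) array and a (3, 100) array keeps the remaining
# dimensions of both inputs in the result.
#
#     a = np.random.randn(4, 5, 100)
#     b = np.random.randn(3, 100)
#     assert wrap_reshape_apply_fn(corr, a, b, axis_a=2, axis_b=1).shape == (4, 5, 3)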
class FuncAndArgs(object):
"""Container for a function and its arguments.
Useful in case you want to pass a function and its arguments
to another function without creating a new class.
You can call the new instance either with the apply method or
the ()-call operator:
>>> FuncAndArgs(max, 2,3).apply(4)
4
>>> FuncAndArgs(max, 2,3)(4)
4
>>> FuncAndArgs(sum, [3,4])(8)
15
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def apply(self, *other_args, **other_kwargs):
all_args = self.args + other_args
all_kwargs = self.kwargs.copy()
all_kwargs.update(other_kwargs)
return self.func(*all_args, **all_kwargs)
def __call__(self, *other_args, **other_kwargs):
return self.apply(*other_args, **other_kwargs)
def add_message_to_exception(exc, additional_message):
# give some more info...
# see http://www.ianbicking.org/blog/2007/09/re-raising-exceptions.html
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
arg0 += additional_message
exc.args = (arg0,) + args[1:]
def dict_compare(d1, d2):
"""From http://stackoverflow.com/a/18860653/1469195"""
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same
def dict_equal(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
return (
intersect_keys == d2_keys and
intersect_keys == d1_keys and
len(modified) == 0
)
def dict_is_subset(d1, d2):
added, removed, modified, same = dict_compare(d1, d2)
return len(added) == 0 and len(modified) == 0
def merge_dicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
http://stackoverflow.com/a/26853961
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def touch_file(path):
# from http://stackoverflow.com/a/12654798/1469195
basedir = os.path.dirname(path)
if not os.path.exists(basedir):
os.makedirs(basedir)
with open(path, "a"):
os.utime(path, None)
def to_tuple(sequence_or_element, length=None):
if hasattr(sequence_or_element, "__len__"):
assert length is None
return tuple(sequence_or_element)
else:
if length is None:
return (sequence_or_element,)
else:
return (sequence_or_element,) * length
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def select_inverse_inds(arr, inds):
mask = np.ones(len(arr), dtype=bool)
mask[inds] = False
return arr[mask]
|
py | 1a48a782ad222861d212f2d9cdad1bb6ef25b36a | #! /usr/bin/env python3
from __future__ import print_function
import sys
import os
import re
from ROOT import *
import MultipleCompare as MultipleCompare
__author__ = "Lars Perchalla ([email protected])"
__doc__ = """Script to execute multiple plotting commands via MultipleCompare.py. Switch between massiveMode producing a set of plots comparing each one by one, and defaultMode producing a smaller set of default plot combinations by adding the commandline option massiveMode:\n\n
Usage: SteerMultipleCompare.py -T testFile -R refFile [options] [search strings that you want to apply '*' is supported as special character]
see MultiCompare.py for details
"""
def StripPath(name):
path = ''
plot = ''
matches = re.match(r'(.*)\/(.*)$', name)
if matches:
path = matches.group(1)
plot = matches.group(2)
return [path, plot]
def CreateDirectory(dir,addToExisting=False):
if os.path.exists(dir) and not addToExisting:
print("Output directory %s already exists! OK to overwrite?" % dir)
while True:
            answer = input("Please enter [y/n] ")  # raw_input() does not exist in Python 3
            if answer == 'y':
                break
            elif answer == 'n':
                print(" ...exiting.")
                sys.exit()
if not os.path.exists(dir):
os.makedirs(dir)
def CreateBaseDirectory(options):
if options.out == 'MultipleCompare.png' or options.out.find('.')!=-1:
#default case, so no directory was given
#or a filename was given
outputDirName = 'MultipleCompareOutput'
else:
outputDirName = options.out
outputDir = os.path.join(os.getcwd(), outputDirName)
CreateDirectory(outputDir)
return outputDir
def CreateSubDirectory(basedir, path):
outputDir = os.path.join(basedir, path)
CreateDirectory(outputDir,True)
def CleanArguments(argv, option):
#remove existing output arguments
while argv.count(option) > 0:
index = argv.index(option)
if index < len(argv)-1:
argv.pop(index+1)#drop the corresponding value
argv.pop(index)#drop the option itself
#execute Multicompare for each plot as a comparison one by one
#argv was modified to contain only one plot each
def plotOneByOne(argv, outputDir, histoList, histoSubNames, paths):
for hist, name, path in zip(histoList, histoSubNames, paths):
CreateSubDirectory(outputDir, path)
#now give modified arguments to MultipleCompare
tmpArgv = argv[:]
tmpArgv.append('-o')
tmpArgv.append(outputDir+'/'+path+'/'+name+'.png')
tmpArgv.append(hist)
MultipleCompare.main(tmpArgv)
def plotDefault(argv, outputDir, name, type, plots, addArgv=[]):
tmpArgv = argv[:]
tmpArgv.append('-o')
tmpArgv.append(outputDir+'/'+name+type)
tmpArgv.extend(addArgv)
tmpArgv.extend(plots)
MultipleCompare.main(tmpArgv)
#make some default plots grouping several histograms
def plotDefaults(argv, options, outputDir):
name = 'Validation_'
if options.testLabel != None:
name += options.testLabel+'_'
else:
name += options.test+'_vs_'
if options.refLabel != None:
name += options.refLabel+'_'
else:
name += options.ref+'_'
outputType = '.eps'
additionalArgv = []
if outputDir.find('QCD')!=-1:
additionalArgv.append('-f') #fakerate
plotDefault(argv, outputDir, name, 'LeptonRejectionEffphi'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effphi'], additionalArgv)
plotDefault(argv, outputDir, name, 'LeptonRejectionEffeta'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effeta'], additionalArgv)
plotDefault(argv, outputDir, name, 'LeptonRejectionEffpt'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*Rejection/*Effpt'], additionalArgv)
if outputDir.find('QCD')!=-1:
additionalArgv.append('--logScale')
plotDefault(argv, outputDir, name, 'Effphi'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effphi', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effphi'], additionalArgv)
plotDefault(argv, outputDir, name, 'Effeta'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effeta', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effeta'], additionalArgv)
plotDefault(argv, outputDir, name, 'Effpt'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*Effpt', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*Effpt'], additionalArgv)
plotDefault(argv, outputDir, name, 'pTRatio_allHadronic'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_allHadronic', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_allHadronic'])
plotDefault(argv, outputDir, name, 'pTRatio_oneProng1Pi0'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_oneProng1Pi0', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_oneProng1Pi0'])
plotDefault(argv, outputDir, name, 'pTRatio_threeProng0Pi0'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_pTRatio_threeProng0Pi0', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_pTRatio_threeProng0Pi0'])
plotDefault(argv, outputDir, name, 'Size_isolationPFChargedHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFChargedHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFChargedHadrCands'])
plotDefault(argv, outputDir, name, 'Size_isolationPFNeutrHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFNeutrHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFNeutrHadrCands'])
plotDefault(argv, outputDir, name, 'Size_isolationPFGammaCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_Size_isolationPFGammaCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_Size_isolationPFGammaCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFChargedHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFChargedHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFChargedHadrCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFNeutrHadrCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFNeutrHadrCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFNeutrHadrCands'])
plotDefault(argv, outputDir, name, 'SumPt_isolationPFGammaCands'+outputType, ['DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationByDecayModeFinding/*_SumPt_isolationPFGammaCands', 'DQMData/RecoTauV/hpsPFTauProducer_hpsPFTauDiscriminationBy*CombinedIsolationDBSumPtCorr/*_SumPt_isolationPFGammaCands'])
def main(argv=None):
if argv is None:
argv = sys.argv
options, toPlot = MultipleCompare.LoadCommandlineOptions(argv)
gROOT.SetBatch()
testFile = TFile(options.test)
refFile = None
if options.ref != '':
refFile = TFile(options.ref)
plotList = []
MultipleCompare.MapDirStructure( testFile,'',plotList)
if len(plotList)<1:
print('\tError: Please specify at least one histogram. The following ones are available in the root file.')
print(plotList)
sys.exit()
histoList = []
histoSubNames = []
paths = []
massiveMode = False
for plot in toPlot:
        #clean the arguments: toPlot contains the list of positional arguments left over after parsing the options
argv.remove(plot)
for path in plotList:
if MultipleCompare.Match(plot.lower(),path.lower()):
histoList.append(path)
strippedPath, strippedPlot = StripPath(path)
paths.append(strippedPath)
histoSubNames.append(strippedPlot)
#print histoSubNames[-1]
elif plot.find('massiveMode') != -1:
massiveMode = True
CleanArguments(argv,'--output')
CleanArguments(argv,'-o')
outputDir = CreateBaseDirectory(options)
if massiveMode:
print("Massive mode: scan all subdirs and make plots comparing each histogram one by one.")
plotOneByOne(argv, outputDir, histoList, histoSubNames, paths)
else:
print("Default mode: Make default plot combinations.")
plotDefaults(argv, options, outputDir)
#only execute main() if manually run
if __name__ == '__main__':
#main(*sys.argv[1:])
# the calls to sys.exit(n) inside main() all become return n.
sys.exit(main())
else:
print("This is ",__name__)
|
py | 1a48a7967ae667b3e868b83f04fd34b1dc8e2e4e | from lib.interface.cores import c
from lib.interface.valida import leiaInt
def linha(s, x=60, cor=0):
    # Print a horizontal separator made of `s` repeated `x` times, in colour `cor`.
    print(f'{c(cor)}{s}'*x, f'{c(0)}')
def cabecalho(x, msg):
    # Print `msg` centred between two coloured separator lines.
    linha(x, cor=9)
    print('{}{}{}'.format(c(11), msg.center(60), c(0)))
    linha(x, cor=9)
def menu(lst):
    # Show the main menu with one numbered entry per item of `lst` and
    # return the option number chosen by the user.
    cabecalho('-', 'MENU PRINCIPAL')
    op = 1
    for val in lst:
        print(f'{c(7)}[{op}] {val}{c(0)}')
        op += 1
    linha('-', cor=9)
    resp = leiaInt(f'{c(1)}Escolha sua opção{c(0)} ')
    return resp
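# Illustrative usage sketch (not part of the original module); the option labels
# below are hypothetical and `leiaInt` (imported above) is assumed to validate
# integer input.
if __name__ == '__main__':
    opcao = menu(['Cadastrar pessoa', 'Listar pessoas', 'Sair'])
    print(f'Opção escolhida: {opcao}')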
|
py | 1a48a8f84d703986f7d59334d40cfcf657d29f69 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['AssessmentArgs', 'Assessment']
@pulumi.input_type
class AssessmentArgs:
def __init__(__self__, *,
resource_details: pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']],
resource_id: pulumi.Input[str],
status: pulumi.Input['AssessmentStatusArgs'],
additional_data: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
assessment_name: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']] = None,
partners_data: Optional[pulumi.Input['SecurityAssessmentPartnerDataArgs']] = None):
"""
The set of arguments for constructing a Assessment resource.
:param pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']] resource_details: Details of the resource that was assessed
:param pulumi.Input[str] resource_id: The identifier of the resource.
:param pulumi.Input['AssessmentStatusArgs'] status: The result of the assessment
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_data: Additional data regarding the assessment
:param pulumi.Input[str] assessment_name: The Assessment Key - Unique key for the assessment type
:param pulumi.Input['SecurityAssessmentMetadataPropertiesArgs'] metadata: Describes properties of an assessment metadata.
:param pulumi.Input['SecurityAssessmentPartnerDataArgs'] partners_data: Data regarding 3rd party partner integration
"""
pulumi.set(__self__, "resource_details", resource_details)
pulumi.set(__self__, "resource_id", resource_id)
pulumi.set(__self__, "status", status)
if additional_data is not None:
pulumi.set(__self__, "additional_data", additional_data)
if assessment_name is not None:
pulumi.set(__self__, "assessment_name", assessment_name)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if partners_data is not None:
pulumi.set(__self__, "partners_data", partners_data)
@property
@pulumi.getter(name="resourceDetails")
def resource_details(self) -> pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']]:
"""
Details of the resource that was assessed
"""
return pulumi.get(self, "resource_details")
@resource_details.setter
def resource_details(self, value: pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']]):
pulumi.set(self, "resource_details", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Input[str]:
"""
The identifier of the resource.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter
def status(self) -> pulumi.Input['AssessmentStatusArgs']:
"""
The result of the assessment
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input['AssessmentStatusArgs']):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="additionalData")
def additional_data(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Additional data regarding the assessment
"""
return pulumi.get(self, "additional_data")
@additional_data.setter
def additional_data(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "additional_data", value)
@property
@pulumi.getter(name="assessmentName")
def assessment_name(self) -> Optional[pulumi.Input[str]]:
"""
The Assessment Key - Unique key for the assessment type
"""
return pulumi.get(self, "assessment_name")
@assessment_name.setter
def assessment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "assessment_name", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']]:
"""
Describes properties of an assessment metadata.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="partnersData")
def partners_data(self) -> Optional[pulumi.Input['SecurityAssessmentPartnerDataArgs']]:
"""
Data regarding 3rd party partner integration
"""
return pulumi.get(self, "partners_data")
@partners_data.setter
def partners_data(self, value: Optional[pulumi.Input['SecurityAssessmentPartnerDataArgs']]):
pulumi.set(self, "partners_data", value)
class Assessment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
additional_data: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
assessment_name: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['SecurityAssessmentMetadataPropertiesArgs']]] = None,
partners_data: Optional[pulumi.Input[pulumi.InputType['SecurityAssessmentPartnerDataArgs']]] = None,
resource_details: Optional[pulumi.Input[Union[pulumi.InputType['AzureResourceDetailsArgs'], pulumi.InputType['OnPremiseResourceDetailsArgs'], pulumi.InputType['OnPremiseSqlResourceDetailsArgs']]]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[pulumi.InputType['AssessmentStatusArgs']]] = None,
__props__=None):
"""
Security assessment on a resource - response format
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_data: Additional data regarding the assessment
:param pulumi.Input[str] assessment_name: The Assessment Key - Unique key for the assessment type
:param pulumi.Input[pulumi.InputType['SecurityAssessmentMetadataPropertiesArgs']] metadata: Describes properties of an assessment metadata.
:param pulumi.Input[pulumi.InputType['SecurityAssessmentPartnerDataArgs']] partners_data: Data regarding 3rd party partner integration
:param pulumi.Input[Union[pulumi.InputType['AzureResourceDetailsArgs'], pulumi.InputType['OnPremiseResourceDetailsArgs'], pulumi.InputType['OnPremiseSqlResourceDetailsArgs']]] resource_details: Details of the resource that was assessed
:param pulumi.Input[str] resource_id: The identifier of the resource.
:param pulumi.Input[pulumi.InputType['AssessmentStatusArgs']] status: The result of the assessment
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AssessmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Security assessment on a resource - response format
:param str resource_name: The name of the resource.
:param AssessmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AssessmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
additional_data: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
assessment_name: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['SecurityAssessmentMetadataPropertiesArgs']]] = None,
partners_data: Optional[pulumi.Input[pulumi.InputType['SecurityAssessmentPartnerDataArgs']]] = None,
resource_details: Optional[pulumi.Input[Union[pulumi.InputType['AzureResourceDetailsArgs'], pulumi.InputType['OnPremiseResourceDetailsArgs'], pulumi.InputType['OnPremiseSqlResourceDetailsArgs']]]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[pulumi.InputType['AssessmentStatusArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AssessmentArgs.__new__(AssessmentArgs)
__props__.__dict__["additional_data"] = additional_data
__props__.__dict__["assessment_name"] = assessment_name
__props__.__dict__["metadata"] = metadata
__props__.__dict__["partners_data"] = partners_data
if resource_details is None and not opts.urn:
raise TypeError("Missing required property 'resource_details'")
__props__.__dict__["resource_details"] = resource_details
if resource_id is None and not opts.urn:
raise TypeError("Missing required property 'resource_id'")
__props__.__dict__["resource_id"] = resource_id
if status is None and not opts.urn:
raise TypeError("Missing required property 'status'")
__props__.__dict__["status"] = status
__props__.__dict__["display_name"] = None
__props__.__dict__["links"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:security/v20210601:Assessment"), pulumi.Alias(type_="azure-native:security:Assessment"), pulumi.Alias(type_="azure-nextgen:security:Assessment"), pulumi.Alias(type_="azure-native:security/v20190101preview:Assessment"), pulumi.Alias(type_="azure-nextgen:security/v20190101preview:Assessment"), pulumi.Alias(type_="azure-native:security/v20200101:Assessment"), pulumi.Alias(type_="azure-nextgen:security/v20200101:Assessment")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Assessment, __self__).__init__(
'azure-native:security/v20210601:Assessment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':
"""
Get an existing Assessment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AssessmentArgs.__new__(AssessmentArgs)
__props__.__dict__["additional_data"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["links"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partners_data"] = None
__props__.__dict__["resource_details"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
return Assessment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="additionalData")
def additional_data(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Additional data regarding the assessment
"""
return pulumi.get(self, "additional_data")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
User friendly display name of the assessment
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def links(self) -> pulumi.Output['outputs.AssessmentLinksResponse']:
"""
Links relevant to the assessment
"""
return pulumi.get(self, "links")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:
"""
Describes properties of an assessment metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnersData")
def partners_data(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentPartnerDataResponse']]:
"""
Data regarding 3rd party partner integration
"""
return pulumi.get(self, "partners_data")
@property
@pulumi.getter(name="resourceDetails")
def resource_details(self) -> pulumi.Output[Any]:
"""
Details of the resource that was assessed
"""
return pulumi.get(self, "resource_details")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.AssessmentStatusResponseResponse']:
"""
The result of the assessment
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
|
py | 1a48a9852f7c54dbdbeacf11fe0eb4365b4b2d86 | import json
from pprint import pprint
from configparser import ConfigParser
from pybea.client import BureauEconomicAnalysisClient
# Grab configuration values.
config = ConfigParser()
config.read("configs/config.ini")
API_KEY = config.get("alex_credentials", "API_KEY")
def save_response(name: str, data: dict) -> None:
"""Use this if you want to save the responses."""
with open(
file=f"samples/responses/{name}.jsonc",
mode="w+",
encoding="utf-8",
) as sample_file:
json.dump(obj=data, fp=sample_file, indent=4)
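# Usage note: save_response("get_regional_data", regional_data), called near the
# end of this script, writes that payload to samples/responses/get_regional_data.jsonc.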
# Initialize the new Client.
bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
# Grab the Dataset List.
dataset_list = bea_client.get_dataset_list()
pprint(dataset_list)
# Grab the Parameters List.
parameters_set_list = bea_client.get_parameters_list(dataset_name="Regional")
pprint(parameters_set_list)
# Grab GDP for the Finance & Insurance industry (52), for the years 2018 &
# 2019 on an annual basis ('A').
gdp_by_industry = bea_client.gdp_by_industry(
year=["2019", "2018"], industry="52", frequency="A"
)
pprint(gdp_by_industry)
# Grab National Product and Income Data.
national_income = bea_client.national_income_and_product_accounts(
table_name="T10101", frequency=["A", "Q"], year=["2011", "2012"]
)
pprint(national_income)
# Grab National Product and Income detail data.
national_income_detail = bea_client.national_income_and_product_accounts_detail(
table_name="U20305", frequency=["A", "Q"], year=["2011", "2012"]
)
pprint(national_income_detail)
# Grab Current-Cost Net Stock of Private Fixed Assets, Equipment, Structures,
# and Intellectual Property Products by Type, for all years.
fixed_assets = bea_client.fixed_assets(table_name="FAAt201", year=["2011", "2012"])
pprint(fixed_assets)
# U. S. direct investment position in China and Asia for 2011 and 2012
investments = bea_client.direct_investments_and_multinational_enterprises(
direction_of_investment="outward",
classification="country",
series_id=["30"],
year=["2011", "2012"],
country=["650", "699"],
)
pprint(investments)
# Net income and sales for Brazilian affiliates of U. S. parent enterprises,
# all industries, 2011 and 2012.
investments = bea_client.activities_investments_and_multinational_enterprises(
direction_of_investment="outward",
classification="CountryByIndustry",
series_id=["4", "5"],
year=["2011", "2012"],
country=["202"],
ownership_level=False,
industry="ALL",
non_bank_affilates_only=False,
)
pprint(investments)
# Balance on goods with China for 2011 and 2012.
balance_on_goods = bea_client.international_transactions(
indicator=["BalGds"],
area_or_country=["China"],
year=["2011", "2012"],
frequency=["A"],
)
pprint(balance_on_goods)
# U.S. assets excluding financial derivatives; change in position
# attributable to price changes for all available years
us_assets = bea_client.international_investments_positions(
type_of_investment=["FinAssetsExclFinDeriv"],
component=["ChgPosPrice"],
year="ALL",
frequency=["A"],
)
pprint(us_assets)
# Data from Industry‐by‐Commodity Total Requirements, After Redefinitions
# (Sector Level) table for years 2010 through 2013.
input_output_data = bea_client.input_output_statstics(
table_id=["56"], year=["2010", "2011", "2012", "2013"]
)
pprint(input_output_data)
# Annual Value Added by Industry data for all industries for years 2012 and 2013.
underlying_gdp_by_industry = bea_client.underlying_gdp_by_industry(
industry="ALL", frequency=["A"], year=["2012", "2013"], table_id="ALL"
)
pprint(underlying_gdp_by_industry)
# Exports of telecommunications services by U.S. parents to their foreign affiliates for all years.
international_trade_services = bea_client.international_trade_services(
type_of_service="Telecom",
trade_direction=["Exports"],
year="ALL",
affiliation=["USPARENTS"],
area_or_country="AllCountries",
)
pprint(international_trade_services)
save_response(
name="get_international_trade_services", data=international_trade_services
)
# Personal income for 2012 and 2013 for all counties.
regional_data = bea_client.regional(
table_name=["CAINC1"], line_code=1, geo_fips=["COUNTY"], year=["2012", "2013"]
)
pprint(regional_data)
save_response(name="get_regional_data", data=regional_data)
|
py | 1a48a9ca9e085ea033db82947ed995a74f049b9a | from django.core import mail
from selenium.webdriver.common.keys import Keys
import re
from .base import FunctionalTest
TEST_EMAIL = '[email protected]'
SUBJECT = 'Your login link for Superlists'
class LoginTest(FunctionalTest):
def test_can_get_email_link_to_log_in(self):
        # Edith goes to the site and notices the "Log in" field
        # The field tells her to enter her email address, and she does
self.browser.get(self.live_server_url)
self.browser.find_element_by_name('email').send_keys(TEST_EMAIL)
self.browser.find_element_by_name('email').send_keys(Keys.ENTER)
        # A message tells her that the email has been sent
self.wait_for(lambda: self.assertIn(
'Check your email',
self.browser.find_element_by_tag_name('body').text
))
        # She checks her email and finds the message
email = mail.outbox[0]
self.assertIn(TEST_EMAIL, email.to)
self.assertEqual(email.subject, SUBJECT)
        # The email contains a URL
self.assertIn('Use this link to log in', email.body)
url_search = re.search(r'http://.+/.+$', email.body)
if not url_search:
self.fail(f'Could not find url in email body:\n{email.body}')
url = url_search.group(0)
self.assertIn(self.live_server_url, url)
        # She clicks the URL
self.browser.get(url)
        # She is logged in
self.wait_for(
lambda: self.browser.find_element_by_link_text('Log out')
)
navbar = self.browser.find_element_by_css_selector('.navbar')
self.assertIn(TEST_EMAIL, navbar.text) |
py | 1a48aa7c4542b9b44c197fc55b1295d0fddec32e | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import ast
import errno
import logging
import sqlite3
import pytz
import threading
import os
import re
from .basedb import DbDriver
from collections import defaultdict
from datetime import datetime
from math import ceil
from volttron.platform.agent import utils
from volttron.platform import jsonapi
from volttron.platform.agent.utils import fix_sqlite3_datetime
utils.setup_logging()
_log = logging.getLogger(__name__)
# Make sure sqlite3 datetime adapters are updated.
fix_sqlite3_datetime()
class SqlLiteFuncts(DbDriver):
"""
Implementation of SQLite3 database operation for
:py:class:`sqlhistorian.historian.SQLHistorian` and
:py:class:`sqlaggregator.aggregator.SQLAggregateHistorian`
For method details please refer to base class
:py:class:`volttron.platform.dbutils.basedb.DbDriver`
"""
def __init__(self, connect_params, table_names):
database = connect_params['database']
thread_name = threading.currentThread().getName()
_log.debug(
"initializing sqlitefuncts in thread {}".format(thread_name))
if database == ':memory:':
self.__database = database
else:
self.__database = os.path.expandvars(os.path.expanduser(database))
db_dir = os.path.dirname(self.__database)
# If the db does not exist create it in case we are started
# before the historian.
try:
if db_dir == '':
if utils.is_secure_mode():
data_dir = os.path.basename(os.getcwd()) + ".agent-data"
db_dir = os.path.join(os.getcwd(), data_dir)
else:
db_dir = './data'
self.__database = os.path.join(db_dir, self.__database)
os.makedirs(db_dir)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(db_dir):
raise
connect_params['database'] = self.__database
if 'detect_types' not in connect_params:
connect_params['detect_types'] = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
if 'timeout' not in connect_params.keys():
connect_params['timeout'] = 10
self.data_table = None
self.topics_table = None
self.meta_table = None
self.agg_topics_table = None
self.agg_meta_table = None
if table_names:
self.data_table = table_names['data_table']
self.topics_table = table_names['topics_table']
self.meta_table = table_names['meta_table']
self.agg_topics_table = table_names['agg_topics_table']
self.agg_meta_table = table_names['agg_meta_table']
_log.debug("In sqlitefuncts connect params {}".format(connect_params))
super(SqlLiteFuncts, self).__init__('sqlite3', **connect_params)
def setup_historian_tables(self):
result = self.select('''PRAGMA auto_vacuum''')
auto_vacuum = result[0][0]
if auto_vacuum != 1:
_log.info("auto_vacuum set to 0 (None), updating to 1 (full).")
_log.info("VACCUUMing DB to cause new auto_vacuum setting to take effect. "
"This could be slow on a large database.")
self.select('''PRAGMA auto_vacuum=1''')
self.select('''VACUUM;''')
rows = self.select(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.data_table}';")
if rows:
_log.debug("Tables already exists")
rows = self.select(f"PRAGMA table_info({self.topics_table})")
for row in rows:
if row[1] == "metadata":
_log.debug("Existing topics table contains metadata column")
self.meta_table = self.topics_table
else:
self.meta_table = self.topics_table
self.execute_stmt(
'''CREATE TABLE IF NOT EXISTS ''' + self.data_table +
''' (ts timestamp NOT NULL,
topic_id INTEGER NOT NULL,
value_string TEXT NOT NULL,
UNIQUE(topic_id, ts))''', commit=False)
self.execute_stmt(
'''CREATE INDEX IF NOT EXISTS data_idx
ON ''' + self.data_table + ''' (ts ASC)''', commit=False)
self.execute_stmt(
'''CREATE TABLE IF NOT EXISTS ''' + self.topics_table +
''' (topic_id INTEGER PRIMARY KEY,
topic_name TEXT NOT NULL,
metadata TEXT,
UNIQUE(topic_name))''', commit=False)
self.commit()
# metadata is in topics table
self.meta_table = self.topics_table
_log.debug("Created new schema. data and topics tables")
def setup_aggregate_historian_tables(self):
self.execute_stmt(
'CREATE TABLE IF NOT EXISTS ' + self.agg_topics_table +
' (agg_topic_id INTEGER PRIMARY KEY, \
agg_topic_name TEXT NOT NULL, \
agg_type TEXT NOT NULL, \
agg_time_period TEXT NOT NULL, \
UNIQUE(agg_topic_name, agg_type, agg_time_period));')
self.execute_stmt(
'CREATE TABLE IF NOT EXISTS ' + self.agg_meta_table +
'(agg_topic_id INTEGER NOT NULL PRIMARY KEY, \
metadata TEXT NOT NULL);')
_log.debug("Created aggregate topics and meta tables")
self.commit()
def query(self, topic_ids, id_name_map, start=None, end=None, agg_type=None, agg_period=None, skip=0, count=None,
order="FIRST_TO_LAST"):
"""
This function should return the results of a query in the form:
.. code-block:: python
{"values": [(timestamp1, value1), (timestamp2, value2), ...],
"metadata": {"key1": value1, "key2": value2, ...}}
metadata is not required (The caller will normalize this to {} for you)
@param topic_ids: topic_ids to query data for
@param id_name_map: dictionary containing topic_id:topic_name
@param start:
@param end:
@param agg_type:
@param agg_period:
@param skip:
@param count:
@param order:
"""
table_name = self.data_table
value_col = 'value_string'
if agg_type and agg_period:
table_name = agg_type + "_" + agg_period
value_col = 'agg_value'
query = '''SELECT topic_id, ts, ''' + value_col + '''
FROM ''' + table_name + '''
{where}
{order_by}
{limit}
{offset}'''
where_clauses = ["WHERE topic_id = ?"]
args = [topic_ids[0]]
        # The base historian converts naive timestamps to UTC, but if start and end carry explicit
        # timezone info they must also be converted to UTC here, since sqlite3 only stores naive timestamps.
if start:
start = start.astimezone(pytz.UTC)
if end:
end = end.astimezone(pytz.UTC)
if start and end and start == end:
where_clauses.append("ts = ?")
args.append(start)
else:
if start:
where_clauses.append("ts >= ?")
args.append(start)
if end:
where_clauses.append("ts < ?")
args.append(end)
where_statement = ' AND '.join(where_clauses)
order_by = 'ORDER BY topic_id ASC, ts ASC'
if order == 'LAST_TO_FIRST':
order_by = ' ORDER BY topic_id DESC, ts DESC'
# can't have an offset without a limit
# -1 = no limit and allows the user to provide just an offset
if count is None:
count = -1
limit_statement = 'LIMIT ?'
args.append(count)
offset_statement = ''
if skip > 0:
offset_statement = 'OFFSET ?'
args.append(skip)
real_query = query.format(where=where_statement,
limit=limit_statement,
offset=offset_statement,
order_by=order_by)
_log.debug("Real Query: " + real_query)
_log.debug("args: " + str(args))
values = defaultdict(list)
start_t = datetime.utcnow()
for topic_id in topic_ids:
args[0] = topic_id
values[id_name_map[topic_id]] = []
cursor = self.select(real_query, args, fetch_all=False)
if cursor:
if value_col == 'agg_value':
for _id, ts, value in cursor:
values[id_name_map[topic_id]].append((utils.format_timestamp(ts), value))
cursor.close()
else:
for _id, ts, value in cursor:
values[id_name_map[topic_id]].append((utils.format_timestamp(ts), jsonapi.loads(value)))
cursor.close()
_log.debug("Time taken to load results from db:{}".format(datetime.utcnow()-start_t))
return values
def manage_db_size(self, history_limit_timestamp, storage_limit_gb):
"""
Manage database size.
:param history_limit_timestamp: remove all data older than this timestamp
:param storage_limit_gb: remove oldest data until database is smaller than this value.
"""
_log.debug("Managing store - timestamp limit: {} GB size limit: {}".format(
history_limit_timestamp, storage_limit_gb))
commit = False
if history_limit_timestamp is not None:
count = self.execute_stmt(
'''DELETE FROM ''' + self.data_table +
''' WHERE ts < ?''', (history_limit_timestamp,))
if count is not None and count > 0:
_log.debug("Deleted {} old items from historian. (TTL exceeded)".format(count))
commit = True
if storage_limit_gb is not None:
result = self.select('''PRAGMA page_size''')
page_size = result[0][0]
max_storage_bytes = storage_limit_gb * 1024 ** 3
max_pages = int(ceil(max_storage_bytes / page_size))
def page_count():
result = self.select("PRAGMA page_count")
return result[0][0]
while page_count() >= max_pages:
count = self.execute_stmt(
'''DELETE FROM ''' + self.data_table +
'''
WHERE ts IN
(SELECT ts FROM ''' + self.data_table +
'''
ORDER BY ts ASC LIMIT 100)''')
_log.debug("Deleted 100 old items from historian. (Managing store size)".format(count))
commit = True
if commit:
_log.debug("Committing changes for manage_db_size.")
self.commit()
def insert_meta_query(self):
return '''INSERT OR REPLACE INTO ''' + self.meta_table + \
''' values(?, ?)'''
def update_meta_query(self):
return '''UPDATE ''' + self.meta_table + ''' SET metadata = ?
WHERE topic_id = ?'''
def insert_data_query(self):
return '''INSERT OR REPLACE INTO ''' + self.data_table + \
''' values(?, ?, ?)'''
def insert_topic_query(self):
return '''INSERT INTO ''' + self.topics_table + \
''' (topic_name) values (?)'''
def insert_topic_and_meta_query(self):
return '''INSERT INTO ''' + self.topics_table + \
''' (topic_name, metadata) values (?, ?)'''
def update_topic_query(self):
return '''UPDATE ''' + self.topics_table + ''' SET topic_name = ?
WHERE topic_id = ?'''
def update_topic_and_meta_query(self):
return '''UPDATE ''' + self.topics_table + ''' SET topic_name = ?, metadata = ?
WHERE topic_id = ?'''
def get_aggregation_list(self):
return ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM', 'TOTAL']
def insert_agg_topic_stmt(self):
return '''INSERT INTO ''' + self.agg_topics_table + '''
(agg_topic_name, agg_type, agg_time_period )
values (?, ?, ?)'''
def update_agg_topic_stmt(self):
return '''UPDATE ''' + self.agg_topics_table + ''' SET
agg_topic_name = ? WHERE agg_topic_id = ? '''
def replace_agg_meta_stmt(self):
return '''INSERT OR REPLACE INTO ''' + self.agg_meta_table + '''
values(?, ?)'''
def get_topic_map(self):
_log.debug("in get_topic_map")
q = "SELECT topic_id, topic_name FROM " + self.topics_table
rows = self.select(q, None)
_log.debug("loading topic map from db")
id_map = dict()
name_map = dict()
for t, n in rows:
id_map[n.lower()] = t
name_map[n.lower()] = n
return id_map, name_map
def get_topic_meta_map(self):
q = "SELECT topic_id, metadata FROM " + self.meta_table + ";"
rows = self.select(q, None)
_log.debug("loading metadata from db")
topic_meta_map = dict()
for id, meta in rows:
topic_meta_map[id] = jsonapi.loads(meta)
return topic_meta_map
def get_agg_topics(self):
try:
_log.debug("in get_agg_topics")
query = "SELECT agg_topic_name, agg_type, agg_time_period, metadata FROM " + self.agg_topics_table + \
" as t, " + self.agg_meta_table + " as m WHERE t.agg_topic_id = m.agg_topic_id "
rows = self.select(query, None)
topics = []
for row in rows:
_log.debug("rows from aggregate_t")
meta = ast.literal_eval(row[3])['configured_topics']
topics.append((row[0], row[1], row[2], meta))
return topics
except sqlite3.Error as e:
if e.args[0][0:13] == 'no such table':
_log.warning("No such table : {}".format(self.agg_topics_table))
return []
else:
raise
def get_agg_topic_map(self):
try:
_log.debug("in get_agg_topic_map")
q = "SELECT agg_topic_id, agg_topic_name, agg_type, agg_time_period FROM " + self.agg_topics_table
rows = self.select(q, None)
_log.debug("loading agg_topic map from db")
id_map = dict()
for row in rows:
_log.debug("rows from aggregate_t")
id_map[(row[1].lower(), row[2], row[3])] = row[0]
return id_map
except sqlite3.Error as e:
if e.args[0][0:13] == 'no such table':
_log.warning("No such table : {}".format(self.agg_topics_table))
return {}
else:
raise
@staticmethod
def regexp(expr, item):
_log.debug("item {} matched against expr {}".format(item, expr))
return re.search(expr, item, re.IGNORECASE) is not None
def set_cache(self, cache_size):
self.execute_stmt("PRAGMA CACHE_SIZE={}".format(cache_size))
def regex_select(self, query, args, fetch_all=True, cache_size=None):
conn = None
cursor = None
try:
conn = sqlite3.connect(self.__database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
if conn is None:
_log.error("Unable to connect to sqlite database {} ".format(self.__database))
return []
conn.create_function("REGEXP", 2, SqlLiteFuncts.regexp)
if cache_size:
conn.execute("PRAGMA CACHE_SIZE={}".format(cache_size))
_log.debug("REGEXP query {} ARGS: {}".format(query, args))
cursor = conn.cursor()
if args is not None:
cursor.execute(query, args)
else:
_log.debug("executing query")
cursor.execute(query)
if fetch_all:
rows = cursor.fetchall()
_log.debug("Regex returning {}".format(rows))
return rows
else:
return cursor, conn
except Exception as e:
_log.error("Exception querying database based on regular expression:{}".format(e.args))
finally:
if fetch_all:
if cursor:
cursor.close()
if conn:
conn.close()
def query_topics_by_pattern(self, topic_pattern):
id_map, name_map = self.get_topic_map()
_log.debug("Contents of topics table {}".format(list(id_map.keys())))
q = "SELECT topic_id, topic_name FROM " + self.topics_table + " WHERE topic_name REGEXP '" + topic_pattern + \
"';"
rows = self.regex_select(q, None)
_log.debug("loading topic map from db")
id_map = dict()
for t, n in rows:
id_map[n] = t
_log.debug("topics that matched the pattern {} : {}".format(topic_pattern, id_map))
return id_map
def create_aggregate_store(self, agg_type, period):
table_name = agg_type + '''_''' + period
stmt = "CREATE TABLE IF NOT EXISTS " + table_name + \
" (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, " \
"agg_value REAL NOT NULL, topics TEXT, " \
"UNIQUE(topic_id, ts)); "
self.execute_stmt(stmt)
stmt = "CREATE INDEX IF NOT EXISTS idx_" + table_name + " ON " + table_name + "(ts ASC);"
self.execute_stmt(stmt, commit=True)
return True
def insert_aggregate_stmt(self, table_name):
return '''INSERT OR REPLACE INTO ''' + table_name + ''' values(?, ?, ?, ?)'''
def collect_aggregate(self, topic_ids, agg_type, start=None, end=None):
"""
This function should return the results of a aggregation query
@param topic_ids: list of single topics
@param agg_type: type of aggregation
@param start: start time
@param end: end time
@return: aggregate value, count of number of records over which
aggregation was computed
"""
if isinstance(agg_type, str):
if agg_type.upper() not in ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM']:
raise ValueError("Invalid aggregation type {}".format(agg_type))
query = '''SELECT ''' + agg_type + '''(value_string), count(value_string) FROM ''' + \
self.data_table + ''' {where}'''
where_clauses = ["WHERE topic_id = ?"]
args = [topic_ids[0]]
if len(topic_ids) > 1:
where_str = "WHERE topic_id IN ("
for _ in topic_ids:
where_str += "?, "
where_str = where_str[:-2] # strip last comma and space
where_str += ") "
where_clauses = [where_str]
args = topic_ids[:]
        # The base historian converts naive timestamps to UTC, but if start and end carry explicit
        # timezone info they must also be converted to UTC here, since sqlite3 only stores naive timestamps.
if start:
start = start.astimezone(pytz.UTC)
if end:
end = end.astimezone(pytz.UTC)
if start and end and start == end:
where_clauses.append("ts = ?")
args.append(start)
else:
if start:
where_clauses.append("ts >= ?")
args.append(start)
if end:
where_clauses.append("ts < ?")
args.append(end)
where_statement = ' AND '.join(where_clauses)
real_query = query.format(where=where_statement)
_log.debug("Real Query: " + real_query)
_log.debug("args: " + str(args))
results = self.select(real_query, args)
if results:
_log.debug("results got {}, {}".format(results[0][0], results[0][1]))
return results[0][0], results[0][1]
else:
return 0, 0
@staticmethod
def get_tagging_query_from_ast(topic_tags_table, tup, tag_refs):
"""
Get a query condition syntax tree and generate sqlite query to query
topic names by tags. It calls the get_compound_query to parse the
abstract syntax tree tuples and then fixes the precedence
Example:
# User input query string :
.. code-block::
campus.geoPostalCode="20500" and equip and boiler and "equip_tag 7" > 4
# Example output sqlite query
.. code-block::
SELECT topic_prefix from test_topic_tags WHERE tag="campusRef"
and value IN(
SELECT topic_prefix from test_topic_tags WHERE tag="campus" and
value=1
INTERSECT
SELECT topic_prefix from test_topic_tags WHERE tag="geoPostalCode"
and value="20500"
)
INTERSECT
SELECT topic_prefix from test_tags WHERE tag="equip" and value=1
INTERSECT
SELECT topic_prefix from test_tags WHERE tag="boiler" and value=1
INTERSECT
SELECT topic_prefix from test_tags WHERE tag = "equip_tag 7" and
value > 4
:param topic_tags_table: table to query
:param tup: parsed query string (abstract syntax tree)
:param tag_refs: dictionary of ref tags and its parent tag
:return: sqlite query
:rtype str
"""
query = SqlLiteFuncts._get_compound_query(topic_tags_table, tup, tag_refs)
        # Finally, check for parent (ref) tags; if one is present, convert the
        # comparison into a subquery.
# Convert
# WHERE tag='campusRef.geoPostalCode' AND value="20500"
# to
# where tag='campusRef' and value IN (
# SELECT topic_prefix FROM test_topic_tags
# WHERE tag='campus' AND value=1
# INTERSECT
# SELECT topic_prefix FROM test_topic_tags
# WHERE tag='geoPostalCode' and value="20500"
# )
parent = ""
search_pattern = r"WHERE\s+tag='(.+)\.(.+)'\s+AND\s+value\s+(.+)($|\n)"
results = re.findall(search_pattern, query, flags=re.IGNORECASE)
# Example result :<type 'list'>: [('campusRef', 'tag1', '= 2', '\n'),
# ('siteRef', 'tag2', '= 3 ', '\n')]
# Loop through and replace comparison operation with sub query
for result in results:
parent = tag_refs[result[0]]
replace_pattern = r"WHERE tag = '\1' AND value IN \n (" \
r"SELECT topic_prefix " \
r"FROM {table} WHERE tag = '{parent}' AND " \
r"value = 1\n " \
r"INTERSECT\n " \
r"SELECT topic_prefix FROM {table} WHERE " \
r"tag = '\2' " \
r"AND " \
r"value \3 \4)".format(table=topic_tags_table,
parent=parent)
query = re.sub(search_pattern, replace_pattern, query, count=1, flags=re.I)
_log.debug("Returning sqlite query condition {}".format(query))
return query
@staticmethod
def _get_compound_query(topic_tags_table, tup, tag_refs, root=True):
"""
Get a query condition syntax tree and generate sqlite query to query
topic names by tags
Example:
# User input query string :
campus.geoPostalCode="20500" and equip and boiler and "equip_tag 7" > 4
SELECT topic_prefix FROM test_topic_tags WHERE tag="campusRef"
and value IN(
SELECT topic_prefix FROM test_topic_tags WHERE tag="campus" AND
value=1
INTERSECT
SELECT topic_prefix FROM test_topic_tags WHERE tag="geoPostalCode"
AND value="20500"
)
INTERSECT
SELECT topic_prefix FROM test_tags WHERE tag="equip" AND value=1
INTERSECT
SELECT topic_prefix FROM test_tags WHERE tag="boiler" AND value=1
INTERSECT
SELECT topic_prefix FROM test_tags WHERE tag = "equip_tag 7" AND
value > 4
:param topic_tags_table: table to query
:param tup: parsed query string (abstract syntax tree)
:param tag_refs: dictionary of ref tags and its parent tag
:param root: Boolean to indicate if it is the top most tuple in the
abstract syntax tree.
:return: sqlite query
:rtype str
"""
        # Instead of the sqlite LIKE operator we use Python regular expressions via the sqlite REGEXP operator
reserved_words = {'and': 'INTERSECT', "or": 'UNION', 'not': 'NOT', 'like': 'REGEXP'}
prefix = 'SELECT topic_prefix FROM {} WHERE '.format(topic_tags_table)
if tup is None:
return tup
if not isinstance(tup[1], tuple):
left = repr(tup[1]) # quote the tag
else:
left = SqlLiteFuncts._get_compound_query(topic_tags_table, tup[1], tag_refs, False)
if not isinstance(tup[2], tuple):
if isinstance(tup[2],str):
right = repr(tup[2])
elif isinstance(tup[2], bool):
right = 1 if tup[2] else 0
else:
right = tup[2]
else:
right = SqlLiteFuncts._get_compound_query(topic_tags_table, tup[2], tag_refs, False)
assert isinstance(tup[0], str)
lower_tup0 = tup[0].lower()
operator = lower_tup0
if lower_tup0 in reserved_words:
operator = reserved_words[lower_tup0]
if operator == 'NOT':
query = SqlLiteFuncts._negate_condition(right, topic_tags_table)
elif operator == 'INTERSECT' or operator == 'UNION':
if root:
query = "{left}\n{operator}\n{right}".format(left=left, operator=operator, right=right)
else:
query = 'SELECT topic_prefix FROM ({left} \n{operator}\n{right})'.format(
left=left, operator=operator, right=right)
else:
query = "{prefix} tag={tag} AND value {operator} {value}".format(
prefix=prefix, tag=left, operator=operator, value=right)
return query
@staticmethod
def _negate_condition(condition, table_name):
"""
change NOT(bool_expr AND bool_expr) to NOT(bool_expr) OR NOT(bool_expr)
recursively. In sqlite syntax:
TO negate the following sql query:
SELECT * FROM
(SELECT * FROM
(SELECT topic_prefix FROM topic_tags WHERE tag='tag3' AND value > 1
INTERSECT
SELECT topic_prefix FROM topic_tags WHERE tag='tag2' AND value > 2)
UNION
SELECT topic_prefix FROM topic_tags WHERE tag='tag4' AND value < 2)
We have to change it to:
SELECT * FROM
(SELECT * FROM
(SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN
(SELECT topic_prefix FROM topic_tags WHERE tag='tag3' AND
value > 1)
UNION
SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN
(SELECT topic_prefix FROM topic_tags WHERE tag='tag2' AND
value > 2))
INTERSECT
SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN(
SELECT topic_prefix FROM topic_tags WHERE tag='tag4' AND
value < 2))
:param condition: select query that needs to be negated. It could be a
compound query.
:return: negated select query
:rtype str
"""
_log.debug("Query condition to negate: {}".format(condition))
        # Swap the set operators: INTERSECT (and) becomes UNION (or) and vice versa
condition = condition.replace('INTERSECT\n', 'UNION_1\n')
condition = condition.replace('UNION\n', 'INTERSECT\n')
condition = condition.replace('UNION_1\n', 'UNION\n')
# Now negate all SELECT... value<operator><value> with
# SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN (SELECT....value<operator><value>)
search_pattern = r'(SELECT\s+topic_prefix\s+FROM\s+' + table_name + \
r'\s+WHERE\s+tag=\'.*\'\s+AND\s+value.*($|\n))'
replace_pattern = r'SELECT topic_prefix FROM ' + table_name + r' WHERE topic_prefix NOT IN (\1)\2'
c = re.search(search_pattern, condition)
condition = re.sub(search_pattern,
replace_pattern,
condition,
flags=re.I
)
_log.debug("Condition after negation: {}".format(condition))
return condition
if __name__ == '__main__':
con = {
"database": '/tmp/tmpgLzWr3/historian.sqlite'
}
tables_def = {
"table_prefix": "prefix",
"data_table": "data_table",
"topics_table": "topics_table",
"meta_table": "meta_table"
}
functs = SqlLiteFuncts(con, tables_def)
functs.collect_aggregate('device1/in_temp', 'sum',
datetime.strptime('2016-06-05 22:47:02.417604+00:00', "%Y-%m-%d %H:%M:%S.%f+00:00"),
datetime.strptime('2016-06-05 22:49:02.417604+00:00', "%Y-%m-%d %H:%M:%S.%f+00:00"))
|
py | 1a48abfc725a1c3971218cd74242b5c4e8eefdfb | import torch
from torch.autograd import Variable
from torch.autograd import Function
import numpy as np
import scipy.linalg
class MatrixSquareRoot(Function):
"""Square root of a positive definite matrix.
NOTE: matrix square root is not differentiable for matrices with
zero eigenvalues.
"""
@staticmethod
def forward(ctx, input):
itr_TH = 10 # number of iterations threshold
dim = input.shape[0]
norm = torch.norm(input)#.double())
#Y = input.double()/norm
Y = input/norm
I = torch.eye(dim,dim,device=input.device)#.double()
Z = torch.eye(dim,dim,device=input.device)#.double()
#print('Check: ', Y.type(), I.type(), Z.type())
for i in range(itr_TH):
T = 0.5*(3.0*I - Z.mm(Y))
Y = Y.mm(T)
Z = T.mm(Z)
sqrtm = Y*torch.sqrt(norm)
ctx.mark_dirty(Y,I,Z)
ctx.save_for_backward(sqrtm)
return sqrtm
@staticmethod
def backward(ctx, grad_output):
itr_TH = 10 # number of iterations threshold
grad_input = None
sqrtm, = ctx.saved_tensors
dim = sqrtm.shape[0]
norm = torch.norm(sqrtm)
A = sqrtm/norm
I = torch.eye(dim, dim, device=sqrtm.device)#.double()
#Q = grad_output.double()/norm
Q = grad_output/norm
for i in range(itr_TH):
Q = 0.5*(Q.mm(3.0*I-A.mm(A))-A.t().mm(A.t().mm(Q)-Q.mm(A)))
A = 0.5*A.mm(3.0*I-A.mm(A))
grad_input = 0.5*Q
return grad_input
sqrtm = MatrixSquareRoot.apply
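# Minimal usage sketch (illustrative; see original_main()/main() below for the
# checks actually shipped with this file). The forward pass runs a fixed number
# (itr_TH) of Newton-Schulz-style coupled iterations, so the result is an
# approximation:
#   k = torch.randn(20, 10)
#   pd = k.t().matmul(k)   # symmetric positive definite
#   s = sqrtm(pd)          # s.matmul(s) is approximately pd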
def original_main():
from torch.autograd import gradcheck
k = torch.randn(20, 10).double()
# Create a positive definite matrix
pd_mat = k.t().matmul(k)
pd_mat = Variable(pd_mat, requires_grad=True)
test = gradcheck(MatrixSquareRoot.apply, (pd_mat,))
print(test)
def single_main():
from torch.autograd import gradcheck
n = 1
A = torch.randn( 20, 10).double()
# Create a positive definite matrix
pd_mat = A.t().matmul(A)
pd_mat = Variable(pd_mat, requires_grad=True)
test = gradcheck(MatrixSquareRoot.apply, (pd_mat,))
print(test)
#sqrtm_scipy = np.zeros_like(A)
print('err: ', pd_mat)
sqrtm_scipy = scipy.linalg.sqrtm(pd_mat.detach().numpy().astype(np.float_))
# for i in range(n):
# sqrtm_scipy[i] = sqrtm(pd_mat[i].detach().numpy())
sqrtm_torch = sqrtm(pd_mat)
print('sqrtm torch: ', sqrtm_torch)
print('scipy', sqrtm_scipy)
print('Difference: ', np.linalg.norm(sqrtm_scipy - sqrtm_torch.detach().numpy()))
def main():# batch
from torch.autograd import gradcheck
n = 2
A = torch.randn(n, 4, 5).double()
A.requires_grad = True
# Create a positive definite matrix
#pd_mat = A.t().matmul(A)
pd_mat = torch.matmul(A.transpose(-1, -2), A)
pd_mat = Variable(pd_mat, requires_grad=True)
print('err: ', pd_mat.shape)
#test = gradcheck(MatrixSquareRoot.apply, (pd_mat,))
#print(test)
sqrtm_scipy = np.zeros_like(pd_mat.detach().numpy())
#sqrtm_scipy = scipy.linalg.sqrtm(pd_mat.detach().numpy().astype(np.float_))
for i in range(n):
sqrtm_scipy[i] = scipy.linalg.sqrtm(pd_mat[i].detach().numpy())
# batch implementation
sqrtm_torch = torch.zeros(pd_mat.shape)
for i in range(n):
sqrtm_torch[i] = sqrtm(pd_mat[i])
#sqrtm_torch = sqrtm(pd_mat)
print('sqrtm torch: ', sqrtm_torch)
print('scipy', sqrtm_scipy)
print('Difference: ', np.linalg.norm(sqrtm_scipy - sqrtm_torch.detach().numpy()))
if __name__ == '__main__':
main()
|
py | 1a48ac610dd99dee0de0b9fc8b84ddd59a199ed4 | class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
k = k%len(nums)
nums[:] = nums[-k:] + nums[:-k]
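# Illustrative example (not part of the submission):
#   nums = [1, 2, 3, 4, 5, 6, 7]
#   Solution().rotate(nums, 3)   # nums becomes [5, 6, 7, 1, 2, 3, 4]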
|
py | 1a48ad413cc79b20495122c2be5660cdab148dee | def enc(inp, key, count):
    # Caesar-style re-mapping: shift each character of `inp` relative to 'A',
    # using modular arithmetic over `count` positions.
    ans = ''
    for i in inp:
        new = ((ord(i) - ord('A')) + int(key) + 1) % int(count)
        ans = ans + chr(new + ord('A') - 1)
    return ans
# Read input lines until a '#' sentinel; collect their upper-cased characters.
inp = input()
l = []
count = 0
while inp != '#':
    l += inp.upper()
    count += 1
    inp = input()
# Build `count` rows, each derived from the previous one via enc().
v = []
for i in range(count):
    j = l
    v.append(j)
    l = []
    for i in range(count):
        l.append(enc(j[i], 1, count))
# Print the rows, rendering '@' as a space.
a = ''
for i in range(len(v)):
    p = ''
    for j in range(count):
        if v[i][j] == '@':
            a = ' '
            p += a
        else:
            p += v[i][j]
    print(p)
|
py | 1a48ad66ff041db89549d0b46d61033ed548f359 | from __future__ import absolute_import, print_function, division
import copy
import numpy as np
import logging
import pdb
import time
from six import iteritems
from six.moves import xrange
import sys
import theano
from theano import tensor, scalar, gof, config
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,
LocalGroupDB,
SequenceDB, Optimizer, DB, toolbox, graph)
from theano.ifelse import IfElse
from theano.misc.ordered_set import OrderedSet
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scalar.basic_scipy import Erfinv, Erfcinv
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet import bn
from theano.tensor.nnet.conv import ConvOp
from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter
from theano.tensor.nnet.abstract_conv import (BaseAbstractConv,
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs)
import theano.tensor.nlinalg as nlinalg
import theano.tensor.signal.pool as pool
import theano.tensor.slinalg as slinalg
from theano.tests.breakpoint import PdbBreakpoint
from .type import (GpuArrayType, GpuArrayConstant, get_context,
ContextNotDefined, move_to_gpu)
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous, gpu_contiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,
gpugemm_no_inplace, gpugemm_inplace,
gpugemmbatch_no_inplace,
gpugemv_no_inplace, gpugemv_inplace,
GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights,
GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights)
from .pool import (GpuPool, GpuMaxPoolGrad, GpuAveragePoolGrad, GpuMaxPoolRop,
GpuDownsampleFactorMaxGradGrad)
from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,
gpu_sparse_block_outer,
gpu_sparse_block_outer_inplace,
gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace)
from .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx,
gpu_crossentropy_softmax_argmax_1hot_with_bias,
gpu_softmax_with_bias, gpu_softmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY, gpu_erfinv, gpu_erfcinv,
max_inputs_to_GpuElemwise)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)
from .opt_util import alpha_merge, output_merge, pad_dims, unpad_dims
from .reduction import GpuMaxAndArgmax
from .linalg import (GpuCusolverSolve, MATRIX_STRUCTURES_SOLVE, GpuCholesky,
cusolver_available, GpuMagmaMatrixInverse, GpuMagmaSVD)
_logger = logging.getLogger("theano.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
# Not used for an EquilibriumOptimizer. It has the "tracks" that we need for GraphToGPUDB.
gpu_optimizer2 = EquilibriumDB()
class GraphToGPUDB(DB):
"""
Retrieves the list local optimizers based on the optimizer flag's value
from EquilibriumOptimizer by calling the method query.
"""
def query(self, *tags, **kwtags):
opt = gpu_optimizer2.query(*tags, **kwtags)
return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)
gpu_seqopt = SequenceDB()
gpu_seqopt.register('gpuarray_graph_optimization', GraphToGPUDB(), -0.5,
'fast_compile', 'fast_run', 'gpuarray')
gpu_seqopt.register('gpuarray_local_optimizations', gpu_optimizer, 1,
'fast_compile', 'fast_run', 'gpuarray', 'gpuarray_local_optimiziations')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpuarray')
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
def register_opt2(tracks, *tags, **kwargs):
'''
Decorator for the new GraphToGPU optimizer.
    Takes an extra parameter (Op) compared to the register_opt decorator.
Parameters
----------
tracks : List of Op class Or Op instance or None
The Node's Op to which optimization is being applied.
tags : String
The optimization tag to which the optimizer will be registered.
'''
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
if isinstance(local_opt, theano.gof.DB):
opt = local_opt
else:
opt = theano.gof.local_optimizer(tracks)(local_opt)
gpu_optimizer2.register(name, opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
optdb.register(
name, TopoOptimizer(
local_opt, failure_callback=TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace', 'gpuarray', *tags)
return local_opt
return f
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
register_opt(final_opt=True, name='gpua_constant_folding')(
tensor.opt.constant_folding)
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
def safe_to_gpu(x, ctx_name):
if isinstance(x.type, tensor.TensorType):
return GpuFromHost(ctx_name)(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, GpuArrayType):
return x.transfer('cpu')
else:
return x
def op_lifter(OP, cuda_only=False):
"""
OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
"""
def f(maker):
def local_opt(node):
if type(node.op) in OP:
# Either one of our inputs is on the gpu or
# all of our clients are on the gpu
replace = False
# TODO: Maybe set context_name with infer_context_name()?
context_name = None
# We replace if any input is a host_from_gpu
for i in node.inputs:
if (i.owner and i.owner.op == host_from_gpu and
move_to_gpu(i)):
context_name = i.owner.inputs[0].type.context_name
replace = True
break
if not replace:
# We replace if *all* clients are on the GPU
clients = [c for o in node.outputs for c in o.clients]
replace = len(clients) != 0
for c, idx in clients:
if (c == 'output' or
not isinstance(c.op, GpuFromHost)):
replace = False
# TODO: check that the clients want the same context?
if replace:
# All clients are GpuFromHost and we have at least one
context_name = clients[0][0].op.context_name
# Check if we should replace
if (not replace or
(cuda_only and
get_context(context_name).kind != b'cuda') or
any(["complex" in getattr(i, 'dtype', "")
for i in node.inputs])):
return False
# tag the inputs with the context in case
# the context was derived from the outputs
for i in node.inputs:
i.tag.context_name = context_name
new_op = maker(node.op, context_name, node.inputs, node.outputs)
# This is needed as sometimes new_op inherits from OP.
if new_op and new_op != node.op:
if isinstance(new_op, theano.Op):
return [safe_to_cpu(o) for o in
new_op(*node.inputs, return_list=True)]
elif isinstance(new_op, (tuple, list)):
return [safe_to_cpu(o) for o in new_op]
else: # suppose it is a variable on the GPU
return [new_op.transfer('cpu')]
return False
local_opt.__name__ = maker.__name__
return local_optimizer(OP)(local_opt)
return f
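# Note on the maker callback used with op_lifter: as handled above, it may
# return a GPU Op (which is then applied to the node's inputs), a list/tuple
# of replacement variables, or a single GPU variable; in every case the
# results are transferred back to the CPU with safe_to_cpu()/.transfer('cpu')
# so that the later transfer-cutting optimizations can clean things up.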
class InputToGpuOptimizer(Optimizer):
"""
Transfer the input to the gpu to start the rolling wave.
"""
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, GpuArrayType):
continue
# If all clients are outputs or transfers don't do anything.
if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)
for cl in input.clients)):
continue
target = getattr(input.tag, 'target', None)
if target == 'cpu':
continue
if (isinstance(input.type, tensor.TensorType) and
not move_to_gpu(input)):
continue
try:
new_input = GpuFromHost(target)(input).transfer('cpu')
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# This could fail if the inputs are not TensorTypes
pass
except ContextNotDefined:
if hasattr(input.tag, 'target'):
raise
# If there is no context tag and no default context
# then it stays on the CPU
pass
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
class GraphToGPU(Optimizer):
"""
Transfer the graph as a whole to the GPU instead of transferring node by node.
Parameters
----------
local_optimizers_all : List or SortedSet
The local optimizations to apply to a node.
local_optimizers_map : Dict
Dictionary object containing the mapping of Op to list of
LocalOptimizers.
"""
def __init__(self, local_optimizers_all, local_optimizers_map):
self.local_optimizers_all = local_optimizers_all
self.local_optimizers_map = local_optimizers_map
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
mapping = {}
time_opts = {}
node_created = {}
process_count = {}
t_topo = time.time()
topo = fgraph.toposort()
time_topo = time.time()
toposort_timing = time_topo - t_topo
# Building a new graph
# Iterating through inputs of graph
target = infer_context_name(*fgraph.inputs)
for i in fgraph.inputs:
if isinstance(i.type, tensor.TensorType) and move_to_gpu(i):
mapping[i] = i.transfer(getattr(i.tag, 'target', target))
else:
mapping[i] = i
for i in fgraph.variables:
if isinstance(i, theano.Constant):
mapping[i] = i
for node in topo:
for lopt in (self.local_optimizers_map.get(node.op, []) +
self.local_optimizers_map.get(type(node.op), []) +
self.local_optimizers_all):
process_count.setdefault(lopt, 0)
time_opts.setdefault(lopt, 0)
node_created.setdefault(lopt, 0)
for node in topo:
if isinstance(node.op, HostFromGpu):
mapping[node.outputs[0]] = mapping[node.inputs[0]]
continue
# Move only if any of the inputs are on the GPU.
move_to_GPU = False
context_name = None
for i in [mapping[i] for i in node.inputs]:
if isinstance(i.type, GpuArrayType):
context_name = i.type.context_name
move_to_GPU = True
break
if (not move_to_GPU and
isinstance(node.op, (theano.tensor.Alloc,
theano.tensor.AllocEmpty,
theano.tensor.basic.Eye))):
# If the Alloc[Empty] has a client that will be moved
# to the GPU, we should move the Alloc* to the GPU as well.
# We approximate this by assuming that if we have an
# optimization for one of the clients' ops, then that
# client will be moved to the GPU.
for c, _ in node.outputs[0].clients:
if (c != 'output' and
(self.local_optimizers_map.get(c.op, []) +
self.local_optimizers_map.get(type(c.op), []))):
move_to_GPU = True
new_ops = None
if move_to_GPU and any(["complex" in getattr(i, 'dtype', "")
for i in node.inputs]):
move_to_GPU = False
# Apply the lifter
if move_to_GPU:
for lopt in (self.local_optimizers_map.get(node.op, []) +
self.local_optimizers_map.get(type(node.op), []) +
self.local_optimizers_all):
t_opt = time.time()
new_ops = lopt.transform(node.op, context_name,
[mapping[i] for i in node.inputs],
node.outputs)
t_opt2 = time.time()
time_opts[lopt] += t_opt2 - t_opt
if new_ops:
process_count[lopt] += 1
break
outputs = []
if isinstance(new_ops, theano.Op):
outputs = new_ops(*[mapping[i] for i in node.inputs], return_list=True)
elif not new_ops:
newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
outputs = newnode.outputs
elif isinstance(new_ops, (tuple, list)):
outputs = new_ops
elif isinstance(new_ops, theano.Variable):
outputs = [new_ops]
if new_ops:
node_created[lopt] += len(graph.ops([mapping[i] for i in node.inputs], outputs))
if any([getattr(old_o, 'dtype', None) != getattr(new_o, 'dtype', None)
for old_o, new_o in zip(outputs, node.outputs)]):
_logger.warning(
"The optimization %s returned bad dtype. Skipping it."
" Write to theano-dev mailing list about this." %
str(lopt))
newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])
outputs = newnode.outputs
for new_o, old_o in zip(outputs, node.outputs):
assert len(outputs) == len(node.outputs)
mapping[old_o] = new_o
new_nodes = []
for o in fgraph.outputs:
new_o = mapping[o]
if new_o.type != o.type:
assert isinstance(o.type, tensor.TensorType)
assert isinstance(new_o.type, GpuArrayType)
# This condition is needed in the case one input is an
# output of the graph. Without this, it would
# introduce cycle as we don't replace correctly that
# case. It would also add extra transfer to/from the
# gpu.
if (new_o.owner and
isinstance(new_o.owner.op, GpuFromHost) and
new_o.owner.inputs[0].type == o.type):
new_o = new_o.owner.inputs[0]
else:
new_o = safe_to_cpu(new_o)
new_nodes.append(new_o)
fgraph.replace_all_validate(zip(fgraph.outputs, new_nodes),
reason=self.__class__.__name__)
return (self, toposort_timing, time_opts, node_created, process_count)
@staticmethod
def print_profile(stream, prof, level=0):
(opt, toposort_timing, time_opts, node_created, process_count) = prof
blanc = (' ' * level)
print(blanc, "GraphToGPUOptimizer", end=' ', file=stream)
print(blanc, getattr(opt, "name",
getattr(opt, "__name__", "")), file=stream)
print(blanc, " time io_toposort %.3fs" % toposort_timing, file=stream)
s = sum(time_opts.values())
print(blanc, "Total time taken by local optimizers %.3fs " % s, file=stream)
count_opt = []
not_used = []
not_used_time = 0
for o, count in iteritems(process_count):
if count > 0:
count_opt.append((time_opts[o], count,
node_created[o], o))
else:
not_used.append((time_opts[o], o))
not_used_time += time_opts[o]
if count_opt:
print(blanc,
' times - times applied - Node created - name:',
file=stream)
count_opt.sort()
for (t, count, n_created, o) in count_opt[::-1]:
print(blanc, ' %.3fs - %d - %d - %s' % (
t, count, n_created, o), file=stream)
print(blanc, ' %.3fs - in %d optimization that were not used (display only those with a runtime > 0)' % (
not_used_time, len(not_used)), file=stream)
not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
for (t, o) in not_used[::-1]:
if t > 0:
# Skip opts that have 0 time; they probably weren't even tried.
print(blanc + " ", ' %.3fs - %s' % (t, o), file=stream)
print(file=stream)
@staticmethod
def merge_profile(prof1, prof2):
# (opt, toposort_timing, time_opts, node_created, process_count) = prof1
local_optimizers = OrderedSet(prof1[0].local_optimizers_all).union(
prof2[0].local_optimizers_all)
def merge_dict(d1, d2):
"""
Merge two dicts by adding the values.
"""
d = d1.copy()
for k, v in iteritems(d2):
if k in d:
d[k] += v
else:
d[k] = v
return d
local_optimizers_map = merge_dict(prof1[0].local_optimizers_map,
prof2[0].local_optimizers_map)
new_opt = GraphToGPU(local_optimizers, local_optimizers_map)
toposort_timing = prof1[1] + prof2[1]
time_opts = merge_dict(prof1[2], prof2[2])
node_created = merge_dict(prof1[3], prof2[3])
process_count = merge_dict(prof1[4], prof2[4])
return (new_opt,
toposort_timing,
time_opts,
node_created,
process_count)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print("%s%s (%i)" % (
(' ' * level), self.__class__.__name__, id(self)), file=stream)
if depth != 0:
map_values = []
for opts in self.local_optimizers_map.values():
map_values += opts
for opt in self.local_optimizers_all + map_values:
opt.print_summary(stream, level=(level + 2), depth=(depth - 1))
@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
other = node.inputs[0].owner.inputs[0]
if node.op.context_name == other.type.context_name:
return [other]
else:
return [GpuToGpu(node.op.context_name)(other)]
# ? -> gpua -> host
elif (isinstance(node.op, HostFromGpu) and
node.inputs[0].owner):
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [n2.inputs[0]]
# gpub ->
if isinstance(n2.op, GpuToGpu):
return [n2.inputs[0].transfer('cpu')]
# ? -> gpua -> gpub
elif isinstance(node.op, GpuToGpu):
# Transfer within same context
if node.inputs[0].type.context_name == node.op.context_name:
return [node.inputs[0]]
if node.inputs[0].owner:
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [as_gpuarray_variable(n2.inputs[0],
node.op.context_name)]
# gpuc ->
if isinstance(n2.op, GpuToGpu):
if node.op.context_name == n2.inputs[0].type.context_name:
return [n2.inputs[0]]
else:
return [node.op(n2.inputs[0])]
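# Examples of the transfer chains collapsed above (schematic):
#     gpu_from_host(host_from_gpu(x)) -> x                 (same context)
#     gpu_from_host(host_from_gpu(x)) -> gpu_to_gpu(x)     (other context)
#     host_from_gpu(gpu_from_host(x)) -> x
#     host_from_gpu(gpu_to_gpu(x))    -> x.transfer('cpu')
#     gpu_to_gpu within one context   -> the input itself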
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
tensor.opt.constant_folding,
'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpua_alloc2(node):
"""
Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
try:
get_context(None)
except ContextNotDefined:
# If there is no default context then we do not perform the move here.
return
if (isinstance(node.op, tensor.Alloc) and
all(c != 'output' and
isinstance(c.op, tensor.Join) and
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients)):
return [GpuAlloc(None)(*node.inputs).transfer('cpu')]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
@register_opt2([tensor.Alloc], 'fast_compile')
def local_gpuaalloc(op, context_name, inputs, outputs):
return GpuAlloc(context_name)(*inputs)
@register_opt('fast_compile')
@op_lifter([tensor.AllocEmpty])
@register_opt2([tensor.AllocEmpty], 'fast_compile')
def local_gpua_alloc_empty(op, context_name, inputs, outputs):
# We use _props_dict() to make sure that the GPU op knows all the
# CPU op props.
return GpuAllocEmpty(context_name=context_name, **op._props_dict())(*inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, GpuArrayConstant) and
inp.data.size == 1 and
(np.asarray(inp.data) == 0).all()):
new_op = GpuAlloc(node.op.context_name, memset_0=True)
return [new_op(*node.inputs)]
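# Illustrative effect of the optimization above: GpuAlloc(ctx)(0., s0, s1)
# becomes GpuAlloc(ctx, memset_0=True)(0., s0, s1) when the fill value is a
# size-1 constant equal to zero, presumably letting the allocation be filled
# with a plain memset instead of a generic fill kernel (shapes hypothetical).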
# Don't register by default.
@gof.local_optimizer([GpuAllocEmpty])
def local_gpua_alloc_empty_to_zeros(node):
if isinstance(node.op, GpuAllocEmpty):
context_name = infer_context_name(*node.inputs)
z = np.asarray(0, dtype=node.outputs[0].dtype)
return [GpuAlloc(context_name)(as_gpuarray_variable(z, context_name),
*node.inputs)]
optdb.register('local_gpua_alloc_empty_to_zeros',
theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros),
# After move to gpu and merge2, before inplace.
49.3,
'alloc_empty_to_zeros',)
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.extra_ops.CpuContiguous])
@register_opt2([tensor.extra_ops.CpuContiguous], 'fast_compile')
def local_gpua_contiguous(op, context_name, inputs, outputs):
return gpu_contiguous
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
@register_opt2([tensor.Reshape], 'fast_compile')
def local_gpua_reshape(op, context_name, inputs, outputs):
res = GpuReshape(op.ndim)
return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
@register_opt2([tensor.Rebroadcast], 'fast_compile')
def local_gpua_rebroadcast(op, context_name, inputs, outputs):
return op(as_gpuarray_variable(inputs[0], context_name))
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
@register_opt2([tensor.Flatten], 'fast_compile')
def local_gpua_flatten(op, context_name, inputs, outputs):
shp = []
if op.outdim != 1:
shp = [inputs[0].shape[i] for i in range(op.outdim - 1)]
shp += [-1]
res = GpuReshape(op.outdim)
o = res(inputs[0], theano.tensor.as_tensor_variable(shp))
return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
@register_opt2([tensor.Elemwise], 'fast_compile')
def local_gpua_elemwise(op, context_name, inputs, outputs):
scal_op = op.scalar_op
name = op.name
if name:
name = 'Gpu' + name
if len(outputs) > 1:
return
have_cuda = False
have_opencl = False
if inputs and isinstance(inputs[0].type, GpuArrayType):
kind = inputs[0].type.context.kind
if kind.startswith(b'opencl'):
have_opencl = True
elif kind.startswith(b'cuda'):
have_cuda = True
convert = {Erfinv: gpu_erfinv,
Erfcinv: gpu_erfcinv}
if scal_op.__class__ in convert:
scal_op = convert[scal_op.__class__]
if have_opencl:
_logger.warning(
'Function "%s" is not supported with OpenCL. Use "device=cuda" instead.' %
scal_op)
if not have_cuda:
return None
res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec)
# If the elemwise operation is a pow, casts might be required on the
# inputs and or outputs because only the (float, float)->float and
# (double, double)->double cases are implemented at the moment.
if isinstance(op.scalar_op, Pow):
# Only transfer the computation on the gpu if the output dtype is
# floating point. Else, give up on the transfer to the gpu.
out_dtype = outputs[0].dtype
if out_dtype not in ['float16', 'float32', 'float64']:
return
# Transfer the inputs on the GPU and cast them to the right dtype.
new_inputs = []
for inp in inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))
else:
new_inputs.append(as_gpuarray_variable(inp, context_name))
# Perform the exponent on the gpu and transfer the output back to the
# cpu.
gpu_output = res(*new_inputs)
return [gpu_output]
elif op.scalar_op in (scalar.add, scalar.mul):
try:
return [split_inputs(inputs, max_inputs_to_GpuElemwise(outputs), res)]
except ValueError:
return False
else:
return res
def split_inputs(inputs, max_nb_inputs, op):
"""
For some ops, such as add and mul, a large number of inputs can make nvcc
fail to compile our generated code. We don't want nodes in the graph that
can't execute, as this breaks DebugMode.
This should not happen for other GpuElemwise ops, as only the fusion
optimization can generate ops with too many inputs, and it checks for that.
Parameters
----------
inputs: List of theano variables.
List of inputs to node.
max_nb_inputs: int
Maximum number of inputs the node can handle without
compilation fail.
op : Theano operator instance.
Operator that should be used to rebuild the computation graph with smaller
number of inputs per node.
"""
if max_nb_inputs <= 1 and len(inputs) > 1:
raise ValueError("Can not split nodes because inputs' dimensionality and/or"
" number of outputs is too large")
while len(inputs) > max_nb_inputs:
inner_ops = []
for i in range(0, len(inputs), max_nb_inputs):
inner_ops.append(op(*inputs[i: i + max_nb_inputs]))
inputs = inner_ops
return op(*inputs)
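# Worked example for split_inputs (illustrative numbers): with 7 inputs and
# max_nb_inputs=3, the first pass builds op(i0, i1, i2), op(i3, i4, i5) and
# op(i6), leaving 3 intermediate results, and the final call returns
# op(op(i0, i1, i2), op(i3, i4, i5), op(i6)).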
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
# 48.5 move to gpu
# 48.6 specialize
# 49 cpu fusion
# 49.5 add destroy handler
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 49,
'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.InplaceElemwiseOptimizer(
GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
register_opt(tensor.opt.local_useless_elemwise)
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
@register_opt2([tensor.DimShuffle], 'fast_compile')
def local_gpua_dimshuffle(op, context_name, inputs, outputs):
return GpuDimShuffle(op.input_broadcastable,
op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape(op, context_name, inputs, outputs):
if isinstance(inputs[0].type, GpuArrayType):
return
return local_gpua_specifyShape_graph(op, context_name, inputs, outputs)
@register_opt2([tensor.SpecifyShape], 'fast_compile')
def local_gpua_specifyShape_graph(op, context_name, inputs, outputs):
inp = [as_gpuarray_variable(inputs[0], context_name)]
inp += inputs[1:]
return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(op, context_name, inputs, outputs):
# op_lifter will call this opt too frequently as the output is
# always on the CPU.
if isinstance(inputs[0].type, GpuArrayType):
return
return local_gpua_shape_graph(op, context_name, inputs, outputs)
@register_opt2([theano.compile.ops.Shape], 'fast_compile')
def local_gpua_shape_graph(op, context_name, inputs, outputs):
return [as_gpuarray_variable(inputs[0], context_name).shape]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, np.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
@register_opt2([tensor.printing.Print], 'fast_compile')
def local_gpua_print_op(op, context_name, inputs, outputs):
x, = inputs
gpu_x = as_gpuarray_variable(x, context_name=context_name)
new_op = op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = op
return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
# Go through the monitored variables, only transferring to the GPU those
# for which the input comes from the GPU or the output will be
# transferred to the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i + 1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = False
for c in out.clients:
if c == 'output':
continue
if isinstance(c[0].op, GpuFromHost):
output_goes_to_gpu = True
context_name = c[0].op.context_name
break
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
# The input should be transferred to the gpu
new_inputs.append(as_gpuarray_variable(inp, context_name))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
# Only continue the optimization if at least one input has been
# transferred to the gpu
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(new_op_outputs[i].transfer('cpu'))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
@register_opt('fast_compile')
@op_lifter([IfElse])
@register_opt2([IfElse], 'fast_compile')
def local_gpua_lazy_ifelse(op, context_name, inputs, outputs):
if op.gpu:
return
c = inputs[0]
inps = []
falses = []
# ifelse needs the corresponding true/false input variables to be of the same type.
# But we can't rely on the inputs to respect that, as GraphToGPU doesn't enforce it.
# So we need to take care of this here.
for v1, v2 in zip(inputs[1:1 + op.n_outs], inputs[1 + op.n_outs:]):
if ((isinstance(v1.type, tensor.TensorType) and move_to_gpu(v1)) or
isinstance(v1.type, GpuArrayType) or
isinstance(v2.type, GpuArrayType)):
inps.append(as_gpuarray_variable(v1, context_name))
falses.append(as_gpuarray_variable(v2, context_name))
else:
inps.append(v1)
falses.append(v2)
inps.extend(falses)
return IfElse(op.n_outs, gpu=True)(c, *inps, return_list=True)
@register_opt('fast_compile')
@op_lifter([tensor.Join])
@register_opt2([tensor.Join], 'fast_compile')
def local_gpua_join(op, context_name, inputs, outputs):
return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpua_join_1(node):
# join of a single element
if (isinstance(node.op, GpuJoin) and
len(node.inputs) == 2):
return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
@register_opt2([tensor.Split], 'fast_compile')
def local_gpua_split(op, context_name, inputs, outputs):
# TODO use props
return GpuSplit(op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(op, context_name, inputs, outputs):
x = inputs[0]
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
for v in n.inputs + n.outputs])
for n, _ in outputs[0].clients]):
return
else:
return [gpu_x.owner.op(outputs[0]).transfer('cpu')]
return GpuSubtensor(op.idx_list)
@register_opt2([tensor.Subtensor], 'fast_compile')
def local_gpua_subtensor_graph(op, context_name, inputs, outputs):
# We need different code than local_gpua_subtensor, as the condition
# differs because the inputs aren't the same.
x = inputs[0]
# We don't want to move the subtensor to the GPU if the input is
# on the CPU and the only client of the CPU node is this
# subtensor. This allows for a smaller transfer.
if (x.owner and isinstance(x.owner.op, GpuFromHost)):
cpu_x = x.owner.inputs[0]
# And it is a shared var or an input of the graph,
# and it is used by only 1 node.
# x is in the new graph, so we can't test its number of clients.
if not cpu_x.owner and len(cpu_x.clients) == 1:
c = outputs[0].clients
# If the subtensor has only 1 client, do it on the CPU.
# We let the other optimizations take care of whether to move
# the next node or not.
if len(c) == 1:
return
return GpuSubtensor(op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
@register_opt2([tensor.IncSubtensor], 'fast_compile')
def local_gpua_inc_subtensor(op, context_name, inputs, outputs):
op = GpuIncSubtensor(op.idx_list, op.inplace,
op.set_instead_of_inc,
op.destroyhandler_tolerate_aliased)
ret = op(*inputs)
val = getattr(outputs[0].tag, 'nan_guard_mode_check', True)
ret.tag.nan_guard_mode_check = val
return ret
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
@register_opt2([tensor.AdvancedSubtensor1], 'fast_compile')
def local_gpua_advanced_subtensor1(op, context_name, inputs, outputs):
return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor])
@register_opt2([tensor.AdvancedSubtensor], 'fast_compile')
def local_gpua_advanced_subtensor(op, context_name, inputs, outputs):
return GpuAdvancedSubtensor()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
@register_opt2([tensor.AdvancedIncSubtensor1], 'fast_compile')
def local_gpua_advanced_incsubtensor(op, context_name, inputs, outputs):
context = get_context(context_name)
# This is disabled on non-cuda contexts
if context.kind != b'cuda':
return None
x, y, ilist = inputs
set_instead_of_inc = op.set_instead_of_inc
compute_capability = int(context.bin_id[-2])
if compute_capability >= 2 and x.ndim == 1 and y.ndim == 0:
x = x.dimshuffle(0, 'x')
y = y.dimshuffle('x', 'x')
ret = GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)
ret = GpuDimShuffle(ret.type.broadcastable, [0])(ret)
return ret
elif compute_capability < 2 or x.ndim != 2 or y.ndim != 2:
return GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)
else:
return GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)
@register_inplace()
@local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20])
def local_advincsub1_gpua_inplace(node):
if isinstance(node.op, (GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)):
if not node.op.inplace:
return [node.op.clone_inplace()(*node.inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
@register_opt2([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod], 'fast_compile')
def local_gpua_careduce(op, context_name, inputs, outputs):
if isinstance(op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
if ctx.kind == b'opencl':
op2 = GpuCAReduceCPY
if op.scalar_op not in [scalar.add, scalar.mul]:
# We don't yet support all reductions with the cpy code.
return
elif ctx.kind == b'cuda':
op2 = GpuCAReduceCuda
else:
return False
x, = inputs
greduce = op2(
op.scalar_op, axis=op.axis,
dtype=getattr(op, 'dtype', outputs[0].dtype),
acc_dtype=getattr(op, 'acc_dtype', None))
gvar = greduce(x)
# We need to have the make node called, otherwise the mask can
# be None
if (op2 is GpuCAReduceCPY or
gvar.owner.op.supports_c_code([
as_gpuarray_variable(x, context_name)])):
return greduce
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
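# For example (illustrative shapes): reducing a 4d tensor over axis=(1, 2)
# gives reduce_mask [0, 1, 1, 0]; the two adjacent reduced dimensions are
# merged, so x is reshaped to (d0, d1*d2, d3) and reduced with new_mask
# [0, 1, 0] (new_axis=[1]), which already yields the expected (d0, d3)
# output. When adjacent *non*-reduced dimensions get merged instead, the
# reshape at the end of this branch restores the original output shape.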
if op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
new_in_shp = [shape_i(x, 0)]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= shape_i(x, i)
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(shape_i(x, i))
new_axis = []
for idx, m in enumerate(new_mask):
if m == 1:
new_axis.append(idx)
greduce = op2(
op.scalar_op,
axis=new_axis, reduce_mask=new_mask,
dtype=getattr(op, 'dtype', outputs[0].dtype),
acc_dtype=getattr(op, 'acc_dtype', None))
reshaped_x = x.reshape(tensor.stack(new_in_shp))
gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name)
gvar = greduce(gpu_reshaped_x)
# We need to have the make node called, otherwise the mask can
# be None
reshaped_gpu_inputs = [gpu_reshaped_x]
if greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = greduce(gpu_reshaped_x)
if reduce_reshaped_x.ndim != outputs[0].ndim:
out_shp = []
for i in range(x.ndim):
if i not in op.axis:
out_shp.append(shape_i(x, i))
unreshaped_reduce = GpuReshape(len(out_shp))(reduce_reshaped_x,
tensor.stack(out_shp))
else:
unreshaped_reduce = reduce_reshaped_x
return [unreshaped_reduce]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
@register_opt2([tensor.blas.Gemv], 'fast_compile')
def local_gpua_gemv(op, context_name, inputs, outputs):
if inputs[0].dtype == 'float16':
# Use the gemm implementation, as cublas gemv doesn't support float16
return gpugemm_no_inplace(inputs[0][:, None],
inputs[1],
inputs[2],
inputs[3][:, None],
inputs[4]).dimshuffle(0)
if inputs[0].dtype not in ['float32', 'float64']:
return
if op.inplace:
return gpugemv_inplace
else:
return gpugemv_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
@register_opt2([tensor.blas.Gemm], 'fast_compile')
def local_gpua_gemm(op, context_name, inputs, outputs):
if inputs[0].dtype not in ['float16', 'float32', 'float64']:
return
if op.inplace:
return gpugemm_inplace
else:
return gpugemm_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.BatchedDot])
@register_opt2([tensor.blas.BatchedDot], 'fast_compile')
def local_gpua_gemmbatch(op, context_name, inputs, outputs):
if inputs[0].dtype not in ['float32', 'float64']:
return
a, b = inputs
# Since GpuGemmBatch only supports 3D inputs and output,
# we need to add broadcastable dims to the inputs, and drop
# them from outputs
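# E.g. (illustrative): a 2d left operand of shape (b, k) is viewed as
# (b, 1, k) and a 2d right operand of shape (b, k) as (b, k, 1); the
# corresponding unit dimension is then dropped again from the 3d result
# via the output_dims bookkeeping below.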
output_dims = [0, 1, 2]
if a.ndim == 2:
a = GpuDimShuffle(a.broadcastable, (0, 'x', 1))(a)
del output_dims[1]
if b.ndim == 2:
b = GpuDimShuffle(b.broadcastable, (0, 1, 'x'))(b)
del output_dims[-1]
# In case of mismatched dtypes, we also have to upcast
out_dtype = outputs[0].dtype
if a.dtype != out_dtype or b.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
if a.dtype != out_dtype:
a = gpu_cast_op(a)
if b.dtype != out_dtype:
b = gpu_cast_op(b)
c = tensor.AllocEmpty(out_dtype)(a.shape[0], a.shape[1], b.shape[2])
out = gpugemmbatch_no_inplace(c, np.asarray(1.0, dtype=out_dtype),
a, b, np.asarray(0.0, dtype=out_dtype))
if len(output_dims) != 3:
out = GpuDimShuffle(out.broadcastable, output_dims)(out)
return out
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpua_gemm_alpha_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemm_output_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4)
def local_gpua_gemmbatch_alpha_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0)
def local_gpua_gemmbatch_output_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
@register_opt2([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer], 'fast_compile')
def local_gpua_ger(op, context_name, inputs, outputs):
if inputs[0].dtype not in ['float32', 'float64']:
return
return GpuGer(inplace=op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
@register_opt2([tensor.blas.Dot22], 'fast_compile')
def local_gpua_dot22(op, context_name, inputs, outputs):
return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22Scalar])
@register_opt2([tensor.blas.Dot22Scalar], 'fast_compile')
def local_gpua_dot22scalar(op, context_name, inputs, outputs):
x, y, a = inputs
x = as_gpuarray_variable(x, context_name)
y = as_gpuarray_variable(y, context_name)
z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1])
return [gpugemm_no_inplace(z, a, x, y, 0)]
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
@register_opt2([tensor.basic.Eye], 'fast_compile')
def local_gpua_eye(op, context_name, inputs, outputs):
return GpuEye(dtype=op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True)
@register_opt2([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], 'fast_compile')
def local_gpua_crossentropysoftmaxargmax1hotwithbias(op, context_name, inputs, outputs):
return gpu_crossentropy_softmax_argmax_1hot_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True)
@register_opt2([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], 'fast_compile')
def local_gpua_crossentropysoftmax1hotwithbiasdx(op, context_name, inputs, outputs):
return gpu_crossentropy_softmax_1hot_with_bias_dx
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax], cuda_only=True)
@register_opt2([tensor.nnet.Softmax], 'fast_compile')
def local_gpua_softmax(op, context_name, inputs, outputs):
return gpu_softmax
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True)
@register_opt2([tensor.nnet.SoftmaxWithBias], 'fast_compile')
def local_gpua_softmaxwithbias(op, context_name, inputs, outputs):
return gpu_softmax_with_bias
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_gpua_assert(op, context_name, inputs, outputs):
if isinstance(inputs[0].type, GpuArrayType):
return
return local_gpua_assert_graph(op, context_name, inputs, outputs)
@register_opt2([theano.tensor.opt.Assert], 'fast_compile')
def local_gpua_assert_graph(op, context_name, inputs, outputs):
return [op(as_gpuarray_variable(inputs[0], context_name),
*inputs[1:])]
@register_opt('fast_compile')
@op_lifter([ConvOp])
@register_opt2([ConvOp], 'fast_compile')
def local_gpua_error_convop(op, context_name, inputs, outputs):
assert False, """
ConvOp does not work with the gpuarray backend.
Use the new convolution interface to have GPU convolution working:
theano.tensor.nnet.conv2d()
"""
@register_opt('fast_compile')
@op_lifter([SparseBlockGemv])
@register_opt2([SparseBlockGemv], 'fast_compile')
def local_gpua_sparseblockgemv(op, context_name, inputs, outputs):
if inputs[0].dtype == 'float16':
return
if op.inplace:
return gpu_sparse_block_gemv_inplace
else:
return gpu_sparse_block_gemv
@register_opt('fast_compile')
@op_lifter([SparseBlockOuter])
@register_opt2([SparseBlockOuter], 'fast_compile')
def local_gpua_sparseblockouter(op, context_name, inputs, outputs):
if inputs[0].dtype == 'float16':
return
if op.inplace:
return gpu_sparse_block_outer_inplace
else:
return gpu_sparse_block_outer
@register_inplace()
@local_optimizer([GpuSparseBlockGemv], inplace=True)
def local_inplace_sparseblockgemv(node):
if isinstance(node.op, GpuSparseBlockGemv) and not node.op.inplace:
return [gpu_sparse_block_gemv_inplace(*node.inputs)]
@register_inplace()
@local_optimizer([GpuSparseBlockOuter], inplace=True)
def local_inplace_sparseblockouter(node):
if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace:
return [GpuSparseBlockOuter(inplace=True)(*node.inputs)]
# Move to Gpu optimization
@local_optimizer([GpuFromHost,
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs])
def local_conv_gpu_conv(node):
"""
gpu_from_host(AbstractConv) -> AbstractConv(gpu_from_host)
AbstractConv(host_from_gpu) -> host_from_gpu(AbstractConv)
"""
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if host_input.owner and isinstance(host_input.owner.op,
BaseAbstractConv):
conv = host_input.owner.op
inps = list(host_input.owner.inputs)
ctx = infer_context_name(*inps)
inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
out = conv(*inps)
# out is on the GPU because both inputs are.
out = theano.tensor.patternbroadcast(out,
node.outputs[0].broadcastable)
return [out]
if isinstance(node.op, BaseAbstractConv):
# conv(host_from_gpu) -> host_from_gpu(gpu_conv)
inp1 = node.inputs[0]
inp2 = node.inputs[1]
if ((isinstance(inp1.type, GpuArrayType) and
isinstance(inp2.type, GpuArrayType))):
# Both inputs are already directly on the GPU, nothing to do
return
inp1_on_gpu = (isinstance(inp1.type, GpuArrayType) or
(inp1.owner and isinstance(inp1.owner.op, HostFromGpu)))
inp2_on_gpu = (isinstance(inp2.type, GpuArrayType) or
(inp2.owner and isinstance(inp2.owner.op, HostFromGpu)))
if inp1_on_gpu or inp2_on_gpu:
conv = node.op
inps = list(node.inputs)
ctx = infer_context_name(*inps)
inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)
inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)
out = conv(*inps)
# out is on the GPU because both inputs are.
out = theano.tensor.patternbroadcast(
out,
node.outputs[0].broadcastable)
# If the original output was on CPU, we have to transfer it
if isinstance(node.outputs[0].type, tensor.TensorType):
return [tensor.as_tensor_variable(out)]
else:
return [out]
register_opt()(local_conv_gpu_conv)
# CorrMM opt
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm(node):
if not isinstance(node.op, AbstractConv2d):
return None
img, kern = node.inputs
if (not isinstance(img.type, GpuArrayType) or
not isinstance(kern.type, GpuArrayType)):
return None
ctx = infer_context_name(img, kern)
border_mode = node.op.border_mode
subsample = node.op.subsample
filter_dilation = node.op.filter_dilation
if ((border_mode == 'full') and (subsample == (1, 1))):
if not node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
# need to dimshuffle the kernel for full convolution
kern = kern.dimshuffle(1, 0, 2, 3)
# call GpuCorrMM_gradInputs
rval = GpuCorrMM_gradInputs('valid',
subsample,
filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(img))
else:
# need to flip the kernel if necessary
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
# By default use GpuCorrMM
rval = GpuCorrMM(border_mode,
subsample,
filter_dilation)(gpu_contiguous(img),
gpu_contiguous(kern))
# call GpuCorrMM_gradWeights if good
# (the latter is faster if batchsize * kernelHeight * kernelWidth
# is larger than inputChannels * outputHeight * outputWidth.
# GpuConv does not always store information on the batchsize and
# channels, though, so we only use what information we have.)
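# Illustrative numbers for the heuristic below: with kshp=(5, 5) and
# imshp=(256, 3, 8, 8), prod1 = 5*5*256 = 6400 and
# prod2 = (8-5+1)*(8-5+1)*3 = 48, so the gradWeights-based code path is
# preferred; with small kernels on large images the plain GpuCorrMM result
# computed above is kept instead.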
if ((subsample == (1, 1)) and (filter_dilation == (1, 1)) and
(node.op.imshp is not None) and
(None not in node.op.imshp[-2:]) and
(node.op.kshp is not None) and
(None not in node.op.kshp) and
border_mode != "half"):
# we know the kernel and output size
prod1 = node.op.kshp[0] * node.op.kshp[1]
prod2 = ((node.op.imshp[-2] - node.op.kshp[0] + 1) *
(node.op.imshp[-1] - node.op.kshp[1] + 1))
if (None not in node.op.imshp[:1]):
# we also know batchsize and input channels
prod1 *= node.op.imshp[0]
prod2 *= node.op.imshp[1]
# compare to decide
if prod1 > prod2:
rval = GpuCorrMM_gradWeights(border_mode,
subsample,
filter_dilation)(
gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),
gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))
# (we need to wrap the result in as_gpuarray_variable,
# because we are not allowed to replace a GpuArray with
# a DimShuffle instance in a graph optimization)
rval = as_gpuarray_variable(
rval.dimshuffle(1, 0, 2, 3),
context_name=ctx)
return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_gemm(node):
if not isinstance(node.op, AbstractConv3d):
return None
img, kern = node.inputs
if (not isinstance(img.type, GpuArrayType) or
not isinstance(kern.type, GpuArrayType)):
return None
ctx = infer_context_name(img, kern)
border_mode = node.op.border_mode
subsample = node.op.subsample
filter_dilation = node.op.filter_dilation
if ((border_mode == 'full') and (subsample == (1, 1, 1))):
if not node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
# need to dimshuffle the kernel for full convolution
kern = kern.dimshuffle(1, 0, 2, 3, 4)
# call GpuCorr3dMM_gradInputs
rval = GpuCorr3dMM_gradInputs('valid',
subsample,
filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(img))
else:
# need to flip the kernel if necessary
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
# By default use GpuCorr3dMM
rval = GpuCorr3dMM(border_mode,
subsample,
filter_dilation)(gpu_contiguous(img),
gpu_contiguous(kern))
# call GpuCorr3dMM_gradWeights if good
# (the latter is faster if batchsize * kernelHeight * kernelWidth * kernelDepth
# is larger than inputChannels * outputHeight * outputWidth * outputDepth.
# GpuConv does not always store information on the batchsize and
# channels, though, so we only use what information we have.)
if ((subsample == (1, 1, 1)) and (filter_dilation == (1, 1, 1)) and
(node.op.imshp is not None) and
(None not in node.op.imshp[-3:]) and
(node.op.kshp is not None) and
(None not in node.op.kshp) and
border_mode != "half"):
# we know the kernel and output size
prod1 = node.op.kshp[0] * node.op.kshp[1] * node.op.kshp[2]
prod2 = ((node.op.imshp[-3] - node.op.kshp[0] + 1) *
(node.op.imshp[-2] - node.op.kshp[1] + 1) *
(node.op.imshp[-1] - node.op.kshp[2] + 1))
if (None not in node.op.imshp[:1]):
# we also know batchsize and input channels
prod1 *= node.op.imshp[0]
prod2 *= node.op.imshp[1]
# compare to decide
if prod1 > prod2:
rval = GpuCorr3dMM_gradWeights(border_mode,
subsample,
filter_dilation)(
gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),
gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))
# (we need to wrap the result in as_gpuarray_variable,
# because we are not allowed to replace a GpuArray with
# a DimShuffle instance in a graph optimization)
rval = as_gpuarray_variable(
rval.dimshuffle(1, 0, 2, 3, 4),
context_name=ctx)
return [rval]
@local_optimizer([AbstractConv2d_gradWeights])
def local_abstractconv_gradweights_gemm(node):
if not isinstance(node.op, AbstractConv2d_gradWeights):
return None
img, topgrad, shape = node.inputs
if not isinstance(img.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
ctx = infer_context_name(img, topgrad)
rval = GpuCorrMM_gradWeights(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(img), gpu_contiguous(topgrad), shape)
if node.op.filter_flip:
rval = rval[:, :, ::-1, ::-1]
rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
rval = as_gpuarray_variable(rval, context_name=ctx)
return [rval]
@local_optimizer([AbstractConv3d_gradWeights])
def local_abstractconv3d_gradweights_gemm(node):
if not isinstance(node.op, AbstractConv3d_gradWeights):
return None
img, topgrad, shape = node.inputs
if not isinstance(img.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
ctx = infer_context_name(img, topgrad)
rval = GpuCorr3dMM_gradWeights(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(img), gpu_contiguous(topgrad), shape)
if node.op.filter_flip:
rval = rval[:, :, ::-1, ::-1, ::-1]
rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
rval = as_gpuarray_variable(rval, context_name=ctx)
return [rval]
@local_optimizer([AbstractConv2d_gradInputs])
def local_abstractconv_gradinputs_gemm(node):
if not isinstance(node.op, AbstractConv2d_gradInputs):
return None
kern, topgrad, shape = node.inputs
if not isinstance(kern.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1]
rval = GpuCorrMM_gradInputs(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
return [rval]
@local_optimizer([AbstractConv3d_gradInputs])
def local_abstractconv3d_gradinputs_gemm(node):
if not isinstance(node.op, AbstractConv3d_gradInputs):
return None
kern, topgrad, shape = node.inputs
if not isinstance(kern.type, GpuArrayType) or \
not isinstance(topgrad.type, GpuArrayType):
return None
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
rval = GpuCorr3dMM_gradInputs(border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation)(
gpu_contiguous(kern), gpu_contiguous(topgrad), shape)
return [rval]
# This deals with any abstract convs that have a transfer somewhere
@register_opt('fast_compile', 'conv_dnn', 'cudnn')
@op_lifter([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs])
def local_gpua_abstractconv(op, context_name, inputs, outputs):
if isinstance(outputs[0].type, GpuArrayType):
# Don't handle this node here, it's already on the GPU.
return
return local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs)
@register_opt2([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs], 'fast_compile', 'conv_dnn', 'cudnn')
def local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs):
inps = list(inputs)
inps[0] = as_gpuarray_variable(inputs[0],
context_name=context_name)
inps[1] = as_gpuarray_variable(inputs[1],
context_name=context_name)
return [op(*inps)]
def local_gpu_pool(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
op = GpuPool(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
ret_padded = op(inp_padded, ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
pool_db = LocalGroupDB()
pool_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
pool_db2.__name__ = "pool_db2"
lifter = op_lifter([pool.Pool])(local_gpu_pool)
pool_db.register("local_gpu_pool", lifter,
'gpuarray', 'fast_compile', 'fast_run',
position=1)
pool_db2.register("local_gpu_pool",
local_optimizer([pool.Pool])(local_gpu_pool),
'gpuarray', 'fast_compile', 'fast_run',
position=1)
register_opt('fast_compile', name='pool_db')(pool_db)
register_opt2([pool.Pool], 'fast_compile', name='pool_db2')(pool_db2)
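# The pattern above (and for the pool gradients below) registers each pool
# lifter twice: once in pool_db for the node-by-node lifting pass, wrapped
# with op_lifter, and once in pool_db2 for the GraphToGPU pass, wrapped with
# local_optimizer, so both optimization pipelines pick up the same rewrites.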
def local_gpu_max_pool_grad(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, out, out_grad, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
op = GpuMaxPoolGrad(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, out, out_grad, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
out_padded = pad_dims(out, 2, nd)
out_grad_padded = pad_dims(out_grad, 2, nd)
ret_padded = op(inp_padded, out_padded, out_grad_padded,
ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
lifter = op_lifter([pool.MaxPoolGrad])(local_gpu_max_pool_grad)
pool_db.register("local_gpu_max_pool_grad", lifter,
'gpuarray', 'fast_compile', 'fast_run',
position=1)
pool_db2.register("local_gpu_max_pool_grad",
local_optimizer([pool.MaxPoolGrad])(local_gpu_max_pool_grad),
'gpuarray', 'fast_compile', 'fast_run',
position=1)
def local_gpu_average_pool_grad(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, out_grad, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
op = GpuAveragePoolGrad(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, out_grad, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
out_grad_padded = pad_dims(out_grad, 2, nd)
ret_padded = op(inp_padded, out_grad_padded,
ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
lifter = op_lifter([pool.AveragePoolGrad])(local_gpu_average_pool_grad)
pool_db.register("local_gpu_average_pool_grad", lifter,
'gpuarray', 'fast_compile', 'fast_run',
position=1)
pool_db2.register("local_gpu_average_pool_grad",
local_optimizer([pool.AveragePoolGrad])(local_gpu_average_pool_grad),
'gpuarray', 'fast_compile', 'fast_run',
position=1)
@register_opt()
@op_lifter([pool.DownsampleFactorMaxGradGrad])
@register_opt2([pool.DownsampleFactorMaxGradGrad])
def local_gpu_downsample_factor_max_grad_grad(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, out, out_grad, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))
out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))
op = GpuDownsampleFactorMaxGradGrad(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, out, out_grad, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
out_padded = pad_dims(out, 2, nd)
out_grad_padded = pad_dims(out_grad, 2, nd)
ret_padded = op(inp_padded, out_padded, out_grad_padded,
ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
@register_opt()
@op_lifter([pool.MaxPoolRop])
@register_opt2([pool.MaxPoolRop])
def local_gpu_max_pool_rop(op, ctx_name, inputs, outputs):
assert op.__props__ == ('ignore_border', 'mode', 'ndim')
inp, eval_inp, ws, stride, pad = inputs
nd = op.ndim
if nd not in (2, 3):
return
inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))
eval_inp = gpu_contiguous(as_gpuarray_variable(eval_inp, ctx_name))
op = GpuMaxPoolRop(op.ignore_border, op.mode, op.ndim)
if inp.ndim == nd + 2:
return op(inp, eval_inp, ws, stride, pad)
else:
# reshape to 4D or 5D with 2 non-pooling dimensions
inp_padded = pad_dims(inp, 2, nd)
eval_inp_padded = pad_dims(eval_inp, 2, nd)
ret_padded = op(inp_padded, eval_inp_padded, ws, stride, pad)
return unpad_dims(ret_padded, inp, 2, nd)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
"""
Merge some GpuCAReduceCuda and GpuElemwise.
"""
if (isinstance(node.op, GpuCAReduceCuda) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
# The Op supports all scalar ops with 1 input. We don't
# automatically add more cases, as some, like trigonometric
# operations with certain reduction patterns, would probably result
# in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduceCuda(scalar_op=op.scalar_op,
axis=op.axis,
reduce_mask=op.reduce_mask,
pre_scalar_op=scalar.basic.sqr)(inp)]
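# E.g. (schematic): GpuCAReduceCuda{add}(GpuElemwise{sqr}(x)) becomes
# GpuCAReduceCuda{add, pre_scalar_op=sqr}(x), so the squaring is folded into
# the reduction kernel instead of materialising the squared tensor first.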
@local_optimizer(None)
def local_assert_no_cpu_op(node):
if (all([var.owner and isinstance(var.owner.op, HostFromGpu)
for var in node.inputs]) and
any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
for var in node.outputs])):
if config.assert_no_cpu_op == "warn":
_logger.warning(("CPU Op %s is detected in the computation "
"graph") % node)
elif config.assert_no_cpu_op == "raise":
raise AssertionError("The Op %s is on CPU." % node)
elif config.assert_no_cpu_op == "pdb":
pdb.set_trace()
# Register the local_assert_no_cpu_op:
assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,
name='assert_no_cpu_op')
# 49.2 is after device specialization & fusion optimizations for last transfers
optdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2,
'assert_no_cpu_op')
def tensor_to_gpu(x, context_name):
if isinstance(x.type, tensor.TensorType):
y = GpuArrayType(broadcastable=x.type.broadcastable,
context_name=context_name,
dtype=x.type.dtype)()
if x.name:
y.name = x.name + '[Gpua]'
return y
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
type, but with a different name (old name + tag). This function is used
by gradient, or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
Different interface to clone, that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
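# Minimal illustration (hypothetical variables) of the helper above:
#
#     ins = [tensor.vector('x')]
#     outs = [ins[0] * 2]
#     new_ins, new_outs = gpu_reconstruct_graph(ins, outs, tag='_copy')
#
# new_ins[0] is a fresh vector named 'x_copy' and new_outs recomputes the
# same expression on it, leaving the original graph untouched.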
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
@register_opt2([scan_op.Scan], 'fast_compile')
def local_gpua_scan_to_gpua(op, context_name, inputs, outputs):
info = copy.deepcopy(op.info)
if info.get('gpua', False):
return
info['gpua'] = True
nw_ins = [inputs[0]]
e = (1 +
op.n_seqs +
op.n_mit_mot +
op.n_mit_sot +
op.n_sit_sot +
op.n_shared_outs)
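# The slicing below follows scan's outer-input layout (as assumed here):
# inputs[0] is n_steps; inputs[1:e] are the sequences and the mit_mot /
# mit_sot / sit_sot / shared-output initial states, which are moved to the
# GPU; the next n_nit_sot entries are output lengths and stay on the CPU;
# the remaining non-sequence inputs are moved to the GPU as well.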
nw_ins += [safe_to_gpu(x, context_name) for x in inputs[1:e]]
b = e
e = e + op.n_nit_sot
nw_ins += inputs[b:e]
nw_ins += [safe_to_gpu(x, context_name) for x in inputs[e:]]
scan_ins = [tensor_to_gpu(x, context_name) for x in op.inputs]
# The inner output corresponding to the looping condition should not be
# moved to the gpu
if op.info['as_while']:
scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs[:-1]]
scan_outs += [op.outputs[-1]]
else:
scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
nw_op = scan_op.Scan(scan_ins, scan_outs, info,
typeConstructor=typebuild).make_node(*nw_ins)
return nw_op.outputs
def _scan_type_infer(node):
context_name = infer_context_name(*node.inputs)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
return typebuild
# Add optimization : maxandargmax (CPU -> GPU)
@register_opt('fast_compile')
@op_lifter([tensor.MaxAndArgmax])
@register_opt2([tensor.MaxAndArgmax], 'fast_compile')
def local_gpu_maxandargmax(op, context_name, inputs, outputs):
op = GpuMaxAndArgmax(op.get_params(None))
if inputs[0].dtype == "float16":
# For now it is better to copy/cast on the GPU than to transfer to the CPU
casted_inputs = inputs[0].astype('float32')
ret = op(casted_inputs)
return [ret[0].astype('float16'), ret[1]]
return op
# solve
@register_opt('fast_compile')
@op_lifter([slinalg.Solve])
@register_opt2([theano.tensor.slinalg.Solve], 'fast_compile')
def local_gpu_solve(op, context_name, inputs, outputs):
if not cusolver_available:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
if op.A_structure not in MATRIX_STRUCTURES_SOLVE:
return
op = GpuCusolverSolve(A_structure=op.A_structure)
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32'),
inputs[1].astype('float32')).astype('float16')
return op
@register_inplace()
@local_optimizer([GpuCusolverSolve], inplace=True)
def local_inplace_gpu_solve(node):
if isinstance(node.op, GpuCusolverSolve) and not node.op.inplace:
return [GpuCusolverSolve(A_structure=node.op.A_structure, trans=node.op.trans,
inplace=True)(*node.inputs)]
# Cholesky decomposition
@register_opt('fast_compile')
@op_lifter([slinalg.Cholesky])
@register_opt2([theano.tensor.slinalg.Cholesky], 'fast_compile')
def local_gpu_cholesky(op, context_name, inputs, outputs):
if not cusolver_available:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
op = GpuCholesky(lower=op.lower, inplace=op.destructive)
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32')).astype('float16')
return op
@register_inplace()
@local_optimizer([GpuCholesky], inplace=True)
def local_inplace_cholesky(node):
if isinstance(node.op, GpuCholesky) and not node.op.inplace:
return [GpuCholesky(lower=node.op.lower, inplace=True)(*node.inputs)]
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.MatrixInverse])
@register_opt2([theano.tensor.nlinalg.MatrixInverse], 'magma', 'fast_compile')
def local_gpu_matrix_inverse(op, context_name, inputs, outputs):
if not config.magma.enabled:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
op = GpuMagmaMatrixInverse()
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32')).astype('float16')
return op
@register_inplace()
@local_optimizer([GpuMagmaMatrixInverse])
def local_inplace_matrix_inverse_inplace(node):
if isinstance(node.op, GpuMagmaMatrixInverse):
if not node.op.inplace:
return [node.op.clone_inplace()(*node.inputs)]
@register_opt('magma', 'fast_compile')
@op_lifter([nlinalg.SVD])
@register_opt2([theano.tensor.nlinalg.SVD], 'magma', 'fast_compile')
def local_gpu_svd(op, context_name, inputs, outputs):
if not config.magma.enabled:
return
if inputs[0].dtype not in ['float16', 'float32']:
return
op = GpuMagmaSVD(full_matrices=op.full_matrices,
compute_uv=op.compute_uv)
if inputs[0].dtype == 'float16':
return op(inputs[0].astype('float32')).astype('float16')
return op
# Do not register in fast_run or fast_compile.
# It will be added to fast_run if the GPU is enabled.
optdb.register('gpua_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
gpua_flag=True),
75,
'gpuarray',
'inplace',
'scan')
# Register GPU convolution implementation
# They are tried in a specific order so we can control
# which ones take precedence over others.
abstractconv_groupopt = theano.gof.optdb.LocalGroupDB()
abstractconv_groupopt.__name__ = "gpuarray_abstractconv_opts"
register_opt('fast_compile')(abstractconv_groupopt)
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstractconv_cudnn, local_abstractconv_gw_cudnn,
local_abstractconv_gi_cudnn) # noqa: 402
abstractconv_groupopt.register('local_abstractconv_dnn',
local_abstractconv_cudnn, 20,
'conv_dnn',
'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gw_dnn',
local_abstractconv_gw_cudnn, 20,
'conv_dnn',
'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
abstractconv_groupopt.register('local_abstractconv_gi_dnn',
local_abstractconv_gi_cudnn, 20,
'conv_dnn',
'gpuarray', 'fast_compile', 'fast_run', 'cudnn')
# The GEMM-based convolution comes last to catch all remaining cases.
# It can be disabled by excluding 'conv_gemm'.
abstractconv_groupopt.register('local_abstractconv_gemm', local_abstractconv_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gemm', local_abstractconv3d_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradweights_gemm',
local_abstractconv_gradweights_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradweights_gemm',
local_abstractconv3d_gradweights_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv_gradinputs',
local_abstractconv_gradinputs_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
abstractconv_groupopt.register('local_abstractconv3d_gradinputs',
local_abstractconv3d_gradinputs_gemm, 30,
'conv_gemm',
'gpuarray', 'fast_compile', 'fast_run')
# Register cuDNN batch normalization implementation
# We import these opts here instead of at the top of this file
# to avoid a circular dependency problem with dnn
from .dnn import (local_abstract_batch_norm_train_cudnn,
local_abstract_batch_norm_train_grad_cudnn,
local_abstract_batch_norm_inference_cudnn) # noqa: 402
abstract_batch_norm_groupopt = theano.gof.optdb.LocalGroupDB()
abstract_batch_norm_groupopt.__name__ = "gpuarray_batchnorm_opts"
register_opt('fast_compile')(abstract_batch_norm_groupopt)
abstract_batch_norm_db = LocalGroupDB()
abstract_batch_norm_db2 = LocalGroupDB(
local_opt=theano.gof.opt.GraphToGPULocalOptGroup)
abstract_batch_norm_db2.__name__ = "abstract_batch_norm_db2"
register_opt('fast_compile', name='abstract_batch_norm_db')(
abstract_batch_norm_db)
register_opt2([bn.AbstractBatchNormTrain,
bn.AbstractBatchNormTrainGrad,
bn.AbstractBatchNormInference],
'fast_compile', name='abstract_batch_norm_db2')(
abstract_batch_norm_db2)
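# For each abstract batch norm op below, register the cuDNN lifter in both
# the equilibrium DB and the GraphToGPU DB at position=1, and register the
# plain Theano (cpu) expansion last as a fallback when cuDNN is not used.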
for op, fct, cpu in [(bn.AbstractBatchNormTrain,
local_abstract_batch_norm_train_cudnn,
bn.local_abstract_batch_norm_train),
(bn.AbstractBatchNormTrainGrad,
local_abstract_batch_norm_train_grad_cudnn,
bn.local_abstract_batch_norm_train_grad),
(bn.AbstractBatchNormInference,
local_abstract_batch_norm_inference_cudnn,
bn.local_abstract_batch_norm_inference)]:
lifter = op_lifter([op])(fct)
abstract_batch_norm_db.register(fct.__name__,
lifter,
'gpuarray', 'fast_compile', 'fast_run',
'cudnn', 'batchnorm_dnn',
position=1)
abstract_batch_norm_db2.register(fct.__name__,
local_optimizer([op])(fct),
'gpuarray', 'fast_compile', 'fast_run',
'cudnn', 'batchnorm_dnn',
position=1)
    # cpu is a normal optimization, so we can't register it in
    # GraphToGPU; for now we only add it to the slower EQ phase. If
    # there is no cuDNN, we still want to move the op to the GPU now
    # as a plain Theano graph, so that this part of the graph ends up on the GPU.
abstract_batch_norm_db.register(cpu.__name__, cpu,
'gpuarray', 'fast_compile', 'fast_run',
position='last')
|
py | 1a48ae83165bfc5c059a35536f25cb2075cf2741 | import os
import methods.simulator as simulator
for i in range(13):
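    # For each of the 13 graphon types: synthesize a graphon (r=1000), sample
    # 10 fixed-size graphs of 200 nodes each, and save the graphon plot to
    # results/graphon_<i>.pdf.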
graphon = simulator.synthesize_graphon(r=1000, type_idx=i)
graphs = simulator.simulate_graphs(graphon, num_graphs=10, num_nodes=200, graph_size='fixed')
simulator.visualize_graphon(graphon, save_path=os.path.join('results', 'graphon_{}.pdf'.format(i)))
# simulator.visualize_unweighted_graph(graphs[0], save_path=os.path.join('results', 'adj_{}.pdf'.format(i)))
|
py | 1a48afb4461d2161dc47a0e805b0f9f44f49d56f | # coding: utf-8
"""
Container Security API
# Authentication You must authenticate to the Qualys Cloud Platform using Qualys account credentials (user name and password) and get the JSON Web Token (JWT) before you can start using the Container Security APIs. Use the Qualys Authentication API to get the JWT. **Example Authentication Curl Request**: curl -X POST https://gateway/auth -H 'Content-Type: application/x-www-form-urlencoded' -d 'username=value1&password=passwordValue&token=true' where - gateway is the base URL to the Qualys API server where your account is located. - **username** and **password** are the credentials of the user account for which you want to fetch Container Security data. - **token** should be **true** - **Content-Type** should be **application/x-www-form-urlencoded** # noqa: E501
OpenAPI spec version: v1.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.bulk_container_details_list import BulkContainerDetailsList # noqa: E501
from swagger_client.rest import ApiException
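# A minimal, illustrative sketch of the JWT authentication flow described in
# the module docstring above. It is not part of the generated test stubs; it
# assumes the third-party `requests` package and a placeholder `gateway` base
# URL, and only demonstrates the documented POST /auth call in Python.
def _example_fetch_jwt(gateway, username, password):
    """Return a JWT from the Qualys gateway (illustrative sketch only)."""
    import requests  # assumed dependency; not required by the stubs below
    response = requests.post(
        "{}/auth".format(gateway),
        data={"username": username, "password": password, "token": "true"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    response.raise_for_status()
    # The docstring states the gateway returns the JWT; we assume it is the
    # plain-text response body.
    return response.text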
class TestBulkContainerDetailsList(unittest.TestCase):
"""BulkContainerDetailsList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBulkContainerDetailsList(self):
"""Test BulkContainerDetailsList"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.bulk_container_details_list.BulkContainerDetailsList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a48afeeb48db571961e7227c7eaa2685d1979c4 | import pickle
import joblib
import pytest
import numpy as np
import scipy.sparse as sp
from unittest.mock import Mock
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone, is_classifier
from sklearn.svm import OneClassSVM
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.linear_model import _sgd_fast as sgd_fast
from sklearn.linear_model import _stochastic_gradient
from sklearn.model_selection import RandomizedSearchCV
def _update_kwargs(kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
if "tol" not in kwargs:
kwargs["tol"] = None
if "max_iter" not in kwargs:
kwargs["max_iter"] = 5
class _SparseSGDClassifier(linear_model.SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super().decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super().predict_proba(X)
class _SparseSGDRegressor(linear_model.SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
# XXX untested as of v0.22
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.decision_function(self, X, *args, **kw)
class _SparseSGDOneClassSVM(linear_model.SGDOneClassSVM):
def fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.fit(self, X, *args, **kw)
def partial_fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.partial_fit(self, X, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.decision_function(self, X, *args, **kw)
def SGDClassifier(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDClassifier(**kwargs)
def SGDRegressor(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDRegressor(**kwargs)
def SGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDOneClassSVM(**kwargs)
def SparseSGDClassifier(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDClassifier(**kwargs)
def SparseSGDRegressor(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDRegressor(**kwargs)
def SparseSGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDOneClassSVM(**kwargs)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array(
[
[-1, 1],
[-0.75, 0.5],
[-1.5, 1.5],
[1, 1],
[0.75, 0.5],
[1.5, 1.5],
[-1, -1],
[0, -0.5],
[1, -1],
]
)
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array(
[
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
]
)
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array(
[
[1, 0.9, 0.8, 0, 0, 0],
[1, 0.84, 0.98, 0, 0, 0],
[1, 0.96, 0.88, 0, 0, 0],
[1, 0.91, 0.99, 0, 0, 0],
[0, 0, 0, 0.89, 0.91, 1],
[0, 0, 0, 0.79, 0.84, 1],
[0, 0, 0, 0.91, 0.95, 1],
[0, 0, 0, 0.93, 1, 1],
]
)
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common Test Case for classification and regression
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass in (SparseSGDClassifier, SparseSGDRegressor):
decay = 0.01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
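        # Incremental running mean: after processing sample i, the averages
        # equal the mean of all weight vectors / intercepts seen so far,
        # via avg_i = (i * avg_{i-1} + current) / (i + 1).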
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
@pytest.mark.parametrize("fit_method", ["fit", "partial_fit"])
@pytest.mark.parametrize(
"params, err_msg",
[
({"alpha": -0.1}, "alpha must be >= 0"),
({"penalty": "foobar", "l1_ratio": 0.85}, "Penalty foobar is not supported"),
({"loss": "foobar"}, "The loss foobar is not supported"),
({"l1_ratio": 1.1}, r"l1_ratio must be in \[0, 1\]"),
({"learning_rate": "<unknown>"}, "learning rate <unknown> is not supported"),
({"nu": -0.5}, r"nu must be in \(0, 1]"),
({"nu": 2}, r"nu must be in \(0, 1]"),
({"alpha": 0, "learning_rate": "optimal"}, "alpha must be > 0"),
({"eta0": 0, "learning_rate": "constant"}, "eta0 must be > 0"),
({"max_iter": -1}, "max_iter must be > zero"),
({"shuffle": "false"}, "shuffle must be either True or False"),
({"early_stopping": "false"}, "early_stopping must be either True or False"),
(
{"validation_fraction": -0.1},
r"validation_fraction must be in range \(0, 1\)",
),
({"n_iter_no_change": 0}, "n_iter_no_change must be >= 1"),
],
# Avoid long error messages in test names:
# https://github.com/scikit-learn/scikit-learn/issues/21362
ids=lambda x: x[:10].replace("]", "") if isinstance(x, str) else x,
)
def test_sgd_estimator_params_validation(klass, fit_method, params, err_msg):
"""Validate parameters in the different SGD estimators."""
try:
sgd_estimator = klass(**params)
except TypeError as err:
if "unexpected keyword argument" in str(err):
# skip test if the parameter is not supported by the estimator
return
raise err
with pytest.raises(ValueError, match=err_msg):
if is_classifier(sgd_estimator) and fit_method == "partial_fit":
fit_params = {"classes": np.unique(Y)}
else:
fit_params = {}
getattr(sgd_estimator, fit_method)(X, Y, **fit_params)
def _test_warm_start(klass, X, Y, lr):
# Test that explicit warm restart...
clf = klass(alpha=0.01, eta0=0.01, shuffle=False, learning_rate=lr)
clf.fit(X, Y)
clf2 = klass(alpha=0.001, eta0=0.01, shuffle=False, learning_rate=lr)
clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(
alpha=0.01, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr
)
clf3.fit(X, Y)
assert clf3.t_ == clf.t_
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert clf3.t_ == clf2.t_
assert_array_almost_equal(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
_test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_input_format(klass):
# Input format tests.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
with pytest.raises(ValueError):
clf.fit(X, Y_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_clone(klass):
# Test whether clone works ok.
clf = klass(alpha=0.01, penalty="l1")
clf = clone(clf)
clf.set_params(penalty="l2")
clf.fit(X, Y)
clf2 = klass(alpha=0.01, penalty="l2")
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_plain_has_no_average_attr(klass):
clf = klass(average=True, eta0=0.01)
clf.fit(X, Y)
assert hasattr(clf, "_average_coef")
assert hasattr(clf, "_average_intercept")
assert hasattr(clf, "_standard_intercept")
assert hasattr(clf, "_standard_coef")
clf = klass()
clf.fit(X, Y)
assert not hasattr(clf, "_average_coef")
assert not hasattr(clf, "_average_intercept")
assert not hasattr(clf, "_standard_intercept")
assert not hasattr(clf, "_standard_coef")
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_late_onset_averaging_not_reached(klass):
clf1 = klass(average=600)
clf2 = klass()
for _ in range(100):
if is_classifier(clf1):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
if klass in [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]:
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
assert_allclose(clf1.offset_, clf2.offset_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_late_onset_averaging_reached(klass):
eta0 = 0.001
alpha = 0.0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = klass(
average=7,
learning_rate="constant",
loss="squared_error",
eta0=eta0,
alpha=alpha,
max_iter=2,
shuffle=False,
)
clf2 = klass(
average=0,
learning_rate="constant",
loss="squared_error",
eta0=eta0,
alpha=alpha,
max_iter=1,
shuffle=False,
)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = asgd(
klass,
X,
Y_encode,
eta0,
alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_,
)
assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_early_stopping(klass):
X = iris.data[iris.target > 0]
Y = iris.target[iris.target > 0]
for early_stopping in [True, False]:
max_iter = 1000
clf = klass(early_stopping=early_stopping, tol=1e-3, max_iter=max_iter).fit(
X, Y
)
assert clf.n_iter_ < max_iter
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_adaptive_longer_than_constant(klass):
clf1 = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3, max_iter=100)
clf1.fit(iris.data, iris.target)
clf2 = klass(learning_rate="constant", eta0=0.01, tol=1e-3, max_iter=100)
clf2.fit(iris.data, iris.target)
assert clf1.n_iter_ > clf2.n_iter_
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_validation_set_not_used_for_training(klass):
X, Y = iris.data, iris.target
validation_fraction = 0.4
seed = 42
shuffle = False
max_iter = 10
clf1 = klass(
early_stopping=True,
random_state=np.random.RandomState(seed),
validation_fraction=validation_fraction,
learning_rate="constant",
eta0=0.01,
tol=None,
max_iter=max_iter,
shuffle=shuffle,
)
clf1.fit(X, Y)
assert clf1.n_iter_ == max_iter
clf2 = klass(
early_stopping=False,
random_state=np.random.RandomState(seed),
learning_rate="constant",
eta0=0.01,
tol=None,
max_iter=max_iter,
shuffle=shuffle,
)
if is_classifier(clf2):
cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=seed)
else:
cv = ShuffleSplit(test_size=validation_fraction, random_state=seed)
idx_train, idx_val = next(cv.split(X, Y))
idx_train = np.sort(idx_train) # remove shuffling
clf2.fit(X[idx_train], Y[idx_train])
assert clf2.n_iter_ == max_iter
assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_n_iter_no_change(klass):
X, Y = iris.data, iris.target
# test that n_iter_ increases monotonically with n_iter_no_change
for early_stopping in [True, False]:
n_iter_list = [
klass(
early_stopping=early_stopping,
n_iter_no_change=n_iter_no_change,
tol=1e-4,
max_iter=1000,
)
.fit(X, Y)
.n_iter_
for n_iter_no_change in [2, 3, 10]
]
assert_array_equal(n_iter_list, sorted(n_iter_list))
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_not_enough_sample_for_early_stopping(klass):
# test an error is raised if the training or validation set is empty
clf = klass(early_stopping=True, validation_fraction=0.99)
with pytest.raises(ValueError):
clf.fit(X3, Y3)
###############################################################################
# Classification Test Case
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log_loss", "modified_huber"):
clf = klass(
penalty="l2",
alpha=0.01,
fit_intercept=True,
loss=loss,
max_iter=10,
shuffle=True,
)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_provide_coef(klass):
"""Check that the shape of `coef_init` is validated."""
with pytest.raises(ValueError, match="Provided coef_init does not match dataset"):
klass().fit(X, Y, coef_init=np.zeros((3,)))
@pytest.mark.parametrize(
"klass, fit_params",
[
(SGDClassifier, {"intercept_init": np.zeros((3,))}),
(SparseSGDClassifier, {"intercept_init": np.zeros((3,))}),
(SGDOneClassSVM, {"offset_init": np.zeros((3,))}),
(SparseSGDOneClassSVM, {"offset_init": np.zeros((3,))}),
],
)
def test_set_intercept_offset(klass, fit_params):
"""Check that `intercept_init` or `offset_init` is validated."""
sgd_estimator = klass()
with pytest.raises(ValueError, match="does not match dataset"):
sgd_estimator.fit(X, Y, **fit_params)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_sgd_early_stopping_with_partial_fit(klass):
"""Check that we raise an error for `early_stopping` used with
`partial_fit`.
"""
err_msg = "early_stopping should be False with partial_fit"
with pytest.raises(ValueError, match=err_msg):
klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize(
"klass, fit_params",
[
(SGDClassifier, {"intercept_init": 0}),
(SparseSGDClassifier, {"intercept_init": 0}),
(SGDOneClassSVM, {"offset_init": 0}),
(SparseSGDOneClassSVM, {"offset_init": 0}),
],
)
def test_set_intercept_offset_binary(klass, fit_params):
"""Check that we can pass a scaler with binary classification to
`intercept_init` or `offset_init`."""
klass().fit(X5, Y5, **fit_params)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
# Checks the SGDClassifier correctly computes the average weights
eta = 0.1
alpha = 2.0
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_, average_weights, decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = klass().fit(X5, Y5)
klass().fit(X5, Y5, intercept_init=clf.intercept_)
clf = klass().fit(X, Y)
klass().fit(X, Y, intercept_init=clf.intercept_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
# Target must have at least two labels
clf = klass(alpha=0.01, max_iter=20)
with pytest.raises(ValueError):
clf.fit(X2, np.ones(9))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
    # partial_fit with class_weight='balanced' is not supported
regex = (
r"class_weight 'balanced' is not supported for "
r"partial_fit\. In order to use 'balanced' weights, "
r"use compute_class_weight\('balanced', classes=classes, y=y\). "
r"In place of y you can us a large enough sample "
r"of the full training set target to properly "
r"estimate the class frequency distributions\. "
r"Pass the resulting weights as the class_weight "
r"parameter\."
)
with pytest.raises(ValueError, match=regex):
klass(class_weight="balanced").partial_fit(X, Y, classes=np.unique(Y))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
eta = 0.001
alpha = 0.01
# Multi-class average test case
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3))
assert clf.coef_.shape == (3, 2)
    assert clf.intercept_.shape == (3,)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
# Multi-class test case with multi-core support
clf = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
# TODO: Remove filterwarnings in v1.2.
@pytest.mark.filterwarnings("ignore:.*squared_loss.*:FutureWarning")
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
# Checks that SGDClassifier predict_proba and predict_log_proba methods
# can either be accessed or raise an appropriate error message
# otherwise. See
# https://github.com/scikit-learn/scikit-learn/issues/10938 for more
# details.
for loss in linear_model.SGDClassifier.loss_functions:
clf = SGDClassifier(loss=loss)
# TODO(1.3): Remove "log"
if loss in ("log_loss", "log", "modified_huber"):
assert hasattr(clf, "predict_proba")
assert hasattr(clf, "predict_log_proba")
else:
message = "probability estimates are not available for loss={!r}".format(
loss
)
assert not hasattr(clf, "predict_proba")
assert not hasattr(clf, "predict_log_proba")
with pytest.raises(AttributeError, match=message):
clf.predict_proba
with pytest.raises(AttributeError, match=message):
clf.predict_log_proba
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=10, tol=None).fit(X, Y)
assert not hasattr(clf, "predict_proba")
assert not hasattr(clf, "predict_log_proba")
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log_loss", "modified_huber"]:
clf = klass(loss=loss, alpha=0.01, max_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert p[0, 1] > 0.5
p = clf.predict_proba([[-1, -1]])
assert p[0, 1] < 0.5
p = clf.predict_log_proba([[3, 2]])
assert p[0, 1] > p[0, 0]
p = clf.predict_log_proba([[-1, -1]])
assert p[0, 1] < p[0, 0]
# log loss multiclass probability estimates
clf = klass(loss="log_loss", alpha=0.01, max_iter=10).fit(X2, Y2)
d = clf.decision_function([[0.1, -0.1], [0.3, 0.2]])
p = clf.predict_proba([[0.1, -0.1], [0.3, 0.2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert np.all(p[0] >= 0)
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
lp = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), lp)
lp = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), lp)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if klass != SparseSGDClassifier:
assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
else: # XXX the sparse test gets a different X2 (?)
assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.0] * 3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = klass(
penalty="l1",
alpha=0.2,
fit_intercept=False,
max_iter=2000,
tol=None,
shuffle=False,
)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = klass(alpha=0.1, max_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
# ValueError due to not existing class label.
clf = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_format(klass):
# ValueError due to wrong class_weight argument type.
clf = klass(alpha=0.1, max_iter=1000, class_weight=[0.5])
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: 0.6, 2: 0.3}
rng = np.random.RandomState(0)
sample_weights = rng.random_sample(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
clf2 = klass(alpha=0.1, max_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
    # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = klass(alpha=0.0001, max_iter=1000, class_weight=None, shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf.predict(X), average="weighted")
assert_almost_equal(f1, 0.96, decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = klass(
alpha=0.0001, max_iter=1000, class_weight="balanced", shuffle=False
).fit(X, y)
f1 = metrics.f1_score(y, clf_balanced.predict(X), average="weighted")
assert_almost_equal(f1, 0.96, decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
    # build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = klass(max_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average="weighted") < 0.96
# fit a model with balanced class_weight enabled
clf = klass(max_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average="weighted") > 0.96
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_wrong_sample_weights(klass):
# Test if ValueError is raised if sample_weight has wrong shape
if klass in [SGDClassifier, SparseSGDClassifier]:
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
clf = klass(nu=0.1, max_iter=1000, fit_intercept=False)
# provided sample_weight too long
with pytest.raises(ValueError):
clf.fit(X, Y, sample_weight=np.arange(7))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_exception(klass):
clf = klass(alpha=0.01)
# classes was not specified
with pytest.raises(ValueError):
clf.partial_fit(X3, Y3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_binary(klass):
third = X.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert clf.coef_.shape == (1, X.shape[1])
assert clf.intercept_.shape == (1,)
assert clf.decision_function([[0, 0]]).shape == (1,)
    ptr1 = clf.coef_.ctypes.data
    clf.partial_fit(X[third:], Y[third:])
    ptr2 = clf.coef_.ctypes.data
    # check that coef_ hasn't been re-allocated
    assert ptr1 == ptr2
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
    ptr1 = clf.coef_.ctypes.data
    clf.partial_fit(X2[third:], Y2[third:])
    ptr2 = clf.coef_.ctypes.data
    # check that coef_ hasn't been re-allocated
    assert ptr1 == ptr2
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass_average(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
clf.partial_fit(X2[third:], Y2[third:])
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_fit_then_partial_fit(klass):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = klass()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_classif(klass, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = klass(alpha=0.01, eta0=0.01, max_iter=2, learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert clf.t_ == t
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_regression_losses(klass):
random_state = np.random.RandomState(1)
clf = klass(
alpha=0.01,
learning_rate="constant",
eta0=0.1,
loss="epsilon_insensitive",
random_state=random_state,
)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(
alpha=0.01,
learning_rate="constant",
eta0=0.1,
loss="squared_epsilon_insensitive",
random_state=random_state,
)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(alpha=0.01, loss="huber", random_state=random_state)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(
alpha=0.01,
learning_rate="constant",
eta0=0.01,
loss="squared_error",
random_state=random_state,
)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_warm_start_multiclass(klass):
_test_warm_start(klass, X2, Y2, "optimal")
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_multiple_fit(klass):
# Test multiple calls of fit w/ different shaped inputs.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
assert hasattr(clf, "coef_")
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
###############################################################################
# Regression Test Case
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_reg(klass):
# Check that SGD gives any results.
clf = klass(alpha=0.1, max_iter=2, fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert clf.coef_[0] == clf.coef_[1]
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
# Tests the average regressor matches the naive implementation
eta = 0.001
alpha = 0.01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_partial_fit(klass):
# Tests whether the partial fit yields the same average as the fit
eta = 0.001
alpha = 0.01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.partial_fit(X[: int(n_samples / 2)][:], y[: int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2) :][:], y[int(n_samples / 2) :])
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_average_sparse(klass):
# Checks the average weights on data with 0s
eta = 0.001
alpha = 0.01
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
n_samples = Y3.shape[0]
clf.partial_fit(X3[: int(n_samples / 2)][:], Y3[: int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2) :][:], Y3[int(n_samples / 2) :])
average_weights, average_intercept = asgd(klass, X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_least_squares_fit(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_epsilon_insensitive(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(
loss="epsilon_insensitive",
epsilon=0.01,
alpha=0.1,
max_iter=20,
fit_intercept=False,
)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(
loss="epsilon_insensitive",
epsilon=0.01,
alpha=0.1,
max_iter=20,
fit_intercept=False,
)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_huber_fit(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_elasticnet_convergence(klass):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
    # ground truth linear model that generates y from X and to which the
    # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(
alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False
)
cd.fit(X, y)
sgd = klass(
penalty="elasticnet",
max_iter=50,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=False,
)
sgd.fit(X, y)
err_msg = (
"cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio)
)
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg)
@ignore_warnings
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_partial_fit(klass):
third = X.shape[0] // 3
clf = klass(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert clf.coef_.shape == (X.shape[1],)
assert clf.intercept_.shape == (1,)
assert clf.predict([[0, 0]]).shape == (1,)
    ptr1 = clf.coef_.ctypes.data
    clf.partial_fit(X[third:], Y[third:])
    ptr2 = clf.coef_.ctypes.data
    # check that coef_ hasn't been re-allocated
    assert ptr1 == ptr2
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit(klass, lr):
clf = klass(alpha=0.01, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert clf.t_ == t
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_loss_function_epsilon(klass):
clf = klass(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions["huber"][1] == 0.1
###############################################################################
# SGD One Class SVM Test Case
# a simple implementation of ASGD to use for testing SGDOneClassSVM
def asgd_oneclass(klass, X, eta, nu, coef_init=None, offset_init=0.0):
if coef_init is None:
coef = np.zeros(X.shape[1])
else:
coef = coef_init
average_coef = np.zeros(X.shape[1])
offset = offset_init
intercept = 1 - offset
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass == SparseSGDOneClassSVM:
decay = 0.01
for i, entry in enumerate(X):
p = np.dot(entry, coef)
p += intercept
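        # Subgradient of the hinge term max(0, 1 - p) with respect to p:
        # -1 inside the margin (p <= 1), 0 otherwise.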
if p <= 1.0:
gradient = -1
else:
gradient = 0
coef *= max(0, 1.0 - (eta * nu / 2))
coef += -(eta * gradient * entry)
intercept += -(eta * (nu + gradient)) * decay
average_coef *= i
average_coef += coef
average_coef /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_coef, 1 - average_intercept
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def _test_warm_start_oneclass(klass, X, lr):
# Test that explicit warm restart...
clf = klass(nu=0.5, eta0=0.01, shuffle=False, learning_rate=lr)
clf.fit(X)
clf2 = klass(nu=0.1, eta0=0.01, shuffle=False, learning_rate=lr)
clf2.fit(X, coef_init=clf.coef_.copy(), offset_init=clf.offset_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(nu=0.5, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr)
clf3.fit(X)
assert clf3.t_ == clf.t_
assert_allclose(clf3.coef_, clf.coef_)
clf3.set_params(nu=0.1)
clf3.fit(X)
assert clf3.t_ == clf2.t_
assert_allclose(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start_oneclass(klass, lr):
_test_warm_start_oneclass(klass, X, lr)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_clone_oneclass(klass):
# Test whether clone works ok.
clf = klass(nu=0.5)
clf = clone(clf)
clf.set_params(nu=0.1)
clf.fit(X)
clf2 = klass(nu=0.1)
clf2.fit(X)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_partial_fit_oneclass(klass):
third = X.shape[0] // 3
clf = klass(nu=0.1)
clf.partial_fit(X[:third])
assert clf.coef_.shape == (X.shape[1],)
assert clf.offset_.shape == (1,)
assert clf.predict([[0, 0]]).shape == (1,)
previous_coefs = clf.coef_
clf.partial_fit(X[third:])
    # check that coef_ hasn't been re-allocated
assert clf.coef_ is previous_coefs
# raises ValueError if number of features does not match previous data
with pytest.raises(ValueError):
clf.partial_fit(X[:, 1])
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_oneclass(klass, lr):
clf = klass(nu=0.05, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
clf.fit(X)
y_scores = clf.decision_function(T)
t = clf.t_
coef = clf.coef_
offset = clf.offset_
clf = klass(nu=0.05, eta0=0.01, max_iter=1, learning_rate=lr, shuffle=False)
for _ in range(2):
clf.partial_fit(X)
y_scores2 = clf.decision_function(T)
assert clf.t_ == t
assert_allclose(y_scores, y_scores2)
assert_allclose(clf.coef_, coef)
assert_allclose(clf.offset_, offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_late_onset_averaging_reached_oneclass(klass):
# Test average
eta0 = 0.001
nu = 0.05
# 2 passes over the training set but average only at second pass
clf1 = klass(
average=7, learning_rate="constant", eta0=eta0, nu=nu, max_iter=2, shuffle=False
)
# 1 pass over the training set with no averaging
clf2 = klass(
average=0, learning_rate="constant", eta0=eta0, nu=nu, max_iter=1, shuffle=False
)
clf1.fit(X)
clf2.fit(X)
# Start from clf2 solution, compute averaging using asgd function and
# compare with clf1 solution
average_coef, average_offset = asgd_oneclass(
klass, X, eta0, nu, coef_init=clf2.coef_.ravel(), offset_init=clf2.offset_
)
assert_allclose(clf1.coef_.ravel(), average_coef.ravel())
assert_allclose(clf1.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_computed_correctly_oneclass(klass):
# Tests the average SGD One-Class SVM matches the naive implementation
eta = 0.001
nu = 0.05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(
learning_rate="constant",
eta0=eta,
nu=nu,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.fit(X)
average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_partial_fit_oneclass(klass):
# Tests whether the partial fit yields the same average as the fit
eta = 0.001
nu = 0.05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(
learning_rate="constant",
eta0=eta,
nu=nu,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
clf.partial_fit(X[: int(n_samples / 2)][:])
clf.partial_fit(X[int(n_samples / 2) :][:])
average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_average_sparse_oneclass(klass):
# Checks the average coef on data with 0s
eta = 0.001
nu = 0.01
clf = klass(
learning_rate="constant",
eta0=eta,
nu=nu,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
n_samples = X3.shape[0]
clf.partial_fit(X3[: int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2) :])
average_coef, average_offset = asgd_oneclass(klass, X3, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
def test_sgd_oneclass():
# Test fit, decision_function, predict and score_samples on a toy
# dataset
X_train = np.array([[-2, -1], [-1, -1], [1, 1]])
X_test = np.array([[0.5, -2], [2, 2]])
clf = SGDOneClassSVM(
nu=0.5, eta0=1, learning_rate="constant", shuffle=False, max_iter=1
)
clf.fit(X_train)
assert_allclose(clf.coef_, np.array([-0.125, 0.4375]))
assert clf.offset_[0] == -0.5
scores = clf.score_samples(X_test)
assert_allclose(scores, np.array([-0.9375, 0.625]))
dec = clf.score_samples(X_test) - clf.offset_
assert_allclose(clf.decision_function(X_test), dec)
pred = clf.predict(X_test)
assert_array_equal(pred, np.array([-1, 1]))
def test_ocsvm_vs_sgdocsvm():
    # Checks that SGDOneClassSVM gives a good approximation of a kernelized
    # One-Class SVM
nu = 0.05
gamma = 2.0
random_state = 42
# Generate train and test data
rng = np.random.RandomState(random_state)
X = 0.3 * rng.randn(500, 2)
X_train = np.r_[X + 2, X - 2]
X = 0.3 * rng.randn(100, 2)
X_test = np.r_[X + 2, X - 2]
# One-Class SVM
clf = OneClassSVM(gamma=gamma, kernel="rbf", nu=nu)
clf.fit(X_train)
y_pred_ocsvm = clf.predict(X_test)
dec_ocsvm = clf.decision_function(X_test).reshape(1, -1)
# SGDOneClassSVM using kernel approximation
max_iter = 15
transform = Nystroem(gamma=gamma, random_state=random_state)
clf_sgd = SGDOneClassSVM(
nu=nu,
shuffle=True,
fit_intercept=True,
max_iter=max_iter,
random_state=random_state,
tol=-np.inf,
)
pipe_sgd = make_pipeline(transform, clf_sgd)
pipe_sgd.fit(X_train)
y_pred_sgdocsvm = pipe_sgd.predict(X_test)
dec_sgdocsvm = pipe_sgd.decision_function(X_test).reshape(1, -1)
assert np.mean(y_pred_sgdocsvm == y_pred_ocsvm) >= 0.99
corrcoef = np.corrcoef(np.concatenate((dec_ocsvm, dec_sgdocsvm)))[0, 1]
assert corrcoef >= 0.9
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(
n_samples=1000, n_features=100, n_informative=20, random_state=1234
)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(
alpha=0.001,
penalty="elasticnet",
tol=None,
max_iter=6,
l1_ratio=0.9999999999,
random_state=42,
).fit(X, y)
est_l1 = SGDClassifier(
alpha=0.001, penalty="l1", max_iter=6, random_state=42, tol=None
).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(
alpha=0.001,
penalty="elasticnet",
tol=None,
max_iter=6,
l1_ratio=0.0000000001,
random_state=42,
).fit(X, y)
est_l2 = SGDClassifier(
alpha=0.001, penalty="l2", max_iter=6, random_state=42, tol=None
).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all="raise"):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert np.isfinite(X).all()
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert np.isfinite(X_scaled).all()
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.0).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss="squared_hinge", max_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert np.isfinite(model.coef_).all()
# model is numerically unstable on unscaled data
msg_regxp = (
r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help."
)
with pytest.raises(ValueError, match=msg_regxp):
model.fit(X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(
loss="squared_hinge",
max_iter=10,
shuffle=True,
penalty="elasticnet",
l1_ratio=0.3,
alpha=0.01,
eta0=0.001,
random_state=0,
tol=None,
)
with np.errstate(all="raise"):
model.fit(iris.data, iris.target)
assert np.isfinite(model.coef_).all()
@pytest.mark.parametrize("penalty", ["l2", "l1", "elasticnet"])
def test_large_regularization(penalty):
# Non regression tests for numerical stability issues caused by large
# regularization parameters
model = SGDClassifier(
alpha=1e5,
learning_rate="constant",
eta0=0.1,
penalty=penalty,
shuffle=False,
tol=None,
max_iter=6,
)
with np.errstate(all="raise"):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def test_tol_parameter():
# Test that the tol parameter behaves as expected
X = StandardScaler().fit_transform(iris.data)
y = iris.target == 1
    # When tol is None, the number of iterations should be equal to max_iter
max_iter = 42
model_0 = SGDClassifier(tol=None, random_state=0, max_iter=max_iter)
model_0.fit(X, y)
assert max_iter == model_0.n_iter_
    # If tol is not None, the number of iterations should be less than max_iter
max_iter = 2000
model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter)
model_1.fit(X, y)
assert max_iter > model_1.n_iter_
assert model_1.n_iter_ > 5
    # A larger tol should yield a smaller number of iterations
model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter)
model_2.fit(X, y)
assert model_1.n_iter_ > model_2.n_iter_
assert model_2.n_iter_ > 3
# Strict tolerance and small max_iter should trigger a warning
model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0)
warning_message = (
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
model_3.fit(X, y)
assert model_3.n_iter_ == 3
def _test_loss_common(loss_function, cases):
    # Test the different loss functions
    # cases is a list of (p, y, expected_loss, expected_dloss) tuples
for p, y, expected_loss, expected_dloss in cases:
assert_almost_equal(loss_function.py_loss(p, y), expected_loss)
assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss)
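# Worked example (comment only, not part of the test suite) showing where the
# expected values come from, assuming the usual hinge loss with threshold t:
#   loss(p, y)  = max(0, t - p * y)
#   dloss(p, y) = -y if p * y <= t else 0   (gradient w.r.t. the prediction p)
# For Hinge(1.0) with p = 0.5, y = 1.0: loss = max(0, 1 - 0.5) = 0.5 and
# dloss = -1.0, which matches the (0.5, 1.0, 0.5, -1.0) case below.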
def test_loss_hinge():
# Test Hinge (hinge / perceptron)
# hinge
loss = sgd_fast.Hinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.1, 1.0, 0.0, 0.0),
(-2.0, -1.0, 0.0, 0.0),
(1.0, 1.0, 0.0, -1.0),
(-1.0, -1.0, 0.0, 1.0),
(0.5, 1.0, 0.5, -1.0),
(2.0, -1.0, 3.0, 1.0),
(-0.5, -1.0, 0.5, 1.0),
(0.0, 1.0, 1, -1.0),
]
_test_loss_common(loss, cases)
# perceptron
loss = sgd_fast.Hinge(0.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-0.1, -1.0, 0.0, 0.0),
(0.0, 1.0, 0.0, -1.0),
(0.0, -1.0, 0.0, 1.0),
(0.5, -1.0, 0.5, 1.0),
(2.0, -1.0, 2.0, 1.0),
(-0.5, 1.0, 0.5, -1.0),
(-1.0, 1.0, 1.0, -1.0),
]
_test_loss_common(loss, cases)
def test_gradient_squared_hinge():
# Test SquaredHinge
loss = sgd_fast.SquaredHinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-2.0, -1.0, 0.0, 0.0),
(1.0, -1.0, 4.0, 4.0),
(-1.0, 1.0, 4.0, -4.0),
(0.5, 1.0, 0.25, -1.0),
(0.5, -1.0, 2.25, 3.0),
]
_test_loss_common(loss, cases)
def test_loss_log():
# Test Log (logistic loss)
loss = sgd_fast.Log()
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, np.log(1.0 + np.exp(-1.0)), -1.0 / (np.exp(1.0) + 1.0)),
(1.0, -1.0, np.log(1.0 + np.exp(1.0)), 1.0 / (np.exp(-1.0) + 1.0)),
(-1.0, -1.0, np.log(1.0 + np.exp(-1.0)), 1.0 / (np.exp(1.0) + 1.0)),
(-1.0, 1.0, np.log(1.0 + np.exp(1.0)), -1.0 / (np.exp(-1.0) + 1.0)),
(0.0, 1.0, np.log(2), -0.5),
(0.0, -1.0, np.log(2), 0.5),
(17.9, -1.0, 17.9, 1.0),
(-17.9, 1.0, 17.9, -1.0),
]
_test_loss_common(loss, cases)
assert_almost_equal(loss.py_dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
assert_almost_equal(loss.py_loss(18.1, 1.0), np.exp(-18.1), 16)
assert_almost_equal(loss.py_dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
assert_almost_equal(loss.py_loss(-18.1, 1.0), 18.1, 16)
def test_loss_squared_loss():
# Test SquaredLoss
loss = sgd_fast.SquaredLoss()
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(1.0, 1.0, 0.0, 0.0),
(1.0, 0.0, 0.5, 1.0),
(0.5, -1.0, 1.125, 1.5),
(-2.5, 2.0, 10.125, -4.5),
]
_test_loss_common(loss, cases)
def test_loss_huber():
# Test Huber
loss = sgd_fast.Huber(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.005, 0.1),
(0.0, 0.1, 0.005, -0.1),
(3.95, 4.0, 0.00125, -0.05),
(5.0, 2.0, 0.295, 0.1),
(-1.0, 5.0, 0.595, -0.1),
]
_test_loss_common(loss, cases)
def test_loss_modified_huber():
# (p, y, expected_loss, expected_dloss)
loss = sgd_fast.ModifiedHuber()
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-1.0, -1.0, 0.0, 0.0),
(2.0, 1.0, 0.0, 0.0),
(0.0, 1.0, 1.0, -2.0),
(-1.0, 1.0, 4.0, -4.0),
(0.5, -1.0, 2.25, 3.0),
(-2.0, 1.0, 8, -4.0),
(-3.0, 1.0, 12, -4.0),
]
_test_loss_common(loss, cases)
def test_loss_epsilon_insensitive():
# Test EpsilonInsensitive
loss = sgd_fast.EpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.0, 0.0),
(-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0),
(2.2, 2.0, 0.1, 1.0),
(2.0, -1.0, 2.9, 1.0),
(2.0, 2.2, 0.1, -1.0),
(-2.0, 1.0, 2.9, -1.0),
]
_test_loss_common(loss, cases)
def test_loss_squared_epsilon_insensitive():
# Test SquaredEpsilonInsensitive
loss = sgd_fast.SquaredEpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.0, 0.0),
(-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0),
(2.2, 2.0, 0.01, 0.2),
(2.0, -1.0, 8.41, 5.8),
(2.0, 2.2, 0.01, -0.2),
(-2.0, 1.0, 8.41, -5.8),
]
_test_loss_common(loss, cases)
def test_multi_thread_multi_class_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and thread-based parallelism.
clf = SGDClassifier(
alpha=1e-3,
tol=1e-3,
max_iter=1000,
early_stopping=True,
n_iter_no_change=100,
random_state=0,
n_jobs=2,
)
clf.fit(iris.data, iris.target)
assert clf.n_iter_ > clf.n_iter_no_change
assert clf.n_iter_ < clf.n_iter_no_change + 20
assert clf.score(iris.data, iris.target) > 0.8
def test_multi_core_gridsearch_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and process-based multi-core
# parallelism.
param_grid = {
"alpha": np.logspace(-4, 4, 9),
"n_iter_no_change": [5, 10, 50],
}
clf = SGDClassifier(tol=1e-2, max_iter=1000, early_stopping=True, random_state=0)
search = RandomizedSearchCV(clf, param_grid, n_iter=5, n_jobs=2, random_state=0)
search.fit(iris.data, iris.target)
assert search.best_score_ > 0.8
@pytest.mark.parametrize("backend", ["loky", "multiprocessing", "threading"])
def test_SGDClassifier_fit_for_all_backends(backend):
# This is a non-regression smoke test. In the multi-class case,
# SGDClassifier.fit fits each class in a one-versus-all fashion using
# joblib.Parallel. However, each OvA step updates the coef_ attribute of
# the estimator in-place. Internally, SGDClassifier calls Parallel using
# require='sharedmem'. This test makes sure SGDClassifier.fit works
# consistently even when the user asks for a backend that does not provide
# sharedmem semantics.
# We further test a case where memmapping would have been used if
# SGDClassifier.fit was called from a loky or multiprocessing backend. In
# this specific case, in-place modification of clf.coef_ would have caused
# a segmentation fault when trying to write in a readonly memory mapped
# buffer.
random_state = np.random.RandomState(42)
    # Create a sparse classification problem with 500 samples, 2000 features and
    # 20 classes. Using loky or multiprocessing this makes clf.coef_ exceed the
    # threshold above which memmapping is used in joblib and loky (1MB as of
    # 2018/11/1).
X = sp.random(500, 2000, density=0.02, format="csr", random_state=random_state)
y = random_state.choice(20, 500)
# Begin by fitting a SGD classifier sequentially
clf_sequential = SGDClassifier(max_iter=1000, n_jobs=1, random_state=42)
clf_sequential.fit(X, y)
# Fit a SGDClassifier using the specified backend, and make sure the
# coefficients are equal to those obtained using a sequential fit
clf_parallel = SGDClassifier(max_iter=1000, n_jobs=4, random_state=42)
with joblib.parallel_backend(backend=backend):
clf_parallel.fit(X, y)
assert_array_almost_equal(clf_sequential.coef_, clf_parallel.coef_)
@pytest.mark.parametrize(
"old_loss, new_loss, Estimator",
[
# TODO(1.2): Remove "squared_loss"
("squared_loss", "squared_error", linear_model.SGDClassifier),
("squared_loss", "squared_error", linear_model.SGDRegressor),
# TODO(1.3): Remove "log"
("log", "log_loss", linear_model.SGDClassifier),
],
)
def test_loss_deprecated(old_loss, new_loss, Estimator):
    # Note: class BaseSGD calls self._validate_params() in __init__, therefore
    # even instantiating the class raises a FutureWarning for deprecated losses.
with pytest.warns(FutureWarning, match=f"The loss '{old_loss}' was deprecated"):
est1 = Estimator(loss=old_loss, random_state=0)
est1.fit(X, Y)
est2 = Estimator(loss=new_loss, random_state=0)
est2.fit(X, Y)
if hasattr(est1, "predict_proba"):
assert_allclose(est1.predict_proba(X), est2.predict_proba(X))
else:
assert_allclose(est1.predict(X), est2.predict(X))
@pytest.mark.parametrize(
"Estimator", [linear_model.SGDClassifier, linear_model.SGDRegressor]
)
def test_sgd_random_state(Estimator, global_random_seed):
# Train the same model on the same data without converging and check that we
# get reproducible results by fixing the random seed.
if Estimator == linear_model.SGDRegressor:
X, y = datasets.make_regression(random_state=global_random_seed)
else:
X, y = datasets.make_classification(random_state=global_random_seed)
    # Fitting a model twice with the same hyper-parameters on the same training
    # set with the same seed leads to the same results deterministically.
est = Estimator(random_state=global_random_seed, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_same_seed_a = est.fit(X, y).coef_
assert est.n_iter_ == 1
est = Estimator(random_state=global_random_seed, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_same_seed_b = est.fit(X, y).coef_
assert est.n_iter_ == 1
assert_allclose(coef_same_seed_a, coef_same_seed_b)
    # Fitting a model twice with the same hyper-parameters on the same training
    # set but with a different random seed leads to different results after one
    # epoch because of the random shuffling of the dataset.
est = Estimator(random_state=global_random_seed + 1, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_other_seed = est.fit(X, y).coef_
assert est.n_iter_ == 1
assert np.abs(coef_same_seed_a - coef_other_seed).max() > 1.0
def test_validation_mask_correctly_subsets(monkeypatch):
"""Test that data passed to validation callback correctly subsets.
Non-regression test for #23255.
"""
X, Y = iris.data, iris.target
n_samples = X.shape[0]
validation_fraction = 0.2
clf = linear_model.SGDClassifier(
early_stopping=True,
tol=1e-3,
max_iter=1000,
validation_fraction=validation_fraction,
)
mock = Mock(side_effect=_stochastic_gradient._ValidationScoreCallback)
monkeypatch.setattr(_stochastic_gradient, "_ValidationScoreCallback", mock)
clf.fit(X, Y)
X_val, y_val = mock.call_args[0][1:3]
assert X_val.shape[0] == int(n_samples * validation_fraction)
assert y_val.shape[0] == int(n_samples * validation_fraction)
|
py | 1a48b033ea8799bb0b4509b36cac779e21e44c12 | #!/usr/bin/env python
# coding=utf-8
import json
import os
from sts.sts import Sts, Scope
def test_policy():
scope = Scope('name/cos:PutObject', 'test-1250000000', 'ap-guangzhou', 'exampleobject')
scopes = list()
scopes.append(scope)
print(json.dumps(Sts.get_policy(scopes), indent=4))
def test_policy2():
scopes = list()
scopes.append(Scope('name/cos:PutObject', 'example-1250000000', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:GetObject', 'example-1250000000', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:InitiateMultipartUpload', 'example-1250000000', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:ListParts', 'example-1250000000', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:UploadPart', 'example-1250000000', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:CompleteMultipartUpload', 'example-1250000000', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:GetObject', 'example-1250000000', 'ap-guangzhou', '1/test.txt'))
print(json.dumps(Sts.get_policy(scopes), indent=4))
def test_sts():
scopes = list()
scopes.append(Scope('name/cos:PutObject', 'example-1253653367', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:GetObject', 'example-1253653367', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:InitiateMultipartUpload', 'example-1253653367', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:ListParts', 'example-1253653367', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:UploadPart', 'example-1253653367', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:CompleteMultipartUpload', 'example-1253653367', 'ap-guangzhou', 'exampleobject'))
scopes.append(Scope('name/cos:GetObject', 'example-1253653367', 'ap-guangzhou', '1/test.txt'))
config = {
'sts_scheme': 'https',
'sts_url': 'sts.tencentcloudapi.com/',
        # Validity period of the temporary credentials, in seconds
        'duration_seconds': 1800,
        'secret_id': os.environ['COS_SECRET_ID'],
        # Long-term (fixed) secret key
        'secret_key': os.environ['COS_SECRET_KEY'],
        # Replace with the region where the bucket is located
        'region': 'ap-guangzhou',
        # Network proxy settings (optional)
# 'proxy': {
# 'http': 'xxx',
# 'https': 'xxx'
# },
'policy': Sts.get_policy(scopes)
}
sts = Sts(config)
response = sts.get_credential()
print('get data : ' + json.dumps(dict(response), indent=4))
if __name__ == '__main__':
test_policy()
test_policy2()
test_sts()
|
py | 1a48b08001cd0d09be89e1bbd98b473cd5824681 | import os
import optparse
import logging
from logging.handlers import SysLogHandler
import ConfigParser
from esxsnmp.error import ConfigError
def get_config_path():
if os.environ.has_key('ESXSNMP_CONF'):
conf = os.environ['ESXSNMP_CONF']
else:
conf = './esxsnmp.conf'
return conf
def get_config(config_file, opts):
if not os.path.exists(config_file):
raise ConfigError("config file not found: %s" % config_file)
try:
conf = ESxSNMPConfig(config_file)
except ConfigParser.Error, e:
raise ConfigError("unable to parse config: %s" % e)
# the command line overrides the config file
if opts.pid_dir:
conf.pid_dir = opts.pid_dir
return conf
def get_opt_parser(default_config_file=None, default_pid_dir=None):
oparse = optparse.OptionParser()
oparse.add_option("-d", "--debug", dest="debug", action="store_true",
default=False)
oparse.add_option("-f", "--config-file", dest="config_file",
default=default_config_file)
oparse.add_option("-p", "--pid-dir", dest="pid_dir",
default=default_pid_dir)
return oparse
class ESxSNMPConfig(object):
def __init__(self, file):
self.file = file
self.db_uri = None
self.error_email_from = None
self.error_email_subject = None
self.error_email_to = None
self.esdb_uri = None
self.espersistd_uri = None
self.espoll_persist_uri = None
self.htpasswd_file = None
self.mib_dirs = []
self.mibs = []
self.pid_dir = None
self.poll_retries = 5
self.poll_timeout = 2
self.polling_tag = None
self.reload_interval = 1*10
self.rrd_path = None
self.send_error_email = False
self.streaming_log_dir = None
self.syslog_facility = None
self.syslog_priority = None
self.traceback_dir = None
self.tsdb_chunk_prefixes = None
self.tsdb_root = None
self.agg_tsdb_root = None
self.read_config()
self.convert_types()
# XXX(jdugan): validate_config is too restrictive needs to be fixed
# self.validate_config()
def read_config(self):
""" read in config from INI-style file, requiring section header 'main'"""
cfg = ConfigParser.ConfigParser()
cfg.read(self.file)
config_items = map(lambda x: x[0], cfg.items("main"))
for opt in (
'agg_tsdb_root',
'db_uri',
'error_email_from',
'error_email_subject',
'error_email_to',
'esdb_uri',
'espersistd_uri',
'espoll_persist_uri',
'htpasswd_file',
'mib_dirs',
'mibs',
'pid_dir',
'poll_retries',
'poll_timeout',
'polling_tag',
'reload_interval',
'rrd_path',
'streaming_log_dir',
'syslog_facility',
'syslog_priority',
'traceback_dir',
'tsdb_chunk_prefixes',
'tsdb_root',
):
if opt in config_items:
setattr(self, opt, cfg.get("main", opt))
self.persist_map = {}
for key, val in cfg.items("persist_map"):
self.persist_map[key] = val.replace(" ", "").split(",")
self.persist_queues = {}
for key, val in cfg.items("persist_queues"):
self.persist_queues[key] = val.split(':', 1)
self.persist_queues[key][1] = int(self.persist_queues[key][1])
if self.espoll_persist_uri:
self.espoll_persist_uri = \
self.espoll_persist_uri.replace(' ', '').split(',')
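    # Illustrative sketch (not from the original source): a minimal config file
    # in the shape read_config() expects. Section and option names follow the
    # code above; the concrete values and queue/poller names are made up.
    #
    #   [main]
    #   db_uri = postgresql://user:pass@localhost/esxsnmp
    #   tsdb_root = /var/lib/esxsnmp/tsdb
    #   mib_dirs = /usr/share/mibs/ietf, /usr/share/mibs/site
    #
    #   [persist_map]
    #   FastPollHC = tsdb, aggregate
    #
    #   [persist_queues]
    #   tsdb = TSDBPollPersister:4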
def convert_types(self):
"""update_types -- convert input from config file to appropriate types"""
if self.mib_dirs:
self.mib_dirs = map(str.strip, self.mib_dirs.split(','))
if self.mibs:
self.mibs = map(str.strip, self.mibs.split(','))
if self.poll_timeout:
self.poll_timeout = int(self.poll_timeout)
if self.poll_retries:
self.poll_retries = int(self.poll_retries)
if self.reload_interval:
self.reload_interval = int(self.reload_interval)
if self.error_email_to is not None \
and self.error_email_subject is not None \
and self.error_email_from is not None:
self.send_error_email = True
if self.syslog_facility is not None:
if not SysLogHandler.facility_names.has_key(self.syslog_facility):
raise ConfigError("invalid config: %s syslog facility is unknown" % self.syslog_facility)
self.syslog_facility = SysLogHandler.facility_names[self.syslog_facility]
if self.syslog_priority is None:
            self.syslog_priority = logging.INFO
else:
if not SysLogHandler.priority_names.has_key(self.syslog_priority):
raise ConfigError("invaild config: unknown syslog_priority %s" %
self.syslog_priority)
self.syslog_priority = SysLogHandler.priority_names[self.syslog_priority]
def validate_config(self):
for attr in ('tsdb_root', 'db_uri'):
            if getattr(self, attr) is None:
                raise ConfigError("invalid config: %s: %s must be specified" %
                                  (self.file, attr))
if not os.path.isdir(self.tsdb_root):
raise ConfigError("invalid config: tsdb_root does not exist: %s" % self.tsdb_root)
if not os.access(self.tsdb_root, os.W_OK):
raise ConfigError("invalid config: tsdb_root %s is not writable" % self.tsdb_root)
if self.tsdb_chunk_prefixes:
self.tsdb_chunk_prefixes = map(str.strip,
self.tsdb_chunk_prefixes.split(','))
for cdir in self.tsdb_chunk_prefixes:
if not os.path.isdir(cdir):
raise ConfigError("invalid config: tsdb_chunk_prefixes doesn't exist: %s" % cdir)
if not os.access(cdir, os.W_OK):
raise ConfigError("invalid config: tsdb_chunk_prefixes %s not writable" % cdir)
if self.traceback_dir is not None:
if not os.path.isdir(self.traceback_dir):
raise ConfigError("invalid config: traceback_dir %s does not exist" % self.traceback_dir)
if not os.access(self.traceback_dir, os.W_OK):
raise ConfigError("invalid config: traceback_dir %s is not writable" % self.traceback_dir)
if self.syslog_facility is not None:
if not SysLogHandler.facility_names.has_key(self.syslog_facility):
raise ConfigError("invalid config: %s syslog facility is unknown" % self.syslog_facility)
self.syslog_facility = SysLogHandler.facility_names[self.syslog_facility]
if self.syslog_priority is None:
            self.syslog_priority = logging.INFO
else:
if not SysLogHandler.priority_names.has_key(self.syslog_priority):
raise ConfigError("invaild config: unknown syslog_priority %s" %
self.syslog_priority)
self.syslog_priority = SysLogHandler.priority_names[self.syslog_priority]
errors = []
for oidset, queues in self.persist_map.iteritems():
for queue in queues:
if not self.persist_queues.has_key(queue):
errors.append("%s for %s" % (queue, oidset))
if errors:
raise ConfigError("unknown persist queue(s): %s" \
% ", ".join(errors))
|
py | 1a48b0c10d1924b1de5b25109bf88a33230656a2 | import csv
import ctypes
import datetime
from easydict import EasyDict
import logging
import multiprocessing as mp
import os
import random
import sys
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import yaml
import GPUtil
import numpy as np
import psutil
import torch as th
import torch
import torch.backends.cudnn as cudnn
from torch import cuda
EVALKEYS = ["r1", "r5", "r10", "r50", "medr", "meanr", "sum"]
EVALHEADER = "Retriev | R@1 | R@5 | R@10 | R@50 | MeanR | MedR | Sum"
def create_dataloader_path(data_root,
shot_per_group,
dataset_name,
text_feature_name='default',
feature_name_modality_a='action',
feature_name_modality_b='flow', pickle_path=None):
"""create the path to meta file and features
#last modality will be modality_b
Args:
data_root ([PATH]): [Path to the data folder]
shot_per_group ([Int]): [number of shots (clips) per group (video)]
Returns:
[Dict]: [path to meta data and video/language features]
"""
meta_data_path = {}
video_feat_path = {}
if pickle_path is not None:
pickle_path = Path(pickle_path)
else:
pickle_path = ""
for mod_name in feature_name_modality_a:
meta_data_path[mod_name] = Path(
os.path.join(data_root, "meta",
"meta_group{}_{}.json".format(shot_per_group, mod_name)))
video_feat_path[mod_name] = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"video_features", "{}.h5".format(mod_name)))
#If modality B is "text" then we already have it in language feats
if feature_name_modality_b != "text":
meta_data_path[feature_name_modality_b] = Path(
os.path.join(data_root, "meta",
"meta_group{}_{}.json".format(shot_per_group, feature_name_modality_b)))
video_feat_path[feature_name_modality_b] = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"video_features", "{}.h5".format(feature_name_modality_b)))
language_feat_path = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"language_features",
"text_{}.h5".format(text_feature_name)))
meta_text_len_path = Path(
os.path.join(data_root, "group{}".format(shot_per_group),
"language_features",
"text_lens_{}.json".format(text_feature_name)))
return {
"meta_data": meta_data_path,
"video_feats": video_feat_path,
"language_feats": language_feat_path,
"meta_text_len": meta_text_len_path,
"dataset_name": dataset_name,
"pickle_path": pickle_path
}
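# Hedged usage sketch (not part of the module): the argument values below are
# hypothetical and only illustrate the expected call shape.
#
#   paths = create_dataloader_path(
#       data_root="data/my_dataset",
#       shot_per_group=5,
#       dataset_name="my_dataset",
#       feature_name_modality_a=["action"],
#       feature_name_modality_b="text",
#   )
#   # paths["video_feats"]["action"] is then the Path
#   # data/my_dataset/group5/video_features/action.h5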
def get_csv_header_keys(compute_clip_retrieval):
metric_keys = ["ep", "time"]
prefixes = ["v", "p"]
if compute_clip_retrieval:
prefixes += ["c", "s"]
for prefix in prefixes:
for key in EVALKEYS:
metric_keys.append(f"{prefix}-{key}")
return metric_keys
def print_csv_results(csv_file: str, cfg: EasyDict, print_fn=print):
metric_keys = get_csv_header_keys(True)
with Path(csv_file).open("rt", encoding="utf8") as fh:
reader = csv.DictReader(fh, metric_keys)
line_data = [line for line in reader][1:]
for line in line_data:
for key, val in line.items():
line[key] = float(val)
if cfg.val.det_best_field == "val_score_at_1":
relevant_field = [line["v-r1"] + line["p-r1"] for line in line_data]
elif cfg.val.det_best_field == "val_clip_score_at_1":
relevant_field = [line["c-r1"] + line["s-r1"] for line in line_data]
else:
raise NotImplementedError
best_epoch = np.argmax(relevant_field)
def get_res(search_key):
results = {}
for key_, val_ in line_data[best_epoch].items():
if key_[:2] == f"{search_key}-":
results[key_[2:]] = float(val_)
return results
print_fn(f"Total epochs {len(line_data)}. "
f"Results from best epoch {best_epoch}:")
print_fn(EVALHEADER)
print_fn(retrieval_results_to_str(get_res("p"), "Par2Vid"))
print_fn(retrieval_results_to_str(get_res("v"), "Vid2Par"))
print_fn(retrieval_results_to_str(get_res("s"), "Sen2Cli"))
print_fn(retrieval_results_to_str(get_res("c"), "Cli2Sen"))
def expand_segment(num_frames, num_target_frames, start_frame, stop_frame):
num_frames_seg = stop_frame - start_frame + 1
changes = False
if num_target_frames > num_frames:
num_target_frames = num_frames
if num_frames_seg < num_target_frames:
while True:
if start_frame > 0:
start_frame -= 1
num_frames_seg += 1
changes = True
if num_frames_seg == num_target_frames:
break
if stop_frame < num_frames - 1:
stop_frame += 1
num_frames_seg += 1
changes = True
if num_frames_seg == num_target_frames:
break
return start_frame, stop_frame, changes
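# Worked example (comment only): expand_segment(10, 5, 4, 6) starts from the
# 3-frame segment [4, 6] and alternately moves the boundaries outwards until
# 5 frames are covered, returning (3, 7, True).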
def set_seed(seed: int, set_deterministic: bool = True):
"""
Set all relevant seeds for torch, numpy and python
Args:
seed: int seed
set_deterministic: Guarantee deterministic training, possibly at the cost of performance.
"""
torch.manual_seed(seed)
cuda.manual_seed(seed)
cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
if set_deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
elif cudnn.benchmark or not cudnn.deterministic:
print("WARNING: Despite fixed seed {}, training may not be deterministic with {} "
"(must be False for deterministic training) and {} (must be True for deterministic "
"training)".format(seed, cudnn.benchmark, cudnn.deterministic))
def load_config(file: Union[str, Path]) -> EasyDict:
with Path(file).open("rt", encoding="utf8") as fh:
config = yaml.load(fh, Loader=yaml.Loader)
cfg = EasyDict(config)
# model symmetry
for check_network in ["text_pooler", "text_sequencer"]:
if getattr(cfg, check_network).name == "same":
setattr(cfg, check_network,
getattr(cfg,
getattr(cfg, check_network).same_as))
return cfg
def dump_config(cfg: EasyDict, file: Union[str, Path]) -> None:
with Path(file).open("wt", encoding="utf8") as fh:
yaml.dump(cfg, fh, Dumper=yaml.Dumper)
def print_config(cfg: EasyDict, level=0) -> None:
for key, val in cfg.items():
if isinstance(val, EasyDict):
print(" " * level, str(key), sep="")
print_config(val, level=level + 1)
else:
print(" " * level, f"{key} - f{val} ({type(val)})", sep="")
def make_shared_array(np_array: np.ndarray) -> mp.Array:
flat_shape = int(np.prod(np_array.shape))
shared_array_base = mp.Array(ctypes.c_float, flat_shape)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
shared_array = shared_array.reshape(np_array.shape)
shared_array[:] = np_array[:]
return shared_array
def compute_indices(num_frames_orig: int, num_frames_target: int,
is_train: bool):
def round_half_down(array: np.ndarray) -> np.ndarray:
return np.ceil(array - 0.5)
if is_train:
# random sampling during training
start_points = np.linspace(0,
num_frames_orig,
num_frames_target,
endpoint=False)
start_points = round_half_down(start_points).astype(int)
offsets = start_points[1:] - start_points[:-1]
np.random.shuffle(offsets)
last_offset = num_frames_orig - np.sum(offsets)
offsets = np.concatenate([offsets, np.array([last_offset])])
new_start_points = np.cumsum(offsets) - offsets[0]
offsets = np.roll(offsets, -1)
random_offsets = offsets * np.random.rand(num_frames_target)
indices = new_start_points + random_offsets
indices = np.floor(indices).astype(int)
return indices
# center sampling during validation
start_points = np.linspace(0,
num_frames_orig,
num_frames_target,
endpoint=False)
offset = num_frames_orig / num_frames_target / 2
indices = start_points + offset
indices = np.floor(indices).astype(int)
return indices
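# Worked example (comment only): compute_indices(10, 4, is_train=False) uses
# start points [0, 2.5, 5, 7.5] and a half-stride offset of 1.25, giving the
# centered frame indices [1, 3, 6, 8].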
def truncated_normal_fill(shape: Tuple[int],
mean: float = 0,
std: float = 1,
limit: float = 2) -> torch.Tensor:
num_examples = 8
tmp = torch.empty(shape + (num_examples, )).normal_()
valid = (tmp < limit) & (tmp > -limit)
_, ind = valid.max(-1, keepdim=True)
return tmp.gather(-1, ind).squeeze(-1).mul_(std).add_(mean)
def retrieval_results_to_str(results: Dict[str, float], name: str):
return ("{:7s} | {:.3f} | {:.3f} | {:.3f} | {:.3f} | {:5.1f} | "
"{:5.1f} | {:6.3f}").format(name, *[results[a] for a in EVALKEYS])
# def compute_retr_vid_to_par(video_feat, cap_feat):
# similarity_scores = np.dot(video_feat, cap_feat.T)
# return compute_retrieval_metrics(similarity_scores)
def compute_retr_vid_to_par(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(video_feat, cap_feat.T)
return compute_retrieval_cosine(d, num_points)
def compute_retr_vid_to_par_softneighbor(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(video_feat, cap_feat.T)
return compute_retrieval_softneighbor(d, num_points)
def compute_retr_par_to_vid_softneighbor(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(cap_feat, video_feat.T)
return compute_retrieval_softneighbor(d, num_points)
def compute_retr_par_to_vid(video_feat, cap_feat):
num_points = video_feat.shape[0]
d = np.dot(cap_feat, video_feat.T)
return compute_retrieval_cosine(d, num_points)
# def compute_retr_par_to_vid(video_feat, cap_feat):
# similarity_scores = np.dot(cap_feat, video_feat.T)
# return compute_retrieval_metrics(similarity_scores)
def compute_retrieval_coarse_to_fine(coarse_ind, x_feat, y_feat):
len_dot_product = x_feat.shape[0]
dot_product = np.dot(x_feat, y_feat.T)
ranks = np.zeros(len_dot_product)
top1 = np.zeros(len_dot_product)
ind_coarse_to_fine = []
sum_corr = 0
group_k = 10
for index in range(len_dot_product):
ind_coarse = index // group_k
ind_fine = index - ind_coarse * group_k
ind_h = coarse_ind[ind_coarse]
if ind_h == ind_coarse:
# print("correct")
sum_corr += 1
inds = np.argsort(dot_product[index, ind_coarse * group_k : (ind_coarse + 1) * group_k])[::-1]
# print(inds, ind_fine)
where = np.where(inds == ind_fine)
rank = where[0][0]
else:
rank = 11
inds = [0]
ranks[index] = rank
#print(inds[0])
top1[index] = inds[0]
#print(sum_corr / len(ranks))
# print(ranks)
r1 = len(np.where(ranks < 1)[0]) / len(ranks)
r5 = len(np.where(ranks < 5)[0]) / len(ranks)
r10 = len(np.where(ranks < 10)[0]) / len(ranks)
r50 = len(np.where(ranks < 50)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
report_dict = dict()
report_dict['r1'] = r1
report_dict['r5'] = r5
report_dict['r10'] = r10
report_dict['r50'] = r50
report_dict['medr'] = medr
report_dict['meanr'] = meanr
report_dict['sum'] = r1 + r5 + r50
return report_dict, top1
def compute_retrieval_softneighbor(dot_product, len_dot_product):
ranks = np.zeros(len_dot_product)
top1 = np.zeros(len_dot_product)
sn_margin = 5 #neighborhood margin
for index in range(len_dot_product):
inds = np.argsort(dot_product[index])[::-1]
sn_inds = []
for i_sn in range(-sn_margin, sn_margin + 1):
idx_sn = min(len_dot_product - 1, max(0, (index + i_sn)))
where = np.where(inds == idx_sn)
#print(i_sn, idx_sn)
#print(index, i_sn, idx_sn, where)
sn_inds.append(where[0][0])
rank = sn_inds[np.argsort(sn_inds)[0]]
#print(sn_inds, rank)
#print("=="*20)
ranks[index] = rank
top1[index] = inds[0]
#print(sum(ranks < 0))
r1 = len(np.where(ranks < 1)[0]) / len(ranks)
r5 = len(np.where(ranks < 5)[0]) / len(ranks)
r10 = len(np.where(ranks < 10)[0]) / len(ranks)
r50 = len(np.where(ranks < 50)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
report_dict = dict()
report_dict['r1'] = r1
report_dict['r5'] = r5
report_dict['r10'] = r10
report_dict['r50'] = r50
report_dict['medr'] = medr
report_dict['meanr'] = meanr
report_dict['sum'] = r1 + r5 + r50
#print("R1 {}, R5 {}, R10 {}".format(r1, r5, r10))
return report_dict, ranks
def compute_retrieval_cosine(dot_product, len_dot_product):
ranks = np.zeros(len_dot_product)
top1 = np.zeros(len_dot_product)
ind_coarse_to_fine = []
for index in range(len_dot_product):
inds = np.argsort(dot_product[index])[::-1]
inds_org = np.argmax(dot_product[index])
where = np.where(inds == index)
ind_coarse_to_fine.append(inds_org)
rank = where[0][0]
ranks[index] = rank
top1[index] = inds[0]
r1 = len(np.where(ranks < 1)[0]) / len(ranks)
r5 = len(np.where(ranks < 5)[0]) / len(ranks)
r10 = len(np.where(ranks < 10)[0]) / len(ranks)
r50 = len(np.where(ranks < 50)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
report_dict = dict()
report_dict['r1'] = r1
report_dict['r5'] = r5
report_dict['r10'] = r10
report_dict['r50'] = r50
report_dict['medr'] = medr
report_dict['meanr'] = meanr
report_dict['sum'] = r1 + r5 + r50
return report_dict, top1, ind_coarse_to_fine
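# Small sanity example (comment only): if dot_product is a square matrix whose
# diagonal entry is the largest value in every row, each query ranks its own
# match first, so r1 == r5 == r10 == r50 == 1.0 and medr == meanr == 1.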
def compute_retrieval_metrics(dot_product):
sort_similarity = np.sort(-dot_product, axis=1)
diag_similarity = np.diag(-dot_product)
diag_similarity = diag_similarity[:, np.newaxis]
ranks = sort_similarity - diag_similarity
ranks = np.where(ranks == 0)
ranks = ranks[1]
report_dict = dict()
report_dict['r1'] = float(np.sum(ranks == 0)) / len(ranks)
report_dict['r5'] = float(np.sum(ranks < 5)) / len(ranks)
report_dict['r10'] = float(np.sum(ranks < 10)) / len(ranks)
report_dict['r50'] = float(np.sum(ranks < 50)) / len(ranks)
report_dict['medr'] = np.median(ranks) + 1
report_dict['meanr'] = ranks.mean()
report_dict[
'sum'] = report_dict['r1'] + report_dict['r5'] + report_dict['r50']
return report_dict, ranks
def get_logging_formatter():
return logging.Formatter("%(asctime)s %(levelname)s %(message)s",
datefmt="%m%d %H%M%S")
def get_timestamp_for_filename():
ts = str(datetime.datetime.now()).split(".")[0].replace(" ", "_")
ts = ts.replace(":", "_").replace("-", "_")
return ts
def get_logger_without_file(name, log_level="INFO") -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(log_level)
strm_hdlr = logging.StreamHandler(sys.stdout)
strm_hdlr.setFormatter(get_logging_formatter())
logger.addHandler(strm_hdlr)
return logger
def get_logger(logdir, name, filename="run", log_level="INFO",
log_file=True) -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(log_level)
formatter = get_logging_formatter()
if log_file:
file_path = Path(logdir) / "{}_{}.log".format(
filename,
str(datetime.datetime.now()).split(".")[0].replace(
" ", "_").replace(":", "_").replace("-", "_"))
file_hdlr = logging.FileHandler(str(file_path))
file_hdlr.setFormatter(formatter)
logger.addHandler(file_hdlr)
strm_hdlr = logging.StreamHandler(sys.stdout)
strm_hdlr.setFormatter(formatter)
logger.addHandler(strm_hdlr)
logger.propagate = False
return logger
def close_logger(logger: logging.Logger):
x = list(logger.handlers)
for i in x:
logger.removeHandler(i)
i.flush()
i.close()
# ---------- Profiling ----------
def profile_gpu_and_ram() -> Tuple[List[str], List[float], List[float], List[float], float, float, float]:
"""
Profile GPU and RAM.
Returns:
GPU names, total / used memory per GPU, load per GPU, total / used / available RAM.
"""
# get info from gputil
_str, dct_ = _get_gputil_info()
dev_num = os.getenv("CUDA_VISIBLE_DEVICES")
if dev_num is not None:
# single GPU set with OS flag
gpu_info = [dct_[int(dev_num)]]
else:
# possibly multiple gpus, aggregate values
gpu_info = []
for dev_dict in dct_:
gpu_info.append(dev_dict)
# convert to GPU info and MB to GB
gpu_names: List[str] = [gpu["name"] for gpu in gpu_info]
total_memory_per: List[float] = [gpu["memoryTotal"] / 1024 for gpu in gpu_info]
used_memory_per: List[float] = [gpu["memoryUsed"] / 1024 for gpu in gpu_info]
load_per: List[float] = [gpu["load"] / 100 for gpu in gpu_info]
# get RAM info and convert to GB
mem = psutil.virtual_memory()
ram_total: float = mem.total / 1024 ** 3
ram_used: float = mem.used / 1024 ** 3
ram_avail: float = mem.available / 1024 ** 3
return gpu_names, total_memory_per, used_memory_per, load_per, ram_total, ram_used, ram_avail
def _get_gputil_info():
"""
Returns info string for printing and list with gpu infos. Better formatting than the original GPUtil.
Returns:
gpu info string, List[Dict()] of values. dict example:
('id', 1),
('name', 'GeForce GTX TITAN X'),
('temperature', 41.0),
('load', 0.0),
('memoryUtil', 0.10645266950540452),
('memoryTotal', 12212.0)])]
"""
gpus = GPUtil.getGPUs()
attr_list = [
{'attr': 'id', 'name': 'ID'}, {'attr': 'name', 'name': 'Name'},
{'attr': 'temperature', 'name': 'Temp', 'suffix': 'C', 'transform': lambda x: x, 'precision': 0},
{'attr': 'load', 'name': 'GPU util.', 'suffix': '% GPU', 'transform': lambda x: x * 100,
'precision': 1},
{'attr': 'memoryUtil', 'name': 'Memory util.', 'suffix': '% MEM', 'transform': lambda x: x * 100,
'precision': 1}, {'attr': 'memoryTotal', 'name': 'Memory total', 'suffix': 'MB', 'precision': 0},
{'attr': 'memoryUsed', 'name': 'Memory used', 'suffix': 'MB', 'precision': 0}
]
gpu_strings = [''] * len(gpus)
gpu_info = []
for _ in range(len(gpus)):
gpu_info.append({})
for attrDict in attr_list:
attr_precision = '.' + str(attrDict['precision']) if (
'precision' in attrDict.keys()) else ''
attr_suffix = str(attrDict['suffix']) if (
'suffix' in attrDict.keys()) else ''
attr_transform = attrDict['transform'] if (
'transform' in attrDict.keys()) else lambda x: x
for gpu in gpus:
attr = getattr(gpu, attrDict['attr'])
attr = attr_transform(attr)
if isinstance(attr, float):
attr_str = ('{0:' + attr_precision + 'f}').format(attr)
elif isinstance(attr, int):
attr_str = '{0:d}'.format(attr)
elif isinstance(attr, str):
attr_str = attr
else:
raise TypeError('Unhandled object type (' + str(
type(attr)) + ') for attribute \'' + attrDict[
'name'] + '\'')
attr_str += attr_suffix
for gpuIdx, gpu in enumerate(gpus):
attr_name = attrDict['attr']
attr = getattr(gpu, attr_name)
attr = attr_transform(attr)
if isinstance(attr, float):
attr_str = ('{0:' + attr_precision + 'f}').format(attr)
elif isinstance(attr, int):
attr_str = ('{0:' + 'd}').format(attr)
elif isinstance(attr, str):
attr_str = ('{0:' + 's}').format(attr)
else:
raise TypeError(
'Unhandled object type (' + str(
type(attr)) + ') for attribute \'' + attrDict[
'name'] + '\'')
attr_str += attr_suffix
gpu_info[gpuIdx][attr_name] = attr
gpu_strings[gpuIdx] += '| ' + attr_str + ' '
return "\n".join(gpu_strings), gpu_info
|
py | 1a48b11c445a4789676954631b47dbfc50455a1d | """Top-level package for Keithley DAQ6510 instrument sample library."""
__author__ = """Carsten Rösnick-Neugebauer"""
__email__ = '[email protected]'
__version__ = '0.0.1'
import logging
logging.getLogger('keithley_daq6510').addHandler(logging.NullHandler())
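# The NullHandler above keeps the library silent unless the host application
# configures logging itself. A minimal sketch of how an application might
# enable the library's log output (standard library logging only):
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   logging.getLogger('keithley_daq6510').setLevel(logging.DEBUG)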
|
py | 1a48b173b8be5e48b68845aa9b3bb7440e289cb7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SystemParam import SystemParam
class AnttechBlockchainDefinFinanceOrderSubmitModel(object):
def __init__(self):
self._encoded_biz_param = None
self._sys_param = None
@property
def encoded_biz_param(self):
return self._encoded_biz_param
@encoded_biz_param.setter
def encoded_biz_param(self, value):
self._encoded_biz_param = value
@property
def sys_param(self):
return self._sys_param
@sys_param.setter
def sys_param(self, value):
if isinstance(value, SystemParam):
self._sys_param = value
else:
self._sys_param = SystemParam.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.encoded_biz_param:
if hasattr(self.encoded_biz_param, 'to_alipay_dict'):
params['encoded_biz_param'] = self.encoded_biz_param.to_alipay_dict()
else:
params['encoded_biz_param'] = self.encoded_biz_param
if self.sys_param:
if hasattr(self.sys_param, 'to_alipay_dict'):
params['sys_param'] = self.sys_param.to_alipay_dict()
else:
params['sys_param'] = self.sys_param
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechBlockchainDefinFinanceOrderSubmitModel()
if 'encoded_biz_param' in d:
o.encoded_biz_param = d['encoded_biz_param']
if 'sys_param' in d:
o.sys_param = d['sys_param']
return o
|
py | 1a48b21d5816d76a709299e4b81f47d6146a9d89 | from cvmodule.Module import csp
|
py | 1a48b24bbed487ac816fa8a0ce4856bd67c0a351 | from dataclasses import dataclass
from enum import IntEnum
from typing import Any, Optional
from staicoin.protocols.protocol_message_types import ProtocolMessageTypes
from staicoin.util.ints import uint8, uint16
from staicoin.util.streamable import Streamable, streamable
class NodeType(IntEnum):
FULL_NODE = 1
HARVESTER = 2
FARMER = 3
TIMELORD = 4
INTRODUCER = 5
WALLET = 6
class Delivery(IntEnum):
# A message is sent to the same peer that we received a message from
RESPOND = 1
# A message is sent to all peers
BROADCAST = 2
# A message is sent to all peers except the one from which we received the API call
BROADCAST_TO_OTHERS = 3
# A message is sent to a random peer
RANDOM = 4
# Pseudo-message to close the current connection
CLOSE = 5
    # A message is sent to a specific peer
SPECIFIC = 6
@dataclass(frozen=True)
@streamable
class Message(Streamable):
type: uint8 # one of ProtocolMessageTypes
# message id
id: Optional[uint16]
# Message data for that type
data: bytes
def make_msg(msg_type: ProtocolMessageTypes, data: Any) -> Message:
return Message(uint8(msg_type.value), None, bytes(data))
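# Hedged usage sketch: make_msg wraps the serialized payload together with the
# message type and leaves the message id unset. The ProtocolMessageTypes member
# and payload name below are assumptions for illustration only.
#
#   msg = make_msg(ProtocolMessageTypes.handshake, some_streamable_payload)
#   assert msg.id is None
#   assert msg.type == uint8(ProtocolMessageTypes.handshake.value)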
|
py | 1a48b2c7eb5cec00e126c3dc59da2f631f1cfa40 | #!/usr/bin/env python
from __future__ import print_function
import itertools
from sympy import symbols, simplify_logic
TMPL_ADD = '''def {name}({args}):
if {body}
else:
raise Exception('{name}: Unhandled case "{{}}"'.format({args}))'''
def _sym_to_py(expr):
try:
if expr.is_Symbol:
return expr.name
elif expr.is_Function:
name = str(expr.func)
if name == 'And':
return '(' + ' & '.join(sym_to_py(a) for a in expr.args) + ')'
elif name == 'Xor':
return '(' + ' ^ '.join(sym_to_py(a) for a in expr.args) + ')'
elif name == 'Or':
return '(' + ' | '.join(sym_to_py(a) for a in expr.args) + ')'
elif name == 'Not':
assert len(expr.args) == 1
return '(~{})'.format(sym_to_py(expr.args[0]))
else:
raise Exception('Operator "{}" missing'.format(name))
else:
return str(bool(expr))
except Exception as e:
print(e)
import IPython; IPython.embed()
def sym_to_py(expr):
expr_simp = simplify_logic(expr)
# Stupid heuristics
if expr.count_ops() > expr_simp.count_ops():
expr = expr_simp
return _sym_to_py(expr).replace('True', '_one').replace('False', '_zero')
def bool_to_cond(b):
if b is None:
return 'X'
elif b:
return 'I'
else:
return 'O'
TMPL_COND = '''{cond}:
return {stmts}'''
def mk_funk(funk, vars, exprs_tmpls):
nn = ['ss[{}]'.format(i) for i in range(vars)]
ii = ['b{}'.format(i) for i in range(vars)]
ss = symbols(' '.join(ii))
conds = []
body = []
exprs = []
for e in exprs_tmpls:
exprs.append(eval(e.format(*nn)))
    for vv in itertools.product((False, True, None), repeat=vars):
s = dict((si, vi if vi is not None else si) for si, vi in zip(ss, vv))
cond = ' and '.join('{}.{}'.format(n, bool_to_cond(v)) for n, v in zip(ii, vv))
conds.append(cond)
body.append(tuple(sym_to_py(e.subs(s)) for e in exprs))
stmts = [TMPL_COND.format(cond=cond, stmts=', '.join(stmts)) for cond, stmts in zip(conds, body)]
stmts = '\n elif '.join(stmts)
return TMPL_ADD.format(
name=funk,
args=', '.join(ii),
body=stmts,
)
def main():
print('Generating the functions...')
defs = (
('_bit_add', 3, ['{0} ^ {1} ^ {2}', '({0} & {1}) | ({2} & ({0} ^ {1}))']),
)
funks = []
for d in defs:
print('[+] Making "{}"'.format(d[0]))
funks.append(mk_funk(*d))
src = 'from ._tbits import _zero, _one\n\n\n' + '\n\n'.join(funks) + '\n'
print('Writing to file...')
with open('./tbits/_gen_tables.py', 'w') as f:
f.write(src)
print('End of this giant hack :D')
if __name__ == '__main__':
main()
|
py | 1a48b3831be33bd72c42f894c9377b80078f75a0 | # -*- coding: utf-8 -*-
import argparse
import requests
import json
from __init__ import get_connection
import pprint
pp = pprint.PrettyPrinter(indent=4)
def content_item_cli():
# Parse options
commands_description = """%(prog)s"""
parser = argparse.ArgumentParser(
usage="%(prog)s get|save|mv [options] slug",
description=commands_description)
parser.add_argument("command", choices=('get', 'save', 'mv'),
help="Action to take")
parser.add_argument("slug",
help="Slug of the content item to work with.")
parser.add_argument("-F", "--from-file", dest="body_file",
type=argparse.FileType('r'), default='-',
help="Load the body from a file")
# Save
parser.add_argument("-c", "--type-code", dest="type", default="blurb",
help="Set the content item type")
parser.add_argument("-t", "--title", dest="title",
help="Set the content item title")
parser.add_argument("-s", "--state", dest="state",
choices=('live', 'working', 'archived',
'pending', 'junk'),
help="Set the content item state")
# Move
parser.add_argument("--new-slug", dest="new_slug",
help="Change the slug")
# Get
parser.add_argument("-f", "--field-name", dest="field_name",
help="Field to output")
args = parser.parse_args()
slug = args.slug
content_item = dict()
p2p = get_connection()
if args.command == 'mv' and args.new_slug:
content_item['slug'] = args.new_slug
try:
p2p.update_content_item(content_item, slug=slug)
print("Moved '%s' to '%s'" % (slug, content_item['slug']))
except requests.exceptions.HTTPError, e:
print(e.message)
print(e.__dict__['response'].content)
if args.command == "save":
content_item = {
"slug": slug,
"content_item_type_code": args.type,
"body": args.body_file.read(),
}
if args.state:
content_item['content_item_state_code'] = args.state
if args.title:
content_item['title'] = args.title
print("Saving '%s'" % content_item['slug'])
try:
p2p.create_or_update_content_item(content_item)
print("Updated '%s'" % content_item['slug'])
except requests.exceptions.HTTPError, e:
print(e.message)
print(e.__dict__['response'].content)
elif args.command == "get":
try:
data = p2p.get_content_item(slug)
if args.field_name in data:
print(data[args.field_name])
else:
print(json.dumps(data))
except requests.exceptions.HTTPError, e:
print(e.message)
print(e.__dict__['response'].content)
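# Hedged usage sketch: assuming this function is wired up as a console script
# (the entry-point name below is hypothetical), typical invocations look like:
#
#   p2p_content_item get my-story-slug -f title
#   p2p_content_item save my-story-slug -t "New title" -F body.html
#   p2p_content_item mv my-story-slug --new-slug my-new-slug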
|
py | 1a48b39b9ceffbbd4b179bdd0cd210bf2c23168b | # ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""ARM SIMD writeback and elements.
tags[0] tags args
type arm_simd_writeback_t =
| SIMDNoWriteback "n" 1 0
| SIMDBytesTransferred of int "b" 1 1
| SIMDAddressOffsetRegister of arm_reg_t "r" 2 0
type arm_simd_list_element_t =
| SIMDReg of arm_extension_register_t "r" 1 1
| SIMDRegElement of "e" 1 1
arm_extension_register_element_t
| SIMDRegRepElement of "re" 1 1
arm_extension_register_replicated_element_t
"""
from typing import List, TYPE_CHECKING
from chb.arm.ARMDictionaryRecord import ARMDictionaryRecord, armregistry
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.app.ARMExtensionRegister import (
ARMExtensionRegister,
ARMExtensionRegisterElement,
ARMExtensionRegisterReplicatedElement)
from chb.app.BDictionary import BDictionary
from chb.arm.ARMDictionary import ARMDictionary
class ARMSIMDWriteback(ARMDictionaryRecord):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMDictionaryRecord.__init__(self, d, ixval)
@property
def is_no_writeback(self) -> bool:
return False
@property
def is_bytes_transferred(self) -> bool:
return False
@property
def is_address_offset(self) -> bool:
return False
def __str__(self) -> str:
return "simd_writeback: " + self.tags[0]
@armregistry.register_tag("n", ARMSIMDWriteback)
class ARMSIMDNoWriteback(ARMSIMDWriteback):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMSIMDWriteback.__init__(self, d, ixval)
@property
def is_no_writeback(self) -> bool:
return True
def __str__(self) -> str:
return ""
@armregistry.register_tag("b", ARMSIMDWriteback)
class ARMSIMDBytesTransferred(ARMSIMDWriteback):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMSIMDWriteback.__init__(self, d, ixval)
@property
def is_bytes_transferred(self) -> bool:
return True
def __str__(self) -> str:
return ""
@armregistry.register_tag("r", ARMSIMDWriteback)
class ARMSIMDAddressOffsetRegister(ARMSIMDWriteback):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMSIMDWriteback.__init__(self, d, ixval)
@property
def offsetregister(self) -> str:
return self.tags[1]
@property
def is_address_offset(self) -> bool:
return True
def __str__(self) -> str:
return self.offsetregister
class ARMSIMDListElement(ARMDictionaryRecord):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMDictionaryRecord.__init__(self, d, ixval)
def __str__(self) -> str:
return "simd-list-element: " + self.tags[0]
@armregistry.register_tag("r", ARMSIMDListElement)
class ARMSIMDReg(ARMSIMDListElement):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMSIMDListElement.__init__(self, d, ixval)
@property
def xregister(self) -> "ARMExtensionRegister":
return self.bd.arm_extension_register(self.args[0])
def __str__(self) -> str:
return str(self.xregister)
@armregistry.register_tag("e", ARMSIMDListElement)
class ARMSIMDRegElement(ARMSIMDListElement):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMSIMDListElement.__init__(self, d, ixval)
@property
def xregelement(self) -> "ARMExtensionRegisterElement":
return self.bd.arm_extension_register_element(self.args[0])
def __str__(self) -> str:
return str(self.xregelement)
@armregistry.register_tag("re", ARMSIMDListElement)
class ARMSIMDRegRepElement(ARMSIMDListElement):
def __init__(
self,
d: "ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMSIMDListElement.__init__(self, d, ixval)
@property
def xrepelement(self) -> "ARMExtensionRegisterReplicatedElement":
return self.bd.arm_extension_register_replicated_element(self.args[0])
    def __str__(self) -> str:
return str(self.xrepelement)
|
py | 1a48b59ce12bfdebf6273ada17099aad1548af40 | import psycopg2
from psycopg2.extras import RealDictCursor
__all__ = ['db_store_oauth_secret', 'db_check_twit_signin',
'db_get_oauth_token_secret', 'db_store_access_tokens',
'get_user_twitter_credentials', 'db_store_webhook', 'db_store_twit_sn',
'db_get_twit_sn', 'db_get_webhooks']
pg_connect_info = "dbname=da_db user=da_user password=docker host=db"
def db_get_webhooks(twit_sn):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
pg_cur.execute(\
""" SELECT """ \
""" hook_uid, """ \
""" label, """ \
""" url, """ \
""" twit_target, """ \
""" favorites, """ \
""" posts, """ \
""" media_only, """ \
""" extract(epoch from time_added) AS time_added, """ \
""" extract(epoch from time_queried) AS time_queried """ \
""" FROM webhooks """ \
""" WHERE """ \
""" twit_sn=%s; """ ,
(twit_sn,))
ret = pg_cur.fetchall()
if ret is None:
return None
else:
return ret
except Exception as e:
        print(f'Error retrieving webhooks for user {twit_sn}\n{e}')
return None
finally:
pg_con.close()
def db_store_twit_sn(session_user, twit_sn):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
pg_cur.execute(\
""" INSERT INTO user_status """ \
""" (user_0x, """ \
""" twit_sn) """ \
""" VALUES (%s, %s) """ \
""" ON CONFLICT (user_0x) """ \
""" DO UPDATE """ \
""" SET """ \
""" twit_sn=%s, """ \
""" last_visited=Now(); """ ,
(session_user, twit_sn, twit_sn))
pg_con.commit()
return 'ok'
except Exception as e:
return f'Store username/session failed: {e}'
finally:
pg_con.close()
def db_get_twit_sn(session_user):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
pg_cur.execute(\
""" SELECT twit_sn """ \
""" FROM user_status """ \
""" WHERE """ \
""" user_0x=%s; """ ,
(session_user,))
ret = pg_cur.fetchone()
if ret is None:
return None
else:
return ret[0]
except Exception as e:
        print(f'Error retrieving twit credentials for user {session_user}\n{e}')
return None
finally:
pg_con.close()
def get_user_twitter_credentials(session_user):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
pg_cur.execute(\
""" SELECT """ \
""" access_token, """ \
""" access_token_secret """ \
""" FROM user_keys """ \
""" WHERE """ \
""" user_0x=%s """ \
""" AND access_token IS NOT NULL """ \
""" AND access_token_secret IS NOT NULL """ ,
(session_user,))
ret = pg_cur.fetchone()
if ret is None:
return None, None
else:
return ret[0], ret[1]
except Exception as e:
        print(f'Error retrieving twit credentials for user {session_user}\n{e}')
return None, None
finally:
pg_con.close()
def db_store_webhook( hook_uid, twit_sn, label, webhook_url, twit_target,
favorites, posts, media_only):
query = None
existing_hooks = db_get_webhooks(twit_sn)
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
if existing_hooks is None:
query = \
""" INSERT INTO webhooks """ \
""" (hook_uid, """ \
""" twit_sn, """ \
""" label, """ \
""" url, """ \
""" twit_target, """ \
""" favorites, """ \
""" posts, """ \
""" media_only) """ \
""" VALUES """ \
""" (%s, %s, %s, %s, %s, %s, %s, %s) """ \
""" ON CONFLICT DO NOTHING; """
pg_cur.execute(query,
(hook_uid, twit_sn, label, webhook_url, twit_target,
favorites, posts, media_only))
else:
uid_list = []
for hook in existing_hooks: uid_list.append(hook['hook_uid'])
if hook_uid in uid_list:
# Only replace hook if the conflicting hook belongs to submitting
# user
query = \
""" INSERT INTO webhooks """ \
""" (hook_uid, """ \
""" twit_sn, """ \
""" label, """ \
""" url, """ \
""" twit_target, """ \
""" favorites, """ \
""" posts, """ \
""" media_only) """ \
""" VALUES """ \
""" (%s, %s, %s, %s, %s, %s, %s, %s) """ \
""" ON CONFLICT (hook_uid) """ \
""" DO UPDATE """ \
""" SET twit_sn=%s, """ \
""" label=%s, """ \
""" url=%s, """ \
""" twit_target=%s, """ \
""" favorites=%s, """ \
""" posts=%s, """ \
""" media_only=%s; """
pg_cur.execute(query, (hook_uid, twit_sn, label, webhook_url,
twit_target, favorites, posts, media_only, twit_sn, label,
                webhook_url, twit_target, favorites, posts, media_only))
else:
query = \
""" INSERT INTO webhooks """ \
""" (hook_uid, """ \
""" twit_sn, """ \
""" label, """ \
""" url, """ \
""" twit_target, """ \
""" favorites, """ \
""" posts, """ \
""" media_only) """ \
""" VALUES """ \
""" (%s, %s, %s, %s, %s, %s, %s, %s) """ \
""" ON CONFLICT DO NOTHING; """
pg_cur.execute(query,
(hook_uid, twit_sn, label, webhook_url, twit_target,
favorites, posts, media_only))
pg_con.commit()
return 'ok'
except Exception as e:
return f'Webhook insert failed: {e}'
finally:
pg_con.close()
def db_store_oauth_secret(session_user, oauth_token_secret):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
pg_cur.execute(\
""" INSERT INTO user_keys """ \
""" (user_0x, """ \
""" oauth_token_secret) """ \
""" VALUES (%s, %s) """ \
""" ON CONFLICT (user_0x) """ \
""" DO UPDATE SET oauth_token_secret=%s; """ ,
(session_user, oauth_token_secret, oauth_token_secret))
pg_con.commit()
return 'ok'
except Exception as e:
return f'Pg oauth secret insert failed: {e}'
finally:
pg_con.close()
def db_store_access_tokens(session_user, access_token, access_token_secret):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
pg_cur.execute(\
""" UPDATE user_keys """ \
""" SET """ \
""" access_token=%s, """ \
""" access_token_secret=%s """ \
""" WHERE user_0x=%s; """ ,
(access_token, access_token_secret, session_user))
pg_con.commit()
return 'ok'
except Exception as e:
return f'Pg access token insert failed: {e}'
finally:
pg_con.close()
def db_check_twit_signin(session_user):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
pg_cur.execute(\
""" SELECT COUNT(*) """ \
""" FROM user_keys """ \
""" WHERE """ \
""" user_0x=%s """ \
""" AND access_token IS NOT NULL """ \
""" AND access_token_secret IS NOT NULL """ \
""" LIMIT 1; """ ,
(session_user,))
if pg_cur.fetchone()[0] == 1:
return 'signed in'
else:
return 'not signed in'
except Exception as e:
return f'Pg twit sign-in check failed: {e}'
finally:
pg_con.close()
def db_get_oauth_token_secret(session_user):
try:
pg_con = psycopg2.connect(pg_connect_info)
pg_cur = pg_con.cursor()
pg_cur.execute(\
""" SELECT oauth_token_secret """ \
""" FROM user_keys """ \
""" WHERE user_0x=%s """ ,
(session_user,))
ret = pg_cur.fetchone()[0]
return ret
except Exception as e:
        print(f'Error retrieving oauth token for user {session_user}\n{e}')
return None
finally:
pg_con.close()
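# Minimal usage sketch (hypothetical values; assumes pg_connect_info and the
# webhooks/user_keys tables exist as used above):
#
#     import uuid
#
#     user = 'example_user'
#     db_store_oauth_secret(user, 'oauth-secret-from-request-token-step')
#     db_store_access_tokens(user, 'access-token', 'access-token-secret')
#     print(db_check_twit_signin(user))   # 'signed in' or 'not signed in'
#     db_store_webhook(str(uuid.uuid4()), user, 'my label',
#                      'https://example.com/hook', 'target_screen_name',
#                      True, True, False)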
|
py | 1a48b65cad7b295cbd5b674dfbc35a7bb773651e | import re
from dataclasses import dataclass
from typing import Iterator, Type
@dataclass
class Token:
value: str
def __repr__(self) -> str:
return f'{self.__class__.__name__}("{self.value}")'
class StringToken(Token):
pass
class BraceToken(Token):
pass
class TokenizeError(Exception):
pass
def tokenize_braces(s: str) -> Iterator[Token]:
"""
>>> list(tokenize_braces(""))
[]
>>> list(tokenize_braces("before {braced} after"))
[StringToken("before "), BraceToken("braced"), StringToken(" after")]
>>> list(tokenize_braces("ab{cd}{ef}"))
[StringToken("ab"), BraceToken("cd"), BraceToken("ef")]
"""
for value in re.split("({[^}]*})", s):
if value == "":
continue
if value.startswith("{") and value.endswith("}"):
value = value[1:-1]
token_class: Type[Token] = BraceToken
else:
token_class = StringToken
if "{" in value:
raise TokenizeError("Unexpected '{' encountered")
if "}" in value:
raise TokenizeError("Unexpected '}' encountered")
yield token_class(value)
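def render_braces(template: str, context: dict) -> str:
    """Small usage sketch (not part of the original module): substitute each
    BraceToken from ``context`` and keep StringTokens verbatim.

    >>> render_braces("hello {name}!", {"name": "world"})
    'hello world!'
    """
    parts = []
    for token in tokenize_braces(template):
        if isinstance(token, BraceToken):
            parts.append(str(context[token.value]))
        else:
            parts.append(token.value)
    return "".join(parts)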
|
py | 1a48b6bfbf8bb14c065d747a878475ab1a1c7fe5 | # setup.py
# This file is generated by Shroud nowrite-version. Do not edit.
# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Shroud Project Developers.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
#
from setuptools import setup, Extension
import numpy
module = Extension(
'struct',
sources=[
'pystructmodule.c',
'pystructutil.c'
],
language='c',
include_dirs = [numpy.get_include()],
# libraries = ['tcl83'],
# library_dirs = ['/usr/local/lib'],
# extra_compile_args = [ '-O0', '-g' ],
# extra_link_args =
)
setup(
name='struct',
ext_modules = [module],
)
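# Typical build sketch (assumes the generated pystructmodule.c and
# pystructutil.c sources sit next to this file):
#
#     python setup.py build_ext --inplace
#
# The built extension is named 'struct', which collides with the
# standard-library ``struct`` module, so import it from a directory where
# the compiled extension is found first on sys.path.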
|
py | 1a48b6dad76005fd016dde27315ca0c24557ab70 | # Copyright (c) 2021 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
from typing import Optional, Union
import ipywidgets as widgets
import numpy as np
from geoh5py.data import FloatData, IntegerData, ReferencedData
from geoh5py.objects.object_base import ObjectBase
from geoh5py.workspace import Workspace
from ipywidgets import Dropdown, FloatText, SelectMultiple, VBox
from geoapps.base import BaseApplication
from geoapps.utils import utils
class ObjectDataSelection(BaseApplication):
"""
Application to select an object and corresponding data
"""
defaults = {}
_data = None
_objects = None
_add_groups = False
_select_multiple = False
_object_types = None
_find_label = []
def __init__(self, **kwargs):
self._data_panel = None
super().__init__(**kwargs)
@property
def add_groups(self):
"""
bool: Add data groups to the list of data choices
"""
return self._add_groups
@add_groups.setter
def add_groups(self, value):
        assert isinstance(value, (bool, str)), "add_groups must be a bool or str"
self._add_groups = value
@property
def data(self) -> Union[Dropdown, SelectMultiple]:
"""
Data selector
"""
if getattr(self, "_data", None) is None:
if self.select_multiple:
self._data = SelectMultiple(
description="Data: ",
)
else:
self._data = Dropdown(
description="Data: ",
)
if self._objects is not None:
self.update_data_list(None)
return self._data
@data.setter
def data(self, value):
assert isinstance(
value, (Dropdown, SelectMultiple)
), f"'Objects' must be of type {Dropdown} or {SelectMultiple}"
self._data = value
@property
def data_panel(self) -> VBox:
if getattr(self, "_data_panel", None) is None:
self._data_panel = VBox([self.objects, self.data])
return self._data_panel
@property
def main(self) -> VBox:
"""
:obj:`ipywidgets.VBox`: A box containing all widgets forming the application.
"""
self.__populate__(**self.defaults)
if self._main is None:
self._main = self.data_panel
self.update_data_list(None)
return self._main
@property
def objects(self) -> Dropdown:
"""
Object selector
"""
if getattr(self, "_objects", None) is None:
self.objects = Dropdown(description="Object:")
return self._objects
@objects.setter
def objects(self, value):
assert isinstance(value, Dropdown), f"'Objects' must be of type {Dropdown}"
self._objects = value
self._objects.observe(self.update_data_list, names="value")
@property
def object_types(self):
"""
Entity type
"""
if getattr(self, "_object_types", None) is None:
self._object_types = []
return self._object_types
@object_types.setter
def object_types(self, entity_types):
if not isinstance(entity_types, list):
entity_types = [entity_types]
for entity_type in entity_types:
assert issubclass(
entity_type, ObjectBase
), f"Provided object_types must be instances of {ObjectBase}"
self._object_types = tuple(entity_types)
@property
def find_label(self):
"""
Object selector
"""
if getattr(self, "_find_label", None) is None:
return []
return self._find_label
@find_label.setter
def find_label(self, values):
"""
Object selector
"""
if not isinstance(values, list):
values = [values]
for value in values:
assert isinstance(
value, str
), f"Labels to find must be strings. Value {value} of type {type(value)} provided"
self._find_label = values
@property
def select_multiple(self):
"""
        bool: Allow selecting multiple data
"""
if getattr(self, "_select_multiple", None) is None:
self._select_multiple = False
return self._select_multiple
@select_multiple.setter
def select_multiple(self, value):
if getattr(self, "_data", None) is not None:
options = self._data.options
else:
options = []
self._select_multiple = value
if value:
self._data = SelectMultiple(description="Data: ", options=options)
else:
self._data = Dropdown(description="Data: ", options=options)
@property
def workspace(self) -> Optional[Workspace]:
"""
Target geoh5py workspace
"""
if (
getattr(self, "_workspace", None) is None
and getattr(self, "_h5file", None) is not None
):
self.workspace = Workspace(self.h5file)
return self._workspace
@workspace.setter
def workspace(self, workspace):
        assert isinstance(workspace, Workspace), f"Workspace must be of class {Workspace}"
self._workspace = workspace
self._h5file = workspace.h5file
# Refresh the list of objects
self.update_objects_list()
def get_selected_entities(self):
"""
Get entities from an active geoh5py Workspace
"""
if getattr(self, "_workspace", None) is not None and self._workspace.get_entity(
self.objects.value
):
for entity in self._workspace.get_entity(self.objects.value):
if isinstance(entity, ObjectBase):
obj = entity
if isinstance(self.data, Dropdown):
values = [self.data.value]
else:
values = self.data.value
data = []
for value in values:
if obj.get_data(value):
data += obj.get_data(value)
elif any([pg.name == value for pg in obj.property_groups]):
data += [
self.workspace.get_entity(prop)[0]
for prop in obj.find_or_create_property_group(
name=value
).properties
]
return obj, data
else:
return None, None
def update_data_list(self, _):
self.refresh.value = False
if getattr(self, "_workspace", None) is not None and self._workspace.get_entity(
self.objects.value
):
for entity in self._workspace.get_entity(self.objects.value):
if isinstance(entity, ObjectBase):
obj = entity
if getattr(obj, "get_data_list", None) is None:
return
options = [""]
if (self.add_groups or self.add_groups == "only") and obj.property_groups:
options = (
options
+ ["-- Groups --"]
+ [p_g.name for p_g in obj.property_groups]
)
if self.add_groups != "only":
data_list = obj.get_data_list()
options = (
options
+ ["--- Channels ---"]
+ [
obj.get_data(uid)[0].name
for uid in data_list
if isinstance(obj.get_data(uid)[0], (IntegerData, FloatData))
]
+ ["Z"]
)
value = self.data.value
self.data.options = options
if self.select_multiple and any([val in options for val in value]):
self.data.value = [val for val in value if val in options]
elif value in options:
self.data.value = value
elif self.find_label:
self.data.value = utils.find_value(self.data.options, self.find_label)
else:
self.data.options = []
self.refresh.value = True
def update_objects_list(self):
if getattr(self, "_workspace", None) is not None:
value = self.objects.value
if len(self.object_types) > 0:
options = [["", None]] + [
[obj.name, obj.uid]
for obj in self._workspace.objects
if isinstance(obj, self.object_types)
]
else:
options = [["", None]] + [
[value, uid]
for uid, value in self._workspace.list_objects_name.items()
]
if value in list(dict(options).values()): # Silent update
self.objects.unobserve(self.update_data_list, names="value")
self.objects.options = options
self.objects.value = value
self._objects.observe(self.update_data_list, names="value")
else:
self.objects.options = options
class LineOptions(ObjectDataSelection):
"""
Unique lines selection from selected data channel
"""
defaults = {"find_label": "line"}
_multiple_lines = None
def __init__(self, **kwargs):
self.defaults = self.update_defaults(**kwargs)
super().__init__(**self.defaults)
self.objects.observe(self.update_data_list, names="value")
self.data.observe(self.update_line_list, names="value")
self.data.description = "Lines field"
@property
def main(self):
if self._main is None:
self._main = VBox([self._data, self.lines])
return self._main
@property
def lines(self):
"""
Widget.SelectMultiple or Widget.Dropdown
"""
if getattr(self, "_lines", None) is None:
if self.multiple_lines:
self._lines = widgets.SelectMultiple(
description="Select lines:",
)
else:
self._lines = widgets.Dropdown(
description="Select line:",
)
return self._lines
@property
def multiple_lines(self):
if getattr(self, "_multiple_lines", None) is None:
self._multiple_lines = True
return self._multiple_lines
@multiple_lines.setter
def multiple_lines(self, value):
assert isinstance(
value, bool
), f"'multiple_lines' property must be of type {bool}"
self._multiple_lines = value
def update_line_list(self, _):
_, data = self.get_selected_entities()
if data and getattr(data[0], "values", None) is not None:
if isinstance(data[0], ReferencedData):
self.lines.options = [""] + list(data[0].value_map.map.values())
else:
self.lines.options = [""] + np.unique(data[0].values).tolist()
class TopographyOptions(ObjectDataSelection):
"""
Define the topography used by the inversion
"""
def __init__(
self, option_list=["None", "Object", "Relative to Sensor", "Constant"], **kwargs
):
self.defaults = self.update_defaults(**kwargs)
self.find_label = ["topo", "dem", "dtm", "elevation", "Z"]
self._offset = FloatText(description="Vertical offset (+ve up)")
self._constant = FloatText(
description="Elevation (m)",
)
self.option_list = {
"None": widgets.Label("No topography"),
"Object": self.data_panel,
"Relative to Sensor": self._offset,
"Constant": self._constant,
}
self._options = widgets.RadioButtons(
options=option_list,
description="Define by:",
)
self.options.observe(self.update_options)
super().__init__(**self.defaults)
@property
def panel(self):
return self._panel
@property
def constant(self):
return self._constant
@property
def main(self):
if self._main is None:
self._main = VBox([self.options, self.option_list[self.options.value]])
return self._main
@property
def offset(self):
return self._offset
@property
def options(self):
return self._options
def update_options(self, _):
self.main.children = [
self.options,
self.option_list[self.options.value],
]
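# Minimal usage sketch in a Jupyter notebook (hypothetical workspace path;
# assumes the geoh5 file exists and that BaseApplication can be constructed
# without extra arguments):
#
#     from IPython.display import display
#     from geoh5py.workspace import Workspace
#
#     app = ObjectDataSelection()
#     app.workspace = Workspace("assets/FlinFlon.geoh5")
#     display(app.main)                        # object/data dropdowns
#     obj, data = app.get_selected_entities()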
|
py | 1a48b78296c19e84ef49e8f2c67b05f430831638 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 13:01:35 2019
@author: avelinojaver
"""
import sys
from pathlib import Path
root_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(root_dir))
from cell_localization.flow import CoordFlow, collate_simple
from cell_localization.models import get_model
import tqdm
from torch.utils.data import DataLoader
import torch
import numpy as np
import matplotlib.pylab as plt
if __name__ == '__main__':
#%%
root_dir = '/Users/avelinojaver/OneDrive - Nexus365/bladder_cancer_tils/eosinophils/training/20x/'
num_workers = 4
    batch_size = 4  # 512
gauss_sigma = 1.5
device = 'cpu'
flow_args = dict(
roi_size = 16,
scale_int = (0, 4095),
prob_unseeded_patch = 0.5,
zoom_range = (0.97, 1.03),
int_aug_offset = (-0.2, 0.2),
int_aug_expansion = (0.7, 1.3),
samples_per_epoch = batch_size*100
)
gen = CoordFlow(root_dir, **flow_args)
loader = DataLoader(gen,
batch_size = batch_size,
shuffle = True,
num_workers = num_workers,
collate_fn = collate_simple
)
model = get_model('ind+clf+unet-simple', 3, 2, 'maxlikelihood')
for images, targets in tqdm.tqdm(loader):
images = torch.from_numpy(np.stack(images)).to(device)
targets = [{k: torch.from_numpy(v).to(device) for k, v in target.items()} for target in targets]
#%%
model.train()
losses = model(images, targets)
loss = sum([v for v in losses.values()])
loss.backward()
#%%
model.eval()
losses, predictions = model(images, targets)
break
#%%
# import torch.nn.functional as F
# xhat, features = model.mapping_network(images)
#
#
#
    # # I want to get a map to indicate if there is a cell or not
# feats = features[0].permute((0, 2, 3, 1))
# n_batch, clf_h, clf_w, clf_n_filts = feats.shape
# feats = feats.contiguous().view(-1, clf_n_filts, 1, 1)
# clf_scores = model.clf_patch_head(feats)
# #scores, has_cells = clf_scores.max(dim=1)
# clf_scores = F.softmax(clf_scores, dim = 1)
# clf_scores = clf_scores[:, 1].view(n_batch, 1, clf_h, clf_w)
#
#
# clf_scores = F.interpolate(clf_scores, size = xhat.shape[-2:], mode = 'bilinear', align_corners=False)
#
#
# bad = clf_scores< 0.5
# xhat[bad] = xhat[bad].mean()
# xhat = model.preevaluation(xhat)
# outs = model.nms(xhat)
# #%%
# proposals = []
#
# mm = xhat.detach().numpy()
# for m, pred in zip(mm, outs):
# pred_coords = pred[0]
# boxes = torch.cat((pred_coords - model.proposal_half_size, pred_coords + model.proposal_half_size), dim = -1)
# proposals.append(boxes)
#
#
# from matplotlib import patches
#
# fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize = (10, 10))
#
#
# ax.imshow(m)
# for box in boxes:
# cm, w, l = (box[0], box[1]), box[2] - box[0], box[3] - box[1]
# rect = patches.Rectangle(cm, w, l,linewidth=1,edgecolor='r',facecolor='none')
# ax.add_patch(rect)
# break
|
py | 1a48b7846c1f94fba1ab025907613a48fbf2afe5 | # coding: utf-8
"""Tests for meteofrance module. Exception classes."""
import pytest
from meteofrance_api.exceptions import MeteoFranceError
def test_meteofrance_exception() -> None:
"""Tests MeteoFranceError exception."""
with pytest.raises(MeteoFranceError):
        # TODO: test for coverage. To be updated in the future.
raise MeteoFranceError("Test Error")
|
py | 1a48b7aeaba4a486e2f03e8d7b284127fe6b15b5 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
seg={'a':21, 'b':8, 'c':11, 'd':26, 'e':19, 'f':20, 'g':13}
for s in "abcdefg":
GPIO.setup(seg[s], GPIO.OUT, initial=0)
zif=[16, 12, 7, 6]
for z in zif:
GPIO.setup(z, GPIO.OUT, initial=1)
dp = 5
GPIO.setup(dp, GPIO.OUT, initial=0)
zahl = [
"abcdef", #0
"bc", #1
"abdeg", #2
"abcdg", #3
"bcfg", #4
"acdfg", #5
"acdefg", #6
"abc", #7
"abcdefg", #8
"abcdfg" #9
]
z = [0, 0, 0, 0]
print("STRG+C beendet das Programm.")
def za():
    # Multiplex the four digits: light one digit at a time for ~5 ms each.
    for i in range(4):
        # Clear all segments before switching to the next digit.
        for s in "abcdefg":
            GPIO.output(seg[s], 0)
        # Pull the digit select line low to enable this digit.
        GPIO.output(zif[i], 0)
        # Light the segments for the digit stored in z[i].
        for s in zahl[z[i]]:
            GPIO.output(seg[s], 1)
        # Decimal point after the second digit separates hours and minutes.
        if i == 1:
            GPIO.output(dp, 1)
        else:
            GPIO.output(dp, 0)
        time.sleep(0.005)
        # Disable the digit again before moving on.
        GPIO.output(zif[i], 1)
try:
while True:
t = time.localtime()
h = t.tm_hour
m = t.tm_min
z[0]=int(h / 10)
z[1]=h % 10
z[2]=int(m / 10)
z[3]=m % 10
while time.localtime().tm_min == m:
za()
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
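# Quick display test sketch (hypothetical; run in place of the clock loop
# above to verify the wiring):
#
#     z = [1, 2, 3, 4]
#     end = time.time() + 5
#     while time.time() < end:
#         za()    # the display should read "12.34" for about five seconds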
|
py | 1a48b879aebbb1fcb70551894c091583d475326c | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.framework import test_util
class HashTableOpTest(tf.test.TestCase):
def testHashTable(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableFindHighRank(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testHashTableInitWithPythonArrays(self):
with self.test_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys,
values,
value_dtype=tf.int64),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.test_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testMultipleHashTables(self):
with self.test_session() as sess:
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table1 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table2 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table3 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
tf.initialize_all_tables().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.test_session():
default_val = tf.constant(-1, tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableWithSparseTensorInput(self):
with self.test_session() as sess:
default_val = tf.constant(-1, tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = tf.SparseTensor(
tf.constant(sp_indices, tf.int64),
tf.constant(["brain", "salad", "tank"]),
tf.constant(sp_shape, tf.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
input_string = tf.constant([1, 2, 3], tf.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.test_session():
default_val = -1
with self.assertRaises(TypeError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(
["a"], [1], [tf.string], tf.int64), default_val)
def testNotInitialized(self):
with self.test_session():
default_val = -1
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(
["a"], [1], value_dtype=tf.int64),
default_val)
input_string = tf.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
output.eval()
def testInitializeTwice(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
table.init.run()
with self.assertRaisesOpError("Table already initialized"):
table.init.run()
def testInitializationWithInvalidDimensions(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2, 3, 4], tf.int64)
with self.assertRaises(ValueError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val)
def testMultipleSessions(self):
# Start a server
server = tf.train.Server(
{"local0": ["localhost:0"]}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = tf.Session(server.target)
session2 = tf.Session(server.target)
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
default_val,
name="t1")
# Init the table in the first session.
with session1:
table.init.run()
self.assertAllEqual(3, table.size().eval())
# Init the table in the second session and verify that we do not get a
# "Table already initialized" error.
with session2:
table.init.run()
self.assertAllEqual(3, table.size().eval())
class MutableHashTableOpTest(tf.test.TestCase):
def testMutableHashTable(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None], exported_values.get_shape().as_list())
# exported data is in the order of the internal map, i.e. undefined
sorted_keys = np.sort(exported_keys.eval())
sorted_values = np.sort(exported_values.eval())
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
self.assertAllEqual([0, 1, 2], sorted_values)
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=tf.Graph()) as sess:
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
default_val = -1
keys = tf.constant(["b", "c", "d"], tf.string)
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableHashTable(
tf.string, tf.int64, default_val, name="t1", checkpoint=True)
save = tf.train.Saver()
tf.global_variables_initializer().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=tf.Graph()) as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
default_val = -1
table = tf.contrib.lookup.MutableHashTable(
tf.string, tf.int64, default_val, name="t1", checkpoint=True)
table.insert(
tf.constant(["a", "c"], tf.string),
tf.constant([12, 24], tf.int64)).run()
self.assertAllEqual(2, table.size().eval())
save = tf.train.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["a", "b", "c", "d", "e"], tf.string)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], output.eval())
def testSharing(self):
# Start a server to store the table state
server = tf.train.Server(
{"local0": ["localhost:0"]}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = tf.Session(server.target)
session2 = tf.Session(server.target)
table = tf.contrib.lookup.MutableHashTable(
tf.int64, tf.string, "-", name="t1")
# Populate the table in the first session
with session1:
self.assertAllEqual(0, table.size().eval())
keys = tf.constant([11, 12], tf.int64)
values = tf.constant(["a", "b"])
table.insert(keys, values).run()
self.assertAllEqual(2, table.size().eval())
output = table.lookup(tf.constant([11, 12, 13], tf.int64))
self.assertAllEqual([b"a", b"b", b"-"], output.eval())
# Verify that we can access the shared data from the second session
with session2:
self.assertAllEqual(2, table.size().eval())
output = table.lookup(tf.constant([10, 11, 12], tf.int64))
self.assertAllEqual([b"-", b"a", b"b"], output.eval())
def testMutableHashTableOfTensors(self):
with self.test_session():
default_val = tf.constant([-1, -1], tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([[0, 1], [2, 3], [4, 5]], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3, 2], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result)
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None, 2], exported_values.get_shape().as_list())
# exported data is in the order of the internal map, i.e. undefined
sorted_keys = np.sort(exported_keys.eval())
sorted_values = np.sort(exported_values.eval())
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
self.assertAllEqual([[4, 5], [2, 3], [0, 1]], sorted_values)
def testMutableHashTableExportInsert(self):
with self.test_session():
default_val = tf.constant([-1, -1], tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([[0, 1], [2, 3], [4, 5]], tf.int64)
table1 = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
default_val)
self.assertAllEqual(0, table1.size().eval())
table1.insert(keys, values).run()
self.assertAllEqual(3, table1.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
expected_output = [[0, 1], [2, 3], [-1, -1]]
output1 = table1.lookup(input_string)
self.assertAllEqual(expected_output, output1.eval())
exported_keys, exported_values = table1.export()
self.assertAllEqual(3, exported_keys.eval().size)
self.assertAllEqual(6, exported_values.eval().size)
# Populate a second table from the exported data
table2 = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
default_val)
self.assertAllEqual(0, table2.size().eval())
table2.insert(exported_keys, exported_values).run()
self.assertAllEqual(3, table2.size().eval())
# Verify lookup result is still the same
output2 = table2.lookup(input_string)
self.assertAllEqual(expected_output, output2.eval())
def testMutableHashTableOfTensorsInvalidShape(self):
with self.test_session():
default_val = tf.constant([-1, -1], tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
table = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
default_val)
# Shape [6] instead of [3, 2]
values = tf.constant([0, 1, 2, 3, 4, 5], tf.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [2,3] instead of [3, 2]
values = tf.constant([[0, 1, 2], [3, 4, 5]], tf.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [2, 2] instead of [3, 2]
values = tf.constant([[0, 1], [2, 3]], tf.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [3, 1] instead of [3, 2]
values = tf.constant([[0], [2], [4]], tf.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Valid Insert
values = tf.constant([[0, 1], [2, 3], [4, 5]], tf.int64)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
def testMutableHashTableInvalidDefaultValue(self):
with self.test_session():
default_val = tf.constant([[-1, -1]], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
default_val)
with self.assertRaisesOpError("Default value must be a vector"):
self.assertAllEqual(0, table.size().eval())
def testMutableHashTableDuplicateInsert(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery", "brain"])
values = tf.constant([0, 1, 2, 3], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([3, 1, -1], result)
def testMutableHashTableFindHighRank(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testMutableHashTableInsertHighRank(self):
with self.test_session():
default_val = -1
keys = tf.constant([["brain", "salad"], ["surgery", "tank"]])
values = tf.constant([[0, 1], [2, 3]], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank", "tarkus"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, 3, -1], result)
def testMutableHashTableOfTensorsFindHighRank(self):
with self.test_session():
default_val = tf.constant([-1, -1, -1], tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string, tf.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2, 3], output.get_shape())
result = output.eval()
self.assertAllEqual(
[[[0, 1, 2], [2, 3, 4]], [[-1, -1, -1], [-1, -1, -1]]], result)
def testMultipleMutableHashTables(self):
with self.test_session() as sess:
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table1 = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
table2 = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
table3 = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
table1.insert(keys, values).run()
table2.insert(keys, values).run()
table3.insert(keys, values).run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testMutableHashTableWithTensorDefault(self):
with self.test_session():
default_val = tf.constant(-1, tf.int64)
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testSignatureMismatch(self):
with self.test_session():
default_val = -1
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
# insert with keys of the wrong type
with self.assertRaises(TypeError):
table.insert(tf.constant([4, 5, 6]), values).run()
# insert with values of the wrong type
with self.assertRaises(TypeError):
table.insert(keys, tf.constant(["a", "b", "c"])).run()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
# lookup with keys of the wrong type
input_string = tf.constant([1, 2, 3], tf.int64)
with self.assertRaises(TypeError):
table.lookup(input_string).eval()
# default value of the wrong type
with self.assertRaises(TypeError):
tf.contrib.lookup.MutableHashTable(tf.string, tf.int64, "UNK")
def testMutableHashTableStringFloat(self):
with self.test_session():
default_val = -1.5
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1.1, 2.2], tf.float32)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.float32,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllClose([0, 1.1, -1.5], result)
def testMutableHashTableInt64String(self):
with self.test_session():
default_val = "n/a"
keys = tf.constant([0, 1, 2], tf.int64)
values = tf.constant(["brain", "salad", "surgery"])
table = tf.contrib.lookup.MutableHashTable(tf.int64,
tf.string,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([0, 1, 3], tf.int64)
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual((b"brain", b"salad", b"n/a"), result)
class MutableDenseHashTableOpTest(tf.test.TestCase):
def testBasic(self):
with self.test_session():
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64, tf.int64, default_value=-1, empty_key=0)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([11, 12, 15], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testLookupUnknownShape(self):
with self.test_session():
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64, tf.int64, default_value=-1, empty_key=0)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
placeholder_keys = tf.placeholder(tf.int64)
output = table.lookup(placeholder_keys)
self.assertAllEqual(None, output.get_shape())
result = output.eval({placeholder_keys: [11, 12, 15]})
self.assertAllEqual([0, 1, -1], result)
def testMapStringToFloat(self):
with self.test_session():
keys = tf.constant(["a", "b", "c"], tf.string)
values = tf.constant([0.0, 1.1, 2.2], tf.float32)
default_value = tf.constant(-1.5, tf.float32)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.string, tf.float32, default_value=default_value, empty_key="")
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant(["a", "b", "d"], tf.string)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllClose([0, 1.1, -1.5], result)
def testMapInt64ToFloat(self):
for float_dtype in [tf.float32, tf.float64]:
with self.test_session():
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([0.0, 1.1, 2.2], float_dtype)
default_value = tf.constant(-1.5, float_dtype)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64, float_dtype, default_value=default_value, empty_key=0)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([11, 12, 15], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllClose([0, 1.1, -1.5], result)
def testVectorValues(self):
with self.test_session():
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]], tf.int64)
default_value = tf.constant([-1, -2, -3, -4], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=0,
initial_num_buckets=4)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(4, len(table.export()[0].eval()))
table.insert(
tf.constant([14], tf.int64),
tf.constant([[2, 3, 4, 5]], tf.int64)).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
input_string = tf.constant([11, 12, 15], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([3, 4], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1, 2, 3], [3, 4, 5, 6], [-1, -2, -3, -4]],
result)
def testVectorKeys(self):
with self.test_session():
keys = tf.constant([[0, 1], [1, 2], [1, 3]], tf.int64)
values = tf.constant([10, 11, 12], tf.int64)
empty_key = tf.constant([0, 3], tf.int64)
default_value = tf.constant(-1, tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=empty_key,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
table.insert(
tf.constant([[0, 0]], tf.int64), tf.constant([13], tf.int64)).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
input_string = tf.constant([[0, 1], [1, 2], [0, 2]], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([10, 11, -1], result)
def testResize(self):
with self.test_session():
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=-1,
empty_key=0,
initial_num_buckets=4)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(4, len(table.export()[0].eval()))
keys2 = tf.constant([13, 14, 15, 16, 17], tf.int64)
values2 = tf.constant([3, 4, 5, 6, 7], tf.int64)
table.insert(keys2, values2).run()
self.assertAllEqual(7, table.size().eval())
self.assertAllEqual(16, len(table.export()[0].eval()))
keys3 = tf.constant([10, 11, 12, 13, 14, 15, 16, 17, 18], tf.int64)
output = table.lookup(keys3)
self.assertAllEqual([-1, 0, 1, 3, 4, 5, 6, 7, -1], output.eval())
def testExport(self):
with self.test_session():
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([1, 2, 3], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=-1,
empty_key=100,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None], exported_values.get_shape().as_list())
np_keys = exported_keys.eval()
np_values = exported_values.eval()
self.assertAllEqual(8, len(np_keys))
self.assertAllEqual(8, len(np_values))
# pair up keys and values, drop extra added dimension
pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]
# sort by key
pairs = pairs[pairs[:, 0].argsort()]
self.assertAllEqual([[11, 1], [12, 2], [13, 3], [100, 0], [100, 0],
[100, 0], [100, 0], [100, 0]], pairs)
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=tf.Graph()) as sess:
default_value = -1
empty_key = 0
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = tf.train.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=tf.Graph()) as sess:
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
tf.constant([11, 14], tf.int64),
tf.constant([12, 24], tf.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = tf.train.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = tf.constant([10, 11, 12, 13, 14], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], output.eval())
def testVectorSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=tf.Graph()) as sess:
empty_key = tf.constant([11, 13], tf.int64)
default_value = tf.constant([-1, -2], tf.int64)
keys = tf.constant([[11, 12], [11, 14], [13, 14]], tf.int64)
values = tf.constant([[0, 1], [2, 3], [4, 5]], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = tf.train.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=tf.Graph()) as sess:
empty_key = tf.constant([11, 13], tf.int64)
default_value = tf.constant([-1, -2], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
tf.constant([[11, 12], [13, 15]], tf.int64),
tf.constant([[21, 22], [23, 24]], tf.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = tf.train.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = tf.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([[0, 1], [2, 3], [-1, -2], [4, 5], [-1, -2]],
output.eval())
def testVectorScalarSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_scalar_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=tf.Graph()) as sess:
empty_key = tf.constant([11, 13], tf.int64)
default_value = tf.constant(-1, tf.int64)
keys = tf.constant([[11, 12], [11, 14], [13, 14]], tf.int64)
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=empty_key,
name="t2",
checkpoint=True,
initial_num_buckets=32)
save = tf.train.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=tf.Graph()) as sess:
empty_key = tf.constant([11, 13], tf.int64)
default_value = tf.constant(-1, tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=default_value,
empty_key=empty_key,
name="t2",
checkpoint=True,
initial_num_buckets=64)
table.insert(
tf.constant([[11, 12], [13, 15]], tf.int64),
tf.constant([3, 4], tf.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = tf.train.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = tf.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([0, 1, -1, 2, -1], output.eval())
def testReprobe(self):
with self.test_session():
# Insert 6 keys into a table with 8 buckets.
# The values are chosen to make sure collisions occur when using GCC STL
keys = tf.constant([11, 12, 13, 19, 20, 21], tf.int64)
values = tf.constant([51, 52, 53, 54, 55, 56], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=-1,
empty_key=0,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(6, table.size().eval())
input_string = tf.constant([10, 11, 12, 13, 14, 19, 20, 21, 22], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([9], output.get_shape())
result = output.eval()
self.assertAllEqual([-1, 51, 52, 53, -1, 54, 55, 56, -1], result)
def testCustomEmptyKey(self):
with self.test_session():
keys = tf.constant([11, 0, 13], tf.int64)
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64, tf.int64, default_value=-1, empty_key=12)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([11, 0, 15], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testErrors(self):
with self.test_session():
table = tf.contrib.lookup.MutableDenseHashTable(
tf.int64, tf.int64, default_value=-1, empty_key=0)
# Inserting the empty key returns an error
keys = tf.constant([11, 0], tf.int64)
values = tf.constant([0, 1], tf.int64)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "empty_key"):
table.insert(keys, values).run()
# Looking up the empty key returns an error
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, "empty_key"):
table.lookup(keys).eval()
# Arbitrary tensors of keys are not supported
keys = tf.constant([[11, 0], [12, 1]], tf.int64)
values = tf.constant([[11, 0], [12, 1]], tf.int64)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"Expected key shape"):
table.lookup(keys).eval()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"Expected key shape"):
table.insert(keys, values).run()
table2 = tf.contrib.lookup.MutableDenseHashTable(
tf.int64,
tf.int64,
default_value=-1,
empty_key=17,
initial_num_buckets=12)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"Number of buckets must be"):
self.assertAllEqual(0, table2.size().eval())
class StringToIndexTableFromFile(tf.test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def test_string_to_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(tf.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(tf.OpError, ids.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_string_to_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(tf.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(tf.OpError, ids.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_string_to_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab3.txt")
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(tf.constant(["salad", "surgery", "tarkus", "toccata"]))
self.assertRaises(tf.OpError, ids.eval)
tf.initialize_all_tables().run()
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
867, # 3 + fingerprint("tarkus") mod 300.
860), # 3 + fingerprint("toccata") mod 300.
ids.eval())
def test_string_to_index_table_from_file_with_only_oov_buckets(self):
self.assertRaises(
ValueError,
tf.contrib.lookup.string_to_index_table_from_file,
vocabulary_file=None)
def test_string_to_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(tf.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(tf.OpError, ids.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, -1, -1), ids.eval())
self.assertEqual(2, table.size().eval())
def test_string_to_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"Invalid vocab_size", table.init.run)
def test_string_to_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
self.assertRaises(
ValueError,
tf.contrib.lookup.string_to_index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(tf.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(tf.OpError, ids.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, -1), ids.eval())
self.assertEqual(3, table.size().eval())
def test_string_to_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.test_session():
with self.assertRaises(TypeError):
tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = tf.contrib.lookup.string_to_index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=tf.contrib.lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
tf.constant(["salad", "surgery", "tarkus"]))
class StringToIndexTableFromTensor(tf.test.TestCase):
def test_string_to_index_table_from_tensor_with_tensor_init(self):
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_tensor(
mapping=["brain", "salad", "surgery"], num_oov_buckets=1)
ids = table.lookup(tf.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(tf.OpError, ids.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_string_to_index_table_from_tensor_with_default_value(self):
default_value = -42
with self.test_session():
table = tf.contrib.lookup.string_to_index_table_from_tensor(
mapping=["brain", "salad", "surgery"], default_value=default_value)
ids = table.lookup(tf.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(tf.OpError, ids.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_string_to_index_table_from_tensor_with_only_oov_buckets(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.contrib.lookup.string_to_index_table_from_tensor(
mapping=None, num_oov_buckets=1)
def test_string_to_index_table_from_tensor_with_invalid_hashers(self):
with self.test_session():
with self.assertRaises(TypeError):
tf.contrib.lookup.string_to_index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = tf.contrib.lookup.string_to_index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=tf.contrib.lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
tf.constant(["salad", "surgery", "tarkus"]))
class StringToIndexTest(tf.test.TestCase):
def test_string_to_index(self):
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
feats = tf.constant(["salad", "surgery", "tarkus"])
indices = tf.contrib.lookup.string_to_index(feats,
mapping=mapping_strings)
self.assertRaises(tf.OpError, indices.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, -1), indices.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = tf.constant(["hello", "hello"])
feats = tf.constant(["hello", "hola"])
indices = tf.contrib.lookup.string_to_index(feats,
mapping=mapping_strings)
self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
def test_string_to_index_with_default_value(self):
default_value = -42
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
feats = tf.constant(["salad", "surgery", "tarkus"])
indices = tf.contrib.lookup.string_to_index(feats,
mapping=mapping_strings,
default_value=default_value)
self.assertRaises(tf.OpError, indices.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((1, 2, default_value), indices.eval())
class IndexToStringTableFromFileTest(tf.test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
with self.test_session():
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(tf.constant([0, 1, 2, 3], tf.int64))
self.assertRaises(tf.OpError, features.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.test_session():
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(tf.constant([1, 2, 4], tf.int64))
self.assertRaises(tf.OpError, features.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.test_session():
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(tf.constant([1, 2, 4], tf.int64))
self.assertRaises(tf.OpError, features.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"salad", default_value, default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.test_session():
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
features = table.lookup(tf.constant([1, 2, 4], tf.int64))
self.assertRaises(tf.OpError, features.eval)
init = tf.initialize_all_tables()
self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"Invalid vocab_size", init.run)
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.test_session():
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(tf.constant([1, 2, 4], tf.int64))
self.assertRaises(tf.OpError, features.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())
class IndexToStringTableFromTensorTest(tf.test.TestCase):
def test_index_to_string_table_from_tensor(self):
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = tf.constant([0, 1, 2, 3], tf.int64)
features = table.lookup(indices)
self.assertRaises(tf.OpError, features.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = tf.constant(["hello", "hello"])
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = tf.constant([0, 1, 4], tf.int64)
features = table.lookup(indices)
tf.initialize_all_tables().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=mapping_strings, default_value=default_value)
indices = tf.constant([1, 2, 4], tf.int64)
features = table.lookup(indices)
self.assertRaises(tf.OpError, features.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
class IndexToStringTest(tf.test.TestCase):
def test_index_to_string(self):
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
indices = tf.constant([0, 1, 2, 3], tf.int64)
feats = tf.contrib.lookup.index_to_string(indices,
mapping=mapping_strings)
self.assertRaises(tf.OpError, feats.eval)
tf.initialize_all_tables().run()
self.assertAllEqual(
(b"brain", b"salad", b"surgery", b"UNK"), feats.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = tf.constant(["hello", "hello"])
indices = tf.constant([0, 1, 4], tf.int64)
feats = tf.contrib.lookup.index_to_string(indices,
mapping=mapping_strings)
tf.initialize_all_tables().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.test_session():
mapping_strings = tf.constant(["brain", "salad", "surgery"])
indices = tf.constant([1, 2, 4], tf.int64)
feats = tf.contrib.lookup.index_to_string(indices,
mapping=mapping_strings,
default_value=default_value)
self.assertRaises(tf.OpError, feats.eval)
tf.initialize_all_tables().run()
self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
class InitializeTableFromFileOpTest(tf.test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def testInitializeTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
with self.test_session():
default_value = -1
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
vocabulary_file, tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER), default_value)
table.init.run()
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.test_session():
default_value = "UNK"
key_index = tf.contrib.lookup.TextFileIndex.LINE_NUMBER
value_index = tf.contrib.lookup.TextFileIndex.WHOLE_LINE
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(vocabulary_file, tf.int64,
key_index, tf.string,
value_index), default_value)
table.init.run()
input_values = tf.constant([0, 1, 2, 3], tf.int64)
output = table.lookup(input_values)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.test_session():
default_value = -1
key_index = 1
value_index = 2
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(vocabulary_file, tf.string,
key_index, tf.int64,
value_index), default_value)
table.init.run()
input_string = tf.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.test_session():
default_value = -1
key_index = 2
value_index = 1
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(vocabulary_file, tf.string,
key_index, tf.int64,
value_index), default_value)
with self.assertRaisesOpError("is not a valid"):
table.init.run()
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.test_session():
default_value = "UNK"
key_index = tf.contrib.lookup.TextFileIndex.WHOLE_LINE
value_index = tf.contrib.lookup.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(vocabulary_file, tf.int64,
key_index, tf.string,
value_index), default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.test_session():
default_value = -1
key_index = 1 # second column of the line
value_index = tf.contrib.lookup.TextFileIndex.LINE_NUMBER
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(vocabulary_file, tf.string,
key_index, tf.int64,
value_index), default_value)
with self.assertRaisesOpError("Invalid number of columns"):
table.init.run()
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.test_session() as sess:
shared_name = "shared-one-column"
default_value = -1
table1 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
vocabulary_file, tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table2 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
vocabulary_file, tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table3 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
vocabulary_file, tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
tf.initialize_all_tables().run()
input_string = tf.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.test_session():
default_value = -1
with self.assertRaises(ValueError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
"", tf.string, tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
tf.int64, tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testInitializeWithVocabSize(self):
with self.test_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
table1 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
vocabulary_file1,
tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Initialize from file.
table1.init.run()
self.assertEquals(vocab_size, table1.size().eval())
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
table2 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
vocabulary_file2,
tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
with self.assertRaisesOpError("Invalid vocab_size"):
table2.init.run()
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
table3 = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
vocabulary_file3,
tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Smaller vocab size reads only vocab_size records.
table3.init.run()
self.assertEquals(vocab_size, table3.size().eval())
def testFeedVocabularyName(self):
vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
with self.test_session():
default_value = -1
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
"old_file.txt", tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER), default_value)
# Initializing with a non-existing file (old_file.txt) should fail.
# TODO(yleon): Update message, which might change per FileSystem.
with self.assertRaisesOpError("old_file.txt"):
table.init.run()
# Initialize the model feeding the vocabulary file.
filenames = tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS)
table.init.run(feed_dict={filenames[0]: vocabulary_file})
input_string = tf.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.test_session():
default_value = -1
# Invalid data type
other_type = tf.constant(1)
with self.assertRaises(ValueError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
other_type, tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER), default_value)
# Non-scalar filename
filenames = tf.constant([vocabulary_file, vocabulary_file])
with self.assertRaises(ValueError):
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(
filenames, tf.string,
tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
tf.contrib.lookup.TextFileIndex.LINE_NUMBER), default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.test_session():
default_value = "UNK"
vocab_size = 3
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.init.run()
input_values = tf.constant([0, 1, 2, 3], tf.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.test_session():
default_value = -1
vocab_size = 3
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(vocab_file,
vocab_size=vocab_size),
default_value)
table.init.run()
input_string = tf.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], out.eval())
self.assertEquals(vocab_size, table.size().eval())
class IdTableWithHashBucketsTest(tf.test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def testIdTableWithHashBucketsInit(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt")
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = tf.contrib.lookup.IdTableWithHashBuckets(
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value),
oov_buckets)
table.init.run()
input_string = tf.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testIdTableWithOnlyHashBucket(self):
with self.test_session():
oov_buckets = 5
# Set a table that only uses hash buckets, for each input value returns
# an id calculated by fingerprint("input") mod oov_buckets.
table = tf.contrib.lookup.IdTableWithHashBuckets(None, oov_buckets)
table.init.run()
input_string = tf.constant(["brain", "salad", "surgery"])
out = table.lookup(input_string)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testIdTableWithHashBucketsWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.test_session() as sess:
default_value = -1
vocab_size = 3
oov_buckets = 3
vocab_table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table1 = tf.contrib.lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=tf.contrib.lookup.FastHashSpec,
name="table1")
table2 = tf.contrib.lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=tf.contrib.lookup.StrongHashSpec((1, 2)),
name="table2")
tf.initialize_all_tables().run()
input_string = tf.constant(["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 3], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
test_util.assert_ops_in_graph({
"table1_Lookup/hash_bucket": "StringToHashBucketFast",
"table2_Lookup/hash_bucket": "StringToHashBucketStrong",
}, sess.graph)
def testIdTableWithHashBucketsInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
shared_name = "across-sessions"
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table1 = tf.contrib.lookup.IdTableWithHashBuckets(
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
table1.init.run()
input_string_1 = tf.constant(["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], out1.eval())
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
# Underlying lookup table already initialized in previous session.
# No need to call table2.init.run()
table2 = tf.contrib.lookup.IdTableWithHashBuckets(
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
input_string_2 = tf.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], out2.eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
vocab_file = self._createVocabFile("feat_to_id_6.txt")
with self.test_session() as sess:
default_value1 = -1
vocab_size = 3
oov_buckets = 0
table1 = tf.contrib.lookup.IdTableWithHashBuckets(
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value1),
oov_buckets)
default_value2 = -2
table2 = tf.contrib.lookup.IdTableWithHashBuckets(
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value2),
oov_buckets)
tf.initialize_all_tables().run()
input_string_1 = tf.constant(["brain", "salad", "surgery", "UNK"])
input_string_2 = tf.constant(["fruit", "salad", "UNK"])
out1 = table1.lookup(input_string_1)
out2 = table2.lookup(input_string_2)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([0, 1, 2, -1], out1)
self.assertAllEqual([-2, 1, -2], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.test_session() as sess:
sp_features = tf.SparseTensor(
tf.constant(input_indices, tf.int64),
tf.constant(["brain", "salad", "brain", "surgery", "tarkus"],
tf.string), tf.constant(input_shape, tf.int64))
table = tf.contrib.lookup.IdTableWithHashBuckets(
tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=3),
-1),
1)
table.init.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
lookup_table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
with self.assertRaises(TypeError):
tf.contrib.lookup.IdTableWithHashBuckets(
lookup_table, oov_buckets, hasher_spec=1)
table = tf.contrib.lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=tf.contrib.lookup.HasherSpec("my-awesome-hash", None))
input_string = tf.constant(["brain", "salad", "surgery", "UNK"])
with self.assertRaises(ValueError):
table.lookup(input_string)
with self.assertRaises(ValueError):
table = tf.contrib.lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=tf.contrib.lookup.StrongHashSpec([]))
with self.assertRaises(ValueError):
table = tf.contrib.lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=tf.contrib.lookup.StrongHashSpec([1, 2, 3]))
with self.assertRaises(TypeError):
table = tf.contrib.lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=tf.contrib.lookup.StrongHashSpec([None, 2]))
if __name__ == "__main__":
tf.test.main()
|
py | 1a48b8cd99e4b4102c26ad20f3c15309c4b524a7 | from itertools import tee, zip_longest
from django.db.models import Model
from typing import Any, Iterator, List, Sequence, Type, TypeVar, Tuple
T = TypeVar('T', covariant=True)
def pairwise(iterable: Sequence[T]) -> Iterator[Tuple[T, T]]:
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def modelname(model: Type[Model]) -> str:
return f'{model._meta.app_label}.{model._meta.model_name}'
def is_sublist(needle: Sequence[Any], haystack: Sequence[Any]) -> bool:
if not needle:
return True
if not haystack:
return False
max_k = len(needle) - 1
k = 0
for elem in haystack:
if elem != needle[k]:
k = 0
continue
if k == max_k:
return True
k += 1
return False
def parent_to_inherited_path(parent: Type[Model], inherited: Type[Model]) -> List[str]:
"""
Pull relation path segments from `parent` to `inherited` model
in multi table inheritance.
"""
bases = inherited._meta.get_base_chain(parent)
relations: List[str] = []
model = inherited
for base in bases:
relations.append(model._meta.parents[base].remote_field.name)
model = base
return relations[::-1]
def skip_equal_segments(ps: Sequence[str], rs: Sequence[str]) -> List[str]:
"""
Skips all equal segments from the beginning of `ps` and `rs`
returning left over segments from `ps`.
"""
add: bool = False
ret: List[str] = []
for left, right in zip_longest(ps, rs):
if left is None:
break
if left != right:
add = True
if add:
ret.append(left)
return ret
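# A minimal usage sketch (not part of the original module); the sample values
# below are assumptions chosen purely for illustration of the helpers above.
if __name__ == '__main__':
    assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
    assert is_sublist([2, 3], [1, 2, 3, 4])
    assert not is_sublist([3, 2], [1, 2, 3, 4])
    # equal leading segments are skipped; the leftover of the first argument is returned
    assert skip_equal_segments(['a', 'b', 'c'], ['a', 'x']) == ['b', 'c']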
|
py | 1a48baa7cd8ed775b7d8180cd6ef1694f19ee41c | # Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mdts.lib.binding_manager import BindingManager
from mdts.lib.physical_topology_manager import PhysicalTopologyManager
from mdts.lib.virtual_topology_manager import VirtualTopologyManager
from mdts.tests.utils.asserts import async_assert_that
from mdts.tests.utils.asserts import receives
from mdts.tests.utils.asserts import should_NOT_receive
from mdts.tests.utils.asserts import within_sec
from mdts.tests.utils.utils import bindings
from mdts.tests.utils.utils import wait_on_futures
from nose.plugins.attrib import attr
import logging
import random
import time
LOG = logging.getLogger(__name__)
PTM = PhysicalTopologyManager(
'../topologies/mmm_physical_test_conn_tracking.yaml')
VTM = VirtualTopologyManager(
'../topologies/mmm_virtual_test_conn_tracking.yaml')
BM = BindingManager(PTM, VTM)
binding_multihost = {
'description': 'spanning across multiple MMs',
'bindings': [
{'binding':
{'device_name': 'bridge-000-001', 'port_id': 2,
'host_id': 1, 'interface_id': 1}},
{'binding':
{'device_name': 'bridge-000-001', 'port_id': 3,
'host_id': 2, 'interface_id': 2}},
]
}
def set_bridge_port_filters(bridge_name, port_id, inbound_filter_name,
outbound_filter_name):
'''Sets in-bound and out-bound filters on a bridge port.'''
bridge_port = VTM.get_device_port(bridge_name, port_id)
inbound_filter = None
if inbound_filter_name:
inbound_filter = VTM.get_chain(inbound_filter_name)
outbound_filter = None
if outbound_filter_name:
outbound_filter = VTM.get_chain(outbound_filter_name)
bridge_port.set_inbound_filter(inbound_filter)
bridge_port.set_outbound_filter(outbound_filter)
# Sleep here to make sure that the settings have been propagated.
time.sleep(5)
def unset_bridge_port_filters(bridge_name, port_id):
'''Resets the in-bound and out-bound filters on a bridge port.'''
set_bridge_port_filters(bridge_name, port_id, None, None)
def get_random_port_num():
'''Returns a random port number from a free port range.
NOTE: Using a random number may cause test indeterminacy on a rare occasion.
'''
return random.randint(49152, 65535)
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_filtering_by_network_address():
'''
Title: Tests packets filtering based on network address
Scenario:
When: A VM sends UDP packets to another host on the same bridge.
Then: The UDP packets reaches the receiver.
Then: Filtering rule chains based on network address (IP address) are set on
the bridge port that the receiver host is connected to.
And: The UDP packets from the same sender do NOT reach the receiver.
'''
sender = BM.get_iface_for_port('bridge-000-001', 2)
receiver = BM.get_iface_for_port('bridge-000-001', 3)
# Reset in/out-bound filters.
unset_bridge_port_filters('bridge-000-001', 3)
port_num = get_random_port_num()
# FIXME: do not use hardcoded values!
f1 = async_assert_that(receiver,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
'No filtering: receives UDP packets from sender.')
f2 = sender.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Set a filtering rule based on network address.
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_nw_in',
'connection_tracking_nw_out')
f1 = async_assert_that(receiver, should_NOT_receive(
'dst host 172.16.1.2 and udp',
within_sec(5)),
'Packets are filtered based on IP address.')
f2 = sender.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_connection_tracking_by_network_address():
'''
Title: Tests NW address based connection tracking.
Scenario:
When: A VM, supposedly inside a FW, sends UDP packets to another host,
supposedly outside the FW, on the same bridge.
And: The host outside the FW receives the UDP packets.
Then: A connection-tracking-based peep hole is established.
And: The outside host now can send UDP packets to the inside host.
'''
outside = BM.get_iface_for_port('bridge-000-001', 2)
inside = BM.get_iface_for_port('bridge-000-001', 3)
# Set a filtering rule based on ip address.
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_nw_in',
'connection_tracking_nw_out')
# Send forward packets to set up a connection-tracking based peep hole in
# the filter.
port_num = get_random_port_num()
f1 = async_assert_that(outside,
receives('dst host 172.16.1.1 and udp',
within_sec(5)),
'Outside host receives forward packets from inside.')
f2 = inside.send_udp('aa:bb:cc:00:01:01', '172.16.1.1', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Verify the peep hole.
f1 = async_assert_that(inside,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
'Outside host can send packets to inside '
'via a peep hole.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_filtering_by_dl():
'''
Title: Tests dl-based packet filtering.
Scenario:
When: A VM sends UDP packets to another host on the same bridge.
Then: The UDP packets reach the receiver without filtering rule chains.
Then: A filtering rule chain based on mac address is set on the bridge.
And: UDP packets from the same host do NOT reach the same destination host.
'''
outside = BM.get_iface_for_port('bridge-000-001', 2)
inside = BM.get_iface_for_port('bridge-000-001', 3)
# Reset an in-bound filter.
unset_bridge_port_filters('bridge-000-001', 3)
port_num = get_random_port_num()
f1 = async_assert_that(
inside,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
'No filtering: inside receives UDP packets from outside.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Set a filtering rule based on mac addresses
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_dl_in',
'connection_tracking_dl_out')
f1 = async_assert_that(inside,
should_NOT_receive(
'dst host 172.16.1.2 and udp',
within_sec(5)),
'Packets are filtered based on mac address.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_connection_tracking_with_drop_by_dl():
'''
Title: Tests dl-based connection tracking.
Scenario:
When: A VM inside a FW sends UDP packets to a VM outside.
And: The outside receives the UDP packets.
Then: A connection-tracking-based peep hole is established.
And: The outside now can send UDP packets to the inside.
'''
outside = BM.get_iface_for_port('bridge-000-001', 2)
inside = BM.get_iface_for_port('bridge-000-001', 3)
# Set a filtering rule based on mac addresses
set_bridge_port_filters('bridge-000-001', 3, 'connection_tracking_dl_in',
'connection_tracking_dl_out')
# Send forward packets to set up a connection-tracking based peep hole in
# the filter.
port_num = get_random_port_num()
f1 = async_assert_that(outside,
receives('dst host 172.16.1.1 and udp',
within_sec(5)),
'The outside host receives forward packets '
'from the inside.')
f2 = inside.send_udp('aa:bb:cc:00:01:01', '172.16.1.1', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
# Verify the peep hole.
f1 = async_assert_that(inside,
receives('dst host 172.16.1.2 and udp',
within_sec(5)),
'The outside host can now send packets to the inside '
'via a peep hole.')
f2 = outside.send_udp('aa:bb:cc:00:01:02', '172.16.1.2', 41,
src_port=port_num, dst_port=port_num)
wait_on_futures([f1, f2])
|
py | 1a48bb0408a127cfbc11df1a0416d4a1569c3f12 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext, dirname, exists
from os import getenv
from distutils.spawn import find_executable
from distutils.version import LooseVersion
from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS
from tools.hooks import hook_tool
from tools.utils import run_cmd, NotSupportedException
class GCC(mbedToolchain):
OFFICIALLY_SUPPORTED = True
LINKER_EXT = '.ld'
LIBRARY_EXT = '.a'
STD_LIB_NAME = "lib%s.a"
DIAGNOSTIC_PATTERN = re.compile(r'((?P<file>[^:]+):(?P<line>\d+):)(?P<col>\d+):? (?P<severity>warning|[eE]rror|fatal error): (?P<message>.+)')
GCC_RANGE = (LooseVersion("6.0.0"), LooseVersion("7.0.0"))
GCC_VERSION_RE = re.compile(rb"\d+\.\d+\.\d+")
def __init__(self, target, notify=None, macros=None, build_profile=None,
build_dir=None):
mbedToolchain.__init__(self, target, notify, macros,
build_profile=build_profile, build_dir=build_dir)
tool_path=TOOLCHAIN_PATHS['GCC_ARM']
# Add flags for current size setting
default_lib = "std"
if hasattr(target, "default_lib"):
default_lib = target.default_lib
elif hasattr(target, "default_build"): # Legacy
default_lib = target.default_build
if default_lib == "small":
self.flags["common"].append("-DMBED_RTOS_SINGLE_THREAD")
self.flags["ld"].append("--specs=nano.specs")
if target.core == "Cortex-M0+":
self.cpu = ["-mcpu=cortex-m0plus"]
elif target.core.startswith("Cortex-M4"):
self.cpu = ["-mcpu=cortex-m4"]
elif target.core.startswith("Cortex-M7"):
self.cpu = ["-mcpu=cortex-m7"]
elif target.core.startswith("Cortex-M23"):
self.cpu = ["-mcpu=cortex-m23"]
elif target.core.startswith("Cortex-M33F"):
self.cpu = ["-mcpu=cortex-m33+nodsp"]
elif target.core.startswith("Cortex-M33"):
self.cpu = ["-march=armv8-m.main"]
else:
self.cpu = ["-mcpu={}".format(target.core.lower())]
if target.core.startswith("Cortex-M"):
self.cpu.append("-mthumb")
# FPU handling, M7 possibly to have double FPU
if target.core == "Cortex-M4F":
self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7F":
self.cpu.append("-mfpu=fpv5-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7FD":
self.cpu.append("-mfpu=fpv5-d16")
self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
if ((target.core.startswith("Cortex-M23") or
target.core.startswith("Cortex-M33")) and
not target.core.endswith("-NS")):
self.cpu.append("-mcmse")
self.flags["ld"].extend([
"-Wl,--cmse-implib",
"-Wl,--out-implib=%s" % join(build_dir, "cmse_lib.o")
])
elif target.core == "Cortex-M23-NS" or target.core == "Cortex-M33-NS" or target.core == "Cortex-M33F-NS":
self.flags["ld"].append("-DDOMAIN_NS=1")
self.flags["common"] += self.cpu
main_cc = join(tool_path, "arm-none-eabi-gcc")
main_cppc = join(tool_path, "arm-none-eabi-g++")
self.asm = [main_cc] + self.flags['asm'] + self.flags["common"]
self.cc = [main_cc]
self.cppc =[main_cppc]
self.cc += self.flags['c'] + self.flags['common']
self.cppc += self.flags['cxx'] + self.flags['common']
self.flags['ld'] += self.cpu
self.ld = [join(tool_path, "arm-none-eabi-gcc")] + self.flags['ld']
self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc", "nosys"]
self.preproc = [join(tool_path, "arm-none-eabi-cpp"), "-E", "-P"]
self.ar = join(tool_path, "arm-none-eabi-ar")
self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
self.use_distcc = (bool(getenv("DISTCC_POTENTIAL_HOSTS", False))
and not getenv("MBED_DISABLE_DISTCC", False))
def version_check(self):
stdout, _, retcode = run_cmd([self.cc[0], "--version"], redirect=True)
msg = None
match = self.GCC_VERSION_RE.search(stdout)
found_version = LooseVersion(match.group(0).decode('utf-8')) if match else None
min_ver, max_ver = self.GCC_RANGE
if found_version and (found_version < min_ver or found_version >= max_ver):
msg = ("Compiler version mismatch: Have {}; "
"expected version >= {} and < {}"
.format(found_version, min_ver, max_ver))
elif not match:
msg = ("Compiler version mismatch: Could not detect version; "
"expected version >= {} and < {}"
.format(min_ver, max_ver))
if msg:
self.notify.cc_info({
"message": msg,
"file": "",
"line": "",
"col": "",
"severity": "Warning",
})
def is_not_supported_error(self, output):
return "error: #error [NOT_SUPPORTED]" in output
def parse_output(self, output):
# The warning/error notification is multiline
msg = None
for line in output.splitlines():
match = self.DIAGNOSTIC_PATTERN.search(line)
if match is not None:
if msg is not None:
self.notify.cc_info(msg)
msg = None
msg = {
'severity': match.group('severity').lower(),
'file': match.group('file'),
'line': match.group('line'),
'col': match.group('col'),
'message': match.group('message'),
'text': '',
'target_name': self.target.name,
'toolchain_name': self.name
}
if msg is not None:
self.notify.cc_info(msg)
def get_dep_option(self, object):
base, _ = splitext(object)
dep_path = base + '.d'
return ["-MD", "-MF", dep_path]
def get_config_option(self, config_header):
return ['-include', config_header]
def get_compile_options(self, defines, includes, for_asm=False):
opts = ['-D%s' % d for d in defines]
if self.RESPONSE_FILES:
opts += ['@%s' % self.get_inc_file(includes)]
else:
opts += ["-I%s" % i for i in includes]
config_header = self.get_config_header()
if config_header is not None:
opts = opts + self.get_config_option(config_header)
return opts
@hook_tool
def assemble(self, source, object, includes):
# Build assemble command
cmd = self.asm + self.get_compile_options(self.get_symbols(True), includes) + ["-o", object, source]
# Call cmdline hook
cmd = self.hook.get_cmdline_assembler(cmd)
# Return command array, don't execute
return [cmd]
@hook_tool
def compile(self, cc, source, object, includes):
# Build compile command
cmd = cc + self.get_compile_options(self.get_symbols(), includes)
cmd.extend(self.get_dep_option(object))
cmd.extend(["-o", object, source])
# Call cmdline hook
cmd = self.hook.get_cmdline_compiler(cmd)
if self.use_distcc:
cmd = ["distcc"] + cmd
return [cmd]
def compile_c(self, source, object, includes):
return self.compile(self.cc, source, object, includes)
def compile_cpp(self, source, object, includes):
return self.compile(self.cppc, source, object, includes)
@hook_tool
def link(self, output, objects, libraries, lib_dirs, mem_map):
libs = []
for l in libraries:
name, _ = splitext(basename(l))
libs.append("-l%s" % name[3:])
libs.extend(["-l%s" % l for l in self.sys_libs])
# Preprocess
if mem_map:
preproc_output = join(dirname(output), ".link_script.ld")
cmd = (self.preproc + [mem_map] + self.ld[1:] +
[ "-o", preproc_output])
self.notify.cc_verbose("Preproc: %s" % ' '.join(cmd))
self.default_cmd(cmd)
mem_map = preproc_output
# Build linker command
map_file = splitext(output)[0] + ".map"
cmd = self.ld + ["-o", output, "-Wl,-Map=%s" % map_file] + objects + ["-Wl,--start-group"] + libs + ["-Wl,--end-group"]
if mem_map:
cmd.extend(['-T', mem_map])
for L in lib_dirs:
cmd.extend(['-L', L])
cmd.extend(libs)
# Call cmdline hook
cmd = self.hook.get_cmdline_linker(cmd)
if self.RESPONSE_FILES:
# Split link command to linker executable + response file
cmd_linker = cmd[0]
link_files = self.get_link_file(cmd[1:])
cmd = [cmd_linker, "@%s" % link_files]
# Exec command
self.notify.cc_verbose("Link: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@hook_tool
def archive(self, objects, lib_path):
if self.RESPONSE_FILES:
param = ["@%s" % self.get_arch_file(objects)]
else:
param = objects
# Exec command
self.default_cmd([self.ar, 'rcs', lib_path] + param)
@hook_tool
def binary(self, resources, elf, bin):
# Build binary command
_, fmt = splitext(bin)
bin_arg = {'.bin': 'binary', '.hex': 'ihex'}[fmt]
cmd = [self.elf2bin, "-O", bin_arg, elf, bin]
# Call cmdline hook
cmd = self.hook.get_cmdline_binary(cmd)
# Exec command
self.notify.cc_verbose("FromELF: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@staticmethod
def name_mangle(name):
return "_Z%i%sv" % (len(name), name)
@staticmethod
def make_ld_define(name, value):
return "-D%s=%s" % (name, value)
@staticmethod
def redirect_symbol(source, sync, build_dir):
return "-Wl,--defsym=%s=%s" % (source, sync)
@staticmethod
def check_executable():
"""Returns True if the executable (arm-none-eabi-gcc) location
specified by the user exists OR the executable can be found on the PATH.
Returns False otherwise."""
if not TOOLCHAIN_PATHS['GCC_ARM'] or not exists(TOOLCHAIN_PATHS['GCC_ARM']):
if find_executable('arm-none-eabi-gcc'):
TOOLCHAIN_PATHS['GCC_ARM'] = ''
return True
else:
return False
else:
exec_name = join(TOOLCHAIN_PATHS['GCC_ARM'], 'arm-none-eabi-gcc')
return exists(exec_name) or exists(exec_name + '.exe')
class GCC_ARM(GCC):
pass
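# Illustrative sketch (not part of the upstream file) showing how DIAGNOSTIC_PATTERN
# decomposes a typical GCC diagnostic line; the sample line below is invented.
if __name__ == "__main__":
    sample = "main.cpp:12:5: error: 'foo' was not declared in this scope"
    match = GCC.DIAGNOSTIC_PATTERN.search(sample)
    if match:
        print(match.group("file"), match.group("line"), match.group("col"),
              match.group("severity"), match.group("message"))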
|
py | 1a48bba6d80463dd058a89556fca98273b257f8f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from thing.models import Contract
# We need to delete duplicate contract IDs before we make it a unique field
def delete_contract_duplicates_forward(apps, schema_editor):
ids = Contract.objects.all().values_list('contract_id', flat=True).distinct()
for contract_id in ids:
contracts = Contract.objects.filter(contract_id=contract_id)
if contracts.count() > 1:
itercontracts = iter(contracts)
next(itercontracts)
for contract in itercontracts:
contract.delete()
def delete_contract_duplicates_reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('thing', '0022_auto_20170824_1956'),
]
operations = [
migrations.RunPython(
delete_contract_duplicates_forward,
delete_contract_duplicates_reverse
),
migrations.AlterField(
model_name='contract',
name='contract_id',
field=models.IntegerField(unique=True, db_index=True),
preserve_default=True,
),
]
|
py | 1a48bc309c9c59594306f1e2e0f9e5279ed73078 | from collections import namedtuple
from spinn.util.data import *
ModelSpec_ = namedtuple("ModelSpec", ["model_dim", "word_embedding_dim",
"batch_size", "vocab_size", "seq_length",
"model_visible_dim"])
def ModelSpec(*args, **kwargs):
args = dict(list(zip(ModelSpec_._fields, args)))
args.update(kwargs)
# Defaults
if "model_visible_dim" not in args:
args["model_visible_dim"] = args["model_dim"]
return ModelSpec_(**args)
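# Hypothetical usage sketch (not in the original file): positional arguments are
# zipped onto the named fields, and model_visible_dim defaults to model_dim.
if __name__ == "__main__":
    spec = ModelSpec(300, 100, batch_size=32, vocab_size=10000, seq_length=25)
    assert spec.model_visible_dim == spec.model_dim == 300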
|
py | 1a48bce3bb7c74551a365fd471f6869b128babac | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestSliceOp(OpTest):
def setUp(self):
self.op_type = "slice"
self.config()
self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
self.attrs = {
'axes': self.axes,
'starts': self.starts,
'ends': self.ends
}
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [1, 0, 2]
self.ends = [3, 3, 4]
self.axes = [0, 1, 2]
self.out = self.input[1:3, 0:3, 2:4, :]
def test_check_output(self):
self.check_output()
class TestCase1(TestSliceOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 2]
self.out = self.input[-3:3, 0:100, 2:-1, :]
class TestCase2(TestSliceOp):
def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float32")
self.starts = [-3, 0, 2]
self.ends = [3, 100, -1]
self.axes = [0, 1, 3]
self.out = self.input[-3:3, 0:100, :, 2:-1]
if __name__ == '__main__':
unittest.main()
|
py | 1a48bda745d7d7f64348d1b50b95d13be0817dbe | import pytest
from app.models import Expense, User
from app.models import expense
pytestmark = pytest.mark.nologin
def headers(tok):
return {'Authorization': f'Bearer {tok}'}
def test_get_expenses(db_with_expenses, token, client):
resp = client.get('/api/expenses?page=1&page_size=10',
headers=headers(token))
assert resp.status_code == 200
expenses = resp.get_json()
assert len(expenses) == 10
for i, e in enumerate(expenses):
assert e['description'] == f'Item {15-i}'
def test_get_expense(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item 10').first()
db_data = {
'id': exp.id,
'description': exp.description,
'amount': exp.amount_str,
'date': exp.date.isoformat(),
'payment_mode': exp.payment_mode.mode,
'estimate': exp.estimate.item if exp.estimate else None,
'tags': ','.join([tag.tagname for tag in exp.tags]),
'comments': exp.comments,
'created_on': exp.created_on.isoformat(),
'updated_on': exp.updated_on.isoformat()
}
resp = client.get(f'/api/expenses/{exp.id}',
headers=headers(token))
assert resp.status_code == 200
e = resp.get_json()
assert e == db_data
def test_update_expense(db_with_expenses, token, client):
# Following code is needed because we are accessing amount
expense.current_user = User.query.get(1)
exp = Expense.query.filter_by(description='Item 10').first()
orig_amount = exp.amount
orig_comments = exp.comments
data = {
'amount': int(orig_amount + 10),
'comments': 'Amount increased by 10'
}
resp = client.patch(f'/api/expenses/{exp.id}',
json=data,
headers=headers(token))
assert resp.status_code == 200
e = resp.get_json()
assert e['id'] == exp.id
assert e['amount'] == str(orig_amount + 10)
assert e['comments'] != orig_comments
assert e['comments'] == 'Amount increased by 10'
def test_delete_expense(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item 10').first()
resp = client.delete(f'/api/expenses/{exp.id}', headers=headers(token))
assert resp.status_code == 204
def test_delete_forbidden(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item user2').first()
resp = client.delete(f'/api/expenses/{exp.id}', headers=headers(token))
assert resp.status_code == 403
assert resp.get_json()['msg'].startswith('Forbidden')
def test_delete_not_found(db_with_expenses, token, client):
resp = client.delete('/api/expenses/50', headers=headers(token))
assert resp.status_code == 404
assert resp.get_json()['msg'] == 'Expense not found.'
|
py | 1a48bee779701f5d6a5a03477a39d0509c7fdf68 | """The sma integration."""
from __future__ import annotations
from datetime import timedelta
import logging
import pysma
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry, ConfigEntryNotReady
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PATH,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SSL,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
CONF_CUSTOM,
CONF_FACTOR,
CONF_GROUP,
CONF_KEY,
CONF_UNIT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
PLATFORMS,
PYSMA_COORDINATOR,
PYSMA_DEVICE_INFO,
PYSMA_OBJECT,
PYSMA_REMOVE_LISTENER,
PYSMA_SENSORS,
)
_LOGGER = logging.getLogger(__name__)
def _parse_legacy_options(
entry: ConfigEntry, sensor_def: pysma.sensor.Sensors
) -> list[str]:
"""Parse legacy configuration options.
This will parse the legacy CONF_SENSORS and CONF_CUSTOM configuration options
to support deprecated yaml config from platform setup.
"""
# Add sensors from the custom config
sensor_def.add(
[
pysma.sensor.Sensor(
o[CONF_KEY], n, o[CONF_UNIT], o[CONF_FACTOR], o.get(CONF_PATH)
)
for n, o in entry.data.get(CONF_CUSTOM).items()
]
)
# Parsing of sensors configuration
if not (config_sensors := entry.data.get(CONF_SENSORS)):
return []
# Support import of legacy config that should have been removed from 0.99, but was still functional
# See also #25880 and #26306. Functional support was dropped in #48003
if isinstance(config_sensors, dict):
config_sensors_list = []
for name, attr in config_sensors.items():
config_sensors_list.append(name)
config_sensors_list.extend(attr)
config_sensors = config_sensors_list
# Find and replace sensors removed from pysma
# This only alters the config, the actual sensor migration takes place in _migrate_old_unique_ids
for sensor in config_sensors.copy():
if sensor in pysma.const.LEGACY_MAP:
config_sensors.remove(sensor)
config_sensors.append(pysma.const.LEGACY_MAP[sensor]["new_sensor"])
# Only sensors from config should be enabled
for sensor in sensor_def:
sensor.enabled = sensor.name in config_sensors
return config_sensors
def _migrate_old_unique_ids(
hass: HomeAssistant,
entry: ConfigEntry,
sensor_def: pysma.sensor.Sensors,
config_sensors: list[str],
) -> None:
"""Migrate legacy sensor entity_id format to new format."""
entity_registry = er.async_get(hass)
# Create list of all possible sensor names
possible_sensors = set(
config_sensors + [s.name for s in sensor_def] + list(pysma.const.LEGACY_MAP)
)
for sensor in possible_sensors:
if sensor in sensor_def:
pysma_sensor = sensor_def[sensor]
original_key = pysma_sensor.key
elif sensor in pysma.const.LEGACY_MAP:
# If sensor was removed from pysma we will remap it to the new sensor
legacy_sensor = pysma.const.LEGACY_MAP[sensor]
pysma_sensor = sensor_def[legacy_sensor["new_sensor"]]
original_key = legacy_sensor["old_key"]
else:
_LOGGER.error("%s does not exist", sensor)
continue
# Find entity_id using previous format of unique ID
entity_id = entity_registry.async_get_entity_id(
"sensor", "sma", f"sma-{original_key}-{sensor}"
)
if not entity_id:
continue
# Change unique_id to new format using the device serial in entry.unique_id
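# Hypothetical example of this rename (values are illustrative only):
#   old unique_id: "sma-6100_40263F00-grid_power"
#   new unique_id: "{entry.unique_id}-6100_40263F00_0"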
new_unique_id = f"{entry.unique_id}-{pysma_sensor.key}_{pysma_sensor.key_idx}"
entity_registry.async_update_entity(entity_id, new_unique_id=new_unique_id)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up sma from a config entry."""
# Init the SMA interface
protocol = "https" if entry.data[CONF_SSL] else "http"
url = f"{protocol}://{entry.data[CONF_HOST]}"
verify_ssl = entry.data[CONF_VERIFY_SSL]
group = entry.data[CONF_GROUP]
password = entry.data[CONF_PASSWORD]
session = async_get_clientsession(hass, verify_ssl=verify_ssl)
sma = pysma.SMA(session, url, password, group)
try:
# Get updated device info
device_info = await sma.device_info()
# Get all device sensors
sensor_def = await sma.get_sensors()
except (
pysma.exceptions.SmaReadException,
pysma.exceptions.SmaConnectionException,
) as exc:
raise ConfigEntryNotReady from exc
# Parse legacy options if initial setup was done from yaml
if entry.source == SOURCE_IMPORT:
config_sensors = _parse_legacy_options(entry, sensor_def)
_migrate_old_unique_ids(hass, entry, sensor_def, config_sensors)
# Define the coordinator
async def async_update_data():
"""Update the used SMA sensors."""
try:
await sma.read(sensor_def)
except (
pysma.exceptions.SmaReadException,
pysma.exceptions.SmaConnectionException,
) as exc:
raise UpdateFailed(exc) from exc
interval = timedelta(
seconds=entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
)
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="sma",
update_method=async_update_data,
update_interval=interval,
)
try:
await coordinator.async_config_entry_first_refresh()
except ConfigEntryNotReady:
await sma.close_session()
raise
# Ensure we logout on shutdown
async def async_close_session(event):
"""Close the session."""
await sma.close_session()
remove_stop_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_close_session
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
PYSMA_OBJECT: sma,
PYSMA_COORDINATOR: coordinator,
PYSMA_SENSORS: sensor_def,
PYSMA_REMOVE_LISTENER: remove_stop_listener,
PYSMA_DEVICE_INFO: device_info,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
data = hass.data[DOMAIN].pop(entry.entry_id)
await data[PYSMA_OBJECT].close_session()
data[PYSMA_REMOVE_LISTENER]()
return unload_ok
|
py | 1a48bf44db1757aa6755f46cc72daedf3285ade4 | import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# pad our images with zeros in both dimensions.
# images should be input images of shape raw_image_size by raw_image_size, and output is of size image_size x image_size
# input image tensor should be of shape [-1, raw_image_size^2]. Output is of shape [-1, image_size^2]
def padImages(images, rawside, outside):
padSize = (outside - rawside)/2.0
images = [np.reshape(image, (rawside, rawside)) for image in images]
leftPad = int(np.floor(padSize))
rightPad = int(np.ceil(padSize))
padImages = np.lib.pad(images, [[0,0],[leftPad, rightPad], [leftPad, rightPad]], 'constant')
return np.reshape(padImages, (-1,outside*outside))
if __name__=="__main__":
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
print(padImages([[1,1,1,1],[2,2,2,2]], 2, 10))
|
py | 1a48bfd55cf9df706ec99120a1aea63788d0e3b2 | def binary_Search(arr,val):
start=0
end=len(arr)-1
while start<=end:
mid=(start+end)//2
if arr[mid]==val:
return True
elif arr[mid]>val:
end=mid-1
else:
start=mid+1
return False
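# Illustrative examples (assumed inputs, not part of the original script):
#   binary_Search([1, 3, 5, 7, 9], 7) -> True
#   binary_Search([1, 3, 5, 7, 9], 4) -> False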
arr=list(map(int,input().split()))
#sort the array if not sorted
arr.sort()
#val= value to find in array
val=int(input())
if binary_Search(arr,val):
print("value found")
else:
print("value not found")
|
py | 1a48c046f1e5a7ec1d90f567da821fe910a2b2fa | from util import hook, http
@hook.command('god')
@hook.command
def bible(inp):
".bible <passage> -- gets <passage> from the Bible (ESV)"
base_url = ('http://www.esvapi.org/v2/rest/passageQuery?key=IP&'
'output-format=plain-text&include-heading-horizontal-lines&'
'include-headings=false&include-passage-horizontal-lines=false&'
'include-passage-references=false&include-short-copyright=false&'
'include-footnotes=false&line-length=0&'
'include-heading-horizontal-lines=false')
text = http.get(base_url, passage=inp)
text = ' '.join(text.split())
if len(text) > 400:
text = text[:text.rfind(' ', 0, 400)] + '...'
return text
@hook.command('allah')
@hook.command
def koran(inp): # Koran look-up plugin by Ghetto Wizard
".koran <chapter.verse> -- gets <chapter.verse> from the Koran"
url = 'http://quod.lib.umich.edu/cgi/k/koran/koran-idx?type=simple'
results = http.get_html(url, q1=inp).xpath('//li')
if not results:
return 'No results for ' + inp
return results[0].text_content()
|
py | 1a48c1fd5d97d4c9d35e270e0896f0ee753e68a1 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiPhotonicMediaOtsiGserverAdaptationPac(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, number_of_otsi=None): # noqa: E501
"""TapiPhotonicMediaOtsiGserverAdaptationPac - a model defined in OpenAPI
:param number_of_otsi: The number_of_otsi of this TapiPhotonicMediaOtsiGserverAdaptationPac. # noqa: E501
:type number_of_otsi: int
"""
self.openapi_types = {
'number_of_otsi': int
}
self.attribute_map = {
'number_of_otsi': 'number-of-otsi'
}
self._number_of_otsi = number_of_otsi
@classmethod
def from_dict(cls, dikt) -> 'TapiPhotonicMediaOtsiGserverAdaptationPac':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.photonic.media.OtsiGserverAdaptationPac of this TapiPhotonicMediaOtsiGserverAdaptationPac. # noqa: E501
:rtype: TapiPhotonicMediaOtsiGserverAdaptationPac
"""
return util.deserialize_model(dikt, cls)
@property
def number_of_otsi(self):
"""Gets the number_of_otsi of this TapiPhotonicMediaOtsiGserverAdaptationPac.
none # noqa: E501
:return: The number_of_otsi of this TapiPhotonicMediaOtsiGserverAdaptationPac.
:rtype: int
"""
return self._number_of_otsi
@number_of_otsi.setter
def number_of_otsi(self, number_of_otsi):
"""Sets the number_of_otsi of this TapiPhotonicMediaOtsiGserverAdaptationPac.
none # noqa: E501
:param number_of_otsi: The number_of_otsi of this TapiPhotonicMediaOtsiGserverAdaptationPac.
:type number_of_otsi: int
"""
self._number_of_otsi = number_of_otsi
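# Hedged usage sketch (not part of the generated module): construct the model
# directly and read the property back; the value 4 is arbitrary.
if __name__ == "__main__":
    pac = TapiPhotonicMediaOtsiGserverAdaptationPac(number_of_otsi=4)
    print(pac.number_of_otsi)   # 4
    print(pac.attribute_map)    # {'number_of_otsi': 'number-of-otsi'}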
|
py | 1a48c39e1281ac336409fc0ea596c606673941f8 | # DADSA - Assignment 1
# Reece Benson
import random
from classes import Menu as Menu
from classes import Handler as Handler
class App():
# Define the variables we will be using
debug = True
handler = None
# Define all of the properties we will need to use
def __init__(self):
# Load our handler
self.handler = Handler.Handler(self)
self.handler.load()
# Generate rounds
self.generate_rounds()
# Hold the program
self.exit()
# Generate our rounds from our player list
def generate_rounds(self):
# Let's generate our random rounds from scratch
round_data = { }
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Generate our rounds
for gender in players:
# Create our gendered rounds
if(not gender in round_data):
# Default Round Cap
roundCap = 3
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ gender: [ { "_roundCap": roundCap } ] })
# Create our round data from players
rnd_players = random.sample(players[gender], len(players[gender]))
x = 0
                for i in range(len(rnd_players) // 2):
# Grab our versus players
playerOne = rnd_players[x]
playerTwo = rnd_players[x + 1]
print("{0} vs {1} ".format(playerOne.name(), playerTwo.name()))
# Increment by 2 to avoid having duplicates
print(x)
x += 2
print(round_data)
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App() |
py | 1a48c3dec1dc3c8ad63e7f4c54475519bc6cf8aa | import numpy as np
def res_to_chi2(res: np.ndarray):
"""
We assume that the residuals res are related to an objective function
value chi2 via::
chi2 = sum(res**2)
which is consistent with the AMICI definition but NOT the 'Linear'
formulation in scipy.
"""
if res is None:
return None
return np.dot(res, res)
def sres_to_schi2(res: np.ndarray, sres: np.ndarray):
"""
In line with the assumptions in res_to_chi2.
"""
if res is None or sres is None:
return None
return 2 * res.dot(sres)
def sres_to_fim(sres: np.ndarray):
"""
In line with the assumptions in res_to_chi2.
"""
if sres is None:
return None
return sres.transpose().dot(sres)
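# Hedged sanity check (not part of the original module): verify that
# sres_to_schi2 agrees with a central finite-difference gradient of
# res_to_chi2. The toy residual model below is made up purely for illustration.
if __name__ == "__main__":
    data = np.array([1.0, 2.0, 3.0])
    t = np.arange(len(data), dtype=float)
    def residuals(p):
        # toy linear model p[0] + p[1] * t fitted against `data`
        return p[0] + p[1] * t - data
    def jacobian(p):
        # d residuals / d p: a column of ones for p[0], a column of t for p[1]
        return np.stack([np.ones_like(t), t], axis=1)
    p0 = np.array([0.5, -1.0])
    schi2 = sres_to_schi2(residuals(p0), jacobian(p0))
    eps = 1e-6
    fd = np.array([
        (res_to_chi2(residuals(p0 + eps * e)) - res_to_chi2(residuals(p0 - eps * e)))
        / (2 * eps)
        for e in np.eye(len(p0))
    ])
    print("analytic gradient:", schi2)
    print("finite differences:", fd)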
|
py | 1a48c549c5a3ff61364b49e8d193cc57022f0162 | from numpy import *
import numpy as np
import random
import math
import os
import time
import pandas as pd
import csv
import math
import random
# 定义函数
def ReadMyCsv(SaveList, fileName):
csv_reader = csv.reader(open(fileName))
    for row in csv_reader:  # append each RNA-disease pair to SaveList (note: includes the header row)
SaveList.append(row)
return
def ReadMyCsv2(SaveList, fileName):
csv_reader = csv.reader(open(fileName))
for row in csv_reader:
counter = 0
while counter < len(row):
            row[counter] = int(row[counter])  # convert the value to int
counter = counter + 1
SaveList.append(row)
return
def StorFile(data, fileName):
with open(fileName, "w", newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(data)
return
def MyNegativeSample():
    '''
    Build NegativeSample from AssociationMatrix and PositiveSample.
    '''
    # Data
AssociationMatrix = []
ReadMyCsv(AssociationMatrix, "SecondRandomShuffle\AssociationMatrix.csv")
print('AssociationMatrix[0]', AssociationMatrix[0])
print(len(AssociationMatrix))
PositiveSample = []
ReadMyCsv(PositiveSample, 'SecondRandomShuffle\PositiveSample.csv')
print(len(PositiveSample))
print(PositiveSample[0])
NegativeSample = []
counterN = 0
    while counterN < len(PositiveSample):  # randomly pick disease-RNA pairs, one per iteration
counter1 = random.randint(0, len(AssociationMatrix) - 1)
counter2 = random.randint(0, len(AssociationMatrix[counter1]) - 1)
flag1 = 0
counter3 = 0
        while counter3 < len(PositiveSample):  # check whether the pair already exists in the positive samples
if counter1 == PositiveSample[counter3][0] and counter2 == PositiveSample[counter3][1]:
print('fail1')
flag1 = 1
break
counter3 = counter3 + 1
if flag1 == 1:
continue
flag2 = 0
counter4 = 0
        while counter4 < len(NegativeSample):  # make sure the pair is not already among the chosen negative samples (avoid duplicates)
if counter1 == NegativeSample[counter4][0] and counter2 == NegativeSample[counter4][1]:
print('fail2')
flag2 = 1
break
counter4 = counter4 + 1
if flag2 == 1:
continue
        if flag1 == 0 and flag2 == 0:
Pair = []
Pair.append(counter1)
Pair.append(counter2)
            NegativeSample.append(Pair)  # lower-triangular matrix, so row > column always holds
print(counterN)
counterN = counterN + 1
print(len(NegativeSample))
StorFile(NegativeSample, 'SecondRandomShuffle\\NegativeSample.csv')
return NegativeSample
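# Hedged addition (not in the original script): MyNegativeSample is defined but
# never invoked above; assuming the script is meant to be run directly, call it
# under the usual main guard.
if __name__ == "__main__":
    MyNegativeSample()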
|
py | 1a48c5614de3a73aba50b6486ea0ef5271d62b74 | class node:
def __init__ (self,data):
self.data = data
self.left = None
self.rigth = None
class BinaryTree:
def __init__ (self, data):
self.raiz = node(data)
    def _add(self, current, data):
        if data < current.data:
            if current.left is None:
                current.left = node(data)
            else:
                self._add(current.left, data)
        else:
            if current.rigth is None:
                current.rigth = node(data)
            else:
                self._add(current.rigth, data)
    def _preOrder(self, node):
        if node is not None:
            print(node.data, end=" ")
            self._preOrder(node.left)
            self._preOrder(node.rigth)
    def _inOrder(self, node):
        if node is not None:
            self._inOrder(node.left)
            print(node.data, end=" ")
            self._inOrder(node.rigth)
    def _postOrder(self, node):
        if node is not None:
            self._postOrder(node.left)
            self._postOrder(node.rigth)
            print(node.data, end=" ")
    def _search(self, node, temp):
        if node is None:
            return None
        if node.data == temp:
            return node
        if temp < node.data:
            return self._search(node.left, temp)
        if temp > node.data:
            return self._search(node.rigth, temp)
def add(self,data):
self._add(self.raiz,data)
    def preOrder(self):
        self._preOrder(self.raiz)
    def inOrder(self):
        self._inOrder(self.raiz)
    def postOrder(self):
        self._postOrder(self.raiz)
def search(self,temp):
return self._search(self.raiz, temp)
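# Hedged usage sketch (not part of the original file): exercise the tree with a
# few arbitrary values.
if __name__ == "__main__":
    tree = BinaryTree(8)
    for value in (3, 10, 1, 6, 14):
        tree.add(value)
    tree.inOrder()                       # prints values in sorted order: 1 3 6 8 10 14
    print()
    print(tree.search(6) is not None)    # True
    print(tree.search(7) is not None)    # False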
|
py | 1a48c5b9e5ed665c76c9b2457e9737098116619f | import urllib2
__author__ = 'wangzijie'
print('aaaaabbbbbccccccccccc')
response = urllib2.urlopen("http://www.baidu.com")
print response.read() |
py | 1a48c83708973e15ae84007cb83e20a32be5114e | # -*- coding:UTF-8 -*-
import falcon
class QuoteResource:
def on_get(self, req, resp):
"""Handles GET requests"""
quote = {
'quote': (
"I've always been more interested in "
"the future than in the past."
),
'author': 'Grace Hopper'
}
resp.media = quote
api = falcon.API()
api.add_route('/quote', QuoteResource())
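# Hedged usage sketch (not part of the original file): exercise the route with
# falcon's bundled test client instead of a real WSGI server. In production the
# `api` object above would typically be served by a WSGI server, e.g.
# `gunicorn <module_name>:api` (the module name is not given here).
if __name__ == '__main__':
    from falcon import testing
    client = testing.TestClient(api)
    result = client.simulate_get('/quote')
    print(result.status)
    print(result.json)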
|
py | 1a48c9160e603d3122ca2b5ccb4a3799f22533e5 |
def test():
test_instructions = """0
3
0
1
-3"""
assert run(test_instructions) == 10
def run(in_val):
instructions = [int(instruction) for instruction in in_val.split()]
offsets = {}
register = 0
steps = 0
while True:
try:
instruction = instructions[register]
except IndexError:
return steps
relative_offset = offsets.get(register, 0)
offset = instruction + relative_offset
if offset >= 3:
offsets[register] = relative_offset - 1
else:
offsets[register] = relative_offset + 1
register += offset
steps += 1
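# Hedged usage sketch (not part of the original file): run the self-test, then
# solve a real puzzle input if one is present; "input.txt" is an assumed name.
if __name__ == "__main__":
    test()
    try:
        with open("input.txt") as handle:
            print(run(handle.read()))
    except FileNotFoundError:
        pass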
|
py | 1a48c97cd7da22e0ce475cb5f729e297b97d7936 | # -*- coding: utf-8 -*-
#
# VEGASSceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.hlinke.de/ ]
# [ Github: coming soon ]
# [ Documentation: coming soon ]
#
# Copyright (C) 2019 Harold Linke <http://www.hlinke.de>.
# VEGASSceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file
#
# VEGASSceneDetect is based on pySceneDetect by Brandon Castellano
# ---------------------------------------------------------------
# [ Site: http://www.bcastell.com/projects/pyscenedetect/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
# [ Documentation: http://pyscenedetect.readthedocs.org/ ]
#
# Copyright (C) 2012-2018 Brandon Castellano <http://www.bcastell.com>.
#
# PySceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file, or visit one of the following pages for details:
# - https://github.com/Breakthrough/PySceneDetect/
# - http://www.bcastell.com/projects/pyscenedetect/
#
# This software uses the Numpy, OpenCV, click, tqdm, and pytest libraries.
# See the included LICENSE files or one of the above URLs for more information.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
""" PySceneDetect config.py
this file reads configuration parameters for VegasScenedetect
"""
import json
import os
class SD_Config():
""" Configuration of VEGASSCendetector """
def __init__(self):
# type:
""" SDConfig Constructor Method (__init__)
Arguments:
None
Raises:
None
"""
#**VEGASPython**
filedir = os.path.dirname(os.path.realpath(__file__))
filepath = os.path.join(filedir, 'config.json')
with open(filepath, "r") as read_file:
data = json.load(read_file)
self.useHSV = False # define if HSV or BGR should be used for content analysis - BGR is faster
        self.showPreview = True # defines whether a preview of the analysed video should be shown
        self.PreviewFrameSkip = 100 # defines the number of frames skipped before the preview is updated - lower numbers make the preview smoother but cost processing time
        self.showFrameValues = False # the values calculated for each frame are shown - can be used to check the threshold for a cut
self.threshold = 30
self.min_scene_len = 15
try:
if "useHSV" in data:
self.useHSV = data["useHSV"] # define if HSV or BGR should be used for content analysis - BGR is faster
if "showPreview" in data:
                self.showPreview = data["showPreview"] # defines whether a preview of the analysed video should be shown
if "PreviewFrameSkip" in data:
self.PreviewFrameSkip = data["PreviewFrameSkip"] # defines the number of frames skipped before the preview is updated - lower numbers make the preview smoother but cost processing time
if "showFrameValues" in data:
                self.showFrameValues = data["showFrameValues"] # the values calculated for each frame are shown - can be used to check the threshold for a cut
if "threshold" in data:
self.threshold = data["threshold"] # threshold that needs to be exceeded to determine a cut
if "min_scene_len" in data:
self.min_scene_len = data["min_scene_len"]
if "print_parameters" in data:
print("Parameters: useHSV:",self.useHSV, " showPreview:", self.showPreview, " PreviewFrameSkip:", self.PreviewFrameSkip, " showFrameValues:", self.showFrameValues, " Threshold:",self.threshold)
except:
print ("Error in Config File")
print(data)
print("useHSV:",self.useHSV, " showPreview:", self.showPreview, " PreviewFrameSkip:", self.PreviewFrameSkip, " showFrameValues:", self.showFrameValues, " Threshold:",self.threshold)
#**/VEGASPython**
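# Hedged example (not part of the original file): the config.json loaded above
# is expected to hold some or all of the keys checked in __init__; the values
# below are illustrative defaults only.
# {
#     "useHSV": false,
#     "showPreview": true,
#     "PreviewFrameSkip": 100,
#     "showFrameValues": false,
#     "threshold": 30,
#     "min_scene_len": 15,
#     "print_parameters": true
# }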
|
py | 1a48c9fbaaed91ccfda426012e56aec443d856ae | from PIL import Image
import tqdm
from itertools import compress
import os
import multiprocessing
def is_corrupted_img(file):
try:
img = Image.open(file)
img.verify()
return img is None
except:
return True
def read_files(path, exts):
files = []
for r, d, f in os.walk(path):
for file in f:
if file.lower().endswith(exts):
file = os.path.join(r, file)
file = os.path.abspath(file)
file = file.replace(os.sep, "/")
files.append(file)
return files
def search_corrputed_imgs(path,
exts=("jpg",
"png",
"jpeg",
"bmp",
"tif",
"tiff")
):
exts = tuple(exts)
imgs = read_files(path, exts)
corrupted_imgs = []
if len(imgs) > 0:
with multiprocessing.Pool() as p:
is_corrupted = list(tqdm.tqdm(p.imap(is_corrupted_img, imgs), total=len(imgs)))
corrupted_imgs = list(compress(imgs, is_corrupted))
return corrupted_imgs
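# Hedged usage sketch (not part of the original module): scan a directory and
# report broken images; "./images" is an assumed path.
if __name__ == "__main__":
    for bad_file in search_corrputed_imgs("./images"):
        print(bad_file)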
|
py | 1a48ca6bfd9f68509c429e1bcec72483c695e97b | #!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import sys
sys.path.insert(0, '..')
import test_harness
HOST_EXE_FILE = os.path.join(test_harness.WORK_DIR, 'a.out')
def run_compiler_test(source_file, target):
if target == 'host':
subprocess.check_call(['cc', source_file, '-o', HOST_EXE_FILE],
stderr=subprocess.STDOUT)
result = subprocess.check_output(HOST_EXE_FILE)
test_harness.check_result(source_file, result.decode())
else:
hex_file = test_harness.build_program([source_file])
result = test_harness.run_program(hex_file, target)
test_harness.check_result(source_file, result)
test_list = [fname for fname in test_harness.find_files(
('.c', '.cpp')) if not fname.startswith('_')]
all_targets = [fname for fname in test_list if 'noverilator' not in fname]
test_harness.register_tests(run_compiler_test, all_targets, [
'emulator', 'verilator', 'host', 'fpga'])
noverilator_targets = [fname for fname in test_list if 'noverilator' in fname]
test_harness.register_tests(
run_compiler_test, noverilator_targets, ['emulator', 'host', 'fpga'])
test_harness.execute_tests()
|
py | 1a48cadf899728e221082aa79320d083b27f9912 | # -*- coding: utf-8 -*-
import os
from ..utils import get_page_layout, get_text_objects
class BaseParser(object):
"""Defines a base parser.
"""
def _generate_layout(
self, filename, page, layout_kwargs,
layouts, dimensions, **kwargs
):
self.filename = filename
self.layout_kwargs = layout_kwargs
self.horizontal_text, self.dimensions = layouts[page-1], dimensions
self.pdf_width, self.pdf_height = self.dimensions
self.rootname, __ = os.path.splitext(self.filename)
|
py | 1a48cb3660c4761fc11d12609ce46faa365bf54c | """Clean Code in Python - Chapter 9: Common Design Patterns
> Monostate Pattern
"""
from log import logger
class SharedAttribute:
def __init__(self, initial_value=None):
self.value = initial_value
self._name = None
def __get__(self, instance, owner):
if instance is None:
return self
if self.value is None:
raise AttributeError(f"{self._name} was never set")
return self.value
def __set__(self, instance, new_value):
self.value = new_value
def __set_name__(self, owner, name):
self._name = name
class GitFetcher:
current_tag = SharedAttribute()
current_branch = SharedAttribute()
def __init__(self, tag, branch=None):
self.current_tag = tag
self.current_branch = branch
def pull(self):
logger.info("pulling from %s", self.current_tag)
return self.current_tag
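# Hedged usage sketch (not part of the original module): every GitFetcher
# instance shares current_tag/current_branch through the SharedAttribute
# descriptors above, which is the monostate behaviour this module demonstrates.
if __name__ == "__main__":
    f1 = GitFetcher("0.1")
    f2 = GitFetcher("0.2")
    assert f1.current_tag == f2.current_tag == "0.2"
    f1.current_tag = "0.3"
    assert f2.current_tag == "0.3"
    f2.pull()  # logs "pulling from 0.3" and returns "0.3"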
|
py | 1a48cb90f870255ff95ee72da190c02746648c19 | from datetime import datetime
import numpy as np
import copy
import logging
import math
import os
import pickle
import time
import tempfile
from typing import Callable, Dict, List, Optional, Type, Union
import ray
from ray.exceptions import RayError
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.normalize_actions import NormalizeActionWrapper
from ray.rllib.env.env_context import EnvContext
from ray.rllib.models import MODEL_DEFAULTS
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.utils import FilterManager, deep_update, merge_dicts
from ray.rllib.utils.spaces import space_utils
from ray.rllib.utils.framework import try_import_tf, TensorStructType
from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.typing import TrainerConfigDict, \
PartialTrainerConfigDict, EnvInfoDict, ResultDict, EnvType, PolicyID
from ray.tune.registry import ENV_CREATOR, register_env, _global_registry
from ray.tune.trainable import Trainable
from ray.tune.trial import ExportFormat
from ray.tune.resources import Resources
from ray.tune.logger import Logger, UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
# Max number of times to retry a worker failure. We shouldn't try too many
# times in a row since that would indicate a persistent cluster issue.
MAX_WORKER_FAILURE_RETRIES = 3
# yapf: disable
# __sphinx_doc_begin__
COMMON_CONFIG: TrainerConfigDict = {
# === Settings for Rollout Worker processes ===
# Number of rollout worker actors to create for parallel sampling. Setting
# this to 0 will force rollouts to be done in the trainer actor.
"num_workers": 2,
# Number of environments to evaluate vectorwise per worker. This enables
# model inference batching, which can improve performance for inference
# bottlenecked workloads.
"num_envs_per_worker": 1,
# Divide episodes into fragments of this many steps each during rollouts.
# Sample batches of this size are collected from rollout workers and
# combined into a larger batch of `train_batch_size` for learning.
#
# For example, given rollout_fragment_length=100 and train_batch_size=1000:
# 1. RLlib collects 10 fragments of 100 steps each from rollout workers.
# 2. These fragments are concatenated and we perform an epoch of SGD.
#
# When using multiple envs per worker, the fragment size is multiplied by
# `num_envs_per_worker`. This is since we are collecting steps from
# multiple envs in parallel. For example, if num_envs_per_worker=5, then
# rollout workers will return experiences in chunks of 5*100 = 500 steps.
#
# The dataflow here can vary per algorithm. For example, PPO further
# divides the train batch into minibatches for multi-epoch SGD.
"rollout_fragment_length": 200,
# Whether to rollout "complete_episodes" or "truncate_episodes" to
# `rollout_fragment_length` length unrolls. Episode truncation guarantees
# evenly sized batches, but increases variance as the reward-to-go will
# need to be estimated at truncation boundaries.
"batch_mode": "truncate_episodes",
# === Settings for the Trainer process ===
# Number of GPUs to allocate to the trainer process. Note that not all
# algorithms can take advantage of trainer GPUs. This can be fractional
# (e.g., 0.3 GPUs).
"num_gpus": 0,
# Training batch size, if applicable. Should be >= rollout_fragment_length.
# Samples batches will be concatenated together to a batch of this size,
# which is then passed to SGD.
"train_batch_size": 200,
# Arguments to pass to the policy model. See models/catalog.py for a full
# list of the available model options.
"model": MODEL_DEFAULTS,
# Arguments to pass to the policy optimizer. These vary by optimizer.
"optimizer": {},
# === Environment Settings ===
# Discount factor of the MDP.
"gamma": 0.99,
# Number of steps after which the episode is forced to terminate. Defaults
# to `env.spec.max_episode_steps` (if present) for Gym envs.
"horizon": None,
# Calculate rewards but don't reset the environment when the horizon is
# hit. This allows value estimation and RNN state to span across logical
# episodes denoted by horizon. This only has an effect if horizon != inf.
"soft_horizon": False,
# Don't set 'done' at the end of the episode. Note that you still need to
# set this if soft_horizon=True, unless your env is actually running
# forever without returning done=True.
"no_done_at_end": False,
# Arguments to pass to the env creator.
"env_config": {},
# Environment name can also be passed via config.
"env": None,
# Unsquash actions to the upper and lower bounds of env's action space
"normalize_actions": False,
# Whether to clip rewards during Policy's postprocessing.
# None (default): Clip for Atari only (r=sign(r)).
# True: r=sign(r): Fixed rewards -1.0, 1.0, or 0.0.
# False: Never clip.
# [float value]: Clip at -value and + value.
# Tuple[value1, value2]: Clip at value1 and value2.
"clip_rewards": None,
# Whether to clip actions to the action space's low/high range spec.
"clip_actions": True,
# Whether to use "rllib" or "deepmind" preprocessors by default
"preprocessor_pref": "deepmind",
# The default learning rate.
"lr": 0.0001,
# === Debug Settings ===
# Whether to write episode stats and videos to the agent log dir. This is
# typically located in ~/ray_results.
"monitor": False,
# Set the ray.rllib.* log level for the agent process and its workers.
# Should be one of DEBUG, INFO, WARN, or ERROR. The DEBUG level will also
# periodically print out summaries of relevant internal dataflow (this is
# also printed out once at startup at the INFO level). When using the
# `rllib train` command, you can also use the `-v` and `-vv` flags as
# shorthand for INFO and DEBUG.
"log_level": "WARN",
# Callbacks that will be run during various phases of training. See the
# `DefaultCallbacks` class and `examples/custom_metrics_and_callbacks.py`
# for more usage information.
"callbacks": DefaultCallbacks,
# Whether to attempt to continue training if a worker crashes. The number
# of currently healthy workers is reported as the "num_healthy_workers"
# metric.
"ignore_worker_failures": False,
# Log system resource metrics to results. This requires `psutil` to be
# installed for sys stats, and `gputil` for GPU metrics.
"log_sys_usage": True,
# Use fake (infinite speed) sampler. For testing only.
"fake_sampler": False,
# === Deep Learning Framework Settings ===
# tf: TensorFlow
# tfe: TensorFlow eager
# torch: PyTorch
"framework": "tf",
# Enable tracing in eager mode. This greatly improves performance, but
# makes it slightly harder to debug since Python code won't be evaluated
# after the initial eager pass. Only possible if framework=tfe.
"eager_tracing": False,
# === Exploration Settings ===
# Default exploration behavior, iff `explore`=None is passed into
# compute_action(s).
# Set to False for no exploration behavior (e.g., for evaluation).
"explore": True,
# Provide a dict specifying the Exploration object's config.
"exploration_config": {
# The Exploration class to use. In the simplest case, this is the name
# (str) of any class present in the `rllib.utils.exploration` package.
# You can also provide the python class directly or the full location
# of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy.
# EpsilonGreedy").
"type": "StochasticSampling",
# Add constructor kwargs here (if any).
},
# === Evaluation Settings ===
# Evaluate with every `evaluation_interval` training iterations.
# The evaluation stats will be reported under the "evaluation" metric key.
# Note that evaluation is currently not parallelized, and that for Ape-X
# metrics are already only reported for the lowest epsilon workers.
"evaluation_interval": None,
# Number of episodes to run per evaluation period. If using multiple
# evaluation workers, we will run at least this many episodes total.
"evaluation_num_episodes": 10,
# Internal flag that is set to True for evaluation workers.
"in_evaluation": False,
# Typical usage is to pass extra args to evaluation env creator
# and to disable exploration by computing deterministic actions.
# IMPORTANT NOTE: Policy gradient algorithms are able to find the optimal
# policy, even if this is a stochastic one. Setting "explore=False" here
# will result in the evaluation workers not using this optimal policy!
"evaluation_config": {
# Example: overriding env_config, exploration, etc:
# "env_config": {...},
# "explore": False
},
# Number of parallel workers to use for evaluation. Note that this is set
# to zero by default, which means evaluation will be run in the trainer
# process. If you increase this, it will increase the Ray resource usage
# of the trainer since evaluation workers are created separately from
# rollout workers.
"evaluation_num_workers": 0,
# Customize the evaluation method. This must be a function of signature
# (trainer: Trainer, eval_workers: WorkerSet) -> metrics: dict. See the
# Trainer._evaluate() method to see the default implementation. The
# trainer guarantees all eval workers have the latest policy state before
# this function is called.
"custom_eval_function": None,
# === Advanced Rollout Settings ===
# Use a background thread for sampling (slightly off-policy, usually not
# advisable to turn on unless your env specifically requires it).
"sample_async": False,
# Experimental flag to speed up sampling and use "trajectory views" as
# generic ModelV2 `input_dicts` that can be requested by the model to
# contain different information on the ongoing episode.
# NOTE: Only supported for PyTorch so far.
"_use_trajectory_view_api": False,
# Element-wise observation filter, either "NoFilter" or "MeanStdFilter".
"observation_filter": "NoFilter",
# Whether to synchronize the statistics of remote filters.
"synchronize_filters": True,
# Configures TF for single-process operation by default.
"tf_session_args": {
# note: overridden by `local_tf_session_args`
"intra_op_parallelism_threads": 2,
"inter_op_parallelism_threads": 2,
"gpu_options": {
"allow_growth": True,
},
"log_device_placement": False,
"device_count": {
"CPU": 1
},
"allow_soft_placement": True, # required by PPO multi-gpu
},
# Override the following tf session args on the local worker
"local_tf_session_args": {
# Allow a higher level of parallelism by default, but not unlimited
# since that can cause crashes with many concurrent drivers.
"intra_op_parallelism_threads": 8,
"inter_op_parallelism_threads": 8,
},
# Whether to LZ4 compress individual observations
"compress_observations": False,
# Wait for metric batches for at most this many seconds. Those that
# have not returned in time will be collected in the next train iteration.
"collect_metrics_timeout": 180,
# Smooth metrics over this many episodes.
"metrics_smoothing_episodes": 100,
# If using num_envs_per_worker > 1, whether to create those new envs in
# remote processes instead of in the same worker. This adds overheads, but
# can make sense if your envs can take much time to step / reset
# (e.g., for StarCraft). Use this cautiously; overheads are significant.
"remote_worker_envs": False,
# Timeout that remote workers are waiting when polling environments.
# 0 (continue when at least one env is ready) is a reasonable default,
# but optimal value could be obtained by measuring your environment
# step / reset and model inference perf.
"remote_env_batch_wait_ms": 0,
# Minimum time per train iteration (frequency of metrics reporting).
"min_iter_time_s": 0,
# Minimum env steps to optimize for per train call. This value does
# not affect learning, only the length of train iterations.
"timesteps_per_iteration": 0,
# This argument, in conjunction with worker_index, sets the random seed of
# each worker, so that identically configured trials will have identical
# results. This makes experiments reproducible.
"seed": None,
# Any extra python env vars to set in the trainer process, e.g.,
# {"OMP_NUM_THREADS": "16"}
"extra_python_environs_for_driver": {},
# The extra python environments need to set for worker processes.
"extra_python_environs_for_worker": {},
# === Advanced Resource Settings ===
# Number of CPUs to allocate per worker.
"num_cpus_per_worker": 1,
# Number of GPUs to allocate per worker. This can be fractional. This is
# usually needed only if your env itself requires a GPU (i.e., it is a
# GPU-intensive video game), or model inference is unusually expensive.
"num_gpus_per_worker": 0,
# Any custom Ray resources to allocate per worker.
"custom_resources_per_worker": {},
# Number of CPUs to allocate for the trainer. Note: this only takes effect
# when running in Tune. Otherwise, the trainer runs in the main program.
"num_cpus_for_driver": 1,
# You can set these memory quotas to tell Ray to reserve memory for your
# training run. This guarantees predictable execution, but the tradeoff is
    # if your workload exceeds the memory quota it will fail.
# Heap memory to reserve for the trainer process (0 for unlimited). This
# can be large if your are using large train batches, replay buffers, etc.
"memory": 0,
# Object store memory to reserve for the trainer process. Being large
# enough to fit a few copies of the model weights should be sufficient.
# This is enabled by default since models are typically quite small.
"object_store_memory": 0,
# Heap memory to reserve for each worker. Should generally be small unless
# your environment is very heavyweight.
"memory_per_worker": 0,
# Object store memory to reserve for each worker. This only needs to be
# large enough to fit a few sample batches at a time. This is enabled
# by default since it almost never needs to be larger than ~200MB.
"object_store_memory_per_worker": 0,
# === Offline Datasets ===
# Specify how to generate experiences:
# - "sampler": generate experiences via online simulation (default)
# - a local directory or file glob expression (e.g., "/tmp/*.json")
# - a list of individual file paths/URIs (e.g., ["/tmp/1.json",
# "s3://bucket/2.json"])
# - a dict with string keys and sampling probabilities as values (e.g.,
# {"sampler": 0.4, "/tmp/*.json": 0.4, "s3://bucket/expert.json": 0.2}).
# - a function that returns a rllib.offline.InputReader
"input": "sampler",
# Specify how to evaluate the current policy. This only has an effect when
# reading offline experiences. Available options:
# - "wis": the weighted step-wise importance sampling estimator.
# - "is": the step-wise importance sampling estimator.
# - "simulation": run the environment in the background, but use
# this data for evaluation only and not for learning.
"input_evaluation": ["is", "wis"],
# Whether to run postprocess_trajectory() on the trajectory fragments from
# offline inputs. Note that postprocessing will be done using the *current*
# policy, not the *behavior* policy, which is typically undesirable for
# on-policy algorithms.
"postprocess_inputs": False,
# If positive, input batches will be shuffled via a sliding window buffer
# of this number of batches. Use this if the input data is not in random
# enough order. Input is delayed until the shuffle buffer is filled.
"shuffle_buffer_size": 0,
# Specify where experiences should be saved:
# - None: don't save any experiences
# - "logdir" to save to the agent log dir
# - a path/URI to save to a custom output directory (e.g., "s3://bucket/")
# - a function that returns a rllib.offline.OutputWriter
"output": None,
# What sample batch columns to LZ4 compress in the output data.
"output_compress_columns": ["obs", "new_obs"],
# Max output file size before rolling over to a new file.
"output_max_file_size": 64 * 1024 * 1024,
# === Settings for Multi-Agent Environments ===
"multiagent": {
# Map of type MultiAgentPolicyConfigDict from policy ids to tuples
# of (policy_cls, obs_space, act_space, config). This defines the
# observation and action spaces of the policies and any extra config.
"policies": {},
# Function mapping agent ids to policy ids.
"policy_mapping_fn": None,
# Optional list of policies to train, or None for all policies.
"policies_to_train": None,
# Optional function that can be used to enhance the local agent
# observations to include more state.
# See rllib/evaluation/observation_function.py for more info.
"observation_fn": None,
# When replay_mode=lockstep, RLlib will replay all the agent
# transitions at a particular timestep together in a batch. This allows
# the policy to implement differentiable shared computations between
# agents it controls at that timestep. When replay_mode=independent,
# transitions are replayed independently per policy.
"replay_mode": "independent",
},
# === Logger ===
# Define logger-specific configuration to be used inside Logger
# Default value None allows overwriting with nested dicts
"logger_config": None,
# === Replay Settings ===
# The number of contiguous environment steps to replay at once. This may
# be set to greater than 1 to support recurrent models.
"replay_sequence_length": 1,
}
# __sphinx_doc_end__
# yapf: enable
@DeveloperAPI
def with_common_config(
extra_config: PartialTrainerConfigDict) -> TrainerConfigDict:
"""Returns the given config dict merged with common agent confs.
Args:
extra_config (PartialTrainerConfigDict): A user defined partial config
which will get merged with COMMON_CONFIG and returned.
Returns:
TrainerConfigDict: The merged config dict resulting of COMMON_CONFIG
plus `extra_config`.
"""
return Trainer.merge_trainer_configs(
COMMON_CONFIG, extra_config, _allow_unknown_configs=True)
@PublicAPI
class Trainer(Trainable):
"""A trainer coordinates the optimization of one or more RL policies.
All RLlib trainers extend this base class, e.g., the A3CTrainer implements
the A3C algorithm for single and multi-agent training.
Trainer objects retain internal model state between calls to train(), so
you should create a new trainer instance for each training session.
Attributes:
env_creator (func): Function that creates a new training env.
config (obj): Algorithm-specific configuration data.
logdir (str): Directory in which training outputs should be placed.
"""
# Whether to allow unknown top-level config keys.
_allow_unknown_configs = False
# List of top-level keys with value=dict, for which new sub-keys are
# allowed to be added to the value dict.
_allow_unknown_subkeys = [
"tf_session_args", "local_tf_session_args", "env_config", "model",
"optimizer", "multiagent", "custom_resources_per_worker",
"evaluation_config", "exploration_config",
"extra_python_environs_for_driver", "extra_python_environs_for_worker"
]
# List of top level keys with value=dict, for which we always override the
# entire value (dict), iff the "type" key in that value dict changes.
_override_all_subkeys_if_type_changes = ["exploration_config"]
@PublicAPI
def __init__(self,
config: TrainerConfigDict = None,
env: str = None,
logger_creator: Callable[[], Logger] = None):
"""Initialize an RLLib trainer.
Args:
config (dict): Algorithm-specific configuration data.
env (str): Name of the environment to use. Note that this can also
be specified as the `env` key in config.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
# User provided config (this is w/o the default Trainer's
# `COMMON_CONFIG` (see above)). Will get merged with COMMON_CONFIG
# in self.setup().
config = config or {}
# Trainers allow env ids to be passed directly to the constructor.
self._env_id = self._register_if_needed(env or config.get("env"))
# Create a default logger creator if no logger_creator is specified
if logger_creator is None:
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(self._name, self._env_id,
timestr)
def default_logger_creator(config):
"""Creates a Unified logger with a default logdir prefix
containing the agent name and the env id
"""
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
super().__init__(config, logger_creator)
@classmethod
@override(Trainable)
def default_resource_request(
cls, config: PartialTrainerConfigDict) -> Resources:
cf = dict(cls._default_config, **config)
Trainer._validate_config(cf)
num_workers = cf["num_workers"] + cf["evaluation_num_workers"]
# TODO(ekl): add custom resources here once tune supports them
return Resources(
cpu=cf["num_cpus_for_driver"],
gpu=cf["num_gpus"],
memory=cf["memory"],
object_store_memory=cf["object_store_memory"],
extra_cpu=cf["num_cpus_per_worker"] * num_workers,
extra_gpu=cf["num_gpus_per_worker"] * num_workers,
extra_memory=cf["memory_per_worker"] * num_workers,
extra_object_store_memory=cf["object_store_memory_per_worker"] *
num_workers)
@override(Trainable)
@PublicAPI
def train(self) -> ResultDict:
"""Overrides super.train to synchronize global vars."""
result = None
for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):
try:
result = Trainable.train(self)
except RayError as e:
if self.config["ignore_worker_failures"]:
logger.exception(
"Error in train call, attempting to recover")
self._try_recover()
else:
logger.info(
"Worker crashed during call to train(). To attempt to "
"continue training without the failed worker, set "
"`'ignore_worker_failures': True`.")
raise e
except Exception as e:
time.sleep(0.5) # allow logs messages to propagate
raise e
else:
break
if result is None:
raise RuntimeError("Failed to recover from worker crash")
if hasattr(self, "workers") and isinstance(self.workers, WorkerSet):
self._sync_filters_if_needed(self.workers)
if self.config["evaluation_interval"] == 1 or (
self._iteration > 0 and self.config["evaluation_interval"]
and self._iteration % self.config["evaluation_interval"] == 0):
evaluation_metrics = self._evaluate()
assert isinstance(evaluation_metrics, dict), \
"_evaluate() needs to return a dict."
result.update(evaluation_metrics)
return result
def _sync_filters_if_needed(self, workers: WorkerSet):
if self.config.get("observation_filter", "NoFilter") != "NoFilter":
FilterManager.synchronize(
workers.local_worker().filters,
workers.remote_workers(),
update_remote=self.config["synchronize_filters"])
logger.debug("synchronized filters: {}".format(
workers.local_worker().filters))
@override(Trainable)
def log_result(self, result: ResultDict):
self.callbacks.on_train_result(trainer=self, result=result)
# log after the callback is invoked, so that the user has a chance
# to mutate the result
Trainable.log_result(self, result)
@override(Trainable)
def setup(self, config: PartialTrainerConfigDict):
env = self._env_id
if env:
config["env"] = env
# An already registered env.
if _global_registry.contains(ENV_CREATOR, env):
self.env_creator = _global_registry.get(ENV_CREATOR, env)
# A class specifier.
elif "." in env:
self.env_creator = \
lambda env_config: from_config(env, env_config)
# Try gym.
else:
import gym # soft dependency
self.env_creator = \
lambda env_config: gym.make(env, **env_config)
else:
self.env_creator = lambda env_config: None
# Merge the supplied config with the class default, but store the
# user-provided one.
self.raw_user_config = config
self.config = Trainer.merge_trainer_configs(self._default_config,
config)
# Check and resolve DL framework settings.
# Enable eager/tracing support.
if tf1 and self.config["framework"] in ["tf2", "tfe"]:
if self.config["framework"] == "tf2" and tfv < 2:
raise ValueError("`framework`=tf2, but tf-version is < 2.0!")
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
logger.info("Executing eagerly, with eager_tracing={}".format(
self.config["eager_tracing"]))
if tf1 and not tf1.executing_eagerly() and \
self.config["framework"] != "torch":
logger.info("Tip: set framework=tfe or the --eager flag to enable "
"TensorFlow eager execution")
if self.config["normalize_actions"]:
inner = self.env_creator
def normalize(env):
import gym # soft dependency
if not isinstance(env, gym.Env):
raise ValueError(
"Cannot apply NormalizeActionActionWrapper to env of "
"type {}, which does not subclass gym.Env.", type(env))
return NormalizeActionWrapper(env)
self.env_creator = lambda env_config: normalize(inner(env_config))
Trainer._validate_config(self.config)
if not callable(self.config["callbacks"]):
raise ValueError(
"`callbacks` must be a callable method that "
"returns a subclass of DefaultCallbacks, got {}".format(
self.config["callbacks"]))
self.callbacks = self.config["callbacks"]()
log_level = self.config.get("log_level")
if log_level in ["WARN", "ERROR"]:
logger.info("Current log_level is {}. For more information, "
"set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
"-vv flags.".format(log_level))
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
def get_scope():
if tf1 and not tf1.executing_eagerly():
return tf1.Graph().as_default()
else:
return open(os.devnull) # fake a no-op scope
with get_scope():
self._init(self.config, self.env_creator)
# Evaluation setup.
if self.config.get("evaluation_interval"):
# Update env_config with evaluation settings:
extra_config = copy.deepcopy(self.config["evaluation_config"])
# Assert that user has not unset "in_evaluation".
assert "in_evaluation" not in extra_config or \
extra_config["in_evaluation"] is True
extra_config.update({
"batch_mode": "complete_episodes",
"rollout_fragment_length": 1,
"in_evaluation": True,
})
logger.debug(
"using evaluation_config: {}".format(extra_config))
self.evaluation_workers = self._make_workers(
env_creator=self.env_creator,
validate_env=None,
policy_class=self._policy_class,
config=merge_dicts(self.config, extra_config),
num_workers=self.config["evaluation_num_workers"])
self.evaluation_metrics = {}
@override(Trainable)
def cleanup(self):
if hasattr(self, "workers"):
self.workers.stop()
if hasattr(self, "optimizer") and self.optimizer:
self.optimizer.stop()
@override(Trainable)
def save_checkpoint(self, checkpoint_dir: str) -> str:
checkpoint_path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(self.iteration))
pickle.dump(self.__getstate__(), open(checkpoint_path, "wb"))
return checkpoint_path
@override(Trainable)
def load_checkpoint(self, checkpoint_path: str):
extra_data = pickle.load(open(checkpoint_path, "rb"))
self.__setstate__(extra_data)
@DeveloperAPI
def _make_workers(
self, *, env_creator: Callable[[EnvContext], EnvType],
validate_env: Optional[Callable[[EnvType, EnvContext], None]],
policy_class: Type[Policy], config: TrainerConfigDict,
num_workers: int) -> WorkerSet:
"""Default factory method for a WorkerSet running under this Trainer.
Override this method by passing a custom `make_workers` into
`build_trainer`.
Args:
env_creator (callable): A function that return and Env given an env
config.
validate_env (Optional[Callable[[EnvType, EnvContext], None]]):
Optional callable to validate the generated environment (only
on worker=0).
policy (Type[Policy]): The Policy class to use for creating the
policies of the workers.
config (TrainerConfigDict): The Trainer's config.
num_workers (int): Number of remote rollout workers to create.
0 for local only.
Returns:
WorkerSet: The created WorkerSet.
"""
return WorkerSet(
env_creator=env_creator,
validate_env=validate_env,
policy_class=policy_class,
trainer_config=config,
num_workers=num_workers,
logdir=self.logdir)
@DeveloperAPI
def _init(self, config: TrainerConfigDict,
env_creator: Callable[[EnvContext], EnvType]):
"""Subclasses should override this for custom initialization."""
raise NotImplementedError
@DeveloperAPI
def _evaluate(self) -> dict:
"""Evaluates current policy under `evaluation_config` settings.
Note that this default implementation does not do anything beyond
merging evaluation_config with the normal trainer config.
"""
self._before_evaluate()
# Broadcast the new policy weights to all evaluation workers.
logger.info("Synchronizing weights to evaluation workers.")
weights = ray.put(self.workers.local_worker().save())
self.evaluation_workers.foreach_worker(
lambda w: w.restore(ray.get(weights)))
self._sync_filters_if_needed(self.evaluation_workers)
if self.config["custom_eval_function"]:
logger.info("Running custom eval function {}".format(
self.config["custom_eval_function"]))
metrics = self.config["custom_eval_function"](
self, self.evaluation_workers)
if not metrics or not isinstance(metrics, dict):
raise ValueError("Custom eval function must return "
"dict of metrics, got {}.".format(metrics))
else:
logger.info("Evaluating current policy for {} episodes.".format(
self.config["evaluation_num_episodes"]))
if self.config["evaluation_num_workers"] == 0:
for _ in range(self.config["evaluation_num_episodes"]):
self.evaluation_workers.local_worker().sample()
else:
num_rounds = int(
math.ceil(self.config["evaluation_num_episodes"] /
self.config["evaluation_num_workers"]))
num_workers = len(self.evaluation_workers.remote_workers())
num_episodes = num_rounds * num_workers
for i in range(num_rounds):
logger.info("Running round {} of parallel evaluation "
"({}/{} episodes)".format(
i, (i + 1) * num_workers, num_episodes))
ray.get([
w.sample.remote()
for w in self.evaluation_workers.remote_workers()
])
metrics = collect_metrics(self.evaluation_workers.local_worker(),
self.evaluation_workers.remote_workers())
return {"evaluation": metrics}
@DeveloperAPI
def _before_evaluate(self):
"""Pre-evaluation callback."""
pass
@PublicAPI
def compute_action(self,
observation: TensorStructType,
state: List[TensorStructType] = None,
prev_action: TensorStructType = None,
prev_reward: float = None,
info: EnvInfoDict = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: bool = None) -> TensorStructType:
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
observation (TensorStructType): observation from the environment.
state (List[TensorStructType]): RNN hidden state, if any. If state
is not None, then all of compute_single_action(...) is returned
(computed action, rnn state(s), logits dictionary).
Otherwise compute_single_action(...)[0] is returned
(computed action).
prev_action (TensorStructType): Previous action value, if any.
prev_reward (float): Previous reward, if any.
info (EnvInfoDict): info object, if any
policy_id (PolicyID): Policy to query (only applies to
multi-agent).
full_fetch (bool): Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore (bool): Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
Returns:
any: The computed action if full_fetch=False, or
tuple: The full output of policy.compute_actions() if
full_fetch=True or we have an RNN-based Policy.
"""
if state is None:
state = []
preprocessed = self.workers.local_worker().preprocessors[
policy_id].transform(observation)
filtered_obs = self.workers.local_worker().filters[policy_id](
preprocessed, update=False)
result = self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore)
if state or full_fetch:
return result
else:
return result[0] # backwards compatibility
def compute_actions(self,
observations,
state=None,
prev_action=None,
prev_reward=None,
info=None,
policy_id=DEFAULT_POLICY_ID,
full_fetch=False,
explore=None):
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
observation (obj): observation from the environment.
state (dict): RNN hidden state, if any. If state is not None,
then all of compute_single_action(...) is returned
(computed action, rnn state(s), logits dictionary).
Otherwise compute_single_action(...)[0] is returned
(computed action).
prev_action (obj): previous action value, if any
prev_reward (int): previous reward, if any
info (dict): info object, if any
policy_id (str): Policy to query (only applies to multi-agent).
full_fetch (bool): Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore (bool): Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
Returns:
any: The computed action if full_fetch=False, or
tuple: The full output of policy.compute_actions() if
full_fetch=True or we have an RNN-based Policy.
"""
# Preprocess obs and states
stateDefined = state is not None
policy = self.get_policy(policy_id)
filtered_obs, filtered_state = [], []
for agent_id, ob in observations.items():
worker = self.workers.local_worker()
preprocessed = worker.preprocessors[policy_id].transform(ob)
filtered = worker.filters[policy_id](preprocessed, update=False)
filtered_obs.append(filtered)
if state is None:
continue
elif agent_id in state:
filtered_state.append(state[agent_id])
else:
filtered_state.append(policy.get_initial_state())
# Batch obs and states
obs_batch = np.stack(filtered_obs)
if state is None:
state = []
else:
state = list(zip(*filtered_state))
state = [np.stack(s) for s in state]
# Batch compute actions
actions, states, infos = policy.compute_actions(
obs_batch,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore)
# Unbatch actions for the environment
atns, actions = space_utils.unbatch(actions), {}
for key, atn in zip(observations, atns):
actions[key] = atn
# Unbatch states into a dict
unbatched_states = {}
for idx, agent_id in enumerate(observations):
unbatched_states[agent_id] = [s[idx] for s in states]
# Return only actions or full tuple
if stateDefined or full_fetch:
return actions, unbatched_states, infos
else:
return actions
@property
def _name(self) -> str:
"""Subclasses should override this to declare their name."""
raise NotImplementedError
@property
def _default_config(self) -> TrainerConfigDict:
"""Subclasses should override this to declare their default config."""
raise NotImplementedError
@PublicAPI
def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy:
"""Return policy for the specified id, or None.
Args:
policy_id (str): id of policy to return.
"""
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies: List[PolicyID] = None) -> dict:
"""Return a dictionary of policy ids to weights.
Args:
policies (list): Optional list of policies to return weights for,
or None for all policies.
"""
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights: Dict[PolicyID, dict]):
"""Set policy weights by policy id.
Args:
weights (dict): Map of policy ids to weights to set.
"""
self.workers.local_worker().set_weights(weights)
@DeveloperAPI
def export_policy_model(self,
export_dir: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Export policy model with given policy_id to local directory.
Args:
export_dir (string): Writable local directory.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_model("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_model(export_dir, policy_id)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir: str,
filename_prefix: str = "model",
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Export tensorflow policy model checkpoint to local directory.
Args:
export_dir (string): Writable local directory.
filename_prefix (string): file name prefix of checkpoint files.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_checkpoint("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_checkpoint(
export_dir, filename_prefix, policy_id)
@DeveloperAPI
def import_policy_model_from_h5(self,
import_file: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Imports a policy's model with given policy_id from a local h5 file.
Args:
import_file (str): The h5 file to import from.
policy_id (string): Optional policy id to import into.
Example:
>>> trainer = MyTrainer()
>>> trainer.import_policy_model_from_h5("/tmp/weights.h5")
>>> for _ in range(10):
>>> trainer.train()
"""
self.workers.local_worker().import_policy_model_from_h5(
import_file, policy_id)
@DeveloperAPI
def collect_metrics(self,
selected_workers: List["ActorHandle"] = None) -> dict:
"""Collects metrics from the remote workers of this agent.
This is the same data as returned by a call to train().
"""
return self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"],
min_history=self.config["metrics_smoothing_episodes"],
selected_workers=selected_workers)
@classmethod
def resource_help(cls, config: TrainerConfigDict) -> str:
return ("\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config))
@classmethod
def merge_trainer_configs(cls,
config1: TrainerConfigDict,
config2: PartialTrainerConfigDict,
_allow_unknown_configs: Optional[bool] = None
) -> TrainerConfigDict:
config1 = copy.deepcopy(config1)
if "callbacks" in config2 and type(config2["callbacks"]) is dict:
legacy_callbacks_dict = config2["callbacks"]
def make_callbacks():
# Deprecation warning will be logged by DefaultCallbacks.
return DefaultCallbacks(
legacy_callbacks_dict=legacy_callbacks_dict)
config2["callbacks"] = make_callbacks
if _allow_unknown_configs is None:
_allow_unknown_configs = cls._allow_unknown_configs
return deep_update(config1, config2, _allow_unknown_configs,
cls._allow_unknown_subkeys,
cls._override_all_subkeys_if_type_changes)
@staticmethod
def _validate_config(config: PartialTrainerConfigDict):
if config.get("_use_trajectory_view_api") and \
config.get("framework") != "torch":
logger.info(
"`_use_trajectory_view_api` only supported for PyTorch so "
"far! Will run w/o.")
config["_use_trajectory_view_api"] = False
elif not config.get("_use_trajectory_view_api") and \
config.get("model", {}).get("_time_major"):
raise ValueError("`model._time_major` only supported "
"iff `_use_trajectory_view_api` is True!")
if type(config["input_evaluation"]) != list:
raise ValueError(
"`input_evaluation` must be a list of strings, got {}".format(
config["input_evaluation"]))
def _try_recover(self):
"""Try to identify and remove any unhealthy workers.
This method is called after an unexpected remote error is encountered
from a worker. It issues check requests to all current workers and
removes any that respond with error. If no healthy workers remain,
an error is raised.
"""
assert hasattr(self, "execution_plan")
workers = self.workers
logger.info("Health checking all workers...")
checks = []
for ev in workers.remote_workers():
_, obj_ref = ev.sample_with_count.remote()
checks.append(obj_ref)
healthy_workers = []
for i, obj_ref in enumerate(checks):
w = workers.remote_workers()[i]
try:
ray.get(obj_ref)
healthy_workers.append(w)
logger.info("Worker {} looks healthy".format(i + 1))
except RayError:
logger.exception("Removing unhealthy worker {}".format(i + 1))
try:
w.__ray_terminate__.remote()
except Exception:
logger.exception("Error terminating unhealthy worker")
if len(healthy_workers) < 1:
raise RuntimeError(
"Not enough healthy workers remain to continue.")
logger.warning("Recreating execution plan after failure")
workers.reset(healthy_workers)
self.train_exec_impl = self.execution_plan(workers, self.config)
@override(Trainable)
def _export_model(self, export_formats: List[str],
export_dir: str) -> Dict[str, str]:
ExportFormat.validate(export_formats)
exported = {}
if ExportFormat.CHECKPOINT in export_formats:
path = os.path.join(export_dir, ExportFormat.CHECKPOINT)
self.export_policy_checkpoint(path)
exported[ExportFormat.CHECKPOINT] = path
if ExportFormat.MODEL in export_formats:
path = os.path.join(export_dir, ExportFormat.MODEL)
self.export_policy_model(path)
exported[ExportFormat.MODEL] = path
return exported
def import_model(self, import_file: str):
"""Imports a model from import_file.
Note: Currently, only h5 files are supported.
Args:
import_file (str): The file to import the model from.
Returns:
A dict that maps ExportFormats to successfully exported models.
"""
# Check for existence.
if not os.path.exists(import_file):
raise FileNotFoundError(
"`import_file` '{}' does not exist! Can't import Model.".
format(import_file))
# Get the format of the given file.
import_format = "h5" # TODO(sven): Support checkpoint loading.
ExportFormat.validate([import_format])
if import_format != ExportFormat.H5:
raise NotImplementedError
else:
return self.import_policy_model_from_h5(import_file)
def __getstate__(self) -> dict:
state = {}
if hasattr(self, "workers"):
state["worker"] = self.workers.local_worker().save()
if hasattr(self, "optimizer") and hasattr(self.optimizer, "save"):
state["optimizer"] = self.optimizer.save()
return state
def __setstate__(self, state: dict):
if "worker" in state:
self.workers.local_worker().restore(state["worker"])
remote_state = ray.put(state["worker"])
for r in self.workers.remote_workers():
r.restore.remote(remote_state)
if "optimizer" in state:
self.optimizer.restore(state["optimizer"])
@staticmethod
def with_updates(**overrides) -> Type["Trainer"]:
raise NotImplementedError(
"`with_updates` may only be called on Trainer sub-classes "
"that were generated via the `ray.rllib.agents.trainer_template."
"build_trainer()` function!")
def _register_if_needed(self, env_object: Union[str, EnvType]):
if isinstance(env_object, str):
return env_object
elif isinstance(env_object, type):
name = env_object.__name__
register_env(name, lambda config: env_object(config))
return name
raise ValueError(
"{} is an invalid env specification. ".format(env_object) +
"You can specify a custom env as either a class "
"(e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").")
|
py | 1a48cc565057f7cd4556e820797e9b83304192ca | from ._base import Message
##### Filter Flipper Control Messages #####
class MGMSG_MOT_GET_MFF_OPERPARAMS(Message):
"""
See :class:`MGMSG_MOT_SET_MFF_OPERPARAMS`.
:param chan_ident: channel number (0x01, 0x02)
:type chan_ident: int
- i_transit_time
- i_transit_time_adc
- oper_mode_1
- sig_mode_1
- pulse_width_1
- oper_mode_2
- sig_mode_2
- pulse_width_2
"""
id = 0x512
is_long_cmd = True
parameters = [
("chan_ident", "H"),
("i_transit_time", "I"),
("i_transit_time_adc", "I"),
("oper_mode_1", "H"),
("sig_mode_1", "H"),
("pulse_width_1", "I"),
("oper_mode_2", "H"),
("sig_mode_2", "H"),
("pulse_width_2", "I"),
(None, "I"),
(None, "H"),
]
class MGMSG_MOT_REQ_MFF_OPERPARAMS(Message):
"""
See :class:`MGMSG_MOT_SET_MFF_OPERPARAMS`.
:param chan_ident: channel number (0x01, 0x02)
:type chan_ident: int
"""
id = 0x511
parameters = [("chan_ident", "B"), (None, "B")]
class MGMSG_MOT_SET_MFF_OPERPARAMS(Message):
"""
Used to set various operating parameters that dictate the function of the
MFF series flipper unit.
:param chan_ident: channel number (0x01, 0x02)
:type chan_ident: int
- i_transit_time
- i_transit_time_adc
- oper_mode_1
- sig_mode_1
- pulse_width_1
- oper_mode_2
- sig_mode_2
- pulse_width_2
"""
id = 0x510
is_long_cmd = True
parameters = [
("chan_ident", "H"),
("i_transit_time", "I"),
("i_transit_time_adc", "I"),
("oper_mode_1", "H"),
("sig_mode_1", "H"),
("pulse_width_1", "I"),
("oper_mode_2", "H"),
("sig_mode_2", "H"),
("pulse_width_2", "I"),
(None, "I"),
(None, "H"),
]
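# Added sketch (not part of the original module): the parameter list above implies
# a fixed struct layout for the 0x510 long command (assumed little-endian here).
# The numeric values below are invented and only illustrate how such a payload
# would be packed and how large it is.
if __name__ == "__main__":
    import struct
    _payload = struct.pack(
        "<HIIHHIHHIIH",
        0x01,         # chan_ident
        500, 500,     # i_transit_time, i_transit_time_adc
        1, 1, 50,     # oper_mode_1, sig_mode_1, pulse_width_1
        1, 1, 50,     # oper_mode_2, sig_mode_2, pulse_width_2
        0, 0,         # the two unnamed trailing fields
    )
    print(len(_payload), "bytes of MGMSG_MOT_SET_MFF_OPERPARAMS payload")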
|
py | 1a48ccc635e7875a23c6dd425cf687dfdf075c6b | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.GE(x_z, mgr.Plus(z, y)))
loc1 = Location(env, mgr.GE(z, i_3), mgr.GE(x, i_0))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, i_0)))
h_z = Hint("h_z2", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
return frozenset(res)
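# Added smoke-test sketch (assumes the local `utils` and `hint` modules imported
# above are available, as they must be for the rest of this file anyway).
if __name__ == "__main__":
    _env = PysmtEnv()
    _symbs, _init, _trans, _fair = transition_system(_env)
    print("symbols:", sorted(s.symbol_name() for s in _symbs))
    print("number of hints:", len(hints(_env)))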
|
py | 1a48ccedcb133e9e9f39911648fb6f8809f64033 | import nltk
import discord
from discord.ext import commands
from nltk.sentiment import SentimentIntensityAnalyzer
import database as db
import variables as var
from functions import get_prefix
from ext.permissions import has_command_permission
nltk.download('vader_lexicon')
sia = SentimentIntensityAnalyzer()
class Karma(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
"""Simple check to see if this cog (plugin) is enabled."""
guild_doc = await db.PLUGINS.find_one({"_id": ctx.guild.id})
if guild_doc.get("Karma"):
return True
else:
await ctx.send(
embed=discord.Embed(
description=(
f"{var.E_DISABLE} The Karma plugin is"
" disabled in this server"
),
color=var.C_ORANGE
)
)
@commands.command()
@has_command_permission()
async def karma(self, ctx, karma_user: discord.User = None):
user = ctx.author if karma_user is None else karma_user
guild_col = db.KARMA_DATABASE[str(ctx.guild.id)]
userdata = await guild_col.find_one({"_id": user.id})
karmas = [
x async for x in guild_col.find(
{
"_id": {"$ne": 0},
# Removing ID 0 (Config doc, unrelated to user xp)
}
).sort("karma", -1)
]
if userdata is None:
await ctx.send("This user does not have any karma yet...")
else:
# Index starts with zero
position = karmas.index(userdata) + 1
embed = discord.Embed(
title=f"Karma for {user.name}",
color=var.C_MAIN
).add_field(
name="Karma", value=userdata["karma"]
).add_field(
name="Position", value=f"{position}/{len(karmas)}", inline=False
).set_thumbnail(url=user.avatar_url)
total_karma = sum(i["karma"] for i in karmas)
average = total_karma/len(karmas)
if userdata["karma"] > average:
embed.description = (
f"Your karma is better than the average {user.name}! :)"
)
if userdata["karma"] < average:
embed.description = (
f"Your karma is lower than the average {user.name}, "
f"is it because you don't talk much or you are not nice "
f"enough? :eyes:"
)
if position == 1:
embed.description = (
f"Woohoo {user.name}, you are the nicest "
f"person in the server!"
)
await ctx.channel.send(embed=embed)
@commands.command(name="karmaboard", aliases=["kb"])
@has_command_permission()
async def karma_board(self, ctx):
guild_col = db.KARMA_DATABASE[str(ctx.guild.id)]
karmas = [
# Removing ID 0 (Config doc, unrelated to user xp)
x async for x in
guild_col.find({"_id": {"$ne": 0}}).sort("karma", -1)
]
        # Ceiling division: 10 entries per page, so a partly filled page
        # still counts as a page.
        all_pages = max(1, -(-len(karmas) // 10))
total_karma = 0
for i in karmas:
total_karma += i["karma"]
average = total_karma/len(karmas)
embed = discord.Embed(
title=f"Karma Board",
description=f"The average karma in this server is **{average}**",
color=var.C_BLUE
).set_thumbnail(url=ctx.guild.icon_url)
count = 0
for i in karmas:
count += 1
try:
user = self.bot.get_user(i.get("_id"))
karma = i.get("karma")
embed.add_field(
name=f"{count}: {user}",
value=f"Total Karma: {karma}",
inline=False
)
except Exception:
print(f"Not found {i}")
if count == 10:
break
embed.set_footer(text=f"Page 1/{all_pages}")
bot_msg = await ctx.send(embed=embed)
await bot_msg.add_reaction("◀️")
await bot_msg.add_reaction("⬅️")
await bot_msg.add_reaction("➡️")
await bot_msg.add_reaction("▶️")
def reaction_check(r, u):
return u == ctx.author and r.message == bot_msg
async def pagination(ctx, current_page, embed, GuildCol, all_pages):
page_rn = current_page + 1
embed.set_footer(text=f"Page {page_rn}/{all_pages}")
embed.clear_fields()
            # Fetch only the 10 entries that belong on this page.
            rank_count = current_page * 10
            karmas = [
                x async for x in GuildCol.find(
                    # Removing ID 0 (Config doc, unrelated to user xp)
                    {"_id": {"$ne": 0}}
                ).sort("karma", -1).skip(current_page * 10).limit(10)
            ]
for i in karmas:
rank_count += 1
user = self.bot.get_user(i.get("_id"))
karma = i.get("karma")
embed.add_field(
name=f"{rank_count}: {user}",
value=f"Total Karma: {karma}",
inline=False
)
if rank_count == current_page *10 + 10:
break
current_page = 0
while True:
reaction, user = await self.bot.wait_for(
"reaction_add", check=reaction_check
)
if str(reaction.emoji) == "◀️":
try:
await bot_msg.remove_reaction("◀️", ctx.author)
except discord.Forbidden:
pass
current_page = 0
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
if str(reaction.emoji) == "➡️":
try:
await bot_msg.remove_reaction("➡️", ctx.author)
except discord.Forbidden:
pass
current_page += 1
                if current_page >= all_pages:
current_page -= 1
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
if str(reaction.emoji) == "⬅️":
try:
await bot_msg.remove_reaction("⬅️", ctx.author)
except discord.Forbidden:
pass
current_page -= 1
if current_page < 0:
current_page += 1
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
if str(reaction.emoji) == "▶️":
try:
await bot_msg.remove_reaction("▶️", ctx.author)
except discord.Forbidden:
pass
current_page = all_pages-1
await pagination(ctx, current_page, embed, guild_col, all_pages)
await bot_msg.edit(embed=embed)
@commands.command(name="kblacklist")
@has_command_permission()
async def k_blacklist(self, ctx, channel: discord.TextChannel = None):
if channel is not None:
guild_col = db.KARMA_DATABASE[(str(ctx.guild.id))]
settings = await guild_col.find_one({"_id": 0})
new_settings = settings.get("blacklists").copy()
if channel.id in new_settings:
await ctx.send("This channel is already blacklisted")
else:
new_settings.append(channel.id)
new_data = {
"$set": {
"blacklists": new_settings
}
}
await guild_col.update_one(settings, new_data)
await ctx.send(
embed=discord.Embed(
description=(
f"{channel.mention} has been blacklisted, "
f"hence users won't gain any karma in that channel."
),
color=var.C_GREEN
)
)
else:
await ctx.send(
embed=discord.Embed(
description=(
"🚫 You need to define the channel to blacklist it!"
),
color=var.C_RED
).add_field(
name="Format",
value=f"```{await get_prefix(ctx)}kblacklist <#channel>```"
)
)
@commands.command(name="kwhitelist")
@has_command_permission()
async def k_whitelist(self, ctx, channel: discord.TextChannel = None):
if channel is not None:
guild_col = db.KARMA_DATABASE[(str(ctx.guild.id))]
settings = await guild_col.find_one({"_id": 0})
new_settings = settings.get("blacklists").copy()
if channel.id not in new_settings:
await ctx.send("This channel is not blacklisted")
else:
new_settings.remove(channel.id)
new_data = {
"$set": {
"blacklists": new_settings
}
}
await guild_col.update_one(settings, new_data)
await ctx.send(
embed=discord.Embed(
description=(
f"{channel.mention} has been whitelisted, hence "
"users would be able to gain karma again in that "
"channel."
),
color=var.C_GREEN
)
)
else:
await ctx.send(
embed=discord.Embed(
description=(
"🚫 You need to define the channel to whitelist it!"
),
color=var.C_RED
).add_field(
name="Format",
value=f"```{await get_prefix(ctx)}kwhitelist <#channel>```"
)
)
@commands.Cog.listener()
async def on_message(self, message):
if not message.guild:
return
plugin_doc = await db.PLUGINS.find_one({"_id": message.guild.id})
guild_col = db.KARMA_DATABASE[str(message.guild.id)]
settings_doc = await guild_col.find_one({"_id": 0})
if plugin_doc["Karma"] and not message.author.bot:
            if message.channel.id not in settings_doc["blacklists"]:
userdata = await guild_col.find_one({"_id": message.author.id})
polarity = sia.polarity_scores(message.content)
result = max(polarity, key=polarity.get)
def get_karma():
if result == "neg":
return -polarity[result]
elif result == "pos":
return polarity[result]
return 0
if userdata is None:
await guild_col.insert_one(
{"_id": message.author.id, "karma": get_karma()}
)
else:
new_karma = get_karma()
new_karma += userdata["karma"]
await guild_col.update_one(
userdata, {"$set": {"karma": new_karma}}
)
def setup(bot):
bot.add_cog(Karma(bot))
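# Added note: SentimentIntensityAnalyzer.polarity_scores() returns a dict with the
# keys "neg", "neu", "pos" and "compound".  get_karma() in on_message() only changes
# karma when the highest-scoring key is "pos" or "neg"; when "neu" or "compound"
# wins, which is the case for most everyday messages, the delta is 0.
# Illustrative call (actual scores depend on the VADER lexicon):
#   sia.polarity_scores("I love this bot")
#   -> {"neg": 0.0, "neu": ..., "pos": ..., "compound": ...}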
|
py | 1a48ccf85f9470274eed32e048f4bc29593b185f | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("categories", [["b", "a", "c"], ["a", "b", "c", "d"]])
def test_factorize(categories, ordered):
cat = pd.Categorical(
["b", "b", "a", "c", None], categories=categories, ordered=ordered
)
codes, uniques = pd.factorize(cat)
expected_codes = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(
["b", "a", "c"], categories=categories, ordered=ordered
)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(["b", "b", None, "a"])
codes, uniques = pd.factorize(cat, sort=True)
expected_codes = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(["a", "b"])
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(
["b", "b", None, "a"], categories=["c", "b", "a"], ordered=True
)
codes, uniques = pd.factorize(cat, sort=True)
expected_codes = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(
["b", "a"], categories=["c", "b", "a"], ordered=True
)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_isin_cats():
# GH2003
cat = pd.Categorical(["a", "b", np.nan])
result = cat.isin(["a", np.nan])
expected = np.array([True, False, True], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
result = cat.isin(["a", "c"])
expected = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_isin_empty(empty):
s = pd.Categorical(["a", "b"])
expected = np.array([False, False], dtype=bool)
result = s.isin(empty)
tm.assert_numpy_array_equal(expected, result)
class TestTake:
# https://github.com/pandas-dev/pandas/issues/20664
def test_take_warns(self):
cat = pd.Categorical(["a", "b"])
with tm.assert_produces_warning(FutureWarning):
cat.take([0, -1])
def test_take_positive_no_warning(self):
cat = pd.Categorical(["a", "b"])
with tm.assert_produces_warning(None):
cat.take([0, 0])
def test_take_bounds(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical(["a", "b", "a"])
with pytest.raises(IndexError):
cat.take([4, 5], allow_fill=allow_fill)
def test_take_empty(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical([], categories=["a", "b"])
with pytest.raises(IndexError):
cat.take([0], allow_fill=allow_fill)
def test_positional_take(self, ordered_fixture):
cat = pd.Categorical(
["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered_fixture
)
result = cat.take([0, 1, 2], allow_fill=False)
expected = pd.Categorical(
["a", "a", "b"], categories=cat.categories, ordered=ordered_fixture
)
tm.assert_categorical_equal(result, expected)
def test_positional_take_unobserved(self, ordered_fixture):
cat = pd.Categorical(
["a", "b"], categories=["a", "b", "c"], ordered=ordered_fixture
)
result = cat.take([1, 0], allow_fill=False)
expected = pd.Categorical(
["b", "a"], categories=cat.categories, ordered=ordered_fixture
)
tm.assert_categorical_equal(result, expected)
def test_take_allow_fill(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(["a", "a", "b"])
result = cat.take([0, -1, -1], allow_fill=True)
expected = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_take_fill_with_negative_one(self):
# -1 was a category
cat = pd.Categorical([-1, 0, 1])
result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
expected = pd.Categorical([-1, -1, 0], categories=[-1, 0, 1])
tm.assert_categorical_equal(result, expected)
def test_take_fill_value(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(["a", "b", "c"])
result = cat.take([0, 1, -1], fill_value="a", allow_fill=True)
expected = pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"])
tm.assert_categorical_equal(result, expected)
def test_take_fill_value_new_raises(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = pd.Categorical(["a", "b", "c"])
xpr = r"'fill_value' \('d'\) is not in this Categorical's categories."
with pytest.raises(TypeError, match=xpr):
cat.take([0, 1, -1], fill_value="d", allow_fill=True)
|
py | 1a48ce0e3af0e31ebe69ec9b5853b6bb15f7f3b1 | # ---------------------------------------------------------------------
# Eltex.MA4000.get_interfaces
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
from noc.core.ip import IPv4
from noc.core.text import parse_table
from noc.core.validators import is_int
class Script(BaseScript):
name = "Eltex.MA4000.get_interfaces"
interface = IGetInterfaces
rx_mgmt = re.compile(
r"^\s+ip\s+(?P<ip>\S+)\s*\n"
r"^\s+mask\s+(?P<mask>\S+)\s*\n"
r"^\s+gateway.+\n"
r"^\s+vlan\s+(?P<vlan_id>\d+)\s*\n",
re.MULTILINE,
)
rx_mac = re.compile(r"^\s*\*\d\s+\S+\s+MASTER\s+\d+\s+(?P<mac>\S+)", re.MULTILINE)
def create_iface(self, i, iftype):
ifname = " ".join(i[0].split())
if not ifname.startswith(iftype):
return None
pvid = i[1]
if i[4] not in ["none", "N/S"]:
tagged = self.expand_rangelist(i[4])
else:
tagged = []
untagged = i[5] if is_int(i[5]) else pvid
iface = {
"name": ifname,
"type": "physical",
"subinterfaces": [
{"name": ifname, "enabled_afi": ["BRIDGE"], "untagged_vlan": untagged}
],
}
if tagged:
iface["subinterfaces"][0]["tagged_vlans"] = tagged
return iface
def execute(self):
interfaces = []
lldp = []
c = self.cli("show lldp configuration")
if "LLDP state: Enabled" in c:
t = parse_table(c, allow_wrap=True, footer="PD - port description")
for i in t:
ifname = " ".join(i[0].split())
if "transmit" in i[1] or "receive" in i[1]:
lldp += [ifname]
c = self.cli("show interface front-port all vlans")
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "front-port")
if iface is not None:
if iface["name"] in lldp:
iface["enabled_protocols"] = ["LLDP"]
interfaces += [iface]
for slot in range(0, 16):
c = self.cli("show interface plc-pon-port %d/0-7 vlans" % slot)
t = parse_table(c, allow_wrap=True, footer="dummy footer")
for i in t:
iface = self.create_iface(i, "plc-pon-port")
if iface is not None:
interfaces += [iface]
c = self.cli("show interface slot-channel 0-15 vlans")
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "slot-channel")
if iface is not None:
interfaces += [iface]
c = self.cli("show interface slot-port all vlans")
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "slot-port")
if iface is not None:
interfaces += [iface]
c = self.cli("show management")
match = self.rx_mgmt.search(c)
ip_address = "%s/%s" % (match.group("ip"), IPv4.netmask_to_len(match.group("mask")))
iface = {
"name": "management",
"type": "SVI",
"subinterfaces": [
{
"name": "management",
"enabled_afi": ["IPv4"],
"ipv4_addresses": [ip_address],
"vlan_ids": int(match.group("vlan_id")),
}
],
}
c = self.cli("show stack")
match = self.rx_mac.search(c)
iface["mac"] = match.group("mac")
iface["subinterfaces"][0]["mac"] = match.group("mac")
interfaces += [iface]
portchannels = self.scripts.get_portchannel()
for pc in portchannels:
c = self.cli("show interface %s vlans" % pc["interface"])
t = parse_table(c, allow_wrap=True, footer="N/A - interface doesn't exist")
for i in t:
iface = self.create_iface(i, "port-channel")
if iface is not None:
has_lacp = False
iface["type"] = "aggregated"
if pc["type"] == "L":
has_lacp = True
iface["enabled_protocols"] = ["LACP"]
interfaces += [iface]
for member in pc["members"]:
for i in interfaces:
if member == i["name"]:
i["aggregated_interface"] = pc["interface"]
if has_lacp:
if i["enabled_protocols"]:
i["enabled_protocols"] += ["LACP"]
else:
i["enabled_protocols"] = ["LACP"]
break
return [{"interfaces": interfaces}]
|
py | 1a48ce3f003a2cae9a4a6a48d728b0aa22fa8ac5 | from datetime import date
from mockito import *
from django.test.client import Client
from django.utils import unittest
from molly.apps.places.models import Entity, Journey
from molly.apps.places.providers.cif import CifTimetableProvider
import httplib
class AtcoCifTestCase(unittest.TestCase):
def testBankHolidays(self):
j = Journey()
# 10 bank hols in 2010
hols = j.get_bank_holidays(2010)
self.assertEquals(len(hols), 10)
self.assertTrue(date(2010, 1, 1) in hols) # New Year's Day
self.assertTrue(date(2010, 4, 2) in hols) # Good Friday
self.assertTrue(date(2010, 4, 5) in hols) # Easter Monday
self.assertTrue(date(2010, 5, 3) in hols) # Early May Bank Holiday
self.assertTrue(date(2010, 5, 31) in hols) # Spring Bank Holiday
self.assertTrue(date(2010, 8, 30) in hols) # Summer Bank Holiday
self.assertTrue(date(2010, 12, 25) in hols) # Christmas Day
self.assertTrue(date(2010, 12, 26) in hols) # Boxing Day
self.assertTrue(date(2010, 12, 27) in hols) # Christmas Day (in lieu)
self.assertTrue(date(2010, 12, 28) in hols) # Boxing Day (in lieu)
# 11 bank hols in 2011
hols = j.get_bank_holidays(2011)
self.assertEquals(len(hols), 11)
self.assertTrue(date(2011, 1, 1) in hols) # New Year's Day
self.assertTrue(date(2011, 1, 3) in hols) # New Year's Day (in lieu)
self.assertTrue(date(2011, 4, 22) in hols) # Good Friday
self.assertTrue(date(2011, 4, 25) in hols) # Easter Monday
self.assertTrue(date(2011, 4, 29) in hols) # Royal Wedding
self.assertTrue(date(2011, 5, 2) in hols) # Early May Bank Holiday
self.assertTrue(date(2011, 5, 30) in hols) # Spring Bank Holiday
self.assertTrue(date(2011, 8, 29) in hols) # Summer Bank Holiday
self.assertTrue(date(2011, 12, 25) in hols) # Christmas Day
self.assertTrue(date(2011, 12, 26) in hols) # Christmas Day (in lieu)
self.assertTrue(date(2011, 12, 27) in hols) # Boxing Day
# 10 bank hols in 2012
hols = j.get_bank_holidays(2012)
self.assertEquals(len(hols), 10)
self.assertTrue(date(2012, 1, 1) in hols) # New Year's Day
self.assertTrue(date(2012, 1, 2) in hols) # New Year's Day (in lieu)
self.assertTrue(date(2012, 4, 6) in hols) # Good Friday
self.assertTrue(date(2012, 4, 9) in hols) # Easter Monday
self.assertTrue(date(2012, 5, 7) in hols) # Early May Bank Holiday
self.assertTrue(date(2012, 6, 4) in hols) # Spring Bank Holiday
self.assertTrue(date(2012, 6, 5) in hols) # Diamond Jubilee
self.assertTrue(date(2012, 8, 27) in hols) # Summer Bank Holiday
self.assertTrue(date(2012, 12, 25) in hols) # Christmas Day
self.assertTrue(date(2012, 12, 26) in hols) # Boxing Day
class LocationTestCase(unittest.TestCase):
def testLocationRequiredViewSubclass(self):
c = Client()
path = '/places/nearby/'
latitude = 51.752274
longitude = -1.255875
accuracy = 10
# Trying to get a LocationRequiredView with no location set should
# cause a redirect
response = c.get(path)
self.assertEquals(response.status_code, httplib.SEE_OTHER)
# Trying to get a LocationRequiredView with latitude and longitude
# query params returns OK
response = c.get(path, data={ 'latitude':latitude, 'longitude': longitude })
self.assertEquals(response.status_code, httplib.OK)
# Trying to get a LocationRequiredView with latitude, longitude
# and accuracy query params returns OK
response = c.get(path, data={ 'latitude':latitude, 'longitude': longitude, 'accuracy': accuracy })
self.assertEquals(response.status_code, httplib.OK)
# Trying to get a LocationRequiredView with an X-Current-Location (no accuracy)
# HTTP header returns OK
response = c.get(path, HTTP_X_CURRENT_LOCATION="latitude=%.6f,longitude=%.6f" % (latitude, longitude))
self.assertEquals(response.status_code, httplib.OK)
# Trying to get a LocationRequiredView with an X-Current-Location (including accuracy)
# HTTP header returns OK
response = c.get(path, HTTP_X_CURRENT_LOCATION="latitude=%.6f,longitude=%.6f,accuracy=%d" % (latitude, longitude, accuracy))
self.assertEquals(response.status_code, httplib.OK)
class CifTestCase(unittest.TestCase):
sample_file = \
"""
HDTPS.UCFCATE.PD1201131301122139DFTTISX FA130112300912
TIAACHEN 00081601LAACHEN 00005 0
TIABCWM 00385964VABERCWMBOI 78128 0
"""
class MockQuerySet():
def __init__(self, mockObj):
self._mock = mockObj
def count(self):
return 1
def __getitem__(self, index):
return self._mock
def setUp(self):
self.mock_entity_manager = mock()
self.provider = CifTimetableProvider(
entity_manager=self.mock_entity_manager
)
self.empty_query_set = mock()
self.entity_query_set = self.MockQuerySet(mock())
when(self.empty_query_set).count().thenReturn(0)
when(self.mock_entity_manager).get_entity(
'tiploc', 'ABCWM').thenReturn(self.empty_query_set)
when(self.mock_entity_manager).get_entity(
"tiploc", 'AACHEN').thenReturn(self.entity_query_set)
def testThatTiplocsAreLookedUp(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager, times=2).get_entity(any(), any())
def testThatTiplocsAreLookedUpWithCorrectNamespace(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager, times=2).get_entity("tiploc", any())
def testThatTiplocsAreLookedUpWithName(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager).get_entity("tiploc", "AACHEN")
def testThatTiplocsAreLookedUpWithStrippedName(self):
self.provider.import_from_string(self.sample_file)
verify(self.mock_entity_manager).get_entity('tiploc', 'ABCWM')
def testThatTiplocsAreCreatedWhenNoneAreReturned(self):
self.provider.import_from_string(self.sample_file)
# Annoyingly mockito doesn't properly support assertions on the args
verify(self.mock_entity_manager).create(
source=any(),
primary_type=any(),
identifiers=any(),
titles=any()
)
def testThatTiplocsAreCreatedWithCorrectSource(self):
self.provider = CifTimetableProvider()
self.provider.import_from_string(self.sample_file)
entity = Entity.objects.get_entity('tiploc', 'ABCWM')
self.assertEquals(self.provider.source, entity[0].source)
def testThatTiplocsAreCreatedWithCorrectType(self):
self.provider = CifTimetableProvider()
self.provider.import_from_string(self.sample_file)
entity = Entity.objects.get_entity('tiploc', 'ABCWM')
self.assertEquals(self.provider.entity_type, entity[0].primary_type)
def testThatTiplocsAreCreatedWithCorrectName(self):
self.provider = CifTimetableProvider()
self.provider.import_from_string(self.sample_file)
entity = Entity.objects.get_entity('tiploc', 'ABCWM')
self.assertEquals('Abercwmboi', entity[0].title)
def testGetSource(self):
self.assertEquals(
'molly.apps.places.providers.cif',
self.provider.source.module_name
)
def testGetEntityTypeVerboseName(self):
self.assertEquals(
'rail network timing point',
self.provider.entity_type.verbose_name
)
def testGetEntityTypeVerboseNamePlural(self):
self.assertEquals(
'rail network timing points',
self.provider.entity_type.verbose_name_plural
)
def testGetEntityTypeVerboseNameSingular(self):
self.assertEquals(
'a rail network timing point',
self.provider.entity_type.verbose_name_singular
)
if __name__ == '__main__':
unittest.main()
|
py | 1a48d03a6841da511a0d961a3ce4f63d17e22b10 | # Generated by Django 3.2.5 on 2021-10-07 14:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cart_id', models.CharField(max_length=200)),
('date_added', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('is_active', models.BooleanField(default=True)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.cart')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product')),
],
),
]
|
py | 1a48d0a6b04636ad670eb9cf01b4491cfad877eb | import unittest
import os
from shutil import rmtree
from abc import ABC
import numpy as np
import z5py
class DatasetTestMixin(ABC):
def setUp(self):
self.shape = (100, 100, 100)
self.path = 'array.' + self.data_format
self.root_file = z5py.File(self.path, use_zarr_format=self.data_format == 'zarr')
self.base_dtypes = [
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64'
]
self.dtypes = tuple(
self.base_dtypes +
[np.dtype(s) for s in self.base_dtypes] +
[
'<i1', '<i2', '<i4', '<i8',
'<u1', '<u2', '<u4', '<u8',
'<f4', '<f8'
] +
[
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float32, np.float64
]
)
def tearDown(self):
try:
rmtree(self.path)
except OSError:
pass
def check_array(self, result, expected, msg=None):
self.assertEqual(result.shape, expected.shape, msg)
self.assertTrue(np.allclose(result, expected), msg)
def test_ds_open_empty(self):
self.root_file.create_dataset('test',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
ds = self.root_file['test']
out = ds[:]
self.check_array(out, np.zeros(self.shape))
def test_ds_dtypes(self):
shape = (100, 100)
chunks = (10, 10)
for dtype in self.dtypes:
ds = self.root_file.create_dataset('data_%s' % hash(dtype),
dtype=dtype,
shape=shape,
chunks=chunks)
in_array = np.random.rand(*shape).astype(dtype)
ds[:] = in_array
out_array = ds[:]
self.check_array(out_array, in_array,
'datatype %s failed for format %s' % (self.data_format.title(),
dtype))
def check_ones(self, sliced_ones, expected_shape, msg=None):
self.check_array(sliced_ones, np.ones(expected_shape, dtype=np.uint8), msg)
def test_ds_simple_write(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, np.uint8)
def test_ds_indexing(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, np.uint8)
self.check_ones(ds[:], self.shape, 'full index failed')
self.check_ones(ds[1, ...], (100, 100), 'trailing ellipsis failed')
self.check_ones(ds[..., 1], (100, 100), 'leading ellipsis failed')
self.check_ones(ds[1], (100, 100), 'implicit ellipsis failed')
self.check_ones(ds[:, :, :, ...], self.shape, 'superfluous ellipsis failed')
self.check_ones(ds[500:501, :, :], (0, 100, 100), 'out-of-bounds slice failed')
self.check_ones(ds[-501:500, :, :], (0, 100, 100), 'negative out-of-bounds slice failed')
self.check_ones(ds[1, :, :], (100, 100), 'integer index failed')
self.check_ones(ds[-20:, :, :], (20, 100, 100), 'negative slice failed')
self.assertEqual(ds[1, 1, 1], 1, 'point index failed')
with self.assertRaises(ValueError):
ds[500, :, :]
with self.assertRaises(ValueError):
ds[-500, :, :]
with self.assertRaises(ValueError):
ds[..., :, ...]
with self.assertRaises(ValueError):
ds[1, 1, slice(0, 100, 2)]
with self.assertRaises(TypeError):
ds[[1, 1, 1]] # explicitly test behaviour different to h5py
class NotAnIndex(object):
pass
with self.assertRaises(TypeError):
ds[1, 1, NotAnIndex()]
def test_ds_scalar_broadcast(self):
for dtype in self.base_dtypes:
ds = self.root_file.create_dataset('ones_%s' % dtype,
dtype=dtype,
shape=self.shape,
chunks=(10, 10, 10))
ds[:] = 1
self.check_ones(ds[:], self.shape)
def test_ds_scalar_broadcast_from_float(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = float(1)
        self.check_ones(ds[:], self.shape)
def test_ds_scalar_broadcast_from_bool(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = True
self.check_ones(ds[:], self.shape)
def test_ds_set_with_arraylike(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[0, :2, :2] = [[1, 1], [1, 1]]
self.check_ones(ds[0, :2, :2], (2, 2))
def test_ds_set_from_float(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, dtype=float)
self.check_ones(ds[:], self.shape)
def test_ds_set_from_bool(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
ds[:] = np.ones(self.shape, dtype=bool)
self.check_ones(ds[:], self.shape)
def test_ds_fancy_broadcast_fails(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
with self.assertRaises(ValueError):
ds[0, :10, :10] = np.ones(10, dtype=np.uint8)
def test_ds_write_object_fails(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
class ArbitraryObject(object):
pass
with self.assertRaises(OSError):
ds[0, 0, :2] = [ArbitraryObject(), ArbitraryObject()]
def test_ds_write_flexible_fails(self):
ds = self.root_file.create_dataset('ones', dtype=np.uint8,
shape=self.shape, chunks=(10, 10, 10))
with self.assertRaises(TypeError):
ds[0, 0, 0] = "hey, you're not a number"
def test_readwrite_multithreaded(self):
for n_threads in (1, 2, 4, 8):
ds = self.root_file.create_dataset('data_mthread_%i' % n_threads,
dtype='float64',
shape=self.shape,
chunks=(10, 10, 10),
n_threads=n_threads)
in_array = np.random.rand(*self.shape)
ds[:] = in_array
out_array = ds[:]
self.check_array(out_array, in_array)
def test_create_nested_dataset(self):
self.root_file.create_dataset('group/sub_group/data',
shape=self.shape,
dtype='float64',
chunks=(10, 10, 10))
self.assertTrue(os.path.exists(os.path.join(self.path, 'group', 'sub_group', 'data')))
def test_create_with_data(self):
in_array = np.random.rand(*self.shape)
ds = self.root_file.create_dataset('data', data=in_array)
out_array = ds[:]
self.check_array(out_array, in_array)
def test_require_dataset(self):
in_array = np.random.rand(*self.shape)
self.root_file.require_dataset('data', data=in_array,
shape=in_array.shape,
dtype=in_array.dtype)
ds = self.root_file.require_dataset('data',
shape=in_array.shape,
dtype=in_array.dtype)
out_array = ds[:]
self.check_array(out_array, in_array)
def test_non_contiguous(self):
ds = self.root_file.create_dataset('test',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
# make a non-contiguous 3d array of the correct shape (100)^3
vol = np.arange(200**3).astype('float32').reshape((200, 200, 200))
in_array = vol[::2, ::2, ::2]
ds[:] = in_array
out_array = ds[:]
self.check_array(out_array, in_array, 'failed for non-contiguous data')
def test_empty_chunk(self):
ds = self.root_file.create_dataset('test',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
bb = np.s_[:10, :10, :10]
if ds.is_zarr:
chunk_path = os.path.join(self.path, 'test', '0.0.0')
else:
chunk_path = os.path.join(self.path, 'test', '0', '0', '0')
ds[bb] = 0
self.assertFalse(os.path.exists(chunk_path))
ds[bb] = 1
self.assertTrue(os.path.exists(chunk_path))
ds[bb] = 0
self.assertFalse(os.path.exists(chunk_path))
def test_invalid_options(self):
with self.assertRaises(RuntimeError):
self.root_file.create_dataset('test1', shape=self.shape, dtype='float32',
chunks=(10, 10, 10), compression='raw',
level=5)
with self.assertRaises(RuntimeError):
self.root_file.create_dataset('test2', shape=self.shape, dtype='float32',
chunks=(10, 10, 10), compression='gzip',
level=5, blub='blob')
def test_readwrite_chunk(self):
shape = (100, 100)
chunks = (10, 10)
for dtype in self.base_dtypes:
ds = self.root_file.create_dataset('test_%s' % dtype, dtype=dtype,
shape=shape, chunks=chunks,
compression='raw')
# test empty chunk
out = ds.read_chunk((0, 0))
self.assertEqual(out, None)
# test read/write
chunks_per_dim = ds.chunks_per_dimension
for x in range(chunks_per_dim[0]):
for y in range(chunks_per_dim[1]):
data = np.random.rand(*chunks)
if dtype not in ('float32', 'float64'):
data *= 128
data = data.astype(dtype)
ds.write_chunk((x, y), data)
out = ds.read_chunk((x, y))
self.assertEqual(data.shape, out.shape)
self.assertTrue(np.allclose(data, out))
def test_read_direct(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('test', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
# generate test data
data = np.random.rand(*shape)
ds[:] = data
# test reading full dataset
out = np.zeros(shape)
ds.read_direct(out)
self.assertTrue(np.allclose(out, data))
# test reading with selection
selection = np.s_[11:53, 67:84]
out = np.zeros(shape)
ds.read_direct(out, selection, selection)
self.assertTrue(np.allclose(out[selection], data[selection]))
def test_write_direct(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('test', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
# generate test data
data = np.random.rand(*shape)
# test writing full dataset
ds.write_direct(data)
out = ds[:]
self.assertTrue(np.allclose(out, data))
# test writing with selection
ds[:] = 0
selection = np.s_[11:53, 67:84]
ds.write_direct(data, selection, selection)
out = ds[:]
self.assertTrue(np.allclose(out[selection], data[selection]))
def test_irregular_chunks(self):
shape = (123, 54, 211)
chunks = (13, 33, 22)
ds = self.root_file.create_dataset('test', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
data = np.random.rand(*shape)
ds[:] = data
out = ds[:]
self.assertTrue(np.allclose(out, data))
def test_nd(self):
f = self.root_file
for ndim in range(1, 6):
size = 100 if ndim < 4 else 20
shape = (size,) * ndim
chunks = (10,) * ndim
ds = f.create_dataset('test_%i' % ndim, dtype='float64',
shape=shape, chunks=chunks, compression='raw')
data = np.random.rand(*shape)
ds[:] = data
out = ds[:]
self.assertTrue(np.allclose(out, data))
def test_no_implicit_squeeze(self):
arr = np.ones((5, 5, 5))
ds = self.root_file.create_dataset('ds', data=arr)
self.assertEqual(ds[:, 0:1, :].shape, arr[:, 0:1, :].shape)
def test_no_implicit_squeeze_singleton(self):
"""Issue #102
https://github.com/constantinpape/z5/issues/102
"""
arr = np.ones((5, 5, 5))
ds = self.root_file.create_dataset('ds', data=arr)
self.assertEqual(
ds[0:1, 0:1, 0:1].shape,
arr[0:1, 0:1, 0:1].shape,
)
def test_explicit_squeeze(self):
"""Issue #103
https://github.com/constantinpape/z5/issues/103
"""
arr = np.full((5, 4, 3), 1)
ds = self.root_file.create_dataset('ds543', data=arr)
self.assertEqual(ds[:, 1, :].shape, arr[:, 1, :].shape)
self.assertNotIsInstance(ds[1, 1, 1], np.ndarray)
def test_singleton_dtype(self):
"""Issue #102
https://github.com/constantinpape/z5/issues/102
"""
arr = np.ones((5, 5, 5))
ds = self.root_file.create_dataset('ds', data=arr)
self.assertEqual(type(ds[1, 1, 1]), type(arr[1, 1, 1]))
def test_broadcast_empty(self):
"""Issue #107
https://github.com/constantinpape/z5/issues/107
"""
ds = self.root_file.create_dataset('test', shape=(100, 100), chunks=(25, 25),
dtype='uint8', compression='raw')
ds[:20, :20] = 1
out = ds[:]
self.assertTrue(np.allclose(out[:20, :20], 1))
def test_empty_chunks_non_aligned_write(self):
"""Issue #106
https://github.com/constantinpape/z5/issues/106
"""
ds = self.root_file.create_dataset(name='test', shape=(128,), chunks=(32,),
compression='raw', dtype='uint8')
inp = np.ones((100,), dtype='uint8')
inp[90:100] = 0
ds[:100] = inp
# last chunk should be empty, but this is not the case if buffer was not
# cleared correctly
out = ds[-32:]
self.assertTrue(np.allclose(out, 0))
class TestZarrDataset(DatasetTestMixin, unittest.TestCase):
data_format = 'zarr'
def test_varlen(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('varlen', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
with self.assertRaises(RuntimeError):
ds.write_chunk((0, 0), np.random.rand(10), True)
class TestN5Dataset(DatasetTestMixin, unittest.TestCase):
data_format = 'n5'
def test_varlen(self):
shape = (100, 100)
chunks = (10, 10)
ds = self.root_file.create_dataset('varlen', dtype='float64',
shape=shape, chunks=chunks,
compression='raw')
# max_len = 100
max_len = 10
chunks_per_dim = ds.chunks_per_dimension
for x in range(chunks_per_dim[0]):
for y in range(chunks_per_dim[1]):
test_data = np.random.rand(np.random.randint(1, max_len))
ds.write_chunk((x, y), test_data, True)
out = ds.read_chunk((x, y))
self.assertEqual(test_data.shape, out.shape)
self.assertTrue(np.allclose(test_data, out))
if __name__ == '__main__':
unittest.main()
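# Added note: the two concrete classes above inherit every test from
# DatasetTestMixin and only differ in `data_format`.  Outside the tests the same
# pattern used in setUp() applies, e.g.
#   f = z5py.File("array.n5", use_zarr_format=False)
#   ds = f.create_dataset("data", shape=(100, 100), chunks=(10, 10), dtype="float32")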
|
py | 1a48d3677762f9a06b349d1d0ade140c29c09d05 | """AppAssure 5 Core API"""
from appassure.api import AppAssureAPI
class ISqlManagement(AppAssureAPI):
"""Full documentation online at
http://docs.appassure.com/display/AA50D/ISqlManagement
"""
def verifyCredentials(self, data, agentId):
"""Verifies credentials to all SQL instances. Throws
exception on validation failure for any instance.
"""
return self.session.request('sql/agent/%s/verifySqlCredentials'
% (agentId), 'PUT',
self.getXML(data, 'credentialsDescriptor'))
def setAgentSqlSettings(self, data, agentId):
"""Sets agent-level sql settings."""
return self.session.request('sql/agents/%s/sqlSettings'
% (agentId), 'PUT',
self.getXML(data, 'agentSqlSettings'))
def getAgentSqlSettings(self, agentId):
"""Gets agent-level sql settings."""
return self.session.request('sql/agents/%s/sqlSettings'
% (agentId))
def setAttachabilitySettings(self, data):
"""Sets core-level attachability settings."""
return self.session.request('sql/attachabilitySettings', 'PUT',
self.getXML(data, 'attachabilitySettings'))
def getAttachabilitySettings(self):
"""Gets core-level attachability settings."""
return self.session.request('sql/attachabilitySettings')
def testSqlConnection(self, data):
"""Tests connection with instances of MSSQL servers
installed on the Core and validates whether it would be
possible to use those instances for attachability check.
"""
return self.session.request('sql/connection', 'PUT',
self.getXML(data, 'sqlCredentials'))
def forceAttachability(self, recoveryPointId):
"""Force attachability job for agent with given ID."""
return self.session.request('sql/recoveryPoints/%s/force'
% (recoveryPointId), 'POST')
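# Added usage sketch: the session wiring comes from appassure.api.AppAssureAPI and
# is not shown in this file; the constructor signature below is an assumption.
#   sql = ISqlManagement(session)
#   settings = sql.getAgentSqlSettings(agent_id)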
|
py | 1a48d42acf96232c09b89113a25b20757ae436ff | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from fboss.system_tests.system_tests import FbossBaseSystemTest, test_tags
@test_tags("run-on-diff")
class Fb303Tests(FbossBaseSystemTest):
""" Verify we receive well-formatted IPv6 route advertisments
On all hosts, on all interfaces
"""
def test_aliveSince(self):
with self.test_topology.switch_thrift() as sw_client:
timestamp1 = sw_client.aliveSince()
timestamp2 = sw_client.aliveSince()
self.assertEqual(timestamp1, timestamp2)
|
py | 1a48d42e4599eabe39fd0e1c23331d552bba75de | class Port:
A = None
B = None
C = None
D = None
S1 = None
S2 = None
S3 = None
S4 = None
class Direction:
CLOCKWISE = 1
COUNTERCLOCKWISE = 2
class Button:
LEFT_UP = 1
class Color:
GREEN = 1
RED = 2
class ImageFile:
pass
class SoundFile:
pass
class Stop:
COAST = None
BRAKE = None
HOLD = None
|
py | 1a48d961a64fc5d648583adc145bb90aaf99d32c | from os import path, environ
from os.path import join, abspath, dirname
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst')) as f:
readme = f.read()
with open(join(here, 'requirements.txt')) as f:
required = f.read().splitlines()
with open(join(abspath(dirname(__file__)), "VERSION"), "r") as v:
VERSION = v.read().replace("\n", "")
with open(join(abspath(dirname(__file__)), "PATCH"), "r") as v:
PATCH = v.read().replace("\n", "")
setup(
name='patton-cli',
version=f"{VERSION}.{PATCH}",
packages=find_packages(),
long_description=readme,
install_requires=required,
url='https://github.com/bbva/patton-cli',
license='MIT',
author='BBVA Labs',
description='CLI for Patton-Server: The vulnerability knowledge store',
entry_points={'console_scripts': [
'patton = patton_client.cli:main',
]},
classifiers=[
'Environment :: Console',
'Intended Audience :: System Administrators',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
],
)
|
py | 1a48d9a8a7502e79bda34c71f307e2cded3f3d5e | # https://codeforces.com/problemset/problem/1358/A
def find_min_lamps(n: int, m: int) -> int:
    if n % 2 == 0:
        count = n // 2 * m
    elif m % 2 == 0:
        count = m // 2 * n
    else:
        count = n // 2 * m + m // 2 + 1
return count
def main():
t = int(input())
cases = [list(map(int, input().split())) for _ in range(t)]
for case in cases:
print(find_min_lamps(case[0], case[1]))
if __name__=='__main__':
main() |
py | 1a48d9d96e6804e819610bf7367fd27cf3e1ce70 | import argparse
import os
import sys
import time
import re
from tqdm import tqdm
from datetime import datetime
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torch.onnx
import utils
from transformer_net import TransformerNet
from vgg import Vgg16
def check_paths(args):
try:
if not os.path.exists(args.save_model_dir):
os.makedirs(args.save_model_dir)
if args.checkpoint_model_dir is not None and not (os.path.exists(args.checkpoint_model_dir)):
os.makedirs(args.checkpoint_model_dir)
except OSError as e:
print(e)
sys.exit(1)
def train(args):
device = torch.device("cuda" if args.cuda else "cpu")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
transform = transforms.Compose([
transforms.Resize(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
transformer = TransformerNet().to(device)
optimizer = Adam(transformer.parameters(), args.lr)
mse_loss = torch.nn.MSELoss()
vgg = Vgg16(requires_grad=False).to(device)
style_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
style = utils.load_image(args.style_image, size=args.style_size)
style = style_transform(style)
style = style.repeat(args.batch_size, 1, 1, 1).to(device)
features_style = vgg(utils.normalize_batch(style))
gram_style = [utils.gram_matrix(y) for y in features_style]
for e in range(args.epochs):
transformer.train()
agg_content_loss = 0.
agg_style_loss = 0.
count = 0
print('%s-training at epoch %d of %d...' % (datetime.now().strftime('%H:%M:%S.%f'), e, args.epochs))
with tqdm(total=len(train_loader)) as pbar:
for batch_id, (x, _) in enumerate(train_loader):
pbar.set_description('%s-training at batch %d...' % (datetime.now().strftime('%H:%M:%S.%f'), batch_id))
n_batch = len(x)
count += n_batch
optimizer.zero_grad()
x = x.to(device)
y = transformer(x)
y = utils.normalize_batch(y)
x = utils.normalize_batch(x)
features_y = vgg(y)
features_x = vgg(x)
content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2)
style_loss = 0.
for ft_y, gm_s in zip(features_y, gram_style):
gm_y = utils.gram_matrix(ft_y)
style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :])
style_loss *= args.style_weight
total_loss = content_loss + style_loss
total_loss.backward()
optimizer.step()
agg_content_loss += content_loss.item()
agg_style_loss += style_loss.item()
if (batch_id + 1) % args.log_interval == 0:
mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
time.ctime(), e + 1, count, len(train_dataset),
agg_content_loss / (batch_id + 1),
agg_style_loss / (batch_id + 1),
(agg_content_loss + agg_style_loss) / (batch_id + 1)
)
print(mesg)
if args.checkpoint_model_dir is not None and (batch_id + 1) % args.checkpoint_interval == 0:
transformer.eval().cpu()
ckpt_model_filename = "ckpt_epoch_" + str(e) + "_batch_id_" + str(batch_id + 1) + ".pth"
ckpt_model_path = os.path.join(args.checkpoint_model_dir, ckpt_model_filename)
torch.save(transformer.state_dict(), ckpt_model_path)
transformer.to(device).train()
pbar.update(1)
# save model
transformer.eval().cpu()
save_model_filename = "epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
args.content_weight) + "_" + str(args.style_weight) + ".model"
save_model_path = os.path.join(args.save_model_dir, save_model_filename)
torch.save(transformer.state_dict(), save_model_path)
print("\nDone, trained model saved at", save_model_path)
def stylize(args):
device = torch.device("cuda" if args.cuda else "cpu")
image_extensions = ['.jpg', '.jpeg', '.png']
if os.path.isfile(args.content_image):
content_files = [args.content_image]
else:
content_files = [os.path.join(args.content_image, f) for f in os.listdir(args.content_image) if
os.path.splitext(f)[-1].lower() in image_extensions]
if os.path.isfile(args.model):
model_files = [args.model]
else:
model_files = [os.path.join(args.model, f) for f in os.listdir(args.model) if
f.endswith('.pth') or f.endswith('.model') or f.endswith('.onnx')]
with tqdm(total=len(content_files) * len(model_files)) as pbar:
for content_file in content_files:
content_image = utils.load_image(content_file, scale=args.content_scale)
content_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
content_image = content_transform(content_image)
content_image = content_image.unsqueeze(0).to(device)
for model_file in model_files:
if len(content_files) == 1 and len(model_files) == 1 and not os.path.isdir(args.output_image):
output_file = args.output_image
else:
content = os.path.splitext(os.path.basename(content_file))[0]
style = os.path.splitext(os.path.basename(model_file))[0]
output_file = os.path.join(args.output_image, content + '+' + style + '.png')
pbar.set_description('%s-generating %s...' % (datetime.now().strftime('%H:%M:%S.%f'), output_file))
if args.model.endswith(".onnx"):
args.model = model_file
output = stylize_onnx(content_image, args)
else:
with torch.no_grad():
style_model = TransformerNet()
state_dict = torch.load(model_file)
# remove saved deprecated running_* keys in InstanceNorm from the checkpoint
for k in list(state_dict.keys()):
if re.search(r'in\d+\.running_(mean|var)$', k):
del state_dict[k]
style_model.load_state_dict(state_dict)
style_model.to(device)
style_model.eval()
if args.export_onnx:
assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
output = torch.onnx._export(
style_model, content_image, args.export_onnx, opset_version=11,
).cpu()
else:
output = style_model(content_image).cpu()
utils.save_image(output_file, output[0])
pbar.update(1)
def stylize_onnx(content_image, args):
"""
Read ONNX model and run it using onnxruntime
"""
assert not args.export_onnx
import onnxruntime
ort_session = onnxruntime.InferenceSession(args.model)
def to_numpy(tensor):
return (
tensor.detach().cpu().numpy()
if tensor.requires_grad
else tensor.cpu().numpy()
)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(content_image)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
return torch.from_numpy(img_out_y)
def main():
main_arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
subparsers = main_arg_parser.add_subparsers(title="subcommands", dest="subcommand")
train_arg_parser = subparsers.add_parser("train", help="parser for training arguments")
train_arg_parser.add_argument("--epochs", type=int, default=2,
help="number of training epochs, default is 2")
train_arg_parser.add_argument("--batch-size", type=int, default=4,
help="batch size for training, default is 4")
train_arg_parser.add_argument("--dataset", type=str, required=True,
help="path to training dataset, the path should point to a folder "
"containing another folder with all the training images")
train_arg_parser.add_argument("--style-image", type=str, default="images/style-images/mosaic.jpg",
help="path to style-image")
train_arg_parser.add_argument("--save-model-dir", type=str, required=True,
help="path to folder where trained model will be saved.")
train_arg_parser.add_argument("--checkpoint-model-dir", type=str, default=None,
help="path to folder where checkpoints of trained models will be saved")
train_arg_parser.add_argument("--image-size", type=int, default=256,
help="size of training images, default is 256 X 256")
train_arg_parser.add_argument("--style-size", type=int, default=None,
help="size of style-image, default is the original size of style image")
train_arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
train_arg_parser.add_argument("--seed", type=int, default=42,
help="random seed for training")
train_arg_parser.add_argument("--content-weight", type=float, default=1e5,
help="weight for content-loss, default is 1e5")
train_arg_parser.add_argument("--style-weight", type=float, default=1e10,
help="weight for style-loss, default is 1e10")
train_arg_parser.add_argument("--lr", type=float, default=1e-3,
help="learning rate, default is 1e-3")
train_arg_parser.add_argument("--log-interval", type=int, default=500,
help="number of images after which the training loss is logged, default is 500")
train_arg_parser.add_argument("--checkpoint-interval", type=int, default=2000,
help="number of batches after which a checkpoint of the trained model will be created")
eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments")
eval_arg_parser.add_argument("--content-image", type=str, required=True,
help="path to content image you want to stylize")
eval_arg_parser.add_argument("--content-scale", type=float, default=None,
help="factor for scaling down the content image")
eval_arg_parser.add_argument("--output-image", type=str, required=True,
help="path for saving the output image")
eval_arg_parser.add_argument("--model", type=str, required=True,
help="saved model to be used for stylizing the image. If file ends in .pth - PyTorch path is used, if in .onnx - Caffe2 path")
eval_arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
eval_arg_parser.add_argument("--export_onnx", type=str,
help="export ONNX model to a given file")
args = main_arg_parser.parse_args()
if args.subcommand is None:
print("ERROR: specify either train or eval")
sys.exit(1)
if args.cuda and not torch.cuda.is_available():
print("ERROR: cuda is not available, try running on CPU")
sys.exit(1)
if args.subcommand == "train":
check_paths(args)
train(args)
else:
stylize(args)
if __name__ == "__main__":
main()
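# Example invocations (a sketch; the script name and all paths below are hypothetical,
# the flags mirror the argument parsers defined in main()):
#
#   python neural_style.py train --dataset /path/to/coco --style-image images/style-images/mosaic.jpg \
#       --save-model-dir /path/to/models --epochs 2 --cuda 1
#   python neural_style.py eval --content-image in.jpg --model /path/to/models/mosaic.pth \
#       --output-image out.png --cuda 0
#   python neural_style.py eval --content-image in.jpg --model /path/to/models/mosaic.pth \
#       --output-image out.png --cuda 0 --export_onnx mosaic.onnx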
|
py | 1a48db8e134f2157a80910467894a19dab6c5dd1 | """Connect settings with environments.
Helpers that read keys, credentials and database settings either from
environment variables (cloud deployments) or from a local env.yaml file.
"""
import datetime
import os
import random
import string
import yaml
from django.conf import settings
def generate_key():
"""Generate key."""
chars = (
''.join([string.ascii_letters, string.digits, string.punctuation])
.replace("'", '')
.replace('"', '')
.replace('\\', '')
)
return ''.join([random.SystemRandom().choice(chars) for i in range(50)])
def get_password_testing():
"""Get generic password for testing data."""
if settings.CLOUD:
return [os.environ.get('passwordtest')]
with open('env.yaml') as file_name:
data = yaml.safe_load(file_name)
return (data['test_variables']['password'],)
def get_domain():
"""Get allowed host."""
return [os.environ.get('domain')]
def get_key():
"""Get application key."""
return os.environ.get('key')
def get_credentials():
"""Get credentials."""
return os.environ.get('credentials')
def get_bucked():
"""Get application bucked access."""
return os.environ.get('gcpbucked')
def get_project_id():
"""Get application project access."""
return os.environ.get('project')
def get_dev_connection():
"""Connect local development."""
with open('env.yaml') as file_name:
data = yaml.safe_load(file_name)
return {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': data['env_variables']['database'],
'USER': data['env_variables']['user'],
'PASSWORD': data['env_variables']['password'],
'HOST': data['env_variables']['host'],
'PORT': '5432',
}
}
def get_connection():
"""Connect for production enviroment."""
return {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('database'),
'USER': os.environ.get('user'),
'PASSWORD': os.environ.get('password'),
'HOST': os.environ.get('host'),
'PORT': '5432',
}
}
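# A minimal usage sketch for a Django settings module (the module name `env_tools` is an
# assumption; CLOUD is whatever flag the settings use to distinguish production from local):
#
#     from env_tools import generate_key, get_connection, get_dev_connection, get_domain, get_key
#
#     SECRET_KEY = get_key() or generate_key()
#     ALLOWED_HOSTS = get_domain()
#     DATABASES = get_connection() if CLOUD else get_dev_connection()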
|
py | 1a48db8f5d0c2c4735bf3ec08310600ac5f65268 | import hashlib
import hmac
import os
from collections import namedtuple
from functools import lru_cache
from typing import List, Tuple
from urllib.parse import urlparse, urlunparse, quote
from botocore.client import ClientEndpointBridge
from botocore.loaders import create_loader
from botocore.model import ServiceModel
from botocore.regions import EndpointResolver
from botocore.session import Session
from notebook.base.handlers import APIHandler
from tornado.httpclient import (
AsyncHTTPClient,
HTTPRequest,
HTTPResponse,
HTTPClientError,
HTTPError,
)
from tornado.httputil import HTTPServerRequest, HTTPHeaders
ServiceInfo = namedtuple(
"ServiceInfo", ["service_name", "host", "endpoint_url", "credential_scope"]
)
UpstreamAuthInfo = namedtuple(
"UpstreamAuthInfo", ["service_name", "region", "signed_headers"]
)
# maxsize is arbitrarily taken from https://docs.python.org/3/library/functools.html#functools.lru_cache
@lru_cache(maxsize=128)
def get_service_info(
endpoint_resolver: EndpointResolver, service_name: str, region: str
) -> ServiceInfo:
service_model_json = create_loader().load_service_model(service_name, "service-2")
service_data = ClientEndpointBridge(endpoint_resolver).resolve(
service_name=ServiceModel(
service_model_json, service_name=service_name
).endpoint_prefix,
region_name=region,
)
return ServiceInfo(
service_name,
service_data["metadata"]["hostname"],
service_data["endpoint_url"],
service_data["metadata"].get("credentialScope"),
)
def create_endpoint_resolver() -> EndpointResolver:
"""
Creates an instance of the botocore EndpointResolver. Used to inject the instance during application initialization
to avoid loading endpoint data on a per-request basis.
:return: the EndpointResolver instance
"""
return EndpointResolver(create_loader().load_data("endpoints"))
class AwsProxyHandler(APIHandler):
def initialize(self, endpoint_resolver: EndpointResolver, session: Session):
"""
Hook for Tornado handler initialization.
:param session: the botocore session
:param endpoint_resolver: the application level EndpointResolver instance
"""
self.endpoint_resolver = endpoint_resolver
self.session = session
async def handle_request(self):
try:
response = await AwsProxyRequest(
self.request, self.endpoint_resolver, self.session
).execute_downstream()
self.set_status(response.code, response.reason)
self._finish_response(response)
except HTTPClientError as e:
self.set_status(e.code, e.message)
if e.response:
self._finish_response(e.response)
else:
super(APIHandler, self).finish()
def _finish_response(self, response: HTTPResponse):
for name, value in response.headers.get_all():
if self._is_blacklisted_response_header(name, value):
continue
self.set_header(name, value)
super(APIHandler, self).finish(response.body or None)
async def post(self, *args):
await self.handle_request()
async def get(self, *args):
await self.handle_request()
async def delete(self, *args):
await self.handle_request()
async def patch(self, *args):
await self.handle_request()
async def put(self, *args):
await self.handle_request()
async def head(self, *args):
await self.handle_request()
@staticmethod
def _is_blacklisted_response_header(name: str, value: str) -> bool:
if name == "Transfer-Encoding" and value == "chunked":
# Responses are no longer "chunked" when we send them to the browser.
# If we retain this header, then the browser will wait forever for more chunks.
return True
elif name == "Content-Length":
# Tornado will auto-set the Content-Length
return True
else:
return False
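# A sketch of how AwsProxyHandler might be wired into the Tornado web app behind the
# notebook server (the "/awsproxy" route is inferred from AwsProxyRequest below; the
# exact registration code is not part of this file):
#
#     web_app.add_handlers(
#         ".*$",
#         [
#             (
#                 "/awsproxy(.*)",
#                 AwsProxyHandler,
#                 {"endpoint_resolver": create_endpoint_resolver(), "session": Session()},
#             )
#         ],
#     )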
class AwsProxyRequest(object):
"""
A class representing a request being proxied from an upstream client (browser) to the downstream AWS service.
"""
BLACKLISTED_REQUEST_HEADERS: List[str] = ["Origin", "Host"]
def __init__(
self,
upstream_request: HTTPServerRequest,
endpoint_resolver: EndpointResolver,
session: Session,
):
"""
:param upstream_request: The original upstream HTTP request from the client (browser) to Jupyter
:param endpoint_resolver: The botocore endpoint_resolver instance
"""
self.upstream_request = upstream_request
self.endpoint_resolver = endpoint_resolver
self.credentials = session.get_credentials()
self.upstream_auth_info = self._build_upstream_auth_info()
self.service_info = get_service_info(
endpoint_resolver,
self.upstream_auth_info.service_name,
self.upstream_auth_info.region,
)
# if the environment variable is not specified, os.getenv returns None, and no whitelist is in effect.
self.whitelisted_services = (
os.getenv("AWS_JUPYTER_PROXY_WHITELISTED_SERVICES").strip(",").split(",")
if os.getenv("AWS_JUPYTER_PROXY_WHITELISTED_SERVICES") is not None
else None
)
async def execute_downstream(self) -> HTTPResponse:
"""
Executes the downstream request (Jupyter to AWS service) and return the response or the error
after adding SigV4 authentication.
"allow_nonstandard_methods" is used because Tornado rejects POST requests without a body without this parameter,
and some operations send such requests (such as S3.InitiateMultipartUpload)
:return: the HTTPResponse
"""
if (
self.whitelisted_services is not None
and self.service_info.service_name not in self.whitelisted_services
):
raise HTTPError(
403,
message=f"Service {self.service_info.service_name} is not whitelisted for proxying requests",
)
base_service_url = urlparse(self.service_info.endpoint_url)
start_index = self.upstream_request.path.index("/awsproxy") + len("/awsproxy")
downstream_request_path = (
base_service_url.path + self.upstream_request.path[start_index:] or "/"
)
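# e.g. (hypothetical path) an upstream request to ".../awsproxy/some-bucket/some-key"
# is forwarded to "/some-bucket/some-key" on the resolved service endpoint.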
return await AsyncHTTPClient().fetch(
HTTPRequest(
method=self.upstream_request.method,
url=self._compute_downstream_url(downstream_request_path),
headers=self._compute_downstream_headers(downstream_request_path),
body=self.upstream_request.body or None,
follow_redirects=False,
allow_nonstandard_methods=True,
)
)
def _compute_downstream_url(self, downstream_request_path) -> str:
base_service_url = urlparse(self.service_info.endpoint_url)
return urlunparse(
[
base_service_url.scheme,
base_service_url.netloc,
downstream_request_path,
base_service_url.params,
self.upstream_request.query,
None,
]
)
def _compute_downstream_headers(self, downstream_request_path) -> HTTPHeaders:
"""
1. Copy original headers apart from blacklisted ones
2. Add the Host header based on the service model
3. Add a security token header if the current session is using temporary credentials
4. Add the SigV4 Authorization header.
:param downstream_request_path: the URL path for the downstream service request
:return: the headers to pass to the downstream request
"""
downstream_request_headers = self.upstream_request.headers.copy()
for blacklisted_request_header in self.BLACKLISTED_REQUEST_HEADERS:
try:
del downstream_request_headers[blacklisted_request_header]
except KeyError:
pass
base_service_url = urlparse(self.service_info.endpoint_url)
downstream_request_headers["Host"] = base_service_url.netloc
if self.credentials.token:
downstream_request_headers["X-Amz-Security-Token"] = self.credentials.token
downstream_request_headers["Authorization"] = self._sigv4_auth_header(
downstream_request_path
)
return downstream_request_headers
def _sigv4_auth_header(self, downstream_request_path) -> str:
"""
Computes the SigV4 signature following https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
:param downstream_request_path: the URL path for the downstream service's request
:return: the Authorization header containing the SigV4 credentials
"""
# ************* TASK 1: CREATE THE CANONICAL REQUEST*************
canonical_method = self.upstream_request.method
canonical_uri = quote(downstream_request_path)
canonical_querystring = self._get_canonical_querystring()
signed_headers, canonical_headers = self._get_signed_canonical_headers()
payload_hash = hashlib.sha256(self.upstream_request.body).hexdigest()
canonical_request = (
f"{canonical_method}\n"
f"{canonical_uri}\n"
f"{canonical_querystring}\n"
f"{canonical_headers}\n"
f"{signed_headers}\n"
f"{payload_hash}"
)
# ************* TASK 2: CREATE THE STRING TO SIGN*************
algorithm = "AWS4-HMAC-SHA256"
region = self._get_downstream_signing_region()
amz_date = self.upstream_request.headers["X-Amz-Date"]
date_stamp = amz_date[0:8]
credential_scope = (
f"{date_stamp}/{region}/{self.service_info.service_name}/aws4_request"
)
request_digest = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
string_to_sign = (
f"{algorithm}\n" f"{amz_date}\n" f"{credential_scope}\n" f"{request_digest}"
)
# ************* TASK 3: CALCULATE THE SIGNATURE *************
signing_key = get_signature_key(
self.credentials.secret_key,
date_stamp,
region,
self.service_info.service_name,
)
signature = hmac.new(
signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
).hexdigest()
# ************* TASK 4: BUILD THE AUTH HEADER *************
authorization_header = (
f"{algorithm} "
f"Credential={self.credentials.access_key}/{credential_scope}, "
f"SignedHeaders={signed_headers}, "
f"Signature={signature}"
)
return authorization_header
def _get_canonical_querystring(self) -> str:
canonical_query_string = ""
corrected_request_query = self.upstream_request.query.replace("+", "%20")
if corrected_request_query != "":
query_string_list = []
for item in corrected_request_query.split("&"):
query_string_part = item.split("=", maxsplit=1)
if len(query_string_part) == 2:
query_string_list.append(query_string_part)
elif len(query_string_part) == 1:
query_string_part.append("")
query_string_list.append(query_string_part)
else:
raise ValueError(f"Invalid query string split for {item}")
query_string_dict = dict(query_string_list)
sorted_q_string_list = [
f"{k}={query_string_dict[k]}" for k in sorted(query_string_dict)
]
canonical_query_string = "&".join(sorted_q_string_list)
return canonical_query_string
def _get_signed_canonical_headers(self) -> Tuple[str, str]:
canonical_headers = {}
for signed_header in self.upstream_auth_info.signed_headers:
canonical_headers[signed_header] = self.upstream_request.headers[
signed_header
]
base_service_url = urlparse(self.service_info.endpoint_url)
canonical_headers["host"] = base_service_url.netloc
if self.credentials.token:
canonical_headers["x-amz-security-token"] = self.credentials.token
canonical_headers_string = "\n".join(
[
f"{canonical_header}:{canonical_headers[canonical_header]}"
for canonical_header in sorted(canonical_headers)
]
)
canonical_headers_string += "\n"
signed_headers = ";".join(sorted(canonical_headers))
return signed_headers, canonical_headers_string
def _get_downstream_signing_region(self) -> str:
"""
Get the region to sign the downstream request for. The default is the region the request was originally
signed for, but if the service has a credentialScope override specified in the service config then that is used.
:return: the region to sign the request with.
"""
if not self.service_info.credential_scope:
return self.upstream_auth_info.region
try:
return self.service_info.credential_scope["region"]
except KeyError:
return self.upstream_auth_info.region
def _build_upstream_auth_info(self) -> UpstreamAuthInfo:
"""
Parses the upstream request's Authorization header to determine identifying information such as the region and
the service the request was originally signed for.
Sample header:
AWS4-HMAC-SHA256 \
Credential=SOMEACCESSKEY/20190814/aws_region/aws_service/aws4_request, \
SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-target;x-amz-user-agent, \
Signature=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
:return: the UpstreamAuthInfo instance
"""
auth_header_parts = self.upstream_request.headers["Authorization"].split(" ")
signed_headers = auth_header_parts[2].strip(",").split("=")[1].split(";")
_, _, region, service_name, _ = auth_header_parts[1].split("=")[1].split("/")
return UpstreamAuthInfo(service_name, region, signed_headers)
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def get_signature_key(key, date_stamp, region_name, service_name):
k_date = sign(("AWS4" + key).encode("utf-8"), date_stamp)
k_region = sign(k_date, region_name)
k_service = sign(k_region, service_name)
k_signing = sign(k_service, "aws4_request")
return k_signing
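# A minimal signing sketch using the helpers above (the secret key, date, region and
# string_to_sign values are hypothetical, following the AWS documentation example format):
#
#     signing_key = get_signature_key("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
#                                     "20190814", "us-east-1", "s3")
#     signature = hmac.new(signing_key, string_to_sign.encode("utf-8"),
#                          hashlib.sha256).hexdigest()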
|